diff --git a/Documentation/ABI/testing/sysfs-bus-iio b/Documentation/ABI/testing/sysfs-bus-iio index 680451695422..c3767d4d01a6 100644 --- a/Documentation/ABI/testing/sysfs-bus-iio +++ b/Documentation/ABI/testing/sysfs-bus-iio @@ -1566,7 +1566,8 @@ What: /sys/bus/iio/devices/iio:deviceX/in_concentrationX_voc_raw KernelVersion: 4.3 Contact: linux-iio@vger.kernel.org Description: - Raw (unscaled no offset etc.) percentage reading of a substance. + Raw (unscaled no offset etc.) reading of a substance. Units + after application of scale and offset are percents. What: /sys/bus/iio/devices/iio:deviceX/in_resistance_raw What: /sys/bus/iio/devices/iio:deviceX/in_resistanceX_raw diff --git a/Documentation/devicetree/bindings/display/mediatek/mediatek,dpi.txt b/Documentation/devicetree/bindings/display/mediatek/mediatek,dpi.txt index b6a7e7397b8b..b944fe067188 100644 --- a/Documentation/devicetree/bindings/display/mediatek/mediatek,dpi.txt +++ b/Documentation/devicetree/bindings/display/mediatek/mediatek,dpi.txt @@ -16,6 +16,9 @@ Required properties: Documentation/devicetree/bindings/graph.txt. This port should be connected to the input port of an attached HDMI or LVDS encoder chip. +Optional properties: +- pinctrl-names: Contain "default" and "sleep". + Example: dpi0: dpi@1401d000 { @@ -26,6 +29,9 @@ dpi0: dpi@1401d000 { <&mmsys CLK_MM_DPI_ENGINE>, <&apmixedsys CLK_APMIXED_TVDPLL>; clock-names = "pixel", "engine", "pll"; + pinctrl-names = "default", "sleep"; + pinctrl-0 = <&dpi_pin_func>; + pinctrl-1 = <&dpi_pin_idle>; port { dpi0_out: endpoint { diff --git a/Documentation/devicetree/bindings/iio/multiplexer/io-channel-mux.txt b/Documentation/devicetree/bindings/iio/multiplexer/io-channel-mux.txt index c82794002595..89647d714387 100644 --- a/Documentation/devicetree/bindings/iio/multiplexer/io-channel-mux.txt +++ b/Documentation/devicetree/bindings/iio/multiplexer/io-channel-mux.txt @@ -21,7 +21,7 @@ controller state. The mux controller state is described in Example: mux: mux-controller { - compatible = "mux-gpio"; + compatible = "gpio-mux"; #mux-control-cells = <0>; mux-gpios = <&pioA 0 GPIO_ACTIVE_HIGH>, diff --git a/Documentation/devicetree/bindings/mailbox/xlnx,zynqmp-ipi-mailbox.txt b/Documentation/devicetree/bindings/mailbox/xlnx,zynqmp-ipi-mailbox.txt index 4438432bfe9b..ad76edccf881 100644 --- a/Documentation/devicetree/bindings/mailbox/xlnx,zynqmp-ipi-mailbox.txt +++ b/Documentation/devicetree/bindings/mailbox/xlnx,zynqmp-ipi-mailbox.txt @@ -87,7 +87,7 @@ Example: ranges; /* APU<->RPU0 IPI mailbox controller */ - ipi_mailbox_rpu0: mailbox@ff90400 { + ipi_mailbox_rpu0: mailbox@ff990400 { reg = <0xff990400 0x20>, <0xff990420 0x20>, <0xff990080 0x20>, diff --git a/Documentation/devicetree/bindings/mmc/mtk-sd.txt b/Documentation/devicetree/bindings/mmc/mtk-sd.txt index 8a532f4453f2..09aecec47003 100644 --- a/Documentation/devicetree/bindings/mmc/mtk-sd.txt +++ b/Documentation/devicetree/bindings/mmc/mtk-sd.txt @@ -49,6 +49,8 @@ Optional properties: error caused by stop clock(fifo full) Valid range = [0:0x7]. if not present, default value is 0. applied to compatible "mediatek,mt2701-mmc". +- resets: Phandle and reset specifier pair to softreset line of MSDC IP. +- reset-names: Should be "hrst". 
Examples: mmc0: mmc@11230000 { diff --git a/Documentation/devicetree/bindings/mmc/nvidia,tegra20-sdhci.txt b/Documentation/devicetree/bindings/mmc/nvidia,tegra20-sdhci.txt index 2cf3affa1be7..96c0b1440c9c 100644 --- a/Documentation/devicetree/bindings/mmc/nvidia,tegra20-sdhci.txt +++ b/Documentation/devicetree/bindings/mmc/nvidia,tegra20-sdhci.txt @@ -15,8 +15,15 @@ Required properties: - "nvidia,tegra210-sdhci": for Tegra210 - "nvidia,tegra186-sdhci": for Tegra186 - "nvidia,tegra194-sdhci": for Tegra194 -- clocks : Must contain one entry, for the module clock. - See ../clocks/clock-bindings.txt for details. +- clocks: For Tegra210, Tegra186 and Tegra194 must contain two entries. + One for the module clock and one for the timeout clock. + For all other Tegra devices, must contain a single entry for + the module clock. See ../clocks/clock-bindings.txt for details. +- clock-names: For Tegra210, Tegra186 and Tegra194 must contain the + strings 'sdhci' and 'tmclk' to represent the module and + the timeout clocks, respectively. + For all other Tegra devices must contain the string 'sdhci' + to represent the module clock. - resets : Must contain an entry for each entry in reset-names. See ../reset/reset.txt for details. - reset-names : Must include the following entries: @@ -99,7 +106,7 @@ Optional properties for Tegra210, Tegra186 and Tegra194: Example: sdhci@700b0000 { - compatible = "nvidia,tegra210-sdhci", "nvidia,tegra124-sdhci"; + compatible = "nvidia,tegra124-sdhci"; reg = <0x0 0x700b0000 0x0 0x200>; interrupts = ; clocks = <&tegra_car TEGRA210_CLK_SDMMC1>; @@ -115,3 +122,22 @@ sdhci@700b0000 { nvidia,pad-autocal-pull-down-offset-1v8 = <0x7b>; status = "disabled"; }; + +sdhci@700b0000 { + compatible = "nvidia,tegra210-sdhci"; + reg = <0x0 0x700b0000 0x0 0x200>; + interrupts = ; + clocks = <&tegra_car TEGRA210_CLK_SDMMC1>, + <&tegra_car TEGRA210_CLK_SDMMC_LEGACY>; + clock-names = "sdhci", "tmclk"; + resets = <&tegra_car 14>; + reset-names = "sdhci"; + pinctrl-names = "sdmmc-3v3", "sdmmc-1v8"; + pinctrl-0 = <&sdmmc1_3v3>; + pinctrl-1 = <&sdmmc1_1v8>; + nvidia,pad-autocal-pull-up-offset-3v3 = <0x00>; + nvidia,pad-autocal-pull-down-offset-3v3 = <0x7d>; + nvidia,pad-autocal-pull-up-offset-1v8 = <0x7b>; + nvidia,pad-autocal-pull-down-offset-1v8 = <0x7b>; + status = "disabled"; +}; diff --git a/Documentation/devicetree/bindings/usb/dwc3.txt b/Documentation/devicetree/bindings/usb/dwc3.txt index e327abd0da60..70876c55a11c 100644 --- a/Documentation/devicetree/bindings/usb/dwc3.txt +++ b/Documentation/devicetree/bindings/usb/dwc3.txt @@ -89,6 +89,8 @@ Optional properties: from P0 to P1/P2/P3 without delay. - snps,dis-tx-ipgap-linecheck-quirk: when set, disable u2mac linestate check during HS transmit. + - snps,parkmode-disable-ss-quirk: when set, all SuperSpeed bus instances in + park mode are disabled. - snps,dis_metastability_quirk: when set, disable metastability workaround. CAUTION: use only if you are absolutely sure of it. - snps,is-utmi-l1-suspend: true when DWC3 asserts output signal diff --git a/Documentation/filesystems/affs.txt b/Documentation/filesystems/affs.txt index 71b63c2b9841..a8f1a58e3692 100644 --- a/Documentation/filesystems/affs.txt +++ b/Documentation/filesystems/affs.txt @@ -93,13 +93,15 @@ The Amiga protection flags RWEDRWEDHSPARWED are handled as follows: - R maps to r for user, group and others. On directories, R implies x. - - If both W and D are allowed, w will be set. + - W maps to w. - E maps to x. - - H and P are always retained and ignored under Linux. 
+ - D is ignored. - - A is always reset when a file is written to. + - H, S and P are always retained and ignored under Linux. + + - A is cleared when a file is written to. User id and group id will be used unless set[gu]id are given as mount options. Since most of the Amiga file systems are single user systems @@ -111,11 +113,13 @@ Linux -> Amiga: The Linux rwxrwxrwx file mode is handled as follows: - - r permission will set R for user, group and others. + - r permission will allow R for user, group and others. - - w permission will set W and D for user, group and others. + - w permission will allow W for user, group and others. - - x permission of the user will set E for plain files. + - x permission of the user will allow E for plain files. + + - D will be allowed for user, group and others. - All other flags (suid, sgid, ...) are ignored and will not be retained. diff --git a/Documentation/kbuild/index.rst b/Documentation/kbuild/index.rst index 0f144fad99a6..3882bd5f7728 100644 --- a/Documentation/kbuild/index.rst +++ b/Documentation/kbuild/index.rst @@ -19,6 +19,7 @@ Kernel Build System issues reproducible-builds + llvm .. only:: subproject and html diff --git a/Documentation/kbuild/kbuild.rst b/Documentation/kbuild/kbuild.rst index f1e5dce86af7..852ccc551bb3 100644 --- a/Documentation/kbuild/kbuild.rst +++ b/Documentation/kbuild/kbuild.rst @@ -262,3 +262,8 @@ KBUILD_BUILD_USER, KBUILD_BUILD_HOST These two variables allow to override the user@host string displayed during boot and in /proc/version. The default value is the output of the commands whoami and host, respectively. + +LLVM +---- +If this variable is set to 1, Kbuild will use Clang and LLVM utilities instead +of GCC and GNU binutils to build the kernel. diff --git a/Documentation/kbuild/llvm.rst b/Documentation/kbuild/llvm.rst new file mode 100644 index 000000000000..c776b6eee969 --- /dev/null +++ b/Documentation/kbuild/llvm.rst @@ -0,0 +1,87 @@ +============================== +Building Linux with Clang/LLVM +============================== + +This document covers how to build the Linux kernel with Clang and LLVM +utilities. + +About +----- + +The Linux kernel has always traditionally been compiled with GNU toolchains +such as GCC and binutils. Ongoing work has allowed for `Clang +`_ and `LLVM `_ utilities to be +used as viable substitutes. Distributions such as `Android +`_, `ChromeOS +`_, and `OpenMandriva +`_ use Clang built kernels. `LLVM is a +collection of toolchain components implemented in terms of C++ objects +`_. Clang is a front-end to LLVM that +supports C and the GNU C extensions required by the kernel, and is pronounced +"klang," not "see-lang." + +Clang +----- + +The compiler used can be swapped out via `CC=` command line argument to `make`. +`CC=` should be set when selecting a config and during a build. + + make CC=clang defconfig + + make CC=clang + +Cross Compiling +--------------- + +A single Clang compiler binary will typically contain all supported backends, +which can help simplify cross compiling. + + ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- make CC=clang + +`CROSS_COMPILE` is not used to prefix the Clang compiler binary, instead +`CROSS_COMPILE` is used to set a command line flag: `--target `. For +example: + + clang --target aarch64-linux-gnu foo.c + +LLVM Utilities +-------------- + +LLVM has substitutes for GNU binutils utilities. Kbuild supports `LLVM=1` +to enable them. + + make LLVM=1 + +They can be enabled individually. 
The full list of the parameters: + + make CC=clang LD=ld.lld AR=llvm-ar NM=llvm-nm STRIP=llvm-strip \\ + OBJCOPY=llvm-objcopy OBJDUMP=llvm-objdump OBJSIZE=llvm-size \\ + READELF=llvm-readelf HOSTCC=clang HOSTCXX=clang++ HOSTAR=llvm-ar \\ + HOSTLD=ld.lld + +Currently, the integrated assembler is disabled by default. You can pass +`LLVM_IAS=1` to enable it. + +Getting Help +------------ + +- `Website `_ +- `Mailing List `_: +- `Issue Tracker `_ +- IRC: #clangbuiltlinux on chat.freenode.net +- `Telegram `_: @ClangBuiltLinux +- `Wiki `_ +- `Beginner Bugs `_ + +Getting LLVM +------------- + +- http://releases.llvm.org/download.html +- https://github.com/llvm/llvm-project +- https://llvm.org/docs/GettingStarted.html +- https://llvm.org/docs/CMake.html +- https://apt.llvm.org/ +- https://www.archlinux.org/packages/extra/x86_64/llvm/ +- https://github.com/ClangBuiltLinux/tc-build +- https://github.com/ClangBuiltLinux/linux/wiki/Building-Clang-from-source +- https://android.googlesource.com/platform/prebuilts/clang/host/linux-x86/ diff --git a/Documentation/virt/kvm/api.txt b/Documentation/virt/kvm/api.txt index 4833904d32a5..a18e996fa54b 100644 --- a/Documentation/virt/kvm/api.txt +++ b/Documentation/virt/kvm/api.txt @@ -4444,9 +4444,11 @@ EOI was received. #define KVM_EXIT_HYPERV_SYNIC 1 #define KVM_EXIT_HYPERV_HCALL 2 __u32 type; + __u32 pad1; union { struct { __u32 msr; + __u32 pad2; __u64 control; __u64 evt_page; __u64 msg_page; diff --git a/MAINTAINERS b/MAINTAINERS index ab8ef4498880..b47719da52b7 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -4028,6 +4028,7 @@ B: https://github.com/ClangBuiltLinux/linux/issues C: irc://chat.freenode.net/clangbuiltlinux S: Supported K: \b(?i:clang|llvm)\b +F: Documentation/kbuild/llvm.rst CLEANCACHE API M: Konrad Rzeszutek Wilk diff --git a/Makefile b/Makefile index 1da2944b842e..d2e46ca4c955 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 VERSION = 5 PATCHLEVEL = 4 -SUBLEVEL = 47 +SUBLEVEL = 67 EXTRAVERSION = NAME = Kleptomaniac Octopus @@ -394,8 +394,13 @@ HOST_LFS_CFLAGS := $(shell getconf LFS_CFLAGS 2>/dev/null) HOST_LFS_LDFLAGS := $(shell getconf LFS_LDFLAGS 2>/dev/null) HOST_LFS_LIBS := $(shell getconf LFS_LIBS 2>/dev/null) -HOSTCC = gcc -HOSTCXX = g++ +ifneq ($(LLVM),) +HOSTCC = clang +HOSTCXX = clang++ +else +HOSTCC = gcc +HOSTCXX = g++ +endif KBUILD_HOSTCFLAGS := -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 \ -fomit-frame-pointer -std=gnu89 $(HOST_LFS_CFLAGS) \ $(HOSTCFLAGS) @@ -404,16 +409,28 @@ KBUILD_HOSTLDFLAGS := $(HOST_LFS_LDFLAGS) $(HOSTLDFLAGS) KBUILD_HOSTLDLIBS := $(HOST_LFS_LIBS) $(HOSTLDLIBS) # Make variables (CC, etc...) 
-AS = $(CROSS_COMPILE)as -LD = $(CROSS_COMPILE)ld -CC = $(CROSS_COMPILE)gcc CPP = $(CC) -E +ifneq ($(LLVM),) +CC = clang +LD = ld.lld +AR = llvm-ar +NM = llvm-nm +OBJCOPY = llvm-objcopy +OBJDUMP = llvm-objdump +READELF = llvm-readelf +OBJSIZE = llvm-size +STRIP = llvm-strip +else +CC = $(CROSS_COMPILE)gcc +LD = $(CROSS_COMPILE)ld AR = $(CROSS_COMPILE)ar NM = $(CROSS_COMPILE)nm -STRIP = $(CROSS_COMPILE)strip OBJCOPY = $(CROSS_COMPILE)objcopy OBJDUMP = $(CROSS_COMPILE)objdump +READELF = $(CROSS_COMPILE)readelf OBJSIZE = $(CROSS_COMPILE)size +STRIP = $(CROSS_COMPILE)strip +endif PAHOLE = pahole LEX = flex YACC = bison @@ -422,10 +439,15 @@ INSTALLKERNEL := installkernel DEPMOD = /sbin/depmod PERL = perl PYTHON = python -PYTHON2 = python2 PYTHON3 = python3 CHECK = sparse BASH = bash +KGZIP = gzip +KBZIP2 = bzip2 +KLZOP = lzop +LZMA = lzma +LZ4 = lz4c +XZ = xz CHECKFLAGS := -D__linux__ -Dlinux -D__STDC__ -Dunix -D__unix__ \ -Wbitwise -Wno-return-void -Wno-unknown-attribute $(CF) @@ -471,9 +493,10 @@ KBUILD_LDFLAGS := GCC_PLUGINS_CFLAGS := CLANG_FLAGS := -export ARCH SRCARCH CONFIG_SHELL BASH HOSTCC KBUILD_HOSTCFLAGS CROSS_COMPILE AS LD CC -export CPP AR NM STRIP OBJCOPY OBJDUMP OBJSIZE PAHOLE LEX YACC AWK INSTALLKERNEL -export PERL PYTHON PYTHON2 PYTHON3 CHECK CHECKFLAGS MAKE UTS_MACHINE HOSTCXX +export ARCH SRCARCH CONFIG_SHELL BASH HOSTCC KBUILD_HOSTCFLAGS CROSS_COMPILE LD CC +export CPP AR NM STRIP OBJCOPY OBJDUMP OBJSIZE READELF PAHOLE LEX YACC AWK INSTALLKERNEL +export PERL PYTHON PYTHON3 CHECK CHECKFLAGS MAKE UTS_MACHINE HOSTCXX +export KGZIP KBZIP2 KLZOP LZMA LZ4 XZ export KBUILD_HOSTCXXFLAGS KBUILD_HOSTLDFLAGS KBUILD_HOSTLDLIBS LDFLAGS_MODULE export KBUILD_CPPFLAGS NOSTDINC_FLAGS LINUXINCLUDE OBJCOPYFLAGS KBUILD_LDFLAGS @@ -528,13 +551,13 @@ ifneq ($(shell $(CC) --version 2>&1 | head -n 1 | grep clang),) ifneq ($(CROSS_COMPILE),) CLANG_FLAGS += --target=$(notdir $(CROSS_COMPILE:%-=%)) GCC_TOOLCHAIN_DIR := $(dir $(shell which $(CROSS_COMPILE)elfedit)) -CLANG_FLAGS += --prefix=$(GCC_TOOLCHAIN_DIR) +CLANG_FLAGS += --prefix=$(GCC_TOOLCHAIN_DIR)$(notdir $(CROSS_COMPILE)) GCC_TOOLCHAIN := $(realpath $(GCC_TOOLCHAIN_DIR)/..) endif ifneq ($(GCC_TOOLCHAIN),) CLANG_FLAGS += --gcc-toolchain=$(GCC_TOOLCHAIN) endif -ifeq ($(shell $(AS) --version 2>&1 | head -n 1 | grep clang),) +ifneq ($(LLVM_IAS),1) CLANG_FLAGS += -no-integrated-as endif CLANG_FLAGS += -Werror=unknown-warning-option @@ -587,12 +610,8 @@ KBUILD_MODULES := KBUILD_BUILTIN := 1 # If we have only "make modules", don't compile built-in objects. -# When we're building modules with modversions, we need to consider -# the built-in objects during the descend as well, in order to -# make sure the checksums are up to date before we record them. 
- ifeq ($(MAKECMDGOALS),modules) - KBUILD_BUILTIN := $(if $(CONFIG_MODVERSIONS),1) + KBUILD_BUILTIN := endif # If we have "make modules", compile modules @@ -985,10 +1004,10 @@ export mod_strip_cmd mod_compress_cmd = true ifdef CONFIG_MODULE_COMPRESS ifdef CONFIG_MODULE_COMPRESS_GZIP - mod_compress_cmd = gzip -n -f + mod_compress_cmd = $(KGZIP) -n -f endif # CONFIG_MODULE_COMPRESS_GZIP ifdef CONFIG_MODULE_COMPRESS_XZ - mod_compress_cmd = xz -f + mod_compress_cmd = $(XZ) -f endif # CONFIG_MODULE_COMPRESS_XZ endif # CONFIG_MODULE_COMPRESS export mod_compress_cmd @@ -1282,6 +1301,13 @@ ifdef CONFIG_MODULES all: modules +# When we're building modules with modversions, we need to consider +# the built-in objects during the descend as well, in order to +# make sure the checksums are up to date before we record them. +ifdef CONFIG_MODVERSIONS + KBUILD_BUILTIN := 1 +endif + # Build modules # # A module can be listed more than once in obj-m resulting in diff --git a/arch/alpha/configs/defconfig b/arch/alpha/configs/defconfig index f4ec420d7f2d..3a132c91d45b 100644 --- a/arch/alpha/configs/defconfig +++ b/arch/alpha/configs/defconfig @@ -36,7 +36,6 @@ CONFIG_BLK_DEV_CY82C693=y CONFIG_SCSI=y CONFIG_BLK_DEV_SD=y CONFIG_BLK_DEV_SR=y -CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_SCSI_AIC7XXX=m CONFIG_AIC7XXX_CMDS_PER_DEVICE=253 # CONFIG_AIC7XXX_DEBUG_ENABLE is not set diff --git a/arch/alpha/include/asm/io.h b/arch/alpha/include/asm/io.h index af2c0063dc75..103270d5a9fc 100644 --- a/arch/alpha/include/asm/io.h +++ b/arch/alpha/include/asm/io.h @@ -322,14 +322,18 @@ static inline int __is_mmio(const volatile void __iomem *addr) #if IO_CONCAT(__IO_PREFIX,trivial_io_bw) extern inline unsigned int ioread8(void __iomem *addr) { - unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread8)(addr); + unsigned int ret; + mb(); + ret = IO_CONCAT(__IO_PREFIX,ioread8)(addr); mb(); return ret; } extern inline unsigned int ioread16(void __iomem *addr) { - unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread16)(addr); + unsigned int ret; + mb(); + ret = IO_CONCAT(__IO_PREFIX,ioread16)(addr); mb(); return ret; } @@ -370,7 +374,9 @@ extern inline void outw(u16 b, unsigned long port) #if IO_CONCAT(__IO_PREFIX,trivial_io_lq) extern inline unsigned int ioread32(void __iomem *addr) { - unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread32)(addr); + unsigned int ret; + mb(); + ret = IO_CONCAT(__IO_PREFIX,ioread32)(addr); mb(); return ret; } @@ -415,14 +421,18 @@ extern inline void __raw_writew(u16 b, volatile void __iomem *addr) extern inline u8 readb(const volatile void __iomem *addr) { - u8 ret = __raw_readb(addr); + u8 ret; + mb(); + ret = __raw_readb(addr); mb(); return ret; } extern inline u16 readw(const volatile void __iomem *addr) { - u16 ret = __raw_readw(addr); + u16 ret; + mb(); + ret = __raw_readw(addr); mb(); return ret; } @@ -463,14 +473,18 @@ extern inline void __raw_writeq(u64 b, volatile void __iomem *addr) extern inline u32 readl(const volatile void __iomem *addr) { - u32 ret = __raw_readl(addr); + u32 ret; + mb(); + ret = __raw_readl(addr); mb(); return ret; } extern inline u64 readq(const volatile void __iomem *addr) { - u64 ret = __raw_readq(addr); + u64 ret; + mb(); + ret = __raw_readq(addr); mb(); return ret; } @@ -488,10 +502,10 @@ extern inline void writeq(u64 b, volatile void __iomem *addr) } #endif -#define ioread16be(p) be16_to_cpu(ioread16(p)) -#define ioread32be(p) be32_to_cpu(ioread32(p)) -#define iowrite16be(v,p) iowrite16(cpu_to_be16(v), (p)) -#define iowrite32be(v,p) iowrite32(cpu_to_be32(v), (p)) +#define ioread16be(p) 
swab16(ioread16(p)) +#define ioread32be(p) swab32(ioread32(p)) +#define iowrite16be(v,p) iowrite16(swab16(v), (p)) +#define iowrite32be(v,p) iowrite32(swab32(v), (p)) #define inb_p inb #define inw_p inw @@ -499,14 +513,44 @@ extern inline void writeq(u64 b, volatile void __iomem *addr) #define outb_p outb #define outw_p outw #define outl_p outl -#define readb_relaxed(addr) __raw_readb(addr) -#define readw_relaxed(addr) __raw_readw(addr) -#define readl_relaxed(addr) __raw_readl(addr) -#define readq_relaxed(addr) __raw_readq(addr) -#define writeb_relaxed(b, addr) __raw_writeb(b, addr) -#define writew_relaxed(b, addr) __raw_writew(b, addr) -#define writel_relaxed(b, addr) __raw_writel(b, addr) -#define writeq_relaxed(b, addr) __raw_writeq(b, addr) + +extern u8 readb_relaxed(const volatile void __iomem *addr); +extern u16 readw_relaxed(const volatile void __iomem *addr); +extern u32 readl_relaxed(const volatile void __iomem *addr); +extern u64 readq_relaxed(const volatile void __iomem *addr); + +#if IO_CONCAT(__IO_PREFIX,trivial_io_bw) +extern inline u8 readb_relaxed(const volatile void __iomem *addr) +{ + mb(); + return __raw_readb(addr); +} + +extern inline u16 readw_relaxed(const volatile void __iomem *addr) +{ + mb(); + return __raw_readw(addr); +} +#endif + +#if IO_CONCAT(__IO_PREFIX,trivial_io_lq) +extern inline u32 readl_relaxed(const volatile void __iomem *addr) +{ + mb(); + return __raw_readl(addr); +} + +extern inline u64 readq_relaxed(const volatile void __iomem *addr) +{ + mb(); + return __raw_readq(addr); +} +#endif + +#define writeb_relaxed writeb +#define writew_relaxed writew +#define writel_relaxed writel +#define writeq_relaxed writeq /* * String version of IO memory access ops: diff --git a/arch/alpha/kernel/io.c b/arch/alpha/kernel/io.c index c025a3e5e357..938de13adfbf 100644 --- a/arch/alpha/kernel/io.c +++ b/arch/alpha/kernel/io.c @@ -16,21 +16,27 @@ unsigned int ioread8(void __iomem *addr) { - unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread8)(addr); + unsigned int ret; + mb(); + ret = IO_CONCAT(__IO_PREFIX,ioread8)(addr); mb(); return ret; } unsigned int ioread16(void __iomem *addr) { - unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread16)(addr); + unsigned int ret; + mb(); + ret = IO_CONCAT(__IO_PREFIX,ioread16)(addr); mb(); return ret; } unsigned int ioread32(void __iomem *addr) { - unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread32)(addr); + unsigned int ret; + mb(); + ret = IO_CONCAT(__IO_PREFIX,ioread32)(addr); mb(); return ret; } @@ -148,28 +154,36 @@ EXPORT_SYMBOL(__raw_writeq); u8 readb(const volatile void __iomem *addr) { - u8 ret = __raw_readb(addr); + u8 ret; + mb(); + ret = __raw_readb(addr); mb(); return ret; } u16 readw(const volatile void __iomem *addr) { - u16 ret = __raw_readw(addr); + u16 ret; + mb(); + ret = __raw_readw(addr); mb(); return ret; } u32 readl(const volatile void __iomem *addr) { - u32 ret = __raw_readl(addr); + u32 ret; + mb(); + ret = __raw_readl(addr); mb(); return ret; } u64 readq(const volatile void __iomem *addr) { - u64 ret = __raw_readq(addr); + u64 ret; + mb(); + ret = __raw_readq(addr); mb(); return ret; } @@ -207,6 +221,38 @@ EXPORT_SYMBOL(writew); EXPORT_SYMBOL(writel); EXPORT_SYMBOL(writeq); +/* + * The _relaxed functions must be ordered w.r.t. each other, but they don't + * have to be ordered w.r.t. other memory accesses. 
+ */ +u8 readb_relaxed(const volatile void __iomem *addr) +{ + mb(); + return __raw_readb(addr); +} + +u16 readw_relaxed(const volatile void __iomem *addr) +{ + mb(); + return __raw_readw(addr); +} + +u32 readl_relaxed(const volatile void __iomem *addr) +{ + mb(); + return __raw_readl(addr); +} + +u64 readq_relaxed(const volatile void __iomem *addr) +{ + mb(); + return __raw_readq(addr); +} + +EXPORT_SYMBOL(readb_relaxed); +EXPORT_SYMBOL(readw_relaxed); +EXPORT_SYMBOL(readl_relaxed); +EXPORT_SYMBOL(readq_relaxed); /* * Read COUNT 8-bit bytes from port PORT into memory starting at SRC. diff --git a/arch/arc/boot/dts/hsdk.dts b/arch/arc/boot/dts/hsdk.dts index 9acbeba832c0..dcaa44e408ac 100644 --- a/arch/arc/boot/dts/hsdk.dts +++ b/arch/arc/boot/dts/hsdk.dts @@ -88,6 +88,8 @@ arcpct: pct { compatible = "snps,archs-pct"; + interrupt-parent = <&cpu_intc>; + interrupts = <20>; }; /* TIMER0 with interrupt for clockevent */ @@ -208,7 +210,7 @@ reg = <0x8000 0x2000>; interrupts = <10>; interrupt-names = "macirq"; - phy-mode = "rgmii"; + phy-mode = "rgmii-id"; snps,pbl = <32>; snps,multicast-filter-bins = <256>; clocks = <&gmacclk>; @@ -226,7 +228,7 @@ #address-cells = <1>; #size-cells = <0>; compatible = "snps,dwmac-mdio"; - phy0: ethernet-phy@0 { + phy0: ethernet-phy@0 { /* Micrel KSZ9031 */ reg = <0>; }; }; diff --git a/arch/arc/include/asm/elf.h b/arch/arc/include/asm/elf.h index c77a0e3671ac..0284ace0e1ab 100644 --- a/arch/arc/include/asm/elf.h +++ b/arch/arc/include/asm/elf.h @@ -19,7 +19,7 @@ #define R_ARC_32_PCREL 0x31 /*to set parameters in the core dumps */ -#define ELF_ARCH EM_ARCOMPACT +#define ELF_ARCH EM_ARC_INUSE #define ELF_CLASS ELFCLASS32 #ifdef CONFIG_CPU_BIG_ENDIAN diff --git a/arch/arc/kernel/entry.S b/arch/arc/kernel/entry.S index 72be01270e24..ea74a1eee5d9 100644 --- a/arch/arc/kernel/entry.S +++ b/arch/arc/kernel/entry.S @@ -153,7 +153,6 @@ END(EV_Extension) tracesys: ; save EFA in case tracer wants the PC of traced task ; using ERET won't work since next-PC has already committed - lr r12, [efa] GET_CURR_TASK_FIELD_PTR TASK_THREAD, r11 st r12, [r11, THREAD_FAULT_ADDR] ; thread.fault_address @@ -196,15 +195,9 @@ tracesys_exit: ; Breakpoint TRAP ; --------------------------------------------- trap_with_param: - - ; stop_pc info by gdb needs this info - lr r0, [efa] + mov r0, r12 ; EFA in case ptracer/gdb wants stop_pc mov r1, sp - ; Now that we have read EFA, it is safe to do "fake" rtie - ; and get out of CPU exception mode - FAKE_RET_FROM_EXCPN - ; Save callee regs in case gdb wants to have a look ; SP will grow up by size of CALLEE Reg-File ; NOTE: clobbers r12 @@ -231,6 +224,10 @@ ENTRY(EV_Trap) EXCEPTION_PROLOGUE + lr r12, [efa] + + FAKE_RET_FROM_EXCPN + ;============ TRAP 1 :breakpoints ; Check ECR for trap with arg (PROLOGUE ensures r10 has ECR) bmsk.f 0, r10, 7 @@ -238,9 +235,6 @@ ENTRY(EV_Trap) ;============ TRAP (no param): syscall top level - ; First return from Exception to pure K mode (Exception/IRQs renabled) - FAKE_RET_FROM_EXCPN - ; If syscall tracing ongoing, invoke pre-post-hooks GET_CURR_THR_INFO_FLAGS r10 btst r10, TIF_SYSCALL_TRACE diff --git a/arch/arc/kernel/perf_event.c b/arch/arc/kernel/perf_event.c index 661fd842ea97..79849f37e782 100644 --- a/arch/arc/kernel/perf_event.c +++ b/arch/arc/kernel/perf_event.c @@ -562,7 +562,7 @@ static int arc_pmu_device_probe(struct platform_device *pdev) { struct arc_reg_pct_build pct_bcr; struct arc_reg_cc_build cc_bcr; - int i, has_interrupts; + int i, has_interrupts, irq; int counter_size; /* in bits */ union 
cc_name { @@ -637,13 +637,7 @@ static int arc_pmu_device_probe(struct platform_device *pdev) .attr_groups = arc_pmu->attr_groups, }; - if (has_interrupts) { - int irq = platform_get_irq(pdev, 0); - - if (irq < 0) { - pr_err("Cannot get IRQ number for the platform\n"); - return -ENODEV; - } + if (has_interrupts && (irq = platform_get_irq(pdev, 0) >= 0)) { arc_pmu->irq = irq; @@ -652,9 +646,9 @@ static int arc_pmu_device_probe(struct platform_device *pdev) this_cpu_ptr(&arc_pmu_cpu)); on_each_cpu(arc_cpu_pmu_irq_init, &irq, 1); - - } else + } else { arc_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT; + } /* * perf parser doesn't really like '-' symbol in events name, so let's diff --git a/arch/arc/plat-eznps/include/plat/ctop.h b/arch/arc/plat-eznps/include/plat/ctop.h index a4a61531c7fb..77712c5ffe84 100644 --- a/arch/arc/plat-eznps/include/plat/ctop.h +++ b/arch/arc/plat-eznps/include/plat/ctop.h @@ -33,7 +33,6 @@ #define CTOP_AUX_DPC (CTOP_AUX_BASE + 0x02C) #define CTOP_AUX_LPC (CTOP_AUX_BASE + 0x030) #define CTOP_AUX_EFLAGS (CTOP_AUX_BASE + 0x080) -#define CTOP_AUX_IACK (CTOP_AUX_BASE + 0x088) #define CTOP_AUX_GPA1 (CTOP_AUX_BASE + 0x08C) #define CTOP_AUX_UDMC (CTOP_AUX_BASE + 0x300) diff --git a/arch/arm/boot/deflate_xip_data.sh b/arch/arm/boot/deflate_xip_data.sh index 40937248cebe..304495c3c2c5 100755 --- a/arch/arm/boot/deflate_xip_data.sh +++ b/arch/arm/boot/deflate_xip_data.sh @@ -56,7 +56,7 @@ trap 'rm -f "$XIPIMAGE.tmp"; exit 1' 1 2 3 # substitute the data section by a compressed version $DD if="$XIPIMAGE" count=$data_start iflag=count_bytes of="$XIPIMAGE.tmp" $DD if="$XIPIMAGE" skip=$data_start iflag=skip_bytes | -gzip -9 >> "$XIPIMAGE.tmp" +$KGZIP -9 >> "$XIPIMAGE.tmp" # replace kernel binary mv -f "$XIPIMAGE.tmp" "$XIPIMAGE" diff --git a/arch/arm/boot/dts/am335x-pocketbeagle.dts b/arch/arm/boot/dts/am335x-pocketbeagle.dts index ff4f919d22f6..abf2badce53d 100644 --- a/arch/arm/boot/dts/am335x-pocketbeagle.dts +++ b/arch/arm/boot/dts/am335x-pocketbeagle.dts @@ -88,7 +88,6 @@ AM33XX_PADCONF(AM335X_PIN_MMC0_DAT3, PIN_INPUT_PULLUP, MUX_MODE0) AM33XX_PADCONF(AM335X_PIN_MMC0_CMD, PIN_INPUT_PULLUP, MUX_MODE0) AM33XX_PADCONF(AM335X_PIN_MMC0_CLK, PIN_INPUT_PULLUP, MUX_MODE0) - AM33XX_PADCONF(AM335X_PIN_MCASP0_ACLKR, PIN_INPUT, MUX_MODE4) /* (B12) mcasp0_aclkr.mmc0_sdwp */ >; }; diff --git a/arch/arm/boot/dts/am437x-l4.dtsi b/arch/arm/boot/dts/am437x-l4.dtsi index 59770dd3785e..bbe15775fccd 100644 --- a/arch/arm/boot/dts/am437x-l4.dtsi +++ b/arch/arm/boot/dts/am437x-l4.dtsi @@ -1576,8 +1576,9 @@ reg-names = "rev"; ti,hwmods = "d_can0"; /* Domains (P, C): per_pwrdm, l4ls_clkdm */ - clocks = <&l4ls_clkctrl AM4_L4LS_D_CAN0_CLKCTRL 0>; - clock-names = "fck"; + clocks = <&l4ls_clkctrl AM4_L4LS_D_CAN0_CLKCTRL 0>, + <&dcan0_fck>; + clock-names = "fck", "osc"; #address-cells = <1>; #size-cells = <1>; ranges = <0x0 0xcc000 0x2000>; @@ -1585,6 +1586,8 @@ dcan0: can@0 { compatible = "ti,am4372-d_can", "ti,am3352-d_can"; reg = <0x0 0x2000>; + clocks = <&dcan0_fck>; + clock-names = "fck"; syscon-raminit = <&scm_conf 0x644 0>; interrupts = ; status = "disabled"; @@ -1597,8 +1600,9 @@ reg-names = "rev"; ti,hwmods = "d_can1"; /* Domains (P, C): per_pwrdm, l4ls_clkdm */ - clocks = <&l4ls_clkctrl AM4_L4LS_D_CAN1_CLKCTRL 0>; - clock-names = "fck"; + clocks = <&l4ls_clkctrl AM4_L4LS_D_CAN1_CLKCTRL 0>, + <&dcan1_fck>; + clock-names = "fck", "osc"; #address-cells = <1>; #size-cells = <1>; ranges = <0x0 0xd0000 0x2000>; @@ -1606,6 +1610,8 @@ dcan1: can@0 { compatible = "ti,am4372-d_can", "ti,am3352-d_can"; 
reg = <0x0 0x2000>; + clocks = <&dcan1_fck>; + clock-name = "fck"; syscon-raminit = <&scm_conf 0x644 1>; interrupts = ; status = "disabled"; diff --git a/arch/arm/boot/dts/armada-38x.dtsi b/arch/arm/boot/dts/armada-38x.dtsi index 3f4bb44d85f0..669da3a33d82 100644 --- a/arch/arm/boot/dts/armada-38x.dtsi +++ b/arch/arm/boot/dts/armada-38x.dtsi @@ -339,7 +339,8 @@ comphy: phy@18300 { compatible = "marvell,armada-380-comphy"; - reg = <0x18300 0x100>; + reg-names = "comphy", "conf"; + reg = <0x18300 0x100>, <0x18460 4>; #address-cells = <1>; #size-cells = <0>; diff --git a/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts b/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts index 1333a68b9373..b8db77b7f5d8 100644 --- a/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts +++ b/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts @@ -40,7 +40,7 @@ ahb { usb0: gadget@300000 { - atmel,vbus-gpio = <&pioA PIN_PA27 GPIO_ACTIVE_HIGH>; + atmel,vbus-gpio = <&pioA PIN_PB11 GPIO_ACTIVE_HIGH>; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_usba_vbus>; status = "okay"; diff --git a/arch/arm/boot/dts/bcm-hr2.dtsi b/arch/arm/boot/dts/bcm-hr2.dtsi index e35398cc60a0..dd71ab08136b 100644 --- a/arch/arm/boot/dts/bcm-hr2.dtsi +++ b/arch/arm/boot/dts/bcm-hr2.dtsi @@ -217,7 +217,7 @@ }; qspi: spi@27200 { - compatible = "brcm,spi-bcm-qspi", "brcm,spi-nsp-qspi"; + compatible = "brcm,spi-nsp-qspi", "brcm,spi-bcm-qspi"; reg = <0x027200 0x184>, <0x027000 0x124>, <0x11c408 0x004>, diff --git a/arch/arm/boot/dts/bcm-nsp.dtsi b/arch/arm/boot/dts/bcm-nsp.dtsi index da6d70f09ef1..8615d89fa469 100644 --- a/arch/arm/boot/dts/bcm-nsp.dtsi +++ b/arch/arm/boot/dts/bcm-nsp.dtsi @@ -257,10 +257,10 @@ status = "disabled"; }; - mailbox: mailbox@25000 { + mailbox: mailbox@25c00 { compatible = "brcm,iproc-fa2-mbox"; - reg = <0x25000 0x445>; - interrupts = ; + reg = <0x25c00 0x400>; + interrupts = ; #mbox-cells = <1>; brcm,rx-status-len = <32>; brcm,use-bcm-hdr; @@ -282,7 +282,7 @@ }; qspi: spi@27200 { - compatible = "brcm,spi-bcm-qspi", "brcm,spi-nsp-qspi"; + compatible = "brcm,spi-nsp-qspi", "brcm,spi-bcm-qspi"; reg = <0x027200 0x184>, <0x027000 0x124>, <0x11c408 0x004>, diff --git a/arch/arm/boot/dts/bcm5301x.dtsi b/arch/arm/boot/dts/bcm5301x.dtsi index 2d9b4dd05830..0016720ce530 100644 --- a/arch/arm/boot/dts/bcm5301x.dtsi +++ b/arch/arm/boot/dts/bcm5301x.dtsi @@ -488,7 +488,7 @@ }; spi@18029200 { - compatible = "brcm,spi-bcm-qspi", "brcm,spi-nsp-qspi"; + compatible = "brcm,spi-nsp-qspi", "brcm,spi-bcm-qspi"; reg = <0x18029200 0x184>, <0x18029000 0x124>, <0x1811b408 0x004>, diff --git a/arch/arm/boot/dts/exynos4412-galaxy-s3.dtsi b/arch/arm/boot/dts/exynos4412-galaxy-s3.dtsi index ce87d2ff27aa..4b9c4cab0314 100644 --- a/arch/arm/boot/dts/exynos4412-galaxy-s3.dtsi +++ b/arch/arm/boot/dts/exynos4412-galaxy-s3.dtsi @@ -68,7 +68,7 @@ i2c_cm36651: i2c-gpio-2 { compatible = "i2c-gpio"; - gpios = <&gpf0 0 GPIO_ACTIVE_LOW>, <&gpf0 1 GPIO_ACTIVE_LOW>; + gpios = <&gpf0 0 GPIO_ACTIVE_HIGH>, <&gpf0 1 GPIO_ACTIVE_HIGH>; i2c-gpio,delay-us = <2>; #address-cells = <1>; #size-cells = <0>; diff --git a/arch/arm/boot/dts/imx6qdl-gw551x.dtsi b/arch/arm/boot/dts/imx6qdl-gw551x.dtsi index c23ba229fd05..8c33510c9519 100644 --- a/arch/arm/boot/dts/imx6qdl-gw551x.dtsi +++ b/arch/arm/boot/dts/imx6qdl-gw551x.dtsi @@ -105,19 +105,16 @@ sound-digital { compatible = "simple-audio-card"; simple-audio-card,name = "tda1997x-audio"; + simple-audio-card,format = "i2s"; + simple-audio-card,bitclock-master = <&sound_codec>; + simple-audio-card,frame-master = <&sound_codec>; - simple-audio-card,dai-link@0 
{ - format = "i2s"; + sound_cpu: simple-audio-card,cpu { + sound-dai = <&ssi1>; + }; - cpu { - sound-dai = <&ssi2>; - }; - - codec { - bitclock-master; - frame-master; - sound-dai = <&hdmi_receiver>; - }; + sound_codec: simple-audio-card,codec { + sound-dai = <&hdmi_receiver>; }; }; }; diff --git a/arch/arm/boot/dts/imx6qdl-icore.dtsi b/arch/arm/boot/dts/imx6qdl-icore.dtsi index 7814f1ef0804..fde56f98398d 100644 --- a/arch/arm/boot/dts/imx6qdl-icore.dtsi +++ b/arch/arm/boot/dts/imx6qdl-icore.dtsi @@ -384,7 +384,7 @@ pinctrl_usbotg: usbotggrp { fsl,pins = < - MX6QDL_PAD_GPIO_1__USB_OTG_ID 0x17059 + MX6QDL_PAD_ENET_RX_ER__USB_OTG_ID 0x17059 >; }; @@ -396,6 +396,7 @@ MX6QDL_PAD_SD1_DAT1__SD1_DATA1 0x17070 MX6QDL_PAD_SD1_DAT2__SD1_DATA2 0x17070 MX6QDL_PAD_SD1_DAT3__SD1_DATA3 0x17070 + MX6QDL_PAD_GPIO_1__GPIO1_IO01 0x1b0b0 >; }; diff --git a/arch/arm/boot/dts/imx7ulp.dtsi b/arch/arm/boot/dts/imx7ulp.dtsi index 89f7b251603d..471cd61cb6de 100644 --- a/arch/arm/boot/dts/imx7ulp.dtsi +++ b/arch/arm/boot/dts/imx7ulp.dtsi @@ -576,7 +576,7 @@ clocks = <&pcc2 IMX7ULP_CLK_RGPIO2P1>, <&pcc3 IMX7ULP_CLK_PCTLC>; clock-names = "gpio", "port"; - gpio-ranges = <&iomuxc1 0 0 32>; + gpio-ranges = <&iomuxc1 0 0 20>; }; gpio_ptd: gpio@40af0000 { @@ -590,7 +590,7 @@ clocks = <&pcc2 IMX7ULP_CLK_RGPIO2P1>, <&pcc3 IMX7ULP_CLK_PCTLD>; clock-names = "gpio", "port"; - gpio-ranges = <&iomuxc1 0 32 32>; + gpio-ranges = <&iomuxc1 0 32 12>; }; gpio_pte: gpio@40b00000 { @@ -604,7 +604,7 @@ clocks = <&pcc2 IMX7ULP_CLK_RGPIO2P1>, <&pcc3 IMX7ULP_CLK_PCTLE>; clock-names = "gpio", "port"; - gpio-ranges = <&iomuxc1 0 64 32>; + gpio-ranges = <&iomuxc1 0 64 16>; }; gpio_ptf: gpio@40b10000 { @@ -618,7 +618,7 @@ clocks = <&pcc2 IMX7ULP_CLK_RGPIO2P1>, <&pcc3 IMX7ULP_CLK_PCTLF>; clock-names = "gpio", "port"; - gpio-ranges = <&iomuxc1 0 96 32>; + gpio-ranges = <&iomuxc1 0 96 20>; }; gpu: gpu@41800000 { diff --git a/arch/arm/boot/dts/logicpd-som-lv-baseboard.dtsi b/arch/arm/boot/dts/logicpd-som-lv-baseboard.dtsi index 100396f6c2fe..395e05f10d36 100644 --- a/arch/arm/boot/dts/logicpd-som-lv-baseboard.dtsi +++ b/arch/arm/boot/dts/logicpd-som-lv-baseboard.dtsi @@ -51,6 +51,8 @@ &mcbsp2 { status = "okay"; + pinctrl-names = "default"; + pinctrl-0 = <&mcbsp2_pins>; }; &charger { @@ -102,35 +104,18 @@ regulator-max-microvolt = <3300000>; }; - lcd0: display@0 { - compatible = "panel-dpi"; - label = "28"; - status = "okay"; - /* default-on; */ + lcd0: display { + /* This isn't the exact LCD, but the timings meet spec */ + compatible = "logicpd,type28"; pinctrl-names = "default"; pinctrl-0 = <&lcd_enable_pin>; - enable-gpios = <&gpio5 27 GPIO_ACTIVE_HIGH>; /* gpio155, lcd INI */ + backlight = <&bl>; + enable-gpios = <&gpio5 27 GPIO_ACTIVE_HIGH>; port { lcd_in: endpoint { remote-endpoint = <&dpi_out>; }; }; - - panel-timing { - clock-frequency = <9000000>; - hactive = <480>; - vactive = <272>; - hfront-porch = <3>; - hback-porch = <2>; - hsync-len = <42>; - vback-porch = <3>; - vfront-porch = <2>; - vsync-len = <11>; - hsync-active = <1>; - vsync-active = <1>; - de-active = <1>; - pixelclk-active = <0>; - }; }; bl: backlight { diff --git a/arch/arm/boot/dts/logicpd-torpedo-baseboard.dtsi b/arch/arm/boot/dts/logicpd-torpedo-baseboard.dtsi index 449cc7616da6..e7a8f8addb6e 100644 --- a/arch/arm/boot/dts/logicpd-torpedo-baseboard.dtsi +++ b/arch/arm/boot/dts/logicpd-torpedo-baseboard.dtsi @@ -80,6 +80,8 @@ }; &mcbsp2 { + pinctrl-names = "default"; + pinctrl-0 = <&mcbsp2_pins>; status = "okay"; }; diff --git a/arch/arm/boot/dts/ls1021a.dtsi 
b/arch/arm/boot/dts/ls1021a.dtsi index edc0fc9f4738..8a9f132a32c1 100644 --- a/arch/arm/boot/dts/ls1021a.dtsi +++ b/arch/arm/boot/dts/ls1021a.dtsi @@ -183,7 +183,7 @@ #address-cells = <1>; #size-cells = <0>; reg = <0x0 0x1550000 0x0 0x10000>, - <0x0 0x40000000 0x0 0x40000000>; + <0x0 0x40000000 0x0 0x20000000>; reg-names = "QuadSPI", "QuadSPI-memory"; interrupts = ; clock-names = "qspi_en", "qspi"; @@ -759,7 +759,7 @@ fsl,tmr-prsc = <2>; fsl,tmr-add = <0xaaaaaaab>; fsl,tmr-fiper1 = <999999995>; - fsl,tmr-fiper2 = <99990>; + fsl,tmr-fiper2 = <999999995>; fsl,max-adj = <499999999>; fsl,extts-fifo; }; diff --git a/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi b/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi index 82f7ae030600..ab91c4ebb146 100644 --- a/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi +++ b/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi @@ -13,8 +13,10 @@ #interrupt-cells = <2>; #address-cells = <1>; #size-cells = <0>; - spi-max-frequency = <3000000>; + spi-max-frequency = <9600000>; spi-cs-high; + spi-cpol; + spi-cpha; cpcap_adc: adc { compatible = "motorola,mapphone-cpcap-adc"; diff --git a/arch/arm/boot/dts/mt7623n-rfb-emmc.dts b/arch/arm/boot/dts/mt7623n-rfb-emmc.dts index b7606130ade9..0447748f9fa0 100644 --- a/arch/arm/boot/dts/mt7623n-rfb-emmc.dts +++ b/arch/arm/boot/dts/mt7623n-rfb-emmc.dts @@ -138,6 +138,7 @@ mac@1 { compatible = "mediatek,eth-mac"; reg = <1>; + phy-mode = "rgmii"; phy-handle = <&phy5>; }; diff --git a/arch/arm/boot/dts/omap3-n900.dts b/arch/arm/boot/dts/omap3-n900.dts index 7f2ddb78da5f..4227da71cc62 100644 --- a/arch/arm/boot/dts/omap3-n900.dts +++ b/arch/arm/boot/dts/omap3-n900.dts @@ -105,6 +105,14 @@ linux,code = ; linux,can-disable; }; + + machine_cover { + label = "Machine Cover"; + gpios = <&gpio6 0 GPIO_ACTIVE_LOW>; /* 160 */ + linux,input-type = ; + linux,code = ; + linux,can-disable; + }; }; isp1707: isp1707 { @@ -814,10 +822,6 @@ pinctrl-0 = <&mmc1_pins>; vmmc-supply = <&vmmc1>; bus-width = <4>; - /* For debugging, it is often good idea to remove this GPIO. - It means you can remove back cover (to reboot by removing - battery) and still use the MMC card. 
*/ - cd-gpios = <&gpio6 0 GPIO_ACTIVE_LOW>; /* 160 */ }; /* most boards use vaux3, only some old versions use vmmc2 instead */ diff --git a/arch/arm/boot/dts/omap4-duovero-parlor.dts b/arch/arm/boot/dts/omap4-duovero-parlor.dts index 8047e8cdb3af..4548d87534e3 100644 --- a/arch/arm/boot/dts/omap4-duovero-parlor.dts +++ b/arch/arm/boot/dts/omap4-duovero-parlor.dts @@ -139,7 +139,7 @@ ethernet@gpmc { reg = <5 0 0xff>; interrupt-parent = <&gpio2>; - interrupts = <12 IRQ_TYPE_EDGE_FALLING>; /* gpio_44 */ + interrupts = <12 IRQ_TYPE_LEVEL_LOW>; /* gpio_44 */ phy-mode = "mii"; diff --git a/arch/arm/boot/dts/r8a7743.dtsi b/arch/arm/boot/dts/r8a7743.dtsi index de981d629bdd..fdd267819319 100644 --- a/arch/arm/boot/dts/r8a7743.dtsi +++ b/arch/arm/boot/dts/r8a7743.dtsi @@ -338,7 +338,7 @@ #thermal-sensor-cells = <0>; }; - ipmmu_sy0: mmu@e6280000 { + ipmmu_sy0: iommu@e6280000 { compatible = "renesas,ipmmu-r8a7743", "renesas,ipmmu-vmsa"; reg = <0 0xe6280000 0 0x1000>; @@ -348,7 +348,7 @@ status = "disabled"; }; - ipmmu_sy1: mmu@e6290000 { + ipmmu_sy1: iommu@e6290000 { compatible = "renesas,ipmmu-r8a7743", "renesas,ipmmu-vmsa"; reg = <0 0xe6290000 0 0x1000>; @@ -357,7 +357,7 @@ status = "disabled"; }; - ipmmu_ds: mmu@e6740000 { + ipmmu_ds: iommu@e6740000 { compatible = "renesas,ipmmu-r8a7743", "renesas,ipmmu-vmsa"; reg = <0 0xe6740000 0 0x1000>; @@ -367,7 +367,7 @@ status = "disabled"; }; - ipmmu_mp: mmu@ec680000 { + ipmmu_mp: iommu@ec680000 { compatible = "renesas,ipmmu-r8a7743", "renesas,ipmmu-vmsa"; reg = <0 0xec680000 0 0x1000>; @@ -376,7 +376,7 @@ status = "disabled"; }; - ipmmu_mx: mmu@fe951000 { + ipmmu_mx: iommu@fe951000 { compatible = "renesas,ipmmu-r8a7743", "renesas,ipmmu-vmsa"; reg = <0 0xfe951000 0 0x1000>; @@ -386,7 +386,7 @@ status = "disabled"; }; - ipmmu_gp: mmu@e62a0000 { + ipmmu_gp: iommu@e62a0000 { compatible = "renesas,ipmmu-r8a7743", "renesas,ipmmu-vmsa"; reg = <0 0xe62a0000 0 0x1000>; diff --git a/arch/arm/boot/dts/r8a7744.dtsi b/arch/arm/boot/dts/r8a7744.dtsi index fa74a262107b..8264481bf876 100644 --- a/arch/arm/boot/dts/r8a7744.dtsi +++ b/arch/arm/boot/dts/r8a7744.dtsi @@ -338,7 +338,7 @@ #thermal-sensor-cells = <0>; }; - ipmmu_sy0: mmu@e6280000 { + ipmmu_sy0: iommu@e6280000 { compatible = "renesas,ipmmu-r8a7744", "renesas,ipmmu-vmsa"; reg = <0 0xe6280000 0 0x1000>; @@ -348,7 +348,7 @@ status = "disabled"; }; - ipmmu_sy1: mmu@e6290000 { + ipmmu_sy1: iommu@e6290000 { compatible = "renesas,ipmmu-r8a7744", "renesas,ipmmu-vmsa"; reg = <0 0xe6290000 0 0x1000>; @@ -357,7 +357,7 @@ status = "disabled"; }; - ipmmu_ds: mmu@e6740000 { + ipmmu_ds: iommu@e6740000 { compatible = "renesas,ipmmu-r8a7744", "renesas,ipmmu-vmsa"; reg = <0 0xe6740000 0 0x1000>; @@ -367,7 +367,7 @@ status = "disabled"; }; - ipmmu_mp: mmu@ec680000 { + ipmmu_mp: iommu@ec680000 { compatible = "renesas,ipmmu-r8a7744", "renesas,ipmmu-vmsa"; reg = <0 0xec680000 0 0x1000>; @@ -376,7 +376,7 @@ status = "disabled"; }; - ipmmu_mx: mmu@fe951000 { + ipmmu_mx: iommu@fe951000 { compatible = "renesas,ipmmu-r8a7744", "renesas,ipmmu-vmsa"; reg = <0 0xfe951000 0 0x1000>; @@ -386,7 +386,7 @@ status = "disabled"; }; - ipmmu_gp: mmu@e62a0000 { + ipmmu_gp: iommu@e62a0000 { compatible = "renesas,ipmmu-r8a7744", "renesas,ipmmu-vmsa"; reg = <0 0xe62a0000 0 0x1000>; diff --git a/arch/arm/boot/dts/r8a7745.dtsi b/arch/arm/boot/dts/r8a7745.dtsi index c53f7ff20695..c306713f2ab7 100644 --- a/arch/arm/boot/dts/r8a7745.dtsi +++ b/arch/arm/boot/dts/r8a7745.dtsi @@ -302,7 +302,7 @@ resets = <&cpg 407>; }; - ipmmu_sy0: mmu@e6280000 { + ipmmu_sy0: 
iommu@e6280000 { compatible = "renesas,ipmmu-r8a7745", "renesas,ipmmu-vmsa"; reg = <0 0xe6280000 0 0x1000>; @@ -312,7 +312,7 @@ status = "disabled"; }; - ipmmu_sy1: mmu@e6290000 { + ipmmu_sy1: iommu@e6290000 { compatible = "renesas,ipmmu-r8a7745", "renesas,ipmmu-vmsa"; reg = <0 0xe6290000 0 0x1000>; @@ -321,7 +321,7 @@ status = "disabled"; }; - ipmmu_ds: mmu@e6740000 { + ipmmu_ds: iommu@e6740000 { compatible = "renesas,ipmmu-r8a7745", "renesas,ipmmu-vmsa"; reg = <0 0xe6740000 0 0x1000>; @@ -331,7 +331,7 @@ status = "disabled"; }; - ipmmu_mp: mmu@ec680000 { + ipmmu_mp: iommu@ec680000 { compatible = "renesas,ipmmu-r8a7745", "renesas,ipmmu-vmsa"; reg = <0 0xec680000 0 0x1000>; @@ -340,7 +340,7 @@ status = "disabled"; }; - ipmmu_mx: mmu@fe951000 { + ipmmu_mx: iommu@fe951000 { compatible = "renesas,ipmmu-r8a7745", "renesas,ipmmu-vmsa"; reg = <0 0xfe951000 0 0x1000>; @@ -350,7 +350,7 @@ status = "disabled"; }; - ipmmu_gp: mmu@e62a0000 { + ipmmu_gp: iommu@e62a0000 { compatible = "renesas,ipmmu-r8a7745", "renesas,ipmmu-vmsa"; reg = <0 0xe62a0000 0 0x1000>; diff --git a/arch/arm/boot/dts/r8a7790.dtsi b/arch/arm/boot/dts/r8a7790.dtsi index 5a2747758f67..e3ba00a22eeb 100644 --- a/arch/arm/boot/dts/r8a7790.dtsi +++ b/arch/arm/boot/dts/r8a7790.dtsi @@ -427,7 +427,7 @@ #thermal-sensor-cells = <0>; }; - ipmmu_sy0: mmu@e6280000 { + ipmmu_sy0: iommu@e6280000 { compatible = "renesas,ipmmu-r8a7790", "renesas,ipmmu-vmsa"; reg = <0 0xe6280000 0 0x1000>; @@ -437,7 +437,7 @@ status = "disabled"; }; - ipmmu_sy1: mmu@e6290000 { + ipmmu_sy1: iommu@e6290000 { compatible = "renesas,ipmmu-r8a7790", "renesas,ipmmu-vmsa"; reg = <0 0xe6290000 0 0x1000>; @@ -446,7 +446,7 @@ status = "disabled"; }; - ipmmu_ds: mmu@e6740000 { + ipmmu_ds: iommu@e6740000 { compatible = "renesas,ipmmu-r8a7790", "renesas,ipmmu-vmsa"; reg = <0 0xe6740000 0 0x1000>; @@ -456,7 +456,7 @@ status = "disabled"; }; - ipmmu_mp: mmu@ec680000 { + ipmmu_mp: iommu@ec680000 { compatible = "renesas,ipmmu-r8a7790", "renesas,ipmmu-vmsa"; reg = <0 0xec680000 0 0x1000>; @@ -465,7 +465,7 @@ status = "disabled"; }; - ipmmu_mx: mmu@fe951000 { + ipmmu_mx: iommu@fe951000 { compatible = "renesas,ipmmu-r8a7790", "renesas,ipmmu-vmsa"; reg = <0 0xfe951000 0 0x1000>; @@ -475,7 +475,7 @@ status = "disabled"; }; - ipmmu_rt: mmu@ffc80000 { + ipmmu_rt: iommu@ffc80000 { compatible = "renesas,ipmmu-r8a7790", "renesas,ipmmu-vmsa"; reg = <0 0xffc80000 0 0x1000>; diff --git a/arch/arm/boot/dts/r8a7791.dtsi b/arch/arm/boot/dts/r8a7791.dtsi index 6f875502453c..a26f86ccc579 100644 --- a/arch/arm/boot/dts/r8a7791.dtsi +++ b/arch/arm/boot/dts/r8a7791.dtsi @@ -350,7 +350,7 @@ #thermal-sensor-cells = <0>; }; - ipmmu_sy0: mmu@e6280000 { + ipmmu_sy0: iommu@e6280000 { compatible = "renesas,ipmmu-r8a7791", "renesas,ipmmu-vmsa"; reg = <0 0xe6280000 0 0x1000>; @@ -360,7 +360,7 @@ status = "disabled"; }; - ipmmu_sy1: mmu@e6290000 { + ipmmu_sy1: iommu@e6290000 { compatible = "renesas,ipmmu-r8a7791", "renesas,ipmmu-vmsa"; reg = <0 0xe6290000 0 0x1000>; @@ -369,7 +369,7 @@ status = "disabled"; }; - ipmmu_ds: mmu@e6740000 { + ipmmu_ds: iommu@e6740000 { compatible = "renesas,ipmmu-r8a7791", "renesas,ipmmu-vmsa"; reg = <0 0xe6740000 0 0x1000>; @@ -379,7 +379,7 @@ status = "disabled"; }; - ipmmu_mp: mmu@ec680000 { + ipmmu_mp: iommu@ec680000 { compatible = "renesas,ipmmu-r8a7791", "renesas,ipmmu-vmsa"; reg = <0 0xec680000 0 0x1000>; @@ -388,7 +388,7 @@ status = "disabled"; }; - ipmmu_mx: mmu@fe951000 { + ipmmu_mx: iommu@fe951000 { compatible = "renesas,ipmmu-r8a7791", "renesas,ipmmu-vmsa"; reg = <0 
0xfe951000 0 0x1000>; @@ -398,7 +398,7 @@ status = "disabled"; }; - ipmmu_rt: mmu@ffc80000 { + ipmmu_rt: iommu@ffc80000 { compatible = "renesas,ipmmu-r8a7791", "renesas,ipmmu-vmsa"; reg = <0 0xffc80000 0 0x1000>; @@ -407,7 +407,7 @@ status = "disabled"; }; - ipmmu_gp: mmu@e62a0000 { + ipmmu_gp: iommu@e62a0000 { compatible = "renesas,ipmmu-r8a7791", "renesas,ipmmu-vmsa"; reg = <0 0xe62a0000 0 0x1000>; diff --git a/arch/arm/boot/dts/r8a7793-gose.dts b/arch/arm/boot/dts/r8a7793-gose.dts index 42f3313e6988..9f507393c375 100644 --- a/arch/arm/boot/dts/r8a7793-gose.dts +++ b/arch/arm/boot/dts/r8a7793-gose.dts @@ -339,7 +339,7 @@ reg = <0x20>; remote = <&vin1>; - port { + ports { #address-cells = <1>; #size-cells = <0>; @@ -399,7 +399,7 @@ interrupts = <2 IRQ_TYPE_LEVEL_LOW>; default-input = <0>; - port { + ports { #address-cells = <1>; #size-cells = <0>; diff --git a/arch/arm/boot/dts/r8a7793.dtsi b/arch/arm/boot/dts/r8a7793.dtsi index bf05110fac4e..fa3839795018 100644 --- a/arch/arm/boot/dts/r8a7793.dtsi +++ b/arch/arm/boot/dts/r8a7793.dtsi @@ -336,7 +336,7 @@ #thermal-sensor-cells = <0>; }; - ipmmu_sy0: mmu@e6280000 { + ipmmu_sy0: iommu@e6280000 { compatible = "renesas,ipmmu-r8a7793", "renesas,ipmmu-vmsa"; reg = <0 0xe6280000 0 0x1000>; @@ -346,7 +346,7 @@ status = "disabled"; }; - ipmmu_sy1: mmu@e6290000 { + ipmmu_sy1: iommu@e6290000 { compatible = "renesas,ipmmu-r8a7793", "renesas,ipmmu-vmsa"; reg = <0 0xe6290000 0 0x1000>; @@ -355,7 +355,7 @@ status = "disabled"; }; - ipmmu_ds: mmu@e6740000 { + ipmmu_ds: iommu@e6740000 { compatible = "renesas,ipmmu-r8a7793", "renesas,ipmmu-vmsa"; reg = <0 0xe6740000 0 0x1000>; @@ -365,7 +365,7 @@ status = "disabled"; }; - ipmmu_mp: mmu@ec680000 { + ipmmu_mp: iommu@ec680000 { compatible = "renesas,ipmmu-r8a7793", "renesas,ipmmu-vmsa"; reg = <0 0xec680000 0 0x1000>; @@ -374,7 +374,7 @@ status = "disabled"; }; - ipmmu_mx: mmu@fe951000 { + ipmmu_mx: iommu@fe951000 { compatible = "renesas,ipmmu-r8a7793", "renesas,ipmmu-vmsa"; reg = <0 0xfe951000 0 0x1000>; @@ -384,7 +384,7 @@ status = "disabled"; }; - ipmmu_rt: mmu@ffc80000 { + ipmmu_rt: iommu@ffc80000 { compatible = "renesas,ipmmu-r8a7793", "renesas,ipmmu-vmsa"; reg = <0 0xffc80000 0 0x1000>; @@ -393,7 +393,7 @@ status = "disabled"; }; - ipmmu_gp: mmu@e62a0000 { + ipmmu_gp: iommu@e62a0000 { compatible = "renesas,ipmmu-r8a7793", "renesas,ipmmu-vmsa"; reg = <0 0xe62a0000 0 0x1000>; diff --git a/arch/arm/boot/dts/r8a7794.dtsi b/arch/arm/boot/dts/r8a7794.dtsi index 8d797d34816e..9dd952479e68 100644 --- a/arch/arm/boot/dts/r8a7794.dtsi +++ b/arch/arm/boot/dts/r8a7794.dtsi @@ -290,7 +290,7 @@ resets = <&cpg 407>; }; - ipmmu_sy0: mmu@e6280000 { + ipmmu_sy0: iommu@e6280000 { compatible = "renesas,ipmmu-r8a7794", "renesas,ipmmu-vmsa"; reg = <0 0xe6280000 0 0x1000>; @@ -300,7 +300,7 @@ status = "disabled"; }; - ipmmu_sy1: mmu@e6290000 { + ipmmu_sy1: iommu@e6290000 { compatible = "renesas,ipmmu-r8a7794", "renesas,ipmmu-vmsa"; reg = <0 0xe6290000 0 0x1000>; @@ -309,7 +309,7 @@ status = "disabled"; }; - ipmmu_ds: mmu@e6740000 { + ipmmu_ds: iommu@e6740000 { compatible = "renesas,ipmmu-r8a7794", "renesas,ipmmu-vmsa"; reg = <0 0xe6740000 0 0x1000>; @@ -319,7 +319,7 @@ status = "disabled"; }; - ipmmu_mp: mmu@ec680000 { + ipmmu_mp: iommu@ec680000 { compatible = "renesas,ipmmu-r8a7794", "renesas,ipmmu-vmsa"; reg = <0 0xec680000 0 0x1000>; @@ -328,7 +328,7 @@ status = "disabled"; }; - ipmmu_mx: mmu@fe951000 { + ipmmu_mx: iommu@fe951000 { compatible = "renesas,ipmmu-r8a7794", "renesas,ipmmu-vmsa"; reg = <0 0xfe951000 0 0x1000>; @@ 
-338,7 +338,7 @@ status = "disabled"; }; - ipmmu_gp: mmu@e62a0000 { + ipmmu_gp: iommu@e62a0000 { compatible = "renesas,ipmmu-r8a7794", "renesas,ipmmu-vmsa"; reg = <0 0xe62a0000 0 0x1000>; diff --git a/arch/arm/boot/dts/s5pv210-aries.dtsi b/arch/arm/boot/dts/s5pv210-aries.dtsi index 8ff70b856334..d419b77201f7 100644 --- a/arch/arm/boot/dts/s5pv210-aries.dtsi +++ b/arch/arm/boot/dts/s5pv210-aries.dtsi @@ -454,6 +454,7 @@ pinctrl-names = "default"; cap-sd-highspeed; cap-mmc-highspeed; + keep-power-in-suspend; mmc-pwrseq = <&wifi_pwrseq>; non-removable; diff --git a/arch/arm/boot/dts/socfpga.dtsi b/arch/arm/boot/dts/socfpga.dtsi index 4f3993cc0227..451030897220 100644 --- a/arch/arm/boot/dts/socfpga.dtsi +++ b/arch/arm/boot/dts/socfpga.dtsi @@ -710,7 +710,7 @@ }; }; - L2: l2-cache@fffef000 { + L2: cache-controller@fffef000 { compatible = "arm,pl310-cache"; reg = <0xfffef000 0x1000>; interrupts = <0 38 0x04>; diff --git a/arch/arm/boot/dts/socfpga_arria10.dtsi b/arch/arm/boot/dts/socfpga_arria10.dtsi index 2a86e72d9791..f261a3344071 100644 --- a/arch/arm/boot/dts/socfpga_arria10.dtsi +++ b/arch/arm/boot/dts/socfpga_arria10.dtsi @@ -636,7 +636,7 @@ reg = <0xffcfb100 0x80>; }; - L2: l2-cache@fffff000 { + L2: cache-controller@fffff000 { compatible = "arm,pl310-cache"; reg = <0xfffff000 0x1000>; interrupts = <0 18 IRQ_TYPE_LEVEL_HIGH>; @@ -819,7 +819,7 @@ timer3: timer3@ffd00100 { compatible = "snps,dw-apb-timer"; interrupts = <0 118 IRQ_TYPE_LEVEL_HIGH>; - reg = <0xffd01000 0x100>; + reg = <0xffd00100 0x100>; clocks = <&l4_sys_free_clk>; clock-names = "timer"; resets = <&rst L4SYSTIMER1_RESET>; diff --git a/arch/arm/boot/dts/stm32mp157a-avenger96.dts b/arch/arm/boot/dts/stm32mp157a-avenger96.dts index 2e4742c53d04..7b8c3f25861c 100644 --- a/arch/arm/boot/dts/stm32mp157a-avenger96.dts +++ b/arch/arm/boot/dts/stm32mp157a-avenger96.dts @@ -91,6 +91,9 @@ #address-cells = <1>; #size-cells = <0>; compatible = "snps,dwmac-mdio"; + reset-gpios = <&gpioz 2 GPIO_ACTIVE_LOW>; + reset-delay-us = <1000>; + phy0: ethernet-phy@7 { reg = <7>; }; diff --git a/arch/arm/boot/dts/sun4i-a10.dtsi b/arch/arm/boot/dts/sun4i-a10.dtsi index 4c268b70b735..e0a9b371c248 100644 --- a/arch/arm/boot/dts/sun4i-a10.dtsi +++ b/arch/arm/boot/dts/sun4i-a10.dtsi @@ -198,7 +198,7 @@ default-pool { compatible = "shared-dma-pool"; size = <0x6000000>; - alloc-ranges = <0x4a000000 0x6000000>; + alloc-ranges = <0x40000000 0x10000000>; reusable; linux,cma-default; }; diff --git a/arch/arm/boot/dts/sun5i.dtsi b/arch/arm/boot/dts/sun5i.dtsi index 6befa236ba99..fd31da8fd311 100644 --- a/arch/arm/boot/dts/sun5i.dtsi +++ b/arch/arm/boot/dts/sun5i.dtsi @@ -117,7 +117,7 @@ default-pool { compatible = "shared-dma-pool"; size = <0x6000000>; - alloc-ranges = <0x4a000000 0x6000000>; + alloc-ranges = <0x40000000 0x10000000>; reusable; linux,cma-default; }; diff --git a/arch/arm/boot/dts/sun7i-a20.dtsi b/arch/arm/boot/dts/sun7i-a20.dtsi index 8aebefd6accf..1f8b45f07e58 100644 --- a/arch/arm/boot/dts/sun7i-a20.dtsi +++ b/arch/arm/boot/dts/sun7i-a20.dtsi @@ -180,7 +180,7 @@ default-pool { compatible = "shared-dma-pool"; size = <0x6000000>; - alloc-ranges = <0x4a000000 0x6000000>; + alloc-ranges = <0x40000000 0x10000000>; reusable; linux,cma-default; }; diff --git a/arch/arm/boot/dts/sun8i-h2-plus-bananapi-m2-zero.dts b/arch/arm/boot/dts/sun8i-h2-plus-bananapi-m2-zero.dts index d277d043031b..4c6704e4c57e 100644 --- a/arch/arm/boot/dts/sun8i-h2-plus-bananapi-m2-zero.dts +++ b/arch/arm/boot/dts/sun8i-h2-plus-bananapi-m2-zero.dts @@ -31,7 +31,7 @@ pwr_led { 
label = "bananapi-m2-zero:red:pwr"; - gpios = <&r_pio 0 10 GPIO_ACTIVE_HIGH>; /* PL10 */ + gpios = <&r_pio 0 10 GPIO_ACTIVE_LOW>; /* PL10 */ default-state = "on"; }; }; diff --git a/arch/arm/boot/dts/sunxi-bananapi-m2-plus-v1.2.dtsi b/arch/arm/boot/dts/sunxi-bananapi-m2-plus-v1.2.dtsi index 22466afd38a3..235994a4a2eb 100644 --- a/arch/arm/boot/dts/sunxi-bananapi-m2-plus-v1.2.dtsi +++ b/arch/arm/boot/dts/sunxi-bananapi-m2-plus-v1.2.dtsi @@ -16,15 +16,27 @@ regulator-type = "voltage"; regulator-boot-on; regulator-always-on; - regulator-min-microvolt = <1100000>; - regulator-max-microvolt = <1300000>; + regulator-min-microvolt = <1108475>; + regulator-max-microvolt = <1308475>; regulator-ramp-delay = <50>; /* 4ms */ gpios = <&r_pio 0 1 GPIO_ACTIVE_HIGH>; /* PL1 */ gpios-states = <0x1>; - states = <1100000 0>, <1300000 1>; + states = <1108475 0>, <1308475 1>; }; }; &cpu0 { cpu-supply = <®_vdd_cpux>; }; + +&cpu1 { + cpu-supply = <®_vdd_cpux>; +}; + +&cpu2 { + cpu-supply = <®_vdd_cpux>; +}; + +&cpu3 { + cpu-supply = <®_vdd_cpux>; +}; diff --git a/arch/arm/boot/dts/vexpress-v2m-rs1.dtsi b/arch/arm/boot/dts/vexpress-v2m-rs1.dtsi index dfae90adbb7c..ce64bfb22f22 100644 --- a/arch/arm/boot/dts/vexpress-v2m-rs1.dtsi +++ b/arch/arm/boot/dts/vexpress-v2m-rs1.dtsi @@ -31,7 +31,7 @@ #interrupt-cells = <1>; ranges; - nor_flash: flash@0,00000000 { + nor_flash: flash@0 { compatible = "arm,vexpress-flash", "cfi-flash"; reg = <0 0x00000000 0x04000000>, <4 0x00000000 0x04000000>; @@ -41,13 +41,13 @@ }; }; - psram@1,00000000 { + psram@100000000 { compatible = "arm,vexpress-psram", "mtd-ram"; reg = <1 0x00000000 0x02000000>; bank-width = <4>; }; - ethernet@2,02000000 { + ethernet@202000000 { compatible = "smsc,lan9118", "smsc,lan9115"; reg = <2 0x02000000 0x10000>; interrupts = <15>; @@ -59,14 +59,14 @@ vddvario-supply = <&v2m_fixed_3v3>; }; - usb@2,03000000 { + usb@203000000 { compatible = "nxp,usb-isp1761"; reg = <2 0x03000000 0x20000>; interrupts = <16>; port1-otg; }; - iofpga@3,00000000 { + iofpga@300000000 { compatible = "simple-bus"; #address-cells = <1>; #size-cells = <1>; diff --git a/arch/arm/boot/dts/vfxxx.dtsi b/arch/arm/boot/dts/vfxxx.dtsi index 028e0ec30e0c..fa248066d9d9 100644 --- a/arch/arm/boot/dts/vfxxx.dtsi +++ b/arch/arm/boot/dts/vfxxx.dtsi @@ -495,7 +495,7 @@ }; ocotp: ocotp@400a5000 { - compatible = "fsl,vf610-ocotp"; + compatible = "fsl,vf610-ocotp", "syscon"; reg = <0x400a5000 0x1000>; clocks = <&clks VF610_CLK_OCOTP>; }; diff --git a/arch/arm/configs/rpc_defconfig b/arch/arm/configs/rpc_defconfig index 3b82b64950d9..c090643b1ecb 100644 --- a/arch/arm/configs/rpc_defconfig +++ b/arch/arm/configs/rpc_defconfig @@ -32,7 +32,6 @@ CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m CONFIG_BLK_DEV_SR=y -CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_CHR_DEV_SG=y CONFIG_SCSI_CONSTANTS=y CONFIG_SCSI_LOGGING=y diff --git a/arch/arm/configs/s3c2410_defconfig b/arch/arm/configs/s3c2410_defconfig index 73ed73a8785a..153009130dab 100644 --- a/arch/arm/configs/s3c2410_defconfig +++ b/arch/arm/configs/s3c2410_defconfig @@ -202,7 +202,6 @@ CONFIG_EEPROM_AT24=y CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m CONFIG_BLK_DEV_SR=y -CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_CHR_DEV_SG=y CONFIG_CHR_DEV_SCH=m CONFIG_SCSI_CONSTANTS=y diff --git a/arch/arm/include/asm/clocksource.h b/arch/arm/include/asm/clocksource.h index 0b350a7e26f3..afb7a59828fe 100644 --- a/arch/arm/include/asm/clocksource.h +++ b/arch/arm/include/asm/clocksource.h @@ -1,8 +1,17 @@ #ifndef _ASM_CLOCKSOURCE_H #define _ASM_CLOCKSOURCE_H +enum vdso_arch_clockmode 
{ + /* vdso clocksource not usable */ + VDSO_CLOCKMODE_NONE, + /* vdso clocksource usable */ + VDSO_CLOCKMODE_ARCHTIMER, + VDSO_CLOCKMODE_ARCHTIMER_NOCOMPAT = VDSO_CLOCKMODE_ARCHTIMER, +}; + struct arch_clocksource_data { - bool vdso_direct; /* Usable for direct VDSO access? */ + /* Usable for direct VDSO access? */ + enum vdso_arch_clockmode clock_mode; }; #endif diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h index 1b179b1f46bc..dd03d5e01a94 100644 --- a/arch/arm/include/asm/kvm_host.h +++ b/arch/arm/include/asm/kvm_host.h @@ -266,7 +266,7 @@ int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu, #define KVM_ARCH_WANT_MMU_NOTIFIER int kvm_unmap_hva_range(struct kvm *kvm, - unsigned long start, unsigned long end); + unsigned long start, unsigned long end, unsigned flags); int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu); diff --git a/arch/arm/include/asm/percpu.h b/arch/arm/include/asm/percpu.h index f44f448537f2..1a3eedbac4a2 100644 --- a/arch/arm/include/asm/percpu.h +++ b/arch/arm/include/asm/percpu.h @@ -5,6 +5,8 @@ #ifndef _ASM_ARM_PERCPU_H_ #define _ASM_ARM_PERCPU_H_ +#include + /* * Same as asm-generic/percpu.h, except that we store the per cpu offset * in the TPIDRPRW. TPIDRPRW only exists on V6K and V7 diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c index b0c195e3a06d..5f95e4b911a0 100644 --- a/arch/arm/kernel/hw_breakpoint.c +++ b/arch/arm/kernel/hw_breakpoint.c @@ -680,6 +680,12 @@ static void disable_single_step(struct perf_event *bp) arch_install_hw_breakpoint(bp); } +static int watchpoint_fault_on_uaccess(struct pt_regs *regs, + struct arch_hw_breakpoint *info) +{ + return !user_mode(regs) && info->ctrl.privilege == ARM_BREAKPOINT_USER; +} + static void watchpoint_handler(unsigned long addr, unsigned int fsr, struct pt_regs *regs) { @@ -739,16 +745,27 @@ static void watchpoint_handler(unsigned long addr, unsigned int fsr, } pr_debug("watchpoint fired: address = 0x%x\n", info->trigger); + + /* + * If we triggered a user watchpoint from a uaccess routine, + * then handle the stepping ourselves since userspace really + * can't help us with this. + */ + if (watchpoint_fault_on_uaccess(regs, info)) + goto step; + perf_bp_event(wp, regs); /* - * If no overflow handler is present, insert a temporary - * mismatch breakpoint so we can single-step over the - * watchpoint trigger. + * Defer stepping to the overflow handler if one is installed. + * Otherwise, insert a temporary mismatch breakpoint so that + * we can single-step over the watchpoint trigger. */ - if (is_default_overflow_handler(wp)) - enable_single_step(wp, instruction_pointer(regs)); + if (!is_default_overflow_handler(wp)) + goto unlock; +step: + enable_single_step(wp, instruction_pointer(regs)); unlock: rcu_read_unlock(); } diff --git a/arch/arm/kernel/stacktrace.c b/arch/arm/kernel/stacktrace.c index 71778bb0475b..a082f6e4f0f4 100644 --- a/arch/arm/kernel/stacktrace.c +++ b/arch/arm/kernel/stacktrace.c @@ -22,6 +22,19 @@ * A simple function epilogue looks like this: * ldm sp, {fp, sp, pc} * + * When compiled with clang, pc and sp are not pushed. 
A simple function + * prologue looks like this when built with clang: + * + * stmdb {..., fp, lr} + * add fp, sp, #x + * sub sp, sp, #y + * + * A simple function epilogue looks like this when built with clang: + * + * sub sp, fp, #x + * ldm {..., fp, pc} + * + * * Note that with framepointer enabled, even the leaf functions have the same * prologue and epilogue, therefore we can ignore the LR value in this case. */ @@ -34,6 +47,16 @@ int notrace unwind_frame(struct stackframe *frame) low = frame->sp; high = ALIGN(low, THREAD_SIZE); +#ifdef CONFIG_CC_IS_CLANG + /* check current frame pointer is within bounds */ + if (fp < low + 4 || fp > high - 4) + return -EINVAL; + + frame->sp = frame->fp; + frame->fp = *(unsigned long *)(fp); + frame->pc = frame->lr; + frame->lr = *(unsigned long *)(fp + 4); +#else /* check current frame pointer is within bounds */ if (fp < low + 12 || fp > high - 4) return -EINVAL; @@ -42,6 +65,7 @@ int notrace unwind_frame(struct stackframe *frame) frame->fp = *(unsigned long *)(fp - 12); frame->sp = *(unsigned long *)(fp - 8); frame->pc = *(unsigned long *)(fp - 4); +#endif return 0; } diff --git a/arch/arm/kernel/vdso.c b/arch/arm/kernel/vdso.c index f00e45fa62c4..6c69a5548ba2 100644 --- a/arch/arm/kernel/vdso.c +++ b/arch/arm/kernel/vdso.c @@ -281,7 +281,7 @@ static bool tk_is_cntvct(const struct timekeeper *tk) if (!IS_ENABLED(CONFIG_ARM_ARCH_TIMER)) return false; - if (!tk->tkr_mono.clock->archdata.vdso_direct) + if (tk->tkr_mono.clock->archdata.clock_mode != VDSO_CLOCKMODE_ARCHTIMER) return false; return true; diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c index 52665f30d236..6bc3000deb86 100644 --- a/arch/arm/mach-at91/pm.c +++ b/arch/arm/mach-at91/pm.c @@ -592,13 +592,13 @@ static void __init at91_pm_sram_init(void) sram_pool = gen_pool_get(&pdev->dev, NULL); if (!sram_pool) { pr_warn("%s: sram pool unavailable!\n", __func__); - return; + goto out_put_device; } sram_base = gen_pool_alloc(sram_pool, at91_pm_suspend_in_sram_sz); if (!sram_base) { pr_warn("%s: unable to alloc sram!\n", __func__); - return; + goto out_put_device; } sram_pbase = gen_pool_virt_to_phys(sram_pool, sram_base); @@ -606,12 +606,17 @@ static void __init at91_pm_sram_init(void) at91_pm_suspend_in_sram_sz, false); if (!at91_suspend_sram_fn) { pr_warn("SRAM: Could not map\n"); - return; + goto out_put_device; } /* Copy the pm suspend handler to SRAM */ at91_suspend_sram_fn = fncpy(at91_suspend_sram_fn, &at91_pm_suspend_in_sram, at91_pm_suspend_in_sram_sz); + return; + +out_put_device: + put_device(&pdev->dev); + return; } static bool __init at91_is_pm_mode_active(int pm_mode) diff --git a/arch/arm/mach-at91/pm_suspend.S b/arch/arm/mach-at91/pm_suspend.S index ed57c879d4e1..2591cba61937 100644 --- a/arch/arm/mach-at91/pm_suspend.S +++ b/arch/arm/mach-at91/pm_suspend.S @@ -268,6 +268,10 @@ ENDPROC(at91_backup_mode) orr tmp1, tmp1, #AT91_PMC_KEY str tmp1, [pmc, #AT91_CKGR_MOR] + /* Quirk for SAM9X60's PMC */ + nop + nop + wait_mckrdy /* Enable the crystal oscillator */ diff --git a/arch/arm/mach-exynos/mcpm-exynos.c b/arch/arm/mach-exynos/mcpm-exynos.c index 9a681b421ae1..cd861c57d5ad 100644 --- a/arch/arm/mach-exynos/mcpm-exynos.c +++ b/arch/arm/mach-exynos/mcpm-exynos.c @@ -26,6 +26,7 @@ #define EXYNOS5420_USE_L2_COMMON_UP_STATE BIT(30) static void __iomem *ns_sram_base_addr __ro_after_init; +static bool secure_firmware __ro_after_init; /* * The common v7_exit_coherency_flush API could not be used because of the @@ -58,15 +59,16 @@ static void __iomem *ns_sram_base_addr 
__ro_after_init; static int exynos_cpu_powerup(unsigned int cpu, unsigned int cluster) { unsigned int cpunr = cpu + (cluster * EXYNOS5420_CPUS_PER_CLUSTER); + bool state; pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster); if (cpu >= EXYNOS5420_CPUS_PER_CLUSTER || cluster >= EXYNOS5420_NR_CLUSTERS) return -EINVAL; - if (!exynos_cpu_power_state(cpunr)) { - exynos_cpu_power_up(cpunr); - + state = exynos_cpu_power_state(cpunr); + exynos_cpu_power_up(cpunr); + if (!state && secure_firmware) { /* * This assumes the cluster number of the big cores(Cortex A15) * is 0 and the Little cores(Cortex A7) is 1. @@ -258,6 +260,8 @@ static int __init exynos_mcpm_init(void) return -ENOMEM; } + secure_firmware = exynos_secure_firmware_available(); + /* * To increase the stability of KFC reset we need to program * the PMU SPARE3 register diff --git a/arch/arm/mach-imx/pm-imx5.c b/arch/arm/mach-imx/pm-imx5.c index f057df813f83..e9962b48e30c 100644 --- a/arch/arm/mach-imx/pm-imx5.c +++ b/arch/arm/mach-imx/pm-imx5.c @@ -295,14 +295,14 @@ static int __init imx_suspend_alloc_ocram( if (!ocram_pool) { pr_warn("%s: ocram pool unavailable!\n", __func__); ret = -ENODEV; - goto put_node; + goto put_device; } ocram_base = gen_pool_alloc(ocram_pool, size); if (!ocram_base) { pr_warn("%s: unable to alloc ocram!\n", __func__); ret = -ENOMEM; - goto put_node; + goto put_device; } phys = gen_pool_virt_to_phys(ocram_pool, ocram_base); @@ -312,6 +312,8 @@ static int __init imx_suspend_alloc_ocram( if (virt_out) *virt_out = virt; +put_device: + put_device(&pdev->dev); put_node: of_node_put(node); diff --git a/arch/arm/mach-imx/pm-imx6.c b/arch/arm/mach-imx/pm-imx6.c index df767169ad2c..9f44f0fa121f 100644 --- a/arch/arm/mach-imx/pm-imx6.c +++ b/arch/arm/mach-imx/pm-imx6.c @@ -1066,6 +1066,7 @@ void __init imx6_pm_map_io(void) static int __init imx6q_suspend_init(const struct imx6_pm_socdata *socdata) { + struct device_node *node; struct imx6_cpu_pm_info *pm_info; unsigned long iram_paddr; int i, ret = 0; @@ -1079,8 +1080,11 @@ static int __init imx6q_suspend_init(const struct imx6_pm_socdata *socdata) return -EINVAL; } - if (psci_ops.cpu_suspend) + if (psci_ops.cpu_suspend) { + /* TODO: seems not needed */ + /* of_node_put(node); */ return ret; + } /* * 16KB is allocated for IRAM TLB, but only up 8k is for kernel TLB, @@ -1226,6 +1230,11 @@ static int __init imx6q_suspend_init(const struct imx6_pm_socdata *socdata) &imx6_suspend, MX6Q_SUSPEND_OCRAM_SIZE - sizeof(*pm_info)); + goto put_node; + +put_node: + of_node_put(node); + return ret; } diff --git a/arch/arm/mach-integrator/Kconfig b/arch/arm/mach-integrator/Kconfig index 982eabc36163..2406cab73835 100644 --- a/arch/arm/mach-integrator/Kconfig +++ b/arch/arm/mach-integrator/Kconfig @@ -4,6 +4,8 @@ menuconfig ARCH_INTEGRATOR depends on ARCH_MULTI_V4T || ARCH_MULTI_V5 || ARCH_MULTI_V6 select ARM_AMBA select COMMON_CLK_VERSATILE + select CMA + select DMA_CMA select HAVE_TCM select ICST select MFD_SYSCON @@ -35,14 +37,13 @@ config INTEGRATOR_IMPD1 select ARM_VIC select GPIO_PL061 select GPIOLIB + select REGULATOR + select REGULATOR_FIXED_VOLTAGE help The IM-PD1 is an add-on logic module for the Integrator which allows ARM(R) Ltd PrimeCells to be developed and evaluated. The IM-PD1 can be found on the Integrator/PP2 platform. - To compile this driver as a module, choose M here: the - module will be called impd1. 
- config INTEGRATOR_CM7TDMI bool "Integrator/CM7TDMI core module" depends on ARCH_INTEGRATOR_AP diff --git a/arch/arm/mach-omap2/omap-iommu.c b/arch/arm/mach-omap2/omap-iommu.c index f1a6ece8108e..78247e6f4a72 100644 --- a/arch/arm/mach-omap2/omap-iommu.c +++ b/arch/arm/mach-omap2/omap-iommu.c @@ -11,14 +11,43 @@ #include "omap_hwmod.h" #include "omap_device.h" +#include "clockdomain.h" #include "powerdomain.h" +static void omap_iommu_dra7_emu_swsup_config(struct platform_device *pdev, + bool enable) +{ + static struct clockdomain *emu_clkdm; + static DEFINE_SPINLOCK(emu_lock); + static atomic_t count; + struct device_node *np = pdev->dev.of_node; + + if (!of_device_is_compatible(np, "ti,dra7-dsp-iommu")) + return; + + if (!emu_clkdm) { + emu_clkdm = clkdm_lookup("emu_clkdm"); + if (WARN_ON_ONCE(!emu_clkdm)) + return; + } + + spin_lock(&emu_lock); + + if (enable && (atomic_inc_return(&count) == 1)) + clkdm_deny_idle(emu_clkdm); + else if (!enable && (atomic_dec_return(&count) == 0)) + clkdm_allow_idle(emu_clkdm); + + spin_unlock(&emu_lock); +} + int omap_iommu_set_pwrdm_constraint(struct platform_device *pdev, bool request, u8 *pwrst) { struct powerdomain *pwrdm; struct omap_device *od; u8 next_pwrst; + int ret = 0; od = to_omap_device(pdev); if (!od) @@ -31,13 +60,21 @@ int omap_iommu_set_pwrdm_constraint(struct platform_device *pdev, bool request, if (!pwrdm) return -EINVAL; - if (request) + if (request) { *pwrst = pwrdm_read_next_pwrst(pwrdm); + omap_iommu_dra7_emu_swsup_config(pdev, true); + } if (*pwrst > PWRDM_POWER_RET) - return 0; + goto out; next_pwrst = request ? PWRDM_POWER_ON : *pwrst; - return pwrdm_set_next_pwrst(pwrdm, next_pwrst); + ret = pwrdm_set_next_pwrst(pwrdm, next_pwrst); + +out: + if (!request) + omap_iommu_dra7_emu_swsup_config(pdev, false); + + return ret; } diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c index 203664c40d3d..eb74aa182661 100644 --- a/arch/arm/mach-omap2/omap_hwmod.c +++ b/arch/arm/mach-omap2/omap_hwmod.c @@ -3535,7 +3535,7 @@ static const struct omap_hwmod_reset dra7_reset_quirks[] = { }; static const struct omap_hwmod_reset omap_reset_quirks[] = { - { .match = "dss", .len = 3, .reset = omap_dss_reset, }, + { .match = "dss_core", .len = 8, .reset = omap_dss_reset, }, { .match = "hdq1w", .len = 5, .reset = omap_hdq1w_reset, }, { .match = "i2c", .len = 3, .reset = omap_i2c_reset, }, { .match = "wd_timer", .len = 8, .reset = omap2_wd_timer_reset, }, diff --git a/arch/arm/mach-omap2/pdata-quirks.c b/arch/arm/mach-omap2/pdata-quirks.c index 247e3f8acffe..ca07e310d9ed 100644 --- a/arch/arm/mach-omap2/pdata-quirks.c +++ b/arch/arm/mach-omap2/pdata-quirks.c @@ -44,6 +44,17 @@ struct pdata_init { static struct of_dev_auxdata omap_auxdata_lookup[]; static struct twl4030_gpio_platform_data twl_gpio_auxdata; +#if IS_ENABLED(CONFIG_OMAP_IOMMU) +int omap_iommu_set_pwrdm_constraint(struct platform_device *pdev, bool request, + u8 *pwrst); +#else +static inline int omap_iommu_set_pwrdm_constraint(struct platform_device *pdev, + bool request, u8 *pwrst) +{ + return 0; +} +#endif + #ifdef CONFIG_MACH_NOKIA_N8X0 static void __init omap2420_n8x0_legacy_init(void) { @@ -311,16 +322,6 @@ static void __init omap3_pandora_legacy_init(void) } #endif /* CONFIG_ARCH_OMAP3 */ -#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) -static struct iommu_platform_data omap4_iommu_pdata = { - .reset_name = "mmu_cache", - .assert_reset = omap_device_assert_hardreset, - .deassert_reset = omap_device_deassert_hardreset, - .device_enable = 
omap_device_enable, - .device_idle = omap_device_idle, -}; -#endif - #if defined(CONFIG_SOC_AM33XX) || defined(CONFIG_SOC_AM43XX) static struct wkup_m3_platform_data wkup_m3_data = { .reset_name = "wkup_m3", @@ -336,6 +337,10 @@ static void __init omap5_uevm_legacy_init(void) #endif #ifdef CONFIG_SOC_DRA7XX +static struct iommu_platform_data dra7_ipu1_dsp_iommu_pdata = { + .set_pwrdm_constraint = omap_iommu_set_pwrdm_constraint, +}; + static struct omap_hsmmc_platform_data dra7_hsmmc_data_mmc1; static struct omap_hsmmc_platform_data dra7_hsmmc_data_mmc2; static struct omap_hsmmc_platform_data dra7_hsmmc_data_mmc3; @@ -543,10 +548,6 @@ static struct of_dev_auxdata omap_auxdata_lookup[] = { &wkup_m3_data), #endif #if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) - OF_DEV_AUXDATA("ti,omap4-iommu", 0x4a066000, "4a066000.mmu", - &omap4_iommu_pdata), - OF_DEV_AUXDATA("ti,omap4-iommu", 0x55082000, "55082000.mmu", - &omap4_iommu_pdata), OF_DEV_AUXDATA("ti,omap4-smartreflex-iva", 0x4a0db000, "4a0db000.smartreflex", &omap_sr_pdata[OMAP_SR_IVA]), OF_DEV_AUXDATA("ti,omap4-smartreflex-core", 0x4a0dd000, @@ -561,6 +562,12 @@ static struct of_dev_auxdata omap_auxdata_lookup[] = { &dra7_hsmmc_data_mmc2), OF_DEV_AUXDATA("ti,dra7-hsmmc", 0x480ad000, "480ad000.mmc", &dra7_hsmmc_data_mmc3), + OF_DEV_AUXDATA("ti,dra7-dsp-iommu", 0x40d01000, "40d01000.mmu", + &dra7_ipu1_dsp_iommu_pdata), + OF_DEV_AUXDATA("ti,dra7-dsp-iommu", 0x41501000, "41501000.mmu", + &dra7_ipu1_dsp_iommu_pdata), + OF_DEV_AUXDATA("ti,dra7-iommu", 0x58882000, "58882000.mmu", + &dra7_ipu1_dsp_iommu_pdata), #endif /* Common auxdata */ OF_DEV_AUXDATA("ti,sysc", 0, NULL, &ti_sysc_pdata), diff --git a/arch/arm/mach-socfpga/pm.c b/arch/arm/mach-socfpga/pm.c index 6ed887cf8dc9..365c0428b21b 100644 --- a/arch/arm/mach-socfpga/pm.c +++ b/arch/arm/mach-socfpga/pm.c @@ -49,14 +49,14 @@ static int socfpga_setup_ocram_self_refresh(void) if (!ocram_pool) { pr_warn("%s: ocram pool unavailable!\n", __func__); ret = -ENODEV; - goto put_node; + goto put_device; } ocram_base = gen_pool_alloc(ocram_pool, socfpga_sdram_self_refresh_sz); if (!ocram_base) { pr_warn("%s: unable to alloc ocram!\n", __func__); ret = -ENOMEM; - goto put_node; + goto put_device; } ocram_pbase = gen_pool_virt_to_phys(ocram_pool, ocram_base); @@ -67,7 +67,7 @@ static int socfpga_setup_ocram_self_refresh(void) if (!suspend_ocram_base) { pr_warn("%s: __arm_ioremap_exec failed!\n", __func__); ret = -ENOMEM; - goto put_node; + goto put_device; } /* Copy the code that puts DDR in self refresh to ocram */ @@ -81,6 +81,8 @@ static int socfpga_setup_ocram_self_refresh(void) if (!socfpga_sdram_self_refresh_in_ocram) ret = -EFAULT; +put_device: + put_device(&pdev->dev); put_node: of_node_put(np); diff --git a/arch/arm/mach-tegra/tegra.c b/arch/arm/mach-tegra/tegra.c index e512e606eabd..5ea3421fa1e8 100644 --- a/arch/arm/mach-tegra/tegra.c +++ b/arch/arm/mach-tegra/tegra.c @@ -106,8 +106,8 @@ static const char * const tegra_dt_board_compat[] = { }; DT_MACHINE_START(TEGRA_DT, "NVIDIA Tegra SoC (Flattened Device Tree)") - .l2c_aux_val = 0x3c400001, - .l2c_aux_mask = 0xc20fc3fe, + .l2c_aux_val = 0x3c400000, + .l2c_aux_mask = 0xc20fc3ff, .smp = smp_ops(tegra_smp_ops), .map_io = tegra_map_common_io, .init_early = tegra_init_early, diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S index 5461d589a1e2..60ac7c5999a9 100644 --- a/arch/arm/mm/proc-macros.S +++ b/arch/arm/mm/proc-macros.S @@ -5,6 +5,7 @@ * VMA_VM_FLAGS * VM_EXEC */ +#include #include #include @@ -30,7 +31,7 @@ * 
act_mm - get current->active_mm */ .macro act_mm, rd - bic \rd, sp, #8128 + bic \rd, sp, #(THREAD_SIZE - 1) & ~63 bic \rd, \rd, #63 ldr \rd, [\rd, #TI_TASK] .if (TSK_ACTIVE_MM > IMM12_MASK) diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile index d65aef47ece3..96abe558aea8 100644 --- a/arch/arm64/Makefile +++ b/arch/arm64/Makefile @@ -146,6 +146,8 @@ zinstall install: PHONY += vdso_install vdso_install: $(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso $@ + $(if $(CONFIG_COMPAT_VDSO), \ + $(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso32 $@) # We use MRPROPER_FILES and CLEAN_FILES now archclean: diff --git a/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi b/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi index d1fc9c2055f4..9498d1de730c 100644 --- a/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi +++ b/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi @@ -77,7 +77,7 @@ method = "smc"; }; - intc: intc@fffc1000 { + intc: interrupt-controller@fffc1000 { compatible = "arm,gic-400", "arm,cortex-a15-gic"; #interrupt-cells = <3>; interrupt-controller; @@ -302,7 +302,7 @@ status = "disabled"; }; - nand: nand@ffb90000 { + nand: nand-controller@ffb90000 { #address-cells = <1>; #size-cells = <0>; compatible = "altr,socfpga-denali-nand"; @@ -445,7 +445,7 @@ clock-names = "timer"; }; - uart0: serial0@ffc02000 { + uart0: serial@ffc02000 { compatible = "snps,dw-apb-uart"; reg = <0xffc02000 0x100>; interrupts = <0 108 4>; @@ -456,7 +456,7 @@ status = "disabled"; }; - uart1: serial1@ffc02100 { + uart1: serial@ffc02100 { compatible = "snps,dw-apb-uart"; reg = <0xffc02100 0x100>; interrupts = <0 109 4>; diff --git a/arch/arm64/boot/dts/amlogic/meson-axg.dtsi b/arch/arm64/boot/dts/amlogic/meson-axg.dtsi index bb4a2acb9970..502c4ac45c29 100644 --- a/arch/arm64/boot/dts/amlogic/meson-axg.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-axg.dtsi @@ -1728,18 +1728,18 @@ }; sram: sram@fffc0000 { - compatible = "amlogic,meson-axg-sram", "mmio-sram"; + compatible = "mmio-sram"; reg = <0x0 0xfffc0000 0x0 0x20000>; #address-cells = <1>; #size-cells = <1>; ranges = <0 0x0 0xfffc0000 0x20000>; - cpu_scp_lpri: scp-shmem@13000 { + cpu_scp_lpri: scp-sram@13000 { compatible = "amlogic,meson-axg-scp-shmem"; reg = <0x13000 0x400>; }; - cpu_scp_hpri: scp-shmem@13400 { + cpu_scp_hpri: scp-sram@13400 { compatible = "amlogic,meson-axg-scp-shmem"; reg = <0x13400 0x400>; }; diff --git a/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi index 2199a54c720c..1234bc797429 100644 --- a/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi @@ -2381,6 +2381,7 @@ dr_mode = "host"; snps,dis_u2_susphy_quirk; snps,quirk-frame-length-adjustment; + snps,parkmode-disable-ss-quirk; }; }; diff --git a/arch/arm64/boot/dts/amlogic/meson-gx.dtsi b/arch/arm64/boot/dts/amlogic/meson-gx.dtsi index 6733050d735f..ce230d6ac35c 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gx.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-gx.dtsi @@ -345,20 +345,20 @@ }; sram: sram@c8000000 { - compatible = "amlogic,meson-gx-sram", "amlogic,meson-gxbb-sram", "mmio-sram"; + compatible = "mmio-sram"; reg = <0x0 0xc8000000 0x0 0x14000>; #address-cells = <1>; #size-cells = <1>; ranges = <0 0x0 0xc8000000 0x14000>; - cpu_scp_lpri: scp-shmem@0 { - compatible = "amlogic,meson-gx-scp-shmem", "amlogic,meson-gxbb-scp-shmem"; + cpu_scp_lpri: scp-sram@0 { + compatible = "amlogic,meson-gxbb-scp-shmem"; reg = <0x13000 0x400>; }; - cpu_scp_hpri: scp-shmem@200 { - compatible = 
"amlogic,meson-gx-scp-shmem", "amlogic,meson-gxbb-scp-shmem"; + cpu_scp_hpri: scp-sram@200 { + compatible = "amlogic,meson-gxbb-scp-shmem"; reg = <0x13400 0x400>; }; }; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s805x-libretech-ac.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s805x-libretech-ac.dts index 82b1c4851147..e034bbff8e66 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxl-s805x-libretech-ac.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s805x-libretech-ac.dts @@ -9,7 +9,7 @@ #include -#include "meson-gxl-s905x.dtsi" +#include "meson-gxl-s805x.dtsi" / { compatible = "libretech,aml-s805x-ac", "amlogic,s805x", diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s805x-p241.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s805x-p241.dts index 3a1484e5b8e1..fbc687c9ff83 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxl-s805x-p241.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s805x-p241.dts @@ -9,7 +9,7 @@ #include -#include "meson-gxl-s905x.dtsi" +#include "meson-gxl-s805x.dtsi" / { compatible = "amlogic,p241", "amlogic,s805x", "amlogic,meson-gxl"; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s805x.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxl-s805x.dtsi new file mode 100644 index 000000000000..f9d705648426 --- /dev/null +++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s805x.dtsi @@ -0,0 +1,24 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR MIT) +/* + * Copyright (c) 2020 BayLibre SAS + * Author: Neil Armstrong + */ + +#include "meson-gxl-s905x.dtsi" + +/ { + compatible = "amlogic,s805x", "amlogic,meson-gxl"; +}; + +/* The S805X Package doesn't seem to handle the 744MHz OPP correctly */ +&mali { + assigned-clocks = <&clkc CLKID_MALI_0_SEL>, + <&clkc CLKID_MALI_0>, + <&clkc CLKID_MALI>; /* Glitch free mux */ + assigned-clock-parents = <&clkc CLKID_FCLK_DIV3>, + <0>, /* Do Nothing */ + <&clkc CLKID_MALI_0>; + assigned-clock-rates = <0>, /* Do Nothing */ + <666666666>, + <0>; /* Do Nothing */ +}; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi index 49ff0a7d0210..e3cfa24dca5a 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi @@ -288,6 +288,11 @@ }; }; +&hwrng { + clocks = <&clkc CLKID_RNG0>; + clock-names = "core"; +}; + &i2c_A { clocks = <&clkc CLKID_I2C>; }; diff --git a/arch/arm64/boot/dts/arm/foundation-v8-gicv2.dtsi b/arch/arm64/boot/dts/arm/foundation-v8-gicv2.dtsi index 15fe81738e94..dfb23dfc0b0f 100644 --- a/arch/arm64/boot/dts/arm/foundation-v8-gicv2.dtsi +++ b/arch/arm64/boot/dts/arm/foundation-v8-gicv2.dtsi @@ -8,7 +8,7 @@ gic: interrupt-controller@2c001000 { compatible = "arm,cortex-a15-gic", "arm,cortex-a9-gic"; #interrupt-cells = <3>; - #address-cells = <2>; + #address-cells = <1>; interrupt-controller; reg = <0x0 0x2c001000 0 0x1000>, <0x0 0x2c002000 0 0x2000>, diff --git a/arch/arm64/boot/dts/arm/foundation-v8-gicv3.dtsi b/arch/arm64/boot/dts/arm/foundation-v8-gicv3.dtsi index f2c75c756039..906f51935b36 100644 --- a/arch/arm64/boot/dts/arm/foundation-v8-gicv3.dtsi +++ b/arch/arm64/boot/dts/arm/foundation-v8-gicv3.dtsi @@ -8,9 +8,9 @@ gic: interrupt-controller@2f000000 { compatible = "arm,gic-v3"; #interrupt-cells = <3>; - #address-cells = <2>; - #size-cells = <2>; - ranges; + #address-cells = <1>; + #size-cells = <1>; + ranges = <0x0 0x0 0x2f000000 0x100000>; interrupt-controller; reg = <0x0 0x2f000000 0x0 0x10000>, <0x0 0x2f100000 0x0 0x200000>, @@ -22,7 +22,7 @@ its: its@2f020000 { compatible = "arm,gic-v3-its"; msi-controller; - reg = <0x0 0x2f020000 0x0 0x20000>; 
+ reg = <0x20000 0x20000>; }; }; }; diff --git a/arch/arm64/boot/dts/arm/foundation-v8.dtsi b/arch/arm64/boot/dts/arm/foundation-v8.dtsi index 3f78373f708a..05d1657170b4 100644 --- a/arch/arm64/boot/dts/arm/foundation-v8.dtsi +++ b/arch/arm64/boot/dts/arm/foundation-v8.dtsi @@ -107,51 +107,51 @@ #interrupt-cells = <1>; interrupt-map-mask = <0 0 63>; - interrupt-map = <0 0 0 &gic 0 0 GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>, - <0 0 1 &gic 0 0 GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>, - <0 0 2 &gic 0 0 GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>, - <0 0 3 &gic 0 0 GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>, - <0 0 4 &gic 0 0 GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>, - <0 0 5 &gic 0 0 GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>, - <0 0 6 &gic 0 0 GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>, - <0 0 7 &gic 0 0 GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>, - <0 0 8 &gic 0 0 GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>, - <0 0 9 &gic 0 0 GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>, - <0 0 10 &gic 0 0 GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>, - <0 0 11 &gic 0 0 GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>, - <0 0 12 &gic 0 0 GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>, - <0 0 13 &gic 0 0 GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>, - <0 0 14 &gic 0 0 GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>, - <0 0 15 &gic 0 0 GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>, - <0 0 16 &gic 0 0 GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>, - <0 0 17 &gic 0 0 GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>, - <0 0 18 &gic 0 0 GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>, - <0 0 19 &gic 0 0 GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>, - <0 0 20 &gic 0 0 GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>, - <0 0 21 &gic 0 0 GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>, - <0 0 22 &gic 0 0 GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>, - <0 0 23 &gic 0 0 GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>, - <0 0 24 &gic 0 0 GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>, - <0 0 25 &gic 0 0 GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>, - <0 0 26 &gic 0 0 GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>, - <0 0 27 &gic 0 0 GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>, - <0 0 28 &gic 0 0 GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>, - <0 0 29 &gic 0 0 GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>, - <0 0 30 &gic 0 0 GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>, - <0 0 31 &gic 0 0 GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>, - <0 0 32 &gic 0 0 GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>, - <0 0 33 &gic 0 0 GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>, - <0 0 34 &gic 0 0 GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>, - <0 0 35 &gic 0 0 GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>, - <0 0 36 &gic 0 0 GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>, - <0 0 37 &gic 0 0 GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>, - <0 0 38 &gic 0 0 GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>, - <0 0 39 &gic 0 0 GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>, - <0 0 40 &gic 0 0 GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>, - <0 0 41 &gic 0 0 GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>, - <0 0 42 &gic 0 0 GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>; + interrupt-map = <0 0 0 &gic 0 GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>, + <0 0 1 &gic 0 GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>, + <0 0 2 &gic 0 GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>, + <0 0 3 &gic 0 GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>, + <0 0 4 &gic 0 GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>, + <0 0 5 &gic 0 GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>, + <0 0 6 &gic 0 GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>, + <0 0 7 &gic 0 GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>, + <0 0 8 &gic 0 GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>, + <0 0 9 &gic 0 GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>, + <0 0 10 &gic 0 GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>, + <0 0 11 &gic 0 GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>, + <0 0 12 &gic 0 GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>, + <0 0 13 &gic 0 GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>, + <0 0 14 &gic 0 GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>, + <0 0 15 &gic 0 GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>, + <0 0 16 &gic 0 GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>, + <0 0 17 &gic 0 GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>, + <0 0 18 &gic 0 GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>, + <0 0 19 &gic 0 GIC_SPI 
19 IRQ_TYPE_LEVEL_HIGH>, + <0 0 20 &gic 0 GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>, + <0 0 21 &gic 0 GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>, + <0 0 22 &gic 0 GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>, + <0 0 23 &gic 0 GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>, + <0 0 24 &gic 0 GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>, + <0 0 25 &gic 0 GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>, + <0 0 26 &gic 0 GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>, + <0 0 27 &gic 0 GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>, + <0 0 28 &gic 0 GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>, + <0 0 29 &gic 0 GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>, + <0 0 30 &gic 0 GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>, + <0 0 31 &gic 0 GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>, + <0 0 32 &gic 0 GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>, + <0 0 33 &gic 0 GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>, + <0 0 34 &gic 0 GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>, + <0 0 35 &gic 0 GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>, + <0 0 36 &gic 0 GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>, + <0 0 37 &gic 0 GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>, + <0 0 38 &gic 0 GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>, + <0 0 39 &gic 0 GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>, + <0 0 40 &gic 0 GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>, + <0 0 41 &gic 0 GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>, + <0 0 42 &gic 0 GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>; - ethernet@2,02000000 { + ethernet@202000000 { compatible = "smsc,lan91c111"; reg = <2 0x02000000 0x10000>; interrupts = <15>; @@ -178,7 +178,7 @@ clock-output-names = "v2m:refclk32khz"; }; - iofpga@3,00000000 { + iofpga@300000000 { compatible = "simple-bus"; #address-cells = <1>; #size-cells = <1>; diff --git a/arch/arm64/boot/dts/arm/juno-base.dtsi b/arch/arm64/boot/dts/arm/juno-base.dtsi index 8c11660bbe40..c47f76b01c4b 100644 --- a/arch/arm64/boot/dts/arm/juno-base.dtsi +++ b/arch/arm64/boot/dts/arm/juno-base.dtsi @@ -62,35 +62,35 @@ <0x0 0x2c02f000 0 0x2000>, <0x0 0x2c04f000 0 0x2000>, <0x0 0x2c06f000 0 0x2000>; - #address-cells = <2>; + #address-cells = <1>; #interrupt-cells = <3>; - #size-cells = <2>; + #size-cells = <1>; interrupt-controller; interrupts = ; - ranges = <0 0 0 0x2c1c0000 0 0x40000>; + ranges = <0 0 0x2c1c0000 0x40000>; v2m_0: v2m@0 { compatible = "arm,gic-v2m-frame"; msi-controller; - reg = <0 0 0 0x10000>; + reg = <0 0x10000>; }; v2m@10000 { compatible = "arm,gic-v2m-frame"; msi-controller; - reg = <0 0x10000 0 0x10000>; + reg = <0x10000 0x10000>; }; v2m@20000 { compatible = "arm,gic-v2m-frame"; msi-controller; - reg = <0 0x20000 0 0x10000>; + reg = <0x20000 0x10000>; }; v2m@30000 { compatible = "arm,gic-v2m-frame"; msi-controller; - reg = <0 0x30000 0 0x10000>; + reg = <0x30000 0x10000>; }; }; @@ -519,10 +519,10 @@ <0x42000000 0x40 0x00000000 0x40 0x00000000 0x1 0x00000000>; #interrupt-cells = <1>; interrupt-map-mask = <0 0 0 7>; - interrupt-map = <0 0 0 1 &gic 0 0 GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>, - <0 0 0 2 &gic 0 0 GIC_SPI 137 IRQ_TYPE_LEVEL_HIGH>, - <0 0 0 3 &gic 0 0 GIC_SPI 138 IRQ_TYPE_LEVEL_HIGH>, - <0 0 0 4 &gic 0 0 GIC_SPI 139 IRQ_TYPE_LEVEL_HIGH>; + interrupt-map = <0 0 0 1 &gic 0 GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>, + <0 0 0 2 &gic 0 GIC_SPI 137 IRQ_TYPE_LEVEL_HIGH>, + <0 0 0 3 &gic 0 GIC_SPI 138 IRQ_TYPE_LEVEL_HIGH>, + <0 0 0 4 &gic 0 GIC_SPI 139 IRQ_TYPE_LEVEL_HIGH>; msi-parent = <&v2m_0>; status = "disabled"; iommu-map-mask = <0x0>; /* RC has no means to output PCI RID */ @@ -786,19 +786,19 @@ #interrupt-cells = <1>; interrupt-map-mask = <0 0 15>; - interrupt-map = <0 0 0 &gic 0 0 GIC_SPI 68 IRQ_TYPE_LEVEL_HIGH>, - <0 0 1 &gic 0 0 GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>, - <0 0 2 &gic 0 0 GIC_SPI 70 IRQ_TYPE_LEVEL_HIGH>, - <0 0 3 &gic 0 0 GIC_SPI 160 IRQ_TYPE_LEVEL_HIGH>, - <0 0 4 &gic 0 0 GIC_SPI 161 
IRQ_TYPE_LEVEL_HIGH>, - <0 0 5 &gic 0 0 GIC_SPI 162 IRQ_TYPE_LEVEL_HIGH>, - <0 0 6 &gic 0 0 GIC_SPI 163 IRQ_TYPE_LEVEL_HIGH>, - <0 0 7 &gic 0 0 GIC_SPI 164 IRQ_TYPE_LEVEL_HIGH>, - <0 0 8 &gic 0 0 GIC_SPI 165 IRQ_TYPE_LEVEL_HIGH>, - <0 0 9 &gic 0 0 GIC_SPI 166 IRQ_TYPE_LEVEL_HIGH>, - <0 0 10 &gic 0 0 GIC_SPI 167 IRQ_TYPE_LEVEL_HIGH>, - <0 0 11 &gic 0 0 GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>, - <0 0 12 &gic 0 0 GIC_SPI 169 IRQ_TYPE_LEVEL_HIGH>; + interrupt-map = <0 0 0 &gic 0 GIC_SPI 68 IRQ_TYPE_LEVEL_HIGH>, + <0 0 1 &gic 0 GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>, + <0 0 2 &gic 0 GIC_SPI 70 IRQ_TYPE_LEVEL_HIGH>, + <0 0 3 &gic 0 GIC_SPI 160 IRQ_TYPE_LEVEL_HIGH>, + <0 0 4 &gic 0 GIC_SPI 161 IRQ_TYPE_LEVEL_HIGH>, + <0 0 5 &gic 0 GIC_SPI 162 IRQ_TYPE_LEVEL_HIGH>, + <0 0 6 &gic 0 GIC_SPI 163 IRQ_TYPE_LEVEL_HIGH>, + <0 0 7 &gic 0 GIC_SPI 164 IRQ_TYPE_LEVEL_HIGH>, + <0 0 8 &gic 0 GIC_SPI 165 IRQ_TYPE_LEVEL_HIGH>, + <0 0 9 &gic 0 GIC_SPI 166 IRQ_TYPE_LEVEL_HIGH>, + <0 0 10 &gic 0 GIC_SPI 167 IRQ_TYPE_LEVEL_HIGH>, + <0 0 11 &gic 0 GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>, + <0 0 12 &gic 0 GIC_SPI 169 IRQ_TYPE_LEVEL_HIGH>; }; site2: tlx@60000000 { @@ -808,6 +808,6 @@ ranges = <0 0 0x60000000 0x10000000>; #interrupt-cells = <1>; interrupt-map-mask = <0 0>; - interrupt-map = <0 0 &gic 0 0 GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>; + interrupt-map = <0 0 &gic 0 GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>; }; }; diff --git a/arch/arm64/boot/dts/arm/juno-motherboard.dtsi b/arch/arm64/boot/dts/arm/juno-motherboard.dtsi index 9f60dacb4f80..1234a8cfc0a9 100644 --- a/arch/arm64/boot/dts/arm/juno-motherboard.dtsi +++ b/arch/arm64/boot/dts/arm/juno-motherboard.dtsi @@ -103,7 +103,7 @@ }; }; - flash@0,00000000 { + flash@0 { /* 2 * 32MiB NOR Flash memory mounted on CS0 */ compatible = "arm,vexpress-flash", "cfi-flash"; reg = <0 0x00000000 0x04000000>; @@ -120,7 +120,7 @@ }; }; - ethernet@2,00000000 { + ethernet@200000000 { compatible = "smsc,lan9118", "smsc,lan9115"; reg = <2 0x00000000 0x10000>; interrupts = <3>; @@ -133,7 +133,7 @@ vddvario-supply = <&mb_fixed_3v3>; }; - iofpga@3,00000000 { + iofpga@300000000 { compatible = "simple-bus"; #address-cells = <1>; #size-cells = <1>; diff --git a/arch/arm64/boot/dts/arm/rtsm_ve-motherboard-rs2.dtsi b/arch/arm64/boot/dts/arm/rtsm_ve-motherboard-rs2.dtsi index 57b0b9d7f3fa..29e6962c70bd 100644 --- a/arch/arm64/boot/dts/arm/rtsm_ve-motherboard-rs2.dtsi +++ b/arch/arm64/boot/dts/arm/rtsm_ve-motherboard-rs2.dtsi @@ -9,7 +9,7 @@ motherboard { arm,v2m-memory-map = "rs2"; - iofpga@3,00000000 { + iofpga@300000000 { virtio-p9@140000 { compatible = "virtio,mmio"; reg = <0x140000 0x200>; diff --git a/arch/arm64/boot/dts/arm/rtsm_ve-motherboard.dtsi b/arch/arm64/boot/dts/arm/rtsm_ve-motherboard.dtsi index 03a7bf079c8f..ad20076357f5 100644 --- a/arch/arm64/boot/dts/arm/rtsm_ve-motherboard.dtsi +++ b/arch/arm64/boot/dts/arm/rtsm_ve-motherboard.dtsi @@ -17,14 +17,14 @@ #interrupt-cells = <1>; ranges; - flash@0,00000000 { + flash@0 { compatible = "arm,vexpress-flash", "cfi-flash"; reg = <0 0x00000000 0x04000000>, <4 0x00000000 0x04000000>; bank-width = <4>; }; - ethernet@2,02000000 { + ethernet@202000000 { compatible = "smsc,lan91c111"; reg = <2 0x02000000 0x10000>; interrupts = <15>; @@ -51,7 +51,7 @@ clock-output-names = "v2m:refclk32khz"; }; - iofpga@3,00000000 { + iofpga@300000000 { compatible = "simple-bus"; #address-cells = <1>; #size-cells = <1>; diff --git a/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi b/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi index 15f7b0ed3836..39802066232e 100644 --- 
a/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi +++ b/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi @@ -745,7 +745,7 @@ }; qspi: spi@66470200 { - compatible = "brcm,spi-bcm-qspi", "brcm,spi-ns2-qspi"; + compatible = "brcm,spi-ns2-qspi", "brcm,spi-bcm-qspi"; reg = <0x66470200 0x184>, <0x66470000 0x124>, <0x67017408 0x004>, diff --git a/arch/arm64/boot/dts/exynos/exynos7-espresso.dts b/arch/arm64/boot/dts/exynos/exynos7-espresso.dts index 080e0f56e108..61ee7b6a3159 100644 --- a/arch/arm64/boot/dts/exynos/exynos7-espresso.dts +++ b/arch/arm64/boot/dts/exynos/exynos7-espresso.dts @@ -157,6 +157,7 @@ regulator-min-microvolt = <700000>; regulator-max-microvolt = <1150000>; regulator-enable-ramp-delay = <125>; + regulator-always-on; }; ldo8_reg: LDO8 { diff --git a/arch/arm64/boot/dts/freescale/imx8mm-evk.dts b/arch/arm64/boot/dts/freescale/imx8mm-evk.dts index f9616967d8f0..94e00bc02ba4 100755 --- a/arch/arm64/boot/dts/freescale/imx8mm-evk.dts +++ b/arch/arm64/boot/dts/freescale/imx8mm-evk.dts @@ -512,7 +512,7 @@ ldo1_reg: LDO1 { regulator-name = "LDO1"; regulator-min-microvolt = <1600000>; - regulator-max-microvolt = <1900000>; + regulator-max-microvolt = <3300000>; regulator-boot-on; regulator-always-on; }; diff --git a/arch/arm64/boot/dts/freescale/imx8mn-ddr4-evk.dts b/arch/arm64/boot/dts/freescale/imx8mn-ddr4-evk.dts index 5fb3cce6e192..32cd6b7564ff 100644 --- a/arch/arm64/boot/dts/freescale/imx8mn-ddr4-evk.dts +++ b/arch/arm64/boot/dts/freescale/imx8mn-ddr4-evk.dts @@ -687,7 +687,7 @@ ldo1_reg: LDO1 { regulator-name = "LDO1"; regulator-min-microvolt = <1600000>; - regulator-max-microvolt = <1900000>; + regulator-max-microvolt = <3300000>; regulator-boot-on; regulator-always-on; }; diff --git a/arch/arm64/boot/dts/freescale/imx8mq.dtsi b/arch/arm64/boot/dts/freescale/imx8mq.dtsi index 237464006787..dab424b62218 100755 --- a/arch/arm64/boot/dts/freescale/imx8mq.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8mq.dtsi @@ -434,7 +434,7 @@ tmu: tmu@30260000 { compatible = "fsl,imx8mq-tmu"; reg = <0x30260000 0x10000>; - interrupt = ; + interrupts = ; clocks = <&clk IMX8MQ_CLK_TMU_ROOT>; little-endian; fsl,tmu-range = <0xb0000 0xa0026 0x80048 0x70061>; diff --git a/arch/arm64/boot/dts/hisilicon/hi3660-hikey960.dts b/arch/arm64/boot/dts/hisilicon/hi3660-hikey960.dts index e035cf195b19..8c4bfbaf3a80 100644 --- a/arch/arm64/boot/dts/hisilicon/hi3660-hikey960.dts +++ b/arch/arm64/boot/dts/hisilicon/hi3660-hikey960.dts @@ -530,6 +530,17 @@ status = "ok"; compatible = "adi,adv7533"; reg = <0x39>; + adi,dsi-lanes = <4>; + ports { + #address-cells = <1>; + #size-cells = <0>; + port@0 { + reg = <0>; + }; + port@1 { + reg = <1>; + }; + }; }; }; diff --git a/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts b/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts index c14205cd6bf5..3e47150c05ec 100644 --- a/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts +++ b/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts @@ -516,7 +516,7 @@ reg = <0x39>; interrupt-parent = <&gpio1>; interrupts = <1 2>; - pd-gpio = <&gpio0 4 0>; + pd-gpios = <&gpio0 4 0>; adi,dsi-lanes = <4>; #sound-dai-cells = <0>; diff --git a/arch/arm64/boot/dts/marvell/armada-3720-espressobin.dts b/arch/arm64/boot/dts/marvell/armada-3720-espressobin.dts index fbcf03f86c96..05dc58c13fa4 100644 --- a/arch/arm64/boot/dts/marvell/armada-3720-espressobin.dts +++ b/arch/arm64/boot/dts/marvell/armada-3720-espressobin.dts @@ -19,6 +19,12 @@ model = "Globalscale Marvell ESPRESSOBin Board"; compatible = "globalscale,espressobin", "marvell,armada3720", 
"marvell,armada3710"; + aliases { + ethernet0 = ð0; + serial0 = &uart0; + serial1 = &uart1; + }; + chosen { stdout-path = "serial0:115200n8"; }; diff --git a/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts b/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts index 5f350cc71a2f..c3668187b844 100644 --- a/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts +++ b/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts @@ -95,7 +95,7 @@ }; sfp: sfp { - compatible = "sff,sfp+"; + compatible = "sff,sfp"; i2c-bus = <&i2c0>; los-gpio = <&moxtet_sfp 0 GPIO_ACTIVE_HIGH>; tx-fault-gpio = <&moxtet_sfp 1 GPIO_ACTIVE_HIGH>; @@ -171,6 +171,8 @@ marvell,pad-type = "sd"; vqmmc-supply = <&vsdio_reg>; mmc-pwrseq = <&sdhci1_pwrseq>; + /* forbid SDR104 for FCC purposes */ + sdhci-caps-mask = <0x2 0x0>; status = "okay"; }; diff --git a/arch/arm64/boot/dts/mediatek/mt7622.dtsi b/arch/arm64/boot/dts/mediatek/mt7622.dtsi index dac51e98204c..7cd8c3f52b47 100644 --- a/arch/arm64/boot/dts/mediatek/mt7622.dtsi +++ b/arch/arm64/boot/dts/mediatek/mt7622.dtsi @@ -686,6 +686,8 @@ clocks = <&pericfg CLK_PERI_MSDC30_0_PD>, <&topckgen CLK_TOP_MSDC50_0_SEL>; clock-names = "source", "hclk"; + resets = <&pericfg MT7622_PERI_MSDC0_SW_RST>; + reset-names = "hrst"; status = "disabled"; }; diff --git a/arch/arm64/boot/dts/mediatek/mt8173.dtsi b/arch/arm64/boot/dts/mediatek/mt8173.dtsi index 5891b7151432..dec5e4113ce4 100644 --- a/arch/arm64/boot/dts/mediatek/mt8173.dtsi +++ b/arch/arm64/boot/dts/mediatek/mt8173.dtsi @@ -238,21 +238,21 @@ cpu_on = <0x84000003>; }; - clk26m: oscillator@0 { + clk26m: oscillator0 { compatible = "fixed-clock"; #clock-cells = <0>; clock-frequency = <26000000>; clock-output-names = "clk26m"; }; - clk32k: oscillator@1 { + clk32k: oscillator1 { compatible = "fixed-clock"; #clock-cells = <0>; clock-frequency = <32000>; clock-output-names = "clk32k"; }; - cpum_ck: oscillator@2 { + cpum_ck: oscillator2 { compatible = "fixed-clock"; #clock-cells = <0>; clock-frequency = <0>; @@ -268,19 +268,19 @@ sustainable-power = <1500>; /* milliwatts */ trips { - threshold: trip-point@0 { + threshold: trip-point0 { temperature = <68000>; hysteresis = <2000>; type = "passive"; }; - target: trip-point@1 { + target: trip-point1 { temperature = <85000>; hysteresis = <2000>; type = "passive"; }; - cpu_crit: cpu_crit@0 { + cpu_crit: cpu_crit0 { temperature = <115000>; hysteresis = <2000>; type = "critical"; @@ -288,13 +288,13 @@ }; cooling-maps { - map@0 { + map0 { trip = <&target>; cooling-device = <&cpu0 0 0>, <&cpu1 0 0>; contribution = <3072>; }; - map@1 { + map1 { trip = <&target>; cooling-device = <&cpu2 0 0>, <&cpu3 0 0>; @@ -308,7 +308,7 @@ #address-cells = <2>; #size-cells = <2>; ranges; - vpu_dma_reserved: vpu_dma_mem_region { + vpu_dma_reserved: vpu_dma_mem_region@b7000000 { compatible = "shared-dma-pool"; reg = <0 0xb7000000 0 0x500000>; alignment = <0x1000>; @@ -360,7 +360,7 @@ reg = <0 0x10005000 0 0x1000>; }; - pio: pinctrl@10005000 { + pio: pinctrl@1000b000 { compatible = "mediatek,mt8173-pinctrl"; reg = <0 0x1000b000 0 0x1000>; mediatek,pctl-regmap = <&syscfg_pctl_a>; @@ -567,7 +567,7 @@ status = "disabled"; }; - gic: interrupt-controller@10220000 { + gic: interrupt-controller@10221000 { compatible = "arm,gic-400"; #interrupt-cells = <3>; interrupt-parent = <&gic>; diff --git a/arch/arm64/boot/dts/nvidia/tegra186.dtsi b/arch/arm64/boot/dts/nvidia/tegra186.dtsi index 47cd831fcf44..9abf0cb1dd67 100644 --- a/arch/arm64/boot/dts/nvidia/tegra186.dtsi +++ b/arch/arm64/boot/dts/nvidia/tegra186.dtsi @@ 
-309,8 +309,9 @@ compatible = "nvidia,tegra186-sdhci"; reg = <0x0 0x03400000 0x0 0x10000>; interrupts = ; - clocks = <&bpmp TEGRA186_CLK_SDMMC1>; - clock-names = "sdhci"; + clocks = <&bpmp TEGRA186_CLK_SDMMC1>, + <&bpmp TEGRA186_CLK_SDMMC_LEGACY_TM>; + clock-names = "sdhci", "tmclk"; resets = <&bpmp TEGRA186_RESET_SDMMC1>; reset-names = "sdhci"; iommus = <&smmu TEGRA186_SID_SDMMC1>; @@ -335,8 +336,9 @@ compatible = "nvidia,tegra186-sdhci"; reg = <0x0 0x03420000 0x0 0x10000>; interrupts = ; - clocks = <&bpmp TEGRA186_CLK_SDMMC2>; - clock-names = "sdhci"; + clocks = <&bpmp TEGRA186_CLK_SDMMC2>, + <&bpmp TEGRA186_CLK_SDMMC_LEGACY_TM>; + clock-names = "sdhci", "tmclk"; resets = <&bpmp TEGRA186_RESET_SDMMC2>; reset-names = "sdhci"; iommus = <&smmu TEGRA186_SID_SDMMC2>; @@ -356,8 +358,9 @@ compatible = "nvidia,tegra186-sdhci"; reg = <0x0 0x03440000 0x0 0x10000>; interrupts = ; - clocks = <&bpmp TEGRA186_CLK_SDMMC3>; - clock-names = "sdhci"; + clocks = <&bpmp TEGRA186_CLK_SDMMC3>, + <&bpmp TEGRA186_CLK_SDMMC_LEGACY_TM>; + clock-names = "sdhci", "tmclk"; resets = <&bpmp TEGRA186_RESET_SDMMC3>; reset-names = "sdhci"; iommus = <&smmu TEGRA186_SID_SDMMC3>; @@ -379,8 +382,9 @@ compatible = "nvidia,tegra186-sdhci"; reg = <0x0 0x03460000 0x0 0x10000>; interrupts = ; - clocks = <&bpmp TEGRA186_CLK_SDMMC4>; - clock-names = "sdhci"; + clocks = <&bpmp TEGRA186_CLK_SDMMC4>, + <&bpmp TEGRA186_CLK_SDMMC_LEGACY_TM>; + clock-names = "sdhci", "tmclk"; assigned-clocks = <&bpmp TEGRA186_CLK_SDMMC4>, <&bpmp TEGRA186_CLK_PLLC4_VCO>; assigned-clock-parents = <&bpmp TEGRA186_CLK_PLLC4_VCO>; diff --git a/arch/arm64/boot/dts/nvidia/tegra194-p2888.dtsi b/arch/arm64/boot/dts/nvidia/tegra194-p2888.dtsi index 02909a48dfcd..7899759a12f8 100644 --- a/arch/arm64/boot/dts/nvidia/tegra194-p2888.dtsi +++ b/arch/arm64/boot/dts/nvidia/tegra194-p2888.dtsi @@ -32,7 +32,7 @@ phy-reset-gpios = <&gpio TEGRA194_MAIN_GPIO(G, 5) GPIO_ACTIVE_LOW>; phy-handle = <&phy>; - phy-mode = "rgmii"; + phy-mode = "rgmii-id"; mdio { #address-cells = <1>; diff --git a/arch/arm64/boot/dts/nvidia/tegra194.dtsi b/arch/arm64/boot/dts/nvidia/tegra194.dtsi index 457b815d57f4..5728255bd0c1 100644 --- a/arch/arm64/boot/dts/nvidia/tegra194.dtsi +++ b/arch/arm64/boot/dts/nvidia/tegra194.dtsi @@ -403,8 +403,9 @@ compatible = "nvidia,tegra194-sdhci", "nvidia,tegra186-sdhci"; reg = <0x03400000 0x10000>; interrupts = ; - clocks = <&bpmp TEGRA194_CLK_SDMMC1>; - clock-names = "sdhci"; + clocks = <&bpmp TEGRA194_CLK_SDMMC1>, + <&bpmp TEGRA194_CLK_SDMMC_LEGACY_TM>; + clock-names = "sdhci", "tmclk"; resets = <&bpmp TEGRA194_RESET_SDMMC1>; reset-names = "sdhci"; nvidia,pad-autocal-pull-up-offset-3v3-timeout = @@ -425,8 +426,9 @@ compatible = "nvidia,tegra194-sdhci", "nvidia,tegra186-sdhci"; reg = <0x03440000 0x10000>; interrupts = ; - clocks = <&bpmp TEGRA194_CLK_SDMMC3>; - clock-names = "sdhci"; + clocks = <&bpmp TEGRA194_CLK_SDMMC3>, + <&bpmp TEGRA194_CLK_SDMMC_LEGACY_TM>; + clock-names = "sdhci", "tmclk"; resets = <&bpmp TEGRA194_RESET_SDMMC3>; reset-names = "sdhci"; nvidia,pad-autocal-pull-up-offset-1v8 = <0x00>; @@ -448,8 +450,9 @@ compatible = "nvidia,tegra194-sdhci", "nvidia,tegra186-sdhci"; reg = <0x03460000 0x10000>; interrupts = ; - clocks = <&bpmp TEGRA194_CLK_SDMMC4>; - clock-names = "sdhci"; + clocks = <&bpmp TEGRA194_CLK_SDMMC4>, + <&bpmp TEGRA194_CLK_SDMMC_LEGACY_TM>; + clock-names = "sdhci", "tmclk"; assigned-clocks = <&bpmp TEGRA194_CLK_SDMMC4>, <&bpmp TEGRA194_CLK_PLLC4>; assigned-clock-parents = @@ -1192,7 +1195,7 @@ bus-range = <0x0 0xff>; ranges = 
<0x81000000 0x0 0x30100000 0x0 0x30100000 0x0 0x00100000 /* downstream I/O (1MB) */ - 0xc2000000 0x12 0x00000000 0x12 0x00000000 0x0 0x30000000 /* prefetchable memory (768MB) */ + 0xc3000000 0x12 0x00000000 0x12 0x00000000 0x0 0x30000000 /* prefetchable memory (768MB) */ 0x82000000 0x0 0x40000000 0x12 0x30000000 0x0 0x10000000>; /* non-prefetchable memory (256MB) */ }; @@ -1238,7 +1241,7 @@ bus-range = <0x0 0xff>; ranges = <0x81000000 0x0 0x32100000 0x0 0x32100000 0x0 0x00100000 /* downstream I/O (1MB) */ - 0xc2000000 0x12 0x40000000 0x12 0x40000000 0x0 0x30000000 /* prefetchable memory (768MB) */ + 0xc3000000 0x12 0x40000000 0x12 0x40000000 0x0 0x30000000 /* prefetchable memory (768MB) */ 0x82000000 0x0 0x40000000 0x12 0x70000000 0x0 0x10000000>; /* non-prefetchable memory (256MB) */ }; @@ -1284,7 +1287,7 @@ bus-range = <0x0 0xff>; ranges = <0x81000000 0x0 0x34100000 0x0 0x34100000 0x0 0x00100000 /* downstream I/O (1MB) */ - 0xc2000000 0x12 0x80000000 0x12 0x80000000 0x0 0x30000000 /* prefetchable memory (768MB) */ + 0xc3000000 0x12 0x80000000 0x12 0x80000000 0x0 0x30000000 /* prefetchable memory (768MB) */ 0x82000000 0x0 0x40000000 0x12 0xb0000000 0x0 0x10000000>; /* non-prefetchable memory (256MB) */ }; @@ -1330,7 +1333,7 @@ bus-range = <0x0 0xff>; ranges = <0x81000000 0x0 0x36100000 0x0 0x36100000 0x0 0x00100000 /* downstream I/O (1MB) */ - 0xc2000000 0x14 0x00000000 0x14 0x00000000 0x3 0x40000000 /* prefetchable memory (13GB) */ + 0xc3000000 0x14 0x00000000 0x14 0x00000000 0x3 0x40000000 /* prefetchable memory (13GB) */ 0x82000000 0x0 0x40000000 0x17 0x40000000 0x0 0xc0000000>; /* non-prefetchable memory (3GB) */ }; @@ -1376,7 +1379,7 @@ bus-range = <0x0 0xff>; ranges = <0x81000000 0x0 0x38100000 0x0 0x38100000 0x0 0x00100000 /* downstream I/O (1MB) */ - 0xc2000000 0x18 0x00000000 0x18 0x00000000 0x3 0x40000000 /* prefetchable memory (13GB) */ + 0xc3000000 0x18 0x00000000 0x18 0x00000000 0x3 0x40000000 /* prefetchable memory (13GB) */ 0x82000000 0x0 0x40000000 0x1b 0x40000000 0x0 0xc0000000>; /* non-prefetchable memory (3GB) */ }; @@ -1426,7 +1429,7 @@ bus-range = <0x0 0xff>; ranges = <0x81000000 0x0 0x3a100000 0x0 0x3a100000 0x0 0x00100000 /* downstream I/O (1MB) */ - 0xc2000000 0x1c 0x00000000 0x1c 0x00000000 0x3 0x40000000 /* prefetchable memory (13GB) */ + 0xc3000000 0x1c 0x00000000 0x1c 0x00000000 0x3 0x40000000 /* prefetchable memory (13GB) */ 0x82000000 0x0 0x40000000 0x1f 0x40000000 0x0 0xc0000000>; /* non-prefetchable memory (3GB) */ }; diff --git a/arch/arm64/boot/dts/nvidia/tegra210.dtsi b/arch/arm64/boot/dts/nvidia/tegra210.dtsi index 659753118e96..078d2506365c 100644 --- a/arch/arm64/boot/dts/nvidia/tegra210.dtsi +++ b/arch/arm64/boot/dts/nvidia/tegra210.dtsi @@ -1116,8 +1116,9 @@ compatible = "nvidia,tegra210-sdhci", "nvidia,tegra124-sdhci"; reg = <0x0 0x700b0000 0x0 0x200>; interrupts = ; - clocks = <&tegra_car TEGRA210_CLK_SDMMC1>; - clock-names = "sdhci"; + clocks = <&tegra_car TEGRA210_CLK_SDMMC1>, + <&tegra_car TEGRA210_CLK_SDMMC_LEGACY>; + clock-names = "sdhci", "tmclk"; resets = <&tegra_car 14>; reset-names = "sdhci"; pinctrl-names = "sdmmc-3v3", "sdmmc-1v8", @@ -1144,8 +1145,9 @@ compatible = "nvidia,tegra210-sdhci", "nvidia,tegra124-sdhci"; reg = <0x0 0x700b0200 0x0 0x200>; interrupts = ; - clocks = <&tegra_car TEGRA210_CLK_SDMMC2>; - clock-names = "sdhci"; + clocks = <&tegra_car TEGRA210_CLK_SDMMC2>, + <&tegra_car TEGRA210_CLK_SDMMC_LEGACY>; + clock-names = "sdhci", "tmclk"; resets = <&tegra_car 9>; reset-names = "sdhci"; pinctrl-names = "sdmmc-1v8-drv"; @@ 
-1161,8 +1163,9 @@ compatible = "nvidia,tegra210-sdhci", "nvidia,tegra124-sdhci"; reg = <0x0 0x700b0400 0x0 0x200>; interrupts = ; - clocks = <&tegra_car TEGRA210_CLK_SDMMC3>; - clock-names = "sdhci"; + clocks = <&tegra_car TEGRA210_CLK_SDMMC3>, + <&tegra_car TEGRA210_CLK_SDMMC_LEGACY>; + clock-names = "sdhci", "tmclk"; resets = <&tegra_car 69>; reset-names = "sdhci"; pinctrl-names = "sdmmc-3v3", "sdmmc-1v8", @@ -1184,8 +1187,9 @@ compatible = "nvidia,tegra210-sdhci", "nvidia,tegra124-sdhci"; reg = <0x0 0x700b0600 0x0 0x200>; interrupts = ; - clocks = <&tegra_car TEGRA210_CLK_SDMMC4>; - clock-names = "sdhci"; + clocks = <&tegra_car TEGRA210_CLK_SDMMC4>, + <&tegra_car TEGRA210_CLK_SDMMC_LEGACY>; + clock-names = "sdhci", "tmclk"; resets = <&tegra_car 15>; reset-names = "sdhci"; pinctrl-names = "sdmmc-3v3-drv", "sdmmc-1v8-drv"; diff --git a/arch/arm64/boot/dts/qcom/msm8916-pins.dtsi b/arch/arm64/boot/dts/qcom/msm8916-pins.dtsi index 242aaea68804..38c0d74767e3 100644 --- a/arch/arm64/boot/dts/qcom/msm8916-pins.dtsi +++ b/arch/arm64/boot/dts/qcom/msm8916-pins.dtsi @@ -508,7 +508,7 @@ pins = "gpio63", "gpio64", "gpio65", "gpio66", "gpio67", "gpio68"; drive-strength = <8>; - bias-pull-none; + bias-disable; }; }; cdc_pdm_lines_sus: pdm_lines_off { @@ -521,7 +521,7 @@ pins = "gpio63", "gpio64", "gpio65", "gpio66", "gpio67", "gpio68"; drive-strength = <2>; - bias-disable; + bias-pull-down; }; }; }; @@ -537,7 +537,7 @@ pins = "gpio113", "gpio114", "gpio115", "gpio116"; drive-strength = <8>; - bias-pull-none; + bias-disable; }; }; @@ -565,7 +565,7 @@ pinconf { pins = "gpio110"; drive-strength = <8>; - bias-pull-none; + bias-disable; }; }; @@ -591,7 +591,7 @@ pinconf { pins = "gpio116"; drive-strength = <8>; - bias-pull-none; + bias-disable; }; }; ext_mclk_tlmm_lines_sus: mclk_lines_off { @@ -619,7 +619,7 @@ pins = "gpio112", "gpio117", "gpio118", "gpio119"; drive-strength = <8>; - bias-pull-none; + bias-disable; }; }; ext_sec_tlmm_lines_sus: tlmm_lines_off { diff --git a/arch/arm64/boot/dts/qcom/msm8916.dtsi b/arch/arm64/boot/dts/qcom/msm8916.dtsi index 5ea9fb8f2f87..340da154d4e3 100644 --- a/arch/arm64/boot/dts/qcom/msm8916.dtsi +++ b/arch/arm64/boot/dts/qcom/msm8916.dtsi @@ -212,7 +212,7 @@ thermal-sensors = <&tsens 3>; trips { - cpu2_3_alert0: trip-point@0 { + cpu2_3_alert0: trip-point0 { temperature = <75000>; hysteresis = <2000>; type = "passive"; @@ -242,7 +242,7 @@ thermal-sensors = <&tsens 2>; trips { - gpu_alert0: trip-point@0 { + gpu_alert0: trip-point0 { temperature = <75000>; hysteresis = <2000>; type = "passive"; @@ -262,7 +262,7 @@ thermal-sensors = <&tsens 1>; trips { - cam_alert0: trip-point@0 { + cam_alert0: trip-point0 { temperature = <75000>; hysteresis = <2000>; type = "hot"; @@ -277,7 +277,7 @@ thermal-sensors = <&tsens 0>; trips { - modem_alert0: trip-point@0 { + modem_alert0: trip-point0 { temperature = <85000>; hysteresis = <2000>; type = "hot"; diff --git a/arch/arm64/boot/dts/qcom/msm8996.dtsi b/arch/arm64/boot/dts/qcom/msm8996.dtsi index fbb8ce78f95b..d303df3887d9 100644 --- a/arch/arm64/boot/dts/qcom/msm8996.dtsi +++ b/arch/arm64/boot/dts/qcom/msm8996.dtsi @@ -1681,16 +1681,16 @@ "csi_clk_mux", "vfe0", "vfe1"; - interrupts = , - , - , - , - , - , - , - , - , - ; + interrupts = , + , + , + , + , + , + , + , + , + ; interrupt-names = "csiphy0", "csiphy1", "csiphy2", diff --git a/arch/arm64/boot/dts/qcom/pm8150.dtsi b/arch/arm64/boot/dts/qcom/pm8150.dtsi index b6e304748a57..c0b197458665 100644 --- a/arch/arm64/boot/dts/qcom/pm8150.dtsi +++ 
b/arch/arm64/boot/dts/qcom/pm8150.dtsi @@ -73,18 +73,8 @@ reg = <0xc000>; gpio-controller; #gpio-cells = <2>; - interrupts = <0x0 0xc0 0x0 IRQ_TYPE_NONE>, - <0x0 0xc1 0x0 IRQ_TYPE_NONE>, - <0x0 0xc2 0x0 IRQ_TYPE_NONE>, - <0x0 0xc3 0x0 IRQ_TYPE_NONE>, - <0x0 0xc4 0x0 IRQ_TYPE_NONE>, - <0x0 0xc5 0x0 IRQ_TYPE_NONE>, - <0x0 0xc6 0x0 IRQ_TYPE_NONE>, - <0x0 0xc7 0x0 IRQ_TYPE_NONE>, - <0x0 0xc8 0x0 IRQ_TYPE_NONE>, - <0x0 0xc9 0x0 IRQ_TYPE_NONE>, - <0x0 0xca 0x0 IRQ_TYPE_NONE>, - <0x0 0xcb 0x0 IRQ_TYPE_NONE>; + interrupt-controller; + #interrupt-cells = <2>; }; }; diff --git a/arch/arm64/boot/dts/qcom/pm8150b.dtsi b/arch/arm64/boot/dts/qcom/pm8150b.dtsi index 322379d5c31f..40b5d75a4a1d 100644 --- a/arch/arm64/boot/dts/qcom/pm8150b.dtsi +++ b/arch/arm64/boot/dts/qcom/pm8150b.dtsi @@ -62,18 +62,8 @@ reg = <0xc000>; gpio-controller; #gpio-cells = <2>; - interrupts = <0x2 0xc0 0x0 IRQ_TYPE_NONE>, - <0x2 0xc1 0x0 IRQ_TYPE_NONE>, - <0x2 0xc2 0x0 IRQ_TYPE_NONE>, - <0x2 0xc3 0x0 IRQ_TYPE_NONE>, - <0x2 0xc4 0x0 IRQ_TYPE_NONE>, - <0x2 0xc5 0x0 IRQ_TYPE_NONE>, - <0x2 0xc6 0x0 IRQ_TYPE_NONE>, - <0x2 0xc7 0x0 IRQ_TYPE_NONE>, - <0x2 0xc8 0x0 IRQ_TYPE_NONE>, - <0x2 0xc9 0x0 IRQ_TYPE_NONE>, - <0x2 0xca 0x0 IRQ_TYPE_NONE>, - <0x2 0xcb 0x0 IRQ_TYPE_NONE>; + interrupt-controller; + #interrupt-cells = <2>; }; }; diff --git a/arch/arm64/boot/dts/qcom/pm8150l.dtsi b/arch/arm64/boot/dts/qcom/pm8150l.dtsi index eb0e9a090e42..cf05e0685d10 100644 --- a/arch/arm64/boot/dts/qcom/pm8150l.dtsi +++ b/arch/arm64/boot/dts/qcom/pm8150l.dtsi @@ -56,18 +56,8 @@ reg = <0xc000>; gpio-controller; #gpio-cells = <2>; - interrupts = <0x4 0xc0 0x0 IRQ_TYPE_NONE>, - <0x4 0xc1 0x0 IRQ_TYPE_NONE>, - <0x4 0xc2 0x0 IRQ_TYPE_NONE>, - <0x4 0xc3 0x0 IRQ_TYPE_NONE>, - <0x4 0xc4 0x0 IRQ_TYPE_NONE>, - <0x4 0xc5 0x0 IRQ_TYPE_NONE>, - <0x4 0xc6 0x0 IRQ_TYPE_NONE>, - <0x4 0xc7 0x0 IRQ_TYPE_NONE>, - <0x4 0xc8 0x0 IRQ_TYPE_NONE>, - <0x4 0xc9 0x0 IRQ_TYPE_NONE>, - <0x4 0xca 0x0 IRQ_TYPE_NONE>, - <0x4 0xcb 0x0 IRQ_TYPE_NONE>; + interrupt-controller; + #interrupt-cells = <2>; }; }; diff --git a/arch/arm64/boot/dts/rockchip/rk3368-lion.dtsi b/arch/arm64/boot/dts/rockchip/rk3368-lion.dtsi index e17311e09082..216aafd90e7f 100644 --- a/arch/arm64/boot/dts/rockchip/rk3368-lion.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3368-lion.dtsi @@ -156,7 +156,7 @@ pinctrl-0 = <&rgmii_pins>; snps,reset-active-low; snps,reset-delays-us = <0 10000 50000>; - snps,reset-gpio = <&gpio3 RK_PB3 GPIO_ACTIVE_HIGH>; + snps,reset-gpio = <&gpio3 RK_PB3 GPIO_ACTIVE_LOW>; tx_delay = <0x10>; rx_delay = <0x10>; status = "okay"; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi index 62ea288a1a70..45b86933c6ea 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi @@ -101,7 +101,7 @@ vcc5v0_host: vcc5v0-host-regulator { compatible = "regulator-fixed"; - gpio = <&gpio4 RK_PA3 GPIO_ACTIVE_HIGH>; + gpio = <&gpio4 RK_PA3 GPIO_ACTIVE_LOW>; enable-active-low; pinctrl-names = "default"; pinctrl-0 = <&vcc5v0_host_en>; @@ -157,7 +157,7 @@ phy-mode = "rgmii"; pinctrl-names = "default"; pinctrl-0 = <&rgmii_pins>; - snps,reset-gpio = <&gpio3 RK_PC0 GPIO_ACTIVE_HIGH>; + snps,reset-gpio = <&gpio3 RK_PC0 GPIO_ACTIVE_LOW>; snps,reset-active-low; snps,reset-delays-us = <0 10000 50000>; tx_delay = <0x10>; diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h index 5e5dc05d63a0..619db9b4c9d5 100644 --- a/arch/arm64/include/asm/alternative.h +++ 
b/arch/arm64/include/asm/alternative.h @@ -73,13 +73,13 @@ static inline void apply_alternatives_module(void *start, size_t length) { } ".pushsection .altinstructions,\"a\"\n" \ ALTINSTR_ENTRY(feature) \ ".popsection\n" \ - ".pushsection .altinstr_replacement, \"a\"\n" \ + ".subsection 1\n" \ "663:\n\t" \ newinstr "\n" \ "664:\n\t" \ - ".popsection\n\t" \ ".org . - (664b-663b) + (662b-661b)\n\t" \ - ".org . - (662b-661b) + (664b-663b)\n" \ + ".org . - (662b-661b) + (664b-663b)\n\t" \ + ".previous\n" \ ".endif\n" #define __ALTERNATIVE_CFG_CB(oldinstr, feature, cfg_enabled, cb) \ @@ -117,9 +117,9 @@ static inline void apply_alternatives_module(void *start, size_t length) { } 662: .pushsection .altinstructions, "a" altinstruction_entry 661b, 663f, \cap, 662b-661b, 664f-663f .popsection - .pushsection .altinstr_replacement, "ax" + .subsection 1 663: \insn2 -664: .popsection +664: .previous .org . - (664b-663b) + (662b-661b) .org . - (662b-661b) + (664b-663b) .endif @@ -160,7 +160,7 @@ static inline void apply_alternatives_module(void *start, size_t length) { } .pushsection .altinstructions, "a" altinstruction_entry 663f, 661f, \cap, 664f-663f, 662f-661f .popsection - .pushsection .altinstr_replacement, "ax" + .subsection 1 .align 2 /* So GAS knows label 661 is suitably aligned */ 661: .endm @@ -179,9 +179,9 @@ static inline void apply_alternatives_module(void *start, size_t length) { } .macro alternative_else 662: .if .Lasm_alt_mode==0 - .pushsection .altinstr_replacement, "ax" + .subsection 1 .else - .popsection + .previous .endif 663: .endm @@ -192,7 +192,7 @@ static inline void apply_alternatives_module(void *start, size_t length) { } .macro alternative_endif 664: .if .Lasm_alt_mode==0 - .popsection + .previous .endif .org . - (664b-663b) + (662b-661b) .org . - (662b-661b) + (664b-663b) diff --git a/arch/arm64/include/asm/arch_gicv3.h b/arch/arm64/include/asm/arch_gicv3.h index 89e4c8b79349..ee9bdaa40532 100644 --- a/arch/arm64/include/asm/arch_gicv3.h +++ b/arch/arm64/include/asm/arch_gicv3.h @@ -109,7 +109,7 @@ static inline u32 gic_read_pmr(void) return read_sysreg_s(SYS_ICC_PMR_EL1); } -static inline void gic_write_pmr(u32 val) +static __always_inline void gic_write_pmr(u32 val) { write_sysreg_s(val, SYS_ICC_PMR_EL1); } diff --git a/arch/arm64/include/asm/arch_timer.h b/arch/arm64/include/asm/arch_timer.h index 7ae54d7d333a..9f0ec21d6327 100644 --- a/arch/arm64/include/asm/arch_timer.h +++ b/arch/arm64/include/asm/arch_timer.h @@ -58,6 +58,7 @@ struct arch_timer_erratum_workaround { u64 (*read_cntvct_el0)(void); int (*set_next_event_phys)(unsigned long, struct clock_event_device *); int (*set_next_event_virt)(unsigned long, struct clock_event_device *); + bool disable_compat_vdso; }; DECLARE_PER_CPU(const struct arch_timer_erratum_workaround *, diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h index 665c78e0665a..3e7dda6f1ab1 100644 --- a/arch/arm64/include/asm/cacheflush.h +++ b/arch/arm64/include/asm/cacheflush.h @@ -79,7 +79,7 @@ static inline void flush_icache_range(unsigned long start, unsigned long end) * IPI all online CPUs so that they undergo a context synchronization * event and are forced to refetch the new instructions. */ -#ifdef CONFIG_KGDB + /* * KGDB performs cache maintenance with interrupts disabled, so we * will deadlock trying to IPI the secondary CPUs. In theory, we can @@ -89,9 +89,9 @@ static inline void flush_icache_range(unsigned long start, unsigned long end) * the patching operation, so we don't need extra IPIs here anyway. 
* In which case, add a KGDB-specific bodge and return early. */ - if (kgdb_connected && irqs_disabled()) + if (in_dbg_master()) return; -#endif + kick_all_cpus_sync(); } diff --git a/arch/arm64/include/asm/checksum.h b/arch/arm64/include/asm/checksum.h index d064a50deb5f..5665a3fc14be 100644 --- a/arch/arm64/include/asm/checksum.h +++ b/arch/arm64/include/asm/checksum.h @@ -19,16 +19,17 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl) { __uint128_t tmp; u64 sum; + int n = ihl; /* we want it signed */ tmp = *(const __uint128_t *)iph; iph += 16; - ihl -= 4; + n -= 4; tmp += ((tmp >> 64) | (tmp << 64)); sum = tmp >> 64; do { sum += *(const u32 *)iph; iph += 4; - } while (--ihl); + } while (--n > 0); sum += ((sum >> 32) | (sum << 32)); return csum_fold((__force u32)(sum >> 32)); diff --git a/arch/arm64/include/asm/clocksource.h b/arch/arm64/include/asm/clocksource.h index 0ece64a26c8c..0c7910447235 100644 --- a/arch/arm64/include/asm/clocksource.h +++ b/arch/arm64/include/asm/clocksource.h @@ -2,8 +2,11 @@ #ifndef _ASM_CLOCKSOURCE_H #define _ASM_CLOCKSOURCE_H +#include + struct arch_clocksource_data { - bool vdso_direct; /* Usable for direct VDSO access? */ + /* Usable for direct VDSO access? */ + enum vdso_arch_clockmode clock_mode; }; #endif diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index 9cde5d2e768f..10d3048dec7c 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -601,7 +601,7 @@ static inline bool system_supports_generic_auth(void) cpus_have_const_cap(ARM64_HAS_GENERIC_AUTH_IMP_DEF)); } -static inline bool system_uses_irq_prio_masking(void) +static __always_inline bool system_uses_irq_prio_masking(void) { return IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && cpus_have_const_cap(ARM64_HAS_IRQ_PRIO_MASKING); diff --git a/arch/arm64/include/asm/debug-monitors.h b/arch/arm64/include/asm/debug-monitors.h index 7619f473155f..d825e3585e28 100644 --- a/arch/arm64/include/asm/debug-monitors.h +++ b/arch/arm64/include/asm/debug-monitors.h @@ -109,6 +109,8 @@ void disable_debug_monitors(enum dbg_active_el el); void user_rewind_single_step(struct task_struct *task); void user_fastforward_single_step(struct task_struct *task); +void user_regs_reset_single_step(struct user_pt_regs *regs, + struct task_struct *task); void kernel_enable_single_step(struct pt_regs *regs); void kernel_disable_single_step(void); diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h index ddf9d762ac62..a4ffd9b55e72 100644 --- a/arch/arm64/include/asm/kvm_arm.h +++ b/arch/arm64/include/asm/kvm_arm.h @@ -72,11 +72,12 @@ * IMO: Override CPSR.I and enable signaling with VI * FMO: Override CPSR.F and enable signaling with VF * SWIO: Turn set/way invalidates into set/way clean+invalidate + * PTW: Take a stage2 fault if a stage1 walk steps in device memory */ #define HCR_GUEST_FLAGS (HCR_TSC | HCR_TSW | HCR_TWE | HCR_TWI | HCR_VM | \ HCR_TVM | HCR_BSU_IS | HCR_FB | HCR_TAC | \ HCR_AMO | HCR_SWIO | HCR_TIDCP | HCR_RW | HCR_TLOR | \ - HCR_FMO | HCR_IMO) + HCR_FMO | HCR_IMO | HCR_PTW ) #define HCR_VIRT_EXCP_MASK (HCR_VSE | HCR_VI | HCR_VF) #define HCR_HOST_NVHE_FLAGS (HCR_RW | HCR_API | HCR_APK) #define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H) diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h index 44a243754c1b..64d79b288434 100644 --- a/arch/arm64/include/asm/kvm_asm.h +++ b/arch/arm64/include/asm/kvm_asm.h @@ -88,6 +88,34 @@ extern u32 __kvm_get_mdcr_el2(void); 
*__hyp_this_cpu_ptr(sym); \ }) +#define __KVM_EXTABLE(from, to) \ + " .pushsection __kvm_ex_table, \"a\"\n" \ + " .align 3\n" \ + " .long (" #from " - .), (" #to " - .)\n" \ + " .popsection\n" + + +#define __kvm_at(at_op, addr) \ +( { \ + int __kvm_at_err = 0; \ + u64 spsr, elr; \ + asm volatile( \ + " mrs %1, spsr_el2\n" \ + " mrs %2, elr_el2\n" \ + "1: at "at_op", %3\n" \ + " isb\n" \ + " b 9f\n" \ + "2: msr spsr_el2, %1\n" \ + " msr elr_el2, %2\n" \ + " mov %w0, %4\n" \ + "9:\n" \ + __KVM_EXTABLE(1b, 2b) \ + : "+r" (__kvm_at_err), "=&r" (spsr), "=&r" (elr) \ + : "r" (addr), "i" (-EFAULT)); \ + __kvm_at_err; \ +} ) + + #else /* __ASSEMBLY__ */ .macro hyp_adr_this_cpu reg, sym, tmp @@ -113,6 +141,21 @@ extern u32 __kvm_get_mdcr_el2(void); kern_hyp_va \vcpu .endm +/* + * KVM extable for unexpected exceptions. + * In the same format as _asm_extable, but output to a different section so that + * it can be mapped to EL2. The KVM version is not sorted. The caller must + * ensure: + * x18 has the hypervisor value to allow any Shadow-Call-Stack instrumented + * code to write to it, and that SPSR_EL2 and ELR_EL2 are restored by the fixup. + */ +.macro _kvm_extable, from, to + .pushsection __kvm_ex_table, "a" + .align 3 + .long (\from - .), (\to - .) + .popsection +.endm + #endif #endif /* __ARM_KVM_ASM_H__ */ diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 0c3bd6aff6e9..d719c6b4dd81 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -427,7 +427,7 @@ int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu, #define KVM_ARCH_WANT_MMU_NOTIFIER int kvm_unmap_hva_range(struct kvm *kvm, - unsigned long start, unsigned long end); + unsigned long start, unsigned long end, unsigned flags); int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end); int kvm_test_age_hva(struct kvm *kvm, unsigned long hva); diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h index 9e570b1c5a7b..233f7828a6cd 100644 --- a/arch/arm64/include/asm/pgtable-prot.h +++ b/arch/arm64/include/asm/pgtable-prot.h @@ -55,7 +55,7 @@ #define PAGE_HYP __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_HYP_XN) #define PAGE_HYP_EXEC __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY) #define PAGE_HYP_RO __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY | PTE_HYP_XN) -#define PAGE_HYP_DEVICE __pgprot(PROT_DEVICE_nGnRE | PTE_HYP) +#define PAGE_HYP_DEVICE __pgprot(_PROT_DEFAULT | PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_HYP | PTE_HYP_XN) #define PAGE_S2_MEMATTR(attr) \ ({ \ diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index 8165b71792cf..cc9564ce57a0 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -461,6 +461,7 @@ extern pgd_t init_pg_dir[PTRS_PER_PGD]; extern pgd_t init_pg_end[]; extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; extern pgd_t idmap_pg_dir[PTRS_PER_PGD]; +extern pgd_t idmap_pg_end[]; extern pgd_t tramp_pg_dir[PTRS_PER_PGD]; extern void set_swapper_pgd(pgd_t *pgdp, pgd_t pgd); diff --git a/arch/arm64/include/asm/pointer_auth.h b/arch/arm64/include/asm/pointer_auth.h index 7a24bad1a58b..076a4157a74f 100644 --- a/arch/arm64/include/asm/pointer_auth.h +++ b/arch/arm64/include/asm/pointer_auth.h @@ -3,7 +3,6 @@ #define __ASM_POINTER_AUTH_H #include -#include #include #include @@ -30,6 +29,13 @@ struct ptrauth_keys { struct ptrauth_key apga; }; +/* + * Only include random.h once 
ptrauth_keys_* structures are defined + * to avoid yet another circular include hell (random.h * ends up + * including asm/smp.h, which requires ptrauth_keys_kernel). + */ +#include + static inline void ptrauth_keys_init(struct ptrauth_keys *keys) { if (system_supports_address_auth()) { diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h index a0c8a0b65259..0eadbf933e35 100644 --- a/arch/arm64/include/asm/smp.h +++ b/arch/arm64/include/asm/smp.h @@ -46,7 +46,12 @@ DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number); * Logical CPU mapping. */ extern u64 __cpu_logical_map[NR_CPUS]; -#define cpu_logical_map(cpu) __cpu_logical_map[cpu] +extern u64 cpu_logical_map(int cpu); + +static inline void set_cpu_logical_map(int cpu, u64 hwid) +{ + __cpu_logical_map[cpu] = hwid; +} struct seq_file; diff --git a/arch/arm64/include/asm/syscall.h b/arch/arm64/include/asm/syscall.h index 65299a2dcf9c..cfc0672013f6 100644 --- a/arch/arm64/include/asm/syscall.h +++ b/arch/arm64/include/asm/syscall.h @@ -34,6 +34,10 @@ static inline long syscall_get_error(struct task_struct *task, struct pt_regs *regs) { unsigned long error = regs->regs[0]; + + if (is_compat_thread(task_thread_info(task))) + error = sign_extend64(error, 31); + return IS_ERR_VALUE(error) ? error : 0; } @@ -47,7 +51,13 @@ static inline void syscall_set_return_value(struct task_struct *task, struct pt_regs *regs, int error, long val) { - regs->regs[0] = (long) error ? error : val; + if (error) + val = error; + + if (is_compat_thread(task_thread_info(task))) + val = lower_32_bits(val); + + regs->regs[0] = val; } #define SYSCALL_MAX_ARGS 6 diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h index f0cec4160136..4e3ed702bec7 100644 --- a/arch/arm64/include/asm/thread_info.h +++ b/arch/arm64/include/asm/thread_info.h @@ -91,6 +91,7 @@ void arch_release_task_struct(struct task_struct *tsk); #define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU) #define _TIF_UPROBE (1 << TIF_UPROBE) #define _TIF_FSCHECK (1 << TIF_FSCHECK) +#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP) #define _TIF_32BIT (1 << TIF_32BIT) #define _TIF_SVE (1 << TIF_SVE) diff --git a/arch/arm64/include/asm/vdso/clocksource.h b/arch/arm64/include/asm/vdso/clocksource.h new file mode 100644 index 000000000000..8019f616e1f7 --- /dev/null +++ b/arch/arm64/include/asm/vdso/clocksource.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_VDSOCLOCKSOURCE_H +#define __ASM_VDSOCLOCKSOURCE_H + +enum vdso_arch_clockmode { + /* vdso clocksource not usable */ + VDSO_CLOCKMODE_NONE, + /* vdso clocksource for both 32 and 64bit tasks */ + VDSO_CLOCKMODE_ARCHTIMER, + /* vdso clocksource for 64bit tasks only */ + VDSO_CLOCKMODE_ARCHTIMER_NOCOMPAT, +}; + +#endif diff --git a/arch/arm64/include/asm/vdso/compat_gettimeofday.h b/arch/arm64/include/asm/vdso/compat_gettimeofday.h index c50ee1b7d5cd..413d42e197c7 100644 --- a/arch/arm64/include/asm/vdso/compat_gettimeofday.h +++ b/arch/arm64/include/asm/vdso/compat_gettimeofday.h @@ -10,6 +10,7 @@ #include #include +#include #include #define __VDSO_USE_SYSCALL ULLONG_MAX @@ -117,10 +118,10 @@ static __always_inline u64 __arch_get_hw_counter(s32 clock_mode) u64 res; /* - * clock_mode == 0 implies that vDSO are enabled otherwise + * clock_mode == ARCHTIMER implies that vDSO are enabled otherwise * fallback on syscall. 
*/ - if (clock_mode) + if (clock_mode != VDSO_CLOCKMODE_ARCHTIMER) return __VDSO_USE_SYSCALL; /* diff --git a/arch/arm64/include/asm/vdso/gettimeofday.h b/arch/arm64/include/asm/vdso/gettimeofday.h index b08f476b72b4..ff83b8b574fc 100644 --- a/arch/arm64/include/asm/vdso/gettimeofday.h +++ b/arch/arm64/include/asm/vdso/gettimeofday.h @@ -10,6 +10,8 @@ #include #include +#include + #define __VDSO_USE_SYSCALL ULLONG_MAX #define VDSO_HAS_CLOCK_GETRES 1 @@ -71,10 +73,10 @@ static __always_inline u64 __arch_get_hw_counter(s32 clock_mode) u64 res; /* - * clock_mode == 0 implies that vDSO are enabled otherwise + * clock_mode != NONE implies that vDSO are enabled otherwise * fallback on syscall. */ - if (clock_mode) + if (clock_mode == VDSO_CLOCKMODE_NONE) return __VDSO_USE_SYSCALL; /* diff --git a/arch/arm64/include/asm/vdso/vsyscall.h b/arch/arm64/include/asm/vdso/vsyscall.h index 0c20a7c1bee5..e50f26741946 100644 --- a/arch/arm64/include/asm/vdso/vsyscall.h +++ b/arch/arm64/include/asm/vdso/vsyscall.h @@ -24,9 +24,7 @@ struct vdso_data *__arm64_get_k_vdso_data(void) static __always_inline int __arm64_get_clock_mode(struct timekeeper *tk) { - u32 use_syscall = !tk->tkr_mono.clock->archdata.vdso_direct; - - return use_syscall; + return tk->tkr_mono.clock->archdata.clock_mode; } #define __arch_get_clock_mode __arm64_get_clock_mode diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c index d1757ef1b1e7..73039949b5ce 100644 --- a/arch/arm64/kernel/alternative.c +++ b/arch/arm64/kernel/alternative.c @@ -43,20 +43,8 @@ bool alternative_is_applied(u16 cpufeature) */ static bool branch_insn_requires_update(struct alt_instr *alt, unsigned long pc) { - unsigned long replptr; - - if (kernel_text_address(pc)) - return true; - - replptr = (unsigned long)ALT_REPL_PTR(alt); - if (pc >= replptr && pc <= (replptr + alt->alt_len)) - return false; - - /* - * Branching into *another* alternate sequence is doomed, and - * we're not even trying to fix it up. - */ - BUG(); + unsigned long replptr = (unsigned long)ALT_REPL_PTR(alt); + return !(pc >= replptr && pc <= (replptr + alt->alt_len)); } #define align_down(x, a) ((unsigned long)(x) & ~(((unsigned long)(a)) - 1)) diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c index 0b2830379fe0..d2e738c45556 100644 --- a/arch/arm64/kernel/cpu_errata.c +++ b/arch/arm64/kernel/cpu_errata.c @@ -917,6 +917,12 @@ const struct arm64_cpu_capabilities arm64_errata[] = { .desc = "ARM erratum 1418040", .capability = ARM64_WORKAROUND_1418040, ERRATA_MIDR_RANGE_LIST(erratum_1418040_list), + /* + * We need to allow affected CPUs to come in late, but + * also need the non-affected CPUs to be able to come + * in at any point in time. Wonderful. + */ + .type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE, }, #endif #ifdef CONFIG_ARM64_ERRATUM_1165522 diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c index 48222a4760c2..d64a3c1e1b6b 100644 --- a/arch/arm64/kernel/debug-monitors.c +++ b/arch/arm64/kernel/debug-monitors.c @@ -141,17 +141,20 @@ postcore_initcall(debug_monitors_init); /* * Single step API and exception handling. 
*/ -static void set_regs_spsr_ss(struct pt_regs *regs) +static void set_user_regs_spsr_ss(struct user_pt_regs *regs) { regs->pstate |= DBG_SPSR_SS; } -NOKPROBE_SYMBOL(set_regs_spsr_ss); +NOKPROBE_SYMBOL(set_user_regs_spsr_ss); -static void clear_regs_spsr_ss(struct pt_regs *regs) +static void clear_user_regs_spsr_ss(struct user_pt_regs *regs) { regs->pstate &= ~DBG_SPSR_SS; } -NOKPROBE_SYMBOL(clear_regs_spsr_ss); +NOKPROBE_SYMBOL(clear_user_regs_spsr_ss); + +#define set_regs_spsr_ss(r) set_user_regs_spsr_ss(&(r)->user_regs) +#define clear_regs_spsr_ss(r) clear_user_regs_spsr_ss(&(r)->user_regs) static DEFINE_SPINLOCK(debug_hook_lock); static LIST_HEAD(user_step_hook); @@ -393,17 +396,26 @@ void user_rewind_single_step(struct task_struct *task) * If single step is active for this thread, then set SPSR.SS * to 1 to avoid returning to the active-pending state. */ - if (test_ti_thread_flag(task_thread_info(task), TIF_SINGLESTEP)) + if (test_tsk_thread_flag(task, TIF_SINGLESTEP)) set_regs_spsr_ss(task_pt_regs(task)); } NOKPROBE_SYMBOL(user_rewind_single_step); void user_fastforward_single_step(struct task_struct *task) { - if (test_ti_thread_flag(task_thread_info(task), TIF_SINGLESTEP)) + if (test_tsk_thread_flag(task, TIF_SINGLESTEP)) clear_regs_spsr_ss(task_pt_regs(task)); } +void user_regs_reset_single_step(struct user_pt_regs *regs, + struct task_struct *task) +{ + if (test_tsk_thread_flag(task, TIF_SINGLESTEP)) + set_user_regs_spsr_ss(regs); + else + clear_user_regs_spsr_ss(regs); +} + /* Kernel API */ void kernel_enable_single_step(struct pt_regs *regs) { diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c index 1765e5284994..04b982a2799e 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c @@ -338,7 +338,7 @@ static unsigned int find_supported_vector_length(unsigned int vl) return sve_vl_from_vq(__bit_to_vq(bit)); } -#ifdef CONFIG_SYSCTL +#if defined(CONFIG_ARM64_SVE) && defined(CONFIG_SYSCTL) static int sve_proc_do_default_vl(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, @@ -384,9 +384,9 @@ static int __init sve_sysctl_init(void) return 0; } -#else /* ! CONFIG_SYSCTL */ +#else /* ! (CONFIG_ARM64_SVE && CONFIG_SYSCTL) */ static int __init sve_sysctl_init(void) { return 0; } -#endif /* ! CONFIG_SYSCTL */ +#endif /* ! (CONFIG_ARM64_SVE && CONFIG_SYSCTL) */ #define ZREG(sve_state, vq, n) ((char *)(sve_state) + \ (SVE_SIG_ZREG_OFFSET(vq, n) - SVE_SIG_REGS_OFFSET)) diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index 989b1944cb71..bdb5ec341900 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -393,13 +393,19 @@ __create_page_tables: /* * Since the page tables have been populated with non-cacheable - * accesses (MMU disabled), invalidate the idmap and swapper page - * tables again to remove any speculatively loaded cache lines. + * accesses (MMU disabled), invalidate those tables again to + * remove any speculatively loaded cache lines. 
*/ + dmb sy + adrp x0, idmap_pg_dir + adrp x1, idmap_pg_end + sub x1, x1, x0 + bl __inval_dcache_area + + adrp x0, init_pg_dir adrp x1, init_pg_end sub x1, x1, x0 - dmb sy bl __inval_dcache_area ret x28 diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c index 38ee1514cd9c..b4a160795824 100644 --- a/arch/arm64/kernel/hw_breakpoint.c +++ b/arch/arm64/kernel/hw_breakpoint.c @@ -730,6 +730,27 @@ static u64 get_distance_from_watchpoint(unsigned long addr, u64 val, return 0; } +static int watchpoint_report(struct perf_event *wp, unsigned long addr, + struct pt_regs *regs) +{ + int step = is_default_overflow_handler(wp); + struct arch_hw_breakpoint *info = counter_arch_bp(wp); + + info->trigger = addr; + + /* + * If we triggered a user watchpoint from a uaccess routine, then + * handle the stepping ourselves since userspace really can't help + * us with this. + */ + if (!user_mode(regs) && info->ctrl.privilege == AARCH64_BREAKPOINT_EL0) + step = 1; + else + perf_bp_event(wp, regs); + + return step; +} + static int watchpoint_handler(unsigned long addr, unsigned int esr, struct pt_regs *regs) { @@ -739,7 +760,6 @@ static int watchpoint_handler(unsigned long addr, unsigned int esr, u64 val; struct perf_event *wp, **slots; struct debug_info *debug_info; - struct arch_hw_breakpoint *info; struct arch_hw_breakpoint_ctrl ctrl; slots = this_cpu_ptr(wp_on_reg); @@ -777,25 +797,13 @@ static int watchpoint_handler(unsigned long addr, unsigned int esr, if (dist != 0) continue; - info = counter_arch_bp(wp); - info->trigger = addr; - perf_bp_event(wp, regs); - - /* Do we need to handle the stepping? */ - if (is_default_overflow_handler(wp)) - step = 1; + step = watchpoint_report(wp, addr, regs); } - if (min_dist > 0 && min_dist != -1) { - /* No exact match found. */ - wp = slots[closest_match]; - info = counter_arch_bp(wp); - info->trigger = addr; - perf_bp_event(wp, regs); - /* Do we need to handle the stepping? */ - if (is_default_overflow_handler(wp)) - step = 1; - } + /* No exact match found? */ + if (min_dist > 0 && min_dist != -1) + step = watchpoint_report(slots[closest_match], addr, regs); + rcu_read_unlock(); if (!step) diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c index d801a7094076..a612da533ea2 100644 --- a/arch/arm64/kernel/insn.c +++ b/arch/arm64/kernel/insn.c @@ -1508,16 +1508,10 @@ static u32 aarch64_encode_immediate(u64 imm, u32 insn) { unsigned int immr, imms, n, ones, ror, esz, tmp; - u64 mask = ~0UL; - - /* Can't encode full zeroes or full ones */ - if (!imm || !~imm) - return AARCH64_BREAK_FAULT; + u64 mask; switch (variant) { case AARCH64_INSN_VARIANT_32BIT: - if (upper_32_bits(imm)) - return AARCH64_BREAK_FAULT; esz = 32; break; case AARCH64_INSN_VARIANT_64BIT: @@ -1529,6 +1523,12 @@ static u32 aarch64_encode_immediate(u64 imm, return AARCH64_BREAK_FAULT; } + mask = GENMASK(esz - 1, 0); + + /* Can't encode full zeroes, full ones, or value wider than the mask */ + if (!imm || imm == mask || imm & ~mask) + return AARCH64_BREAK_FAULT; + /* * Inverse of Replicate(). Try to spot a repeating pattern * with a pow2 stride. 
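The insn.c hunk above computes the element-size mask up front and rejects any immediate that is all zeroes, all ones, or wider than that mask, since none of these has an AArch64 logical-immediate encoding. A stand-alone sketch of the new check follows; the function name and the hand-expanded GENMASK() are illustrative only and not part of the patch.

    #include <stdbool.h>
    #include <stdint.h>

    /* Stand-alone model of the validity check added to aarch64_encode_immediate(). */
    static bool logical_imm_valid(uint64_t imm, unsigned int esz)
    {
            /* GENMASK(esz - 1, 0): the esz low bits set, e.g. 0xffffffff for esz == 32 */
            uint64_t mask = (esz == 64) ? ~0ULL : ((1ULL << esz) - 1);

            /*
             * All-zeroes, all-ones and values wider than the element size
             * cannot be encoded as a logical immediate.
             */
            return imm != 0 && imm != mask && (imm & ~mask) == 0;
    }

With the old code only the full 64-bit all-ones value was rejected, so a 32-bit immediate of 0xffffffff could slip through to the pattern search; deriving the mask from esz closes that hole.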
diff --git a/arch/arm64/kernel/kgdb.c b/arch/arm64/kernel/kgdb.c index 43119922341f..1a157ca33262 100644 --- a/arch/arm64/kernel/kgdb.c +++ b/arch/arm64/kernel/kgdb.c @@ -252,7 +252,7 @@ static int kgdb_step_brk_fn(struct pt_regs *regs, unsigned int esr) if (!kgdb_single_step) return DBG_HOOK_ERROR; - kgdb_handle_exception(1, SIGTRAP, 0, regs); + kgdb_handle_exception(0, SIGTRAP, 0, regs); return DBG_HOOK_HANDLED; } NOKPROBE_SYMBOL(kgdb_step_brk_fn); diff --git a/arch/arm64/kernel/module-plts.c b/arch/arm64/kernel/module-plts.c index b182442b87a3..426018ebb700 100644 --- a/arch/arm64/kernel/module-plts.c +++ b/arch/arm64/kernel/module-plts.c @@ -270,8 +270,7 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, mod->arch.core.plt_shndx = i; else if (!strcmp(secstrings + sechdrs[i].sh_name, ".init.plt")) mod->arch.init.plt_shndx = i; - else if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE) && - !strcmp(secstrings + sechdrs[i].sh_name, + else if (!strcmp(secstrings + sechdrs[i].sh_name, ".text.ftrace_trampoline")) tramp = sechdrs + i; else if (sechdrs[i].sh_type == SHT_SYMTAB) diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c index a0b4f1bca491..19128d994ee9 100644 --- a/arch/arm64/kernel/perf_event.c +++ b/arch/arm64/kernel/perf_event.c @@ -155,7 +155,7 @@ armv8pmu_events_sysfs_show(struct device *dev, pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr); - return sprintf(page, "event=0x%03llx\n", pmu_attr->id); + return sprintf(page, "event=0x%04llx\n", pmu_attr->id); } #define ARMV8_EVENT_ATTR(name, config) \ @@ -303,10 +303,13 @@ armv8pmu_event_attr_is_visible(struct kobject *kobj, test_bit(pmu_attr->id, cpu_pmu->pmceid_bitmap)) return attr->mode; - pmu_attr->id -= ARMV8_PMUV3_EXT_COMMON_EVENT_BASE; - if (pmu_attr->id < ARMV8_PMUV3_MAX_COMMON_EVENTS && - test_bit(pmu_attr->id, cpu_pmu->pmceid_ext_bitmap)) - return attr->mode; + if (pmu_attr->id >= ARMV8_PMUV3_EXT_COMMON_EVENT_BASE) { + u64 id = pmu_attr->id - ARMV8_PMUV3_EXT_COMMON_EVENT_BASE; + + if (id < ARMV8_PMUV3_MAX_COMMON_EVENTS && + test_bit(id, cpu_pmu->pmceid_ext_bitmap)) + return attr->mode; + } return 0; } diff --git a/arch/arm64/kernel/perf_regs.c b/arch/arm64/kernel/perf_regs.c index 0bbac612146e..666b225aeb3a 100644 --- a/arch/arm64/kernel/perf_regs.c +++ b/arch/arm64/kernel/perf_regs.c @@ -15,15 +15,34 @@ u64 perf_reg_value(struct pt_regs *regs, int idx) return 0; /* - * Compat (i.e. 32 bit) mode: - * - PC has been set in the pt_regs struct in kernel_entry, - * - Handle SP and LR here. + * Our handling of compat tasks (PERF_SAMPLE_REGS_ABI_32) is weird, but + * we're stuck with it for ABI compatibility reasons. + * + * For a 32-bit consumer inspecting a 32-bit task, it will look at + * the first 16 registers (see arch/arm/include/uapi/asm/perf_regs.h). + * These correspond directly to a prefix of the registers saved in our + * 'struct pt_regs', with the exception of the PC, so we copy that down + * (x15 corresponds to SP_hyp in the architecture). + * + * So far, so good. + * + * The oddity arises when a 64-bit consumer looks at a 32-bit task and + * asks for registers beyond PERF_REG_ARM_MAX. In this case, we return + * SP_usr, LR_usr and PC in the positions where the AArch64 SP, LR and + * PC registers would normally live. The initial idea was to allow a + * 64-bit unwinder to unwind a 32-bit task and, although it's not clear + * how well that works in practice, somebody might be relying on it. 
+ * + * At the time we make a sample, we don't know whether the consumer is + * 32-bit or 64-bit, so we have to cater for both possibilities. */ if (compat_user_mode(regs)) { if ((u32)idx == PERF_REG_ARM64_SP) return regs->compat_sp; if ((u32)idx == PERF_REG_ARM64_LR) return regs->compat_lr; + if (idx == 15) + return regs->pc; } if ((u32)idx == PERF_REG_ARM64_SP) diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index fab013c5ee8c..10190c4b16dc 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c @@ -498,6 +498,39 @@ static void entry_task_switch(struct task_struct *next) __this_cpu_write(__entry_task, next); } +/* + * ARM erratum 1418040 handling, affecting the 32bit view of CNTVCT. + * Assuming the virtual counter is enabled at the beginning of times: + * + * - disable access when switching from a 64bit task to a 32bit task + * - enable access when switching from a 32bit task to a 64bit task + */ +static void erratum_1418040_thread_switch(struct task_struct *prev, + struct task_struct *next) +{ + bool prev32, next32; + u64 val; + + if (!(IS_ENABLED(CONFIG_ARM64_ERRATUM_1418040) && + cpus_have_const_cap(ARM64_WORKAROUND_1418040))) + return; + + prev32 = is_compat_thread(task_thread_info(prev)); + next32 = is_compat_thread(task_thread_info(next)); + + if (prev32 == next32) + return; + + val = read_sysreg(cntkctl_el1); + + if (!next32) + val |= ARCH_TIMER_USR_VCT_ACCESS_EN; + else + val &= ~ARCH_TIMER_USR_VCT_ACCESS_EN; + + write_sysreg(val, cntkctl_el1); +} + /* * Thread switching. */ @@ -514,6 +547,7 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev, uao_thread_switch(next); ptrauth_thread_switch(next); ssbs_thread_switch(next); + erratum_1418040_thread_switch(prev, next); /* * Complete any pending TLB or cache maintenance on this CPU in case diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c index 8d2d9d5b418f..30b877f8b85e 100644 --- a/arch/arm64/kernel/ptrace.c +++ b/arch/arm64/kernel/ptrace.c @@ -1819,12 +1819,23 @@ static void tracehook_report_syscall(struct pt_regs *regs, saved_reg = regs->regs[regno]; regs->regs[regno] = dir; - if (dir == PTRACE_SYSCALL_EXIT) + if (dir == PTRACE_SYSCALL_ENTER) { + if (tracehook_report_syscall_entry(regs)) + forget_syscall(regs); + regs->regs[regno] = saved_reg; + } else if (!test_thread_flag(TIF_SINGLESTEP)) { tracehook_report_syscall_exit(regs, 0); - else if (tracehook_report_syscall_entry(regs)) - forget_syscall(regs); + regs->regs[regno] = saved_reg; + } else { + regs->regs[regno] = saved_reg; - regs->regs[regno] = saved_reg; + /* + * Signal a pseudo-step exception since we are stepping but + * tracer modifications to the registers may have rewound the + * state machine. 
+ */ + tracehook_report_syscall_exit(regs, 1); + } } int syscall_trace_enter(struct pt_regs *regs) @@ -1852,12 +1863,14 @@ int syscall_trace_enter(struct pt_regs *regs) void syscall_trace_exit(struct pt_regs *regs) { + unsigned long flags = READ_ONCE(current_thread_info()->flags); + audit_syscall_exit(regs); - if (test_thread_flag(TIF_SYSCALL_TRACEPOINT)) + if (flags & _TIF_SYSCALL_TRACEPOINT) trace_sys_exit(regs, regs_return_value(regs)); - if (test_thread_flag(TIF_SYSCALL_TRACE)) + if (flags & (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP)) tracehook_report_syscall(regs, PTRACE_SYSCALL_EXIT); rseq_syscall(regs); @@ -1935,8 +1948,8 @@ static int valid_native_regs(struct user_pt_regs *regs) */ int valid_user_regs(struct user_pt_regs *regs, struct task_struct *task) { - if (!test_tsk_thread_flag(task, TIF_SINGLESTEP)) - regs->pstate &= ~DBG_SPSR_SS; + /* https://lore.kernel.org/lkml/20191118131525.GA4180@willie-the-truck */ + user_regs_reset_single_step(regs, task); if (is_compat_thread(task_thread_info(task))) return valid_compat_regs(regs); diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index 56f664561754..d98987b82874 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c @@ -85,7 +85,7 @@ u64 __cacheline_aligned boot_args[4]; void __init smp_setup_processor_id(void) { u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK; - cpu_logical_map(0) = mpidr; + set_cpu_logical_map(0, mpidr); /* * clear __my_cpu_offset on boot CPU to avoid hang caused by @@ -276,6 +276,12 @@ arch_initcall(reserve_memblock_reserved_regions); u64 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = INVALID_HWID }; +u64 cpu_logical_map(int cpu) +{ + return __cpu_logical_map[cpu]; +} +EXPORT_SYMBOL_GPL(cpu_logical_map); + void __init setup_arch(char **cmdline_p) { init_mm.start_code = (unsigned long) _text; diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c index dd2cdc0d5be2..ddb757b2c3e5 100644 --- a/arch/arm64/kernel/signal.c +++ b/arch/arm64/kernel/signal.c @@ -782,7 +782,6 @@ static void setup_restart_syscall(struct pt_regs *regs) */ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs) { - struct task_struct *tsk = current; sigset_t *oldset = sigmask_to_save(); int usig = ksig->sig; int ret; @@ -806,14 +805,8 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs) */ ret |= !valid_user_regs(®s->user_regs, current); - /* - * Fast forward the stepping logic so we step into the signal - * handler. 
- */ - if (!ret) - user_fastforward_single_step(tsk); - - signal_setup_done(ret, ksig, 0); + /* Step into the signal handler if we are stepping */ + signal_setup_done(ret, ksig, test_thread_flag(TIF_SINGLESTEP)); } /* diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c index 993a4aedfd37..102dc3e7f2e1 100644 --- a/arch/arm64/kernel/smp.c +++ b/arch/arm64/kernel/smp.c @@ -549,7 +549,7 @@ acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor) return; /* map the logical cpu id to cpu MPIDR */ - cpu_logical_map(cpu_count) = hwid; + set_cpu_logical_map(cpu_count, hwid); cpu_madt_gicc[cpu_count] = *processor; @@ -663,7 +663,7 @@ static void __init of_parse_and_init_cpus(void) goto next; pr_debug("cpu logical map 0x%llx\n", hwid); - cpu_logical_map(cpu_count) = hwid; + set_cpu_logical_map(cpu_count, hwid); early_map_cpu_to_node(cpu_count, of_node_to_nid(dn)); next: @@ -704,7 +704,7 @@ void __init smp_init_cpus(void) for (i = 1; i < nr_cpu_ids; i++) { if (cpu_logical_map(i) != INVALID_HWID) { if (smp_cpu_setup(i)) - cpu_logical_map(i) = INVALID_HWID; + set_cpu_logical_map(i, INVALID_HWID); } } } diff --git a/arch/arm64/kernel/syscall.c b/arch/arm64/kernel/syscall.c index 871c739f060a..1457a0ba83db 100644 --- a/arch/arm64/kernel/syscall.c +++ b/arch/arm64/kernel/syscall.c @@ -50,6 +50,9 @@ static void invoke_syscall(struct pt_regs *regs, unsigned int scno, ret = do_ni_syscall(regs, scno); } + if (is_compat_task()) + ret = lower_32_bits(ret); + regs->regs[0] = ret; } @@ -121,7 +124,7 @@ static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr, if (!has_syscall_work(flags) && !IS_ENABLED(CONFIG_DEBUG_RSEQ)) { local_daif_mask(); flags = current_thread_info()->flags; - if (!has_syscall_work(flags)) { + if (!has_syscall_work(flags) && !(flags & _TIF_SINGLESTEP)) { /* * We're off to userspace, where interrupts are * always enabled after we restore the flags from diff --git a/arch/arm64/kernel/vdso32/Makefile b/arch/arm64/kernel/vdso32/Makefile index 76b327f88fbb..40dffe60b845 100644 --- a/arch/arm64/kernel/vdso32/Makefile +++ b/arch/arm64/kernel/vdso32/Makefile @@ -190,7 +190,7 @@ quiet_cmd_vdsosym = VDSOSYM $@ cmd_vdsosym = $(NM) $< | $(gen-vdsosym) | LC_ALL=C sort > $@ # Install commands for the unstripped file -quiet_cmd_vdso_install = INSTALL $@ +quiet_cmd_vdso_install = INSTALL32 $@ cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/vdso32.so vdso.so: $(obj)/vdso.so.dbg diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S index aa76f7259668..4f77de8ce138 100644 --- a/arch/arm64/kernel/vmlinux.lds.S +++ b/arch/arm64/kernel/vmlinux.lds.S @@ -24,6 +24,13 @@ ENTRY(_text) jiffies = jiffies_64; + +#define HYPERVISOR_EXTABLE \ + . = ALIGN(SZ_8); \ + __start___kvm_ex_table = .; \ + *(__kvm_ex_table) \ + __stop___kvm_ex_table = .; + #define HYPERVISOR_TEXT \ /* \ * Align to 4 KB so that \ @@ -39,6 +46,7 @@ jiffies = jiffies_64; __hyp_idmap_text_end = .; \ __hyp_text_start = .; \ *(.hyp.text) \ + HYPERVISOR_EXTABLE \ __hyp_text_end = .; #define IDMAP_TEXT \ @@ -142,6 +150,7 @@ SECTIONS . = ALIGN(PAGE_SIZE); idmap_pg_dir = .; . += IDMAP_DIR_SIZE; + idmap_pg_end = .; #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 tramp_pg_dir = .; @@ -171,9 +180,6 @@ SECTIONS *(.altinstructions) __alt_instructions_end = .; } - .altinstr_replacement : { - *(.altinstr_replacement) - } . 
= ALIGN(PAGE_SIZE); __inittext_end = .; diff --git a/arch/arm64/kvm/hyp-init.S b/arch/arm64/kvm/hyp-init.S index 160be2b4696d..dc41b505507d 100644 --- a/arch/arm64/kvm/hyp-init.S +++ b/arch/arm64/kvm/hyp-init.S @@ -136,11 +136,15 @@ ENTRY(__kvm_handle_stub_hvc) 1: cmp x0, #HVC_RESET_VECTORS b.ne 1f -reset: + /* - * Reset kvm back to the hyp stub. Do not clobber x0-x4 in - * case we coming via HVC_SOFT_RESTART. + * Set the HVC_RESET_VECTORS return code before entering the common + * path so that we do not clobber x0-x2 in case we are coming via + * HVC_SOFT_RESTART. */ + mov x0, xzr +reset: + /* Reset kvm back to the hyp stub. */ mrs x5, sctlr_el2 ldr x6, =SCTLR_ELx_FLAGS bic x5, x5, x6 // Clear SCTL_M and etc @@ -151,7 +155,6 @@ reset: /* Install stub vectors */ adr_l x5, __hyp_stub_vectors msr vbar_el2, x5 - mov x0, xzr eret 1: /* Bad stub call */ diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S index e5cc8d66bf53..dc3d7bc2292f 100644 --- a/arch/arm64/kvm/hyp/entry.S +++ b/arch/arm64/kvm/hyp/entry.S @@ -173,20 +173,23 @@ alternative_endif // This is our single instruction exception window. A pending // SError is guaranteed to occur at the earliest when we unmask // it, and at the latest just after the ISB. - .global abort_guest_exit_start abort_guest_exit_start: isb - .global abort_guest_exit_end abort_guest_exit_end: msr daifset, #4 // Mask aborts + ret - // If the exception took place, restore the EL1 exception - // context so that we can report some information. - // Merge the exception code with the SError pending bit. - tbz x0, #ARM_EXIT_WITH_SERROR_BIT, 1f + _kvm_extable abort_guest_exit_start, 9997f + _kvm_extable abort_guest_exit_end, 9997f +9997: + msr daifset, #4 // Mask aborts + mov x0, #(1 << ARM_EXIT_WITH_SERROR_BIT) + + // restore the EL1 exception context so that we can report some + // information. Merge the exception code with the SError pending bit. msr elr_el2, x2 msr esr_el2, x3 msr spsr_el2, x4 diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S index ffa68d5713f1..f36aad0f207b 100644 --- a/arch/arm64/kvm/hyp/hyp-entry.S +++ b/arch/arm64/kvm/hyp/hyp-entry.S @@ -15,6 +15,30 @@ #include #include +.macro save_caller_saved_regs_vect + /* x0 and x1 were saved in the vector entry */ + stp x2, x3, [sp, #-16]! + stp x4, x5, [sp, #-16]! + stp x6, x7, [sp, #-16]! + stp x8, x9, [sp, #-16]! + stp x10, x11, [sp, #-16]! + stp x12, x13, [sp, #-16]! + stp x14, x15, [sp, #-16]! + stp x16, x17, [sp, #-16]! +.endm + +.macro restore_caller_saved_regs_vect + ldp x16, x17, [sp], #16 + ldp x14, x15, [sp], #16 + ldp x12, x13, [sp], #16 + ldp x10, x11, [sp], #16 + ldp x8, x9, [sp], #16 + ldp x6, x7, [sp], #16 + ldp x4, x5, [sp], #16 + ldp x2, x3, [sp], #16 + ldp x0, x1, [sp], #16 +.endm + .text .pushsection .hyp.text, "ax" @@ -142,13 +166,19 @@ el1_error: b __guest_exit el2_sync: - /* Check for illegal exception return, otherwise panic */ + /* Check for illegal exception return */ mrs x0, spsr_el2 + tbnz x0, #20, 1f - /* if this was something else, then panic! */ - tst x0, #PSR_IL_BIT - b.eq __hyp_panic + save_caller_saved_regs_vect + stp x29, x30, [sp, #-16]! + bl kvm_unexpected_el2_exception + ldp x29, x30, [sp], #16 + restore_caller_saved_regs_vect + eret + +1: /* Let's attempt a recovery from the illegal exception return */ get_vcpu_ptr x1, x0 mov x0, #ARM_EXCEPTION_IL @@ -156,27 +186,14 @@ el2_sync: el2_error: - ldp x0, x1, [sp], #16 + save_caller_saved_regs_vect + stp x29, x30, [sp, #-16]! 
+ + bl kvm_unexpected_el2_exception + + ldp x29, x30, [sp], #16 + restore_caller_saved_regs_vect - /* - * Only two possibilities: - * 1) Either we come from the exit path, having just unmasked - * PSTATE.A: change the return code to an EL2 fault, and - * carry on, as we're already in a sane state to handle it. - * 2) Or we come from anywhere else, and that's a bug: we panic. - * - * For (1), x0 contains the original return code and x1 doesn't - * contain anything meaningful at that stage. We can reuse them - * as temp registers. - * For (2), who cares? - */ - mrs x0, elr_el2 - adr x1, abort_guest_exit_start - cmp x0, x1 - adr x1, abort_guest_exit_end - ccmp x0, x1, #4, ne - b.ne __hyp_panic - mov x0, #(1 << ARM_EXIT_WITH_SERROR_BIT) eret sb diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c index d76a3d39b269..65660b614474 100644 --- a/arch/arm64/kvm/hyp/switch.c +++ b/arch/arm64/kvm/hyp/switch.c @@ -14,6 +14,7 @@ #include #include +#include #include #include #include @@ -25,6 +26,9 @@ #include #include +extern struct exception_table_entry __start___kvm_ex_table; +extern struct exception_table_entry __stop___kvm_ex_table; + /* Check whether the FP regs were dirtied while in the host-side run loop: */ static bool __hyp_text update_fp_enabled(struct kvm_vcpu *vcpu) { @@ -257,10 +261,10 @@ static bool __hyp_text __translate_far_to_hpfar(u64 far, u64 *hpfar) * saved the guest context yet, and we may return early... */ par = read_sysreg(par_el1); - asm volatile("at s1e1r, %0" : : "r" (far)); - isb(); - - tmp = read_sysreg(par_el1); + if (!__kvm_at("s1e1r", far)) + tmp = read_sysreg(par_el1); + else + tmp = SYS_PAR_EL1_F; /* back to the guest */ write_sysreg(par, par_el1); if (unlikely(tmp & SYS_PAR_EL1_F)) @@ -754,7 +758,7 @@ static void __hyp_text __hyp_call_panic_nvhe(u64 spsr, u64 elr, u64 par, * making sure it is a kernel address and not a PC-relative * reference. 
*/ - asm volatile("ldr %0, =__hyp_panic_string" : "=r" (str_va)); + asm volatile("ldr %0, =%1" : "=r" (str_va) : "S" (__hyp_panic_string)); __hyp_do_panic(str_va, spsr, elr, @@ -791,3 +795,30 @@ void __hyp_text __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt) unreachable(); } + +asmlinkage void __hyp_text kvm_unexpected_el2_exception(void) +{ + unsigned long addr, fixup; + struct kvm_cpu_context *host_ctxt; + struct exception_table_entry *entry, *end; + unsigned long elr_el2 = read_sysreg(elr_el2); + + entry = hyp_symbol_addr(__start___kvm_ex_table); + end = hyp_symbol_addr(__stop___kvm_ex_table); + host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt; + + while (entry < end) { + addr = (unsigned long)&entry->insn + entry->insn; + fixup = (unsigned long)&entry->fixup + entry->fixup; + + if (addr != elr_el2) { + entry++; + continue; + } + + write_sysreg(fixup, elr_el2); + return; + } + + hyp_panic(host_ctxt); +} diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c index f4a8ae918827..784d485218ca 100644 --- a/arch/arm64/kvm/reset.c +++ b/arch/arm64/kvm/reset.c @@ -258,7 +258,7 @@ static int kvm_vcpu_enable_ptrauth(struct kvm_vcpu *vcpu) int kvm_reset_vcpu(struct kvm_vcpu *vcpu) { const struct kvm_regs *cpu_reset; - int ret = -EINVAL; + int ret; bool loaded; /* Reset PMU outside of the non-preemptible section */ @@ -281,15 +281,19 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu) if (test_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, vcpu->arch.features) || test_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, vcpu->arch.features)) { - if (kvm_vcpu_enable_ptrauth(vcpu)) + if (kvm_vcpu_enable_ptrauth(vcpu)) { + ret = -EINVAL; goto out; + } } switch (vcpu->arch.target) { default: if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) { - if (!cpu_has_32bit_el1()) + if (!cpu_has_32bit_el1()) { + ret = -EINVAL; goto out; + } cpu_reset = &default_regs_reset32; } else { cpu_reset = &default_regs_reset; diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c index cdc79de0c794..945e5f690ede 100644 --- a/arch/arm64/net/bpf_jit_comp.c +++ b/arch/arm64/net/bpf_jit_comp.c @@ -141,14 +141,17 @@ static inline void emit_addr_mov_i64(const int reg, const u64 val, } } -static inline int bpf2a64_offset(int bpf_to, int bpf_from, +static inline int bpf2a64_offset(int bpf_insn, int off, const struct jit_ctx *ctx) { - int to = ctx->offset[bpf_to]; - /* -1 to account for the Branch instruction */ - int from = ctx->offset[bpf_from] - 1; - - return to - from; + /* BPF JMP offset is relative to the next instruction */ + bpf_insn++; + /* + * Whereas arm64 branch instructions encode the offset + * from the branch itself, so we must subtract 1 from the + * instruction offset. 
+ */ + return ctx->offset[bpf_insn + off] - (ctx->offset[bpf_insn] - 1); } static void jit_fill_hole(void *area, unsigned int size) @@ -532,7 +535,7 @@ emit_bswap_uxt: /* JUMP off */ case BPF_JMP | BPF_JA: - jmp_offset = bpf2a64_offset(i + off, i, ctx); + jmp_offset = bpf2a64_offset(i, off, ctx); check_imm26(jmp_offset); emit(A64_B(jmp_offset), ctx); break; @@ -559,7 +562,7 @@ emit_bswap_uxt: case BPF_JMP32 | BPF_JSLE | BPF_X: emit(A64_CMP(is64, dst, src), ctx); emit_cond_jmp: - jmp_offset = bpf2a64_offset(i + off, i, ctx); + jmp_offset = bpf2a64_offset(i, off, ctx); check_imm19(jmp_offset); switch (BPF_OP(code)) { case BPF_JEQ: @@ -780,10 +783,21 @@ static int build_body(struct jit_ctx *ctx, bool extra_pass) const struct bpf_prog *prog = ctx->prog; int i; + /* + * - offset[0] offset of the end of prologue, + * start of the 1st instruction. + * - offset[1] - offset of the end of 1st instruction, + * start of the 2nd instruction + * [....] + * - offset[3] - offset of the end of 3rd instruction, + * start of 4th instruction + */ for (i = 0; i < prog->len; i++) { const struct bpf_insn *insn = &prog->insnsi[i]; int ret; + if (ctx->image == NULL) + ctx->offset[i] = ctx->idx; ret = build_insn(insn, ctx, extra_pass); if (ret > 0) { i++; @@ -791,11 +805,16 @@ static int build_body(struct jit_ctx *ctx, bool extra_pass) ctx->offset[i] = ctx->idx; continue; } - if (ctx->image == NULL) - ctx->offset[i] = ctx->idx; if (ret) return ret; } + /* + * offset is allocated with prog->len + 1 so fill in + * the last element with the offset after the last + * instruction (end of program) + */ + if (ctx->image == NULL) + ctx->offset[i] = ctx->idx; return 0; } @@ -871,7 +890,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) memset(&ctx, 0, sizeof(ctx)); ctx.prog = prog; - ctx.offset = kcalloc(prog->len, sizeof(int), GFP_KERNEL); + ctx.offset = kcalloc(prog->len + 1, sizeof(int), GFP_KERNEL); if (ctx.offset == NULL) { prog = orig_prog; goto out_off; @@ -951,7 +970,7 @@ skip_init_ctx: prog->jited_len = image_size; if (!prog->is_func || extra_pass) { - bpf_prog_fill_jited_linfo(prog, ctx.offset); + bpf_prog_fill_jited_linfo(prog, ctx.offset + 1); out_off: kfree(ctx.offset); kfree(jit_data); diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile index 32240000dc0c..2876a7df1b0a 100644 --- a/arch/ia64/Makefile +++ b/arch/ia64/Makefile @@ -40,7 +40,7 @@ $(error Sorry, you need a newer version of the assember, one that is built from endif quiet_cmd_gzip = GZIP $@ -cmd_gzip = cat $(real-prereqs) | gzip -n -f -9 > $@ +cmd_gzip = cat $(real-prereqs) | $(KGZIP) -n -f -9 > $@ quiet_cmd_objcopy = OBJCOPY $@ cmd_objcopy = $(OBJCOPY) $(OBJCOPYFLAGS) $(OBJCOPYFLAGS_$(@F)) $< $@ diff --git a/arch/ia64/configs/zx1_defconfig b/arch/ia64/configs/zx1_defconfig index 8c92e095f8bb..d42f79a33e91 100644 --- a/arch/ia64/configs/zx1_defconfig +++ b/arch/ia64/configs/zx1_defconfig @@ -35,7 +35,6 @@ CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=y CONFIG_CHR_DEV_OSST=y CONFIG_BLK_DEV_SR=y -CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_CHR_DEV_SG=y CONFIG_SCSI_CONSTANTS=y CONFIG_SCSI_LOGGING=y diff --git a/arch/m68k/Makefile b/arch/m68k/Makefile index 5d9288384096..0415d28dbe4f 100644 --- a/arch/m68k/Makefile +++ b/arch/m68k/Makefile @@ -135,10 +135,10 @@ vmlinux.gz: vmlinux ifndef CONFIG_KGDB cp vmlinux vmlinux.tmp $(STRIP) vmlinux.tmp - gzip -9c vmlinux.tmp >vmlinux.gz + $(KGZIP) -9c vmlinux.tmp >vmlinux.gz rm vmlinux.tmp else - gzip -9c vmlinux >vmlinux.gz + $(KGZIP) -9c vmlinux >vmlinux.gz endif bzImage: vmlinux.bz2 @@ -148,10 +148,10 @@ 
vmlinux.bz2: vmlinux ifndef CONFIG_KGDB cp vmlinux vmlinux.tmp $(STRIP) vmlinux.tmp - bzip2 -1c vmlinux.tmp >vmlinux.bz2 + $(KBZIP2) -1c vmlinux.tmp >vmlinux.bz2 rm vmlinux.tmp else - bzip2 -1c vmlinux >vmlinux.bz2 + $(KBZIP2) -1c vmlinux >vmlinux.bz2 endif archclean: diff --git a/arch/m68k/coldfire/pci.c b/arch/m68k/coldfire/pci.c index 62b0eb6cf69a..84eab0f5e00a 100644 --- a/arch/m68k/coldfire/pci.c +++ b/arch/m68k/coldfire/pci.c @@ -216,8 +216,10 @@ static int __init mcf_pci_init(void) /* Keep a virtual mapping to IO/config space active */ iospace = (unsigned long) ioremap(PCI_IO_PA, PCI_IO_SIZE); - if (iospace == 0) + if (iospace == 0) { + pci_free_host_bridge(bridge); return -ENODEV; + } pr_info("Coldfire: PCI IO/config window mapped to 0x%x\n", (u32) iospace); diff --git a/arch/m68k/configs/amiga_defconfig b/arch/m68k/configs/amiga_defconfig index 9a33c1c006a1..cf8103fa2f34 100644 --- a/arch/m68k/configs/amiga_defconfig +++ b/arch/m68k/configs/amiga_defconfig @@ -334,7 +334,6 @@ CONFIG_SCSI=y CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m CONFIG_BLK_DEV_SR=y -CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_CHR_DEV_SG=m CONFIG_SCSI_CONSTANTS=y CONFIG_SCSI_SAS_ATTRS=m diff --git a/arch/m68k/configs/apollo_defconfig b/arch/m68k/configs/apollo_defconfig index 7fdbc797a05d..5636288a4b45 100644 --- a/arch/m68k/configs/apollo_defconfig +++ b/arch/m68k/configs/apollo_defconfig @@ -319,7 +319,6 @@ CONFIG_SCSI=y CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m CONFIG_BLK_DEV_SR=y -CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_CHR_DEV_SG=m CONFIG_SCSI_CONSTANTS=y CONFIG_SCSI_SAS_ATTRS=m diff --git a/arch/m68k/configs/atari_defconfig b/arch/m68k/configs/atari_defconfig index f1763405a539..015a7f401ffd 100644 --- a/arch/m68k/configs/atari_defconfig +++ b/arch/m68k/configs/atari_defconfig @@ -334,7 +334,6 @@ CONFIG_SCSI=y CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m CONFIG_BLK_DEV_SR=y -CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_CHR_DEV_SG=m CONFIG_SCSI_CONSTANTS=y CONFIG_SCSI_SAS_ATTRS=m diff --git a/arch/m68k/configs/bvme6000_defconfig b/arch/m68k/configs/bvme6000_defconfig index 91154d6acb31..1209430e61e1 100644 --- a/arch/m68k/configs/bvme6000_defconfig +++ b/arch/m68k/configs/bvme6000_defconfig @@ -316,7 +316,6 @@ CONFIG_SCSI=y CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m CONFIG_BLK_DEV_SR=y -CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_CHR_DEV_SG=m CONFIG_SCSI_CONSTANTS=y CONFIG_SCSI_SAS_ATTRS=m diff --git a/arch/m68k/configs/hp300_defconfig b/arch/m68k/configs/hp300_defconfig index c398c4a94d95..a41b16067f5c 100644 --- a/arch/m68k/configs/hp300_defconfig +++ b/arch/m68k/configs/hp300_defconfig @@ -318,7 +318,6 @@ CONFIG_SCSI=y CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m CONFIG_BLK_DEV_SR=y -CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_CHR_DEV_SG=m CONFIG_SCSI_CONSTANTS=y CONFIG_SCSI_SAS_ATTRS=m diff --git a/arch/m68k/configs/mac_defconfig b/arch/m68k/configs/mac_defconfig index 350d004559be..8af104a8c000 100644 --- a/arch/m68k/configs/mac_defconfig +++ b/arch/m68k/configs/mac_defconfig @@ -325,7 +325,6 @@ CONFIG_SCSI=y CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m CONFIG_BLK_DEV_SR=y -CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_CHR_DEV_SG=m CONFIG_SCSI_CONSTANTS=y CONFIG_SCSI_SAS_ATTRS=m diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig index b838dd820348..354ff30e22c9 100644 --- a/arch/m68k/configs/multi_defconfig +++ b/arch/m68k/configs/multi_defconfig @@ -358,7 +358,6 @@ CONFIG_SCSI=y CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m CONFIG_BLK_DEV_SR=y -CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_CHR_DEV_SG=m CONFIG_SCSI_CONSTANTS=y CONFIG_SCSI_SAS_ATTRS=m diff --git 
a/arch/m68k/configs/mvme147_defconfig b/arch/m68k/configs/mvme147_defconfig index 3f8dd61559cf..eac7685cea42 100644 --- a/arch/m68k/configs/mvme147_defconfig +++ b/arch/m68k/configs/mvme147_defconfig @@ -315,7 +315,6 @@ CONFIG_SCSI=y CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m CONFIG_BLK_DEV_SR=y -CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_CHR_DEV_SG=m CONFIG_SCSI_CONSTANTS=y CONFIG_SCSI_SAS_ATTRS=m diff --git a/arch/m68k/configs/mvme16x_defconfig b/arch/m68k/configs/mvme16x_defconfig index ae3b2d4f636c..0f38c4a3c87a 100644 --- a/arch/m68k/configs/mvme16x_defconfig +++ b/arch/m68k/configs/mvme16x_defconfig @@ -316,7 +316,6 @@ CONFIG_SCSI=y CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m CONFIG_BLK_DEV_SR=y -CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_CHR_DEV_SG=m CONFIG_SCSI_CONSTANTS=y CONFIG_SCSI_SAS_ATTRS=m diff --git a/arch/m68k/configs/q40_defconfig b/arch/m68k/configs/q40_defconfig index cd61ef14b582..6ede6869db1c 100644 --- a/arch/m68k/configs/q40_defconfig +++ b/arch/m68k/configs/q40_defconfig @@ -324,7 +324,6 @@ CONFIG_SCSI=y CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m CONFIG_BLK_DEV_SR=y -CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_CHR_DEV_SG=m CONFIG_SCSI_CONSTANTS=y CONFIG_SCSI_SAS_ATTRS=m diff --git a/arch/m68k/configs/sun3_defconfig b/arch/m68k/configs/sun3_defconfig index 151f5371cd3d..8644c4789938 100644 --- a/arch/m68k/configs/sun3_defconfig +++ b/arch/m68k/configs/sun3_defconfig @@ -313,7 +313,6 @@ CONFIG_SCSI=y CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m CONFIG_BLK_DEV_SR=y -CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_CHR_DEV_SG=m CONFIG_SCSI_CONSTANTS=y CONFIG_SCSI_SAS_ATTRS=m diff --git a/arch/m68k/configs/sun3x_defconfig b/arch/m68k/configs/sun3x_defconfig index 1dcb0ee1fe98..f2fd0da2346e 100644 --- a/arch/m68k/configs/sun3x_defconfig +++ b/arch/m68k/configs/sun3x_defconfig @@ -313,7 +313,6 @@ CONFIG_SCSI=y CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m CONFIG_BLK_DEV_SR=y -CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_CHR_DEV_SG=m CONFIG_SCSI_CONSTANTS=y CONFIG_SCSI_SAS_ATTRS=m diff --git a/arch/m68k/include/asm/m53xxacr.h b/arch/m68k/include/asm/m53xxacr.h index 9138a624c5c8..692f90e7fecc 100644 --- a/arch/m68k/include/asm/m53xxacr.h +++ b/arch/m68k/include/asm/m53xxacr.h @@ -89,9 +89,9 @@ * coherency though in all cases. And for copyback caches we will need * to push cached data as well. 
*/ -#define CACHE_INIT CACR_CINVA -#define CACHE_INVALIDATE CACR_CINVA -#define CACHE_INVALIDATED CACR_CINVA +#define CACHE_INIT (CACHE_MODE + CACR_CINVA - CACR_EC) +#define CACHE_INVALIDATE (CACHE_MODE + CACR_CINVA) +#define CACHE_INVALIDATED (CACHE_MODE + CACR_CINVA) #define ACR0_MODE ((CONFIG_RAMBASE & 0xff000000) + \ (0x000f0000) + \ diff --git a/arch/m68k/include/asm/mac_via.h b/arch/m68k/include/asm/mac_via.h index de1470c4d829..1149251ea58d 100644 --- a/arch/m68k/include/asm/mac_via.h +++ b/arch/m68k/include/asm/mac_via.h @@ -257,6 +257,7 @@ extern int rbv_present,via_alt_mapping; struct irq_desc; +extern void via_l2_flush(int writeback); extern void via_register_interrupts(void); extern void via_irq_enable(int); extern void via_irq_disable(int); diff --git a/arch/m68k/kernel/setup_no.c b/arch/m68k/kernel/setup_no.c index 3c5def10d486..caa260f877f2 100644 --- a/arch/m68k/kernel/setup_no.c +++ b/arch/m68k/kernel/setup_no.c @@ -139,7 +139,8 @@ void __init setup_arch(char **cmdline_p) pr_debug("MEMORY -> ROMFS=0x%p-0x%06lx MEM=0x%06lx-0x%06lx\n ", __bss_stop, memory_start, memory_start, memory_end); - memblock_add(memory_start, memory_end - memory_start); + memblock_add(_rambase, memory_end - _rambase); + memblock_reserve(_rambase, memory_start - _rambase); /* Keep a copy of command line */ *cmdline_p = &command_line[0]; diff --git a/arch/m68k/mac/config.c b/arch/m68k/mac/config.c index 611f73bfc87c..d0126ab01360 100644 --- a/arch/m68k/mac/config.c +++ b/arch/m68k/mac/config.c @@ -59,7 +59,6 @@ extern void iop_preinit(void); extern void iop_init(void); extern void via_init(void); extern void via_init_clock(irq_handler_t func); -extern void via_flush_cache(void); extern void oss_init(void); extern void psc_init(void); extern void baboon_init(void); @@ -130,21 +129,6 @@ int __init mac_parse_bootinfo(const struct bi_record *record) return unknown; } -/* - * Flip into 24bit mode for an instant - flushes the L2 cache card. We - * have to disable interrupts for this. Our IRQ handlers will crap - * themselves if they take an IRQ in 24bit mode! - */ - -static void mac_cache_card_flush(int writeback) -{ - unsigned long flags; - - local_irq_save(flags); - via_flush_cache(); - local_irq_restore(flags); -} - void __init config_mac(void) { if (!MACH_IS_MAC) @@ -175,9 +159,8 @@ void __init config_mac(void) * not. 
*/ - if (macintosh_config->ident == MAC_MODEL_IICI - || macintosh_config->ident == MAC_MODEL_IIFX) - mach_l2_flush = mac_cache_card_flush; + if (macintosh_config->ident == MAC_MODEL_IICI) + mach_l2_flush = via_l2_flush; } diff --git a/arch/m68k/mac/iop.c b/arch/m68k/mac/iop.c index 9bfa17015768..c432bfafe63e 100644 --- a/arch/m68k/mac/iop.c +++ b/arch/m68k/mac/iop.c @@ -183,7 +183,7 @@ static __inline__ void iop_writeb(volatile struct mac_iop *iop, __u16 addr, __u8 static __inline__ void iop_stop(volatile struct mac_iop *iop) { - iop->status_ctrl &= ~IOP_RUN; + iop->status_ctrl = IOP_AUTOINC; } static __inline__ void iop_start(volatile struct mac_iop *iop) @@ -191,14 +191,9 @@ static __inline__ void iop_start(volatile struct mac_iop *iop) iop->status_ctrl = IOP_RUN | IOP_AUTOINC; } -static __inline__ void iop_bypass(volatile struct mac_iop *iop) -{ - iop->status_ctrl |= IOP_BYPASS; -} - static __inline__ void iop_interrupt(volatile struct mac_iop *iop) { - iop->status_ctrl |= IOP_IRQ; + iop->status_ctrl = IOP_IRQ | IOP_RUN | IOP_AUTOINC; } static int iop_alive(volatile struct mac_iop *iop) @@ -244,7 +239,6 @@ void __init iop_preinit(void) } else { iop_base[IOP_NUM_SCC] = (struct mac_iop *) SCC_IOP_BASE_QUADRA; } - iop_base[IOP_NUM_SCC]->status_ctrl = 0x87; iop_scc_present = 1; } else { iop_base[IOP_NUM_SCC] = NULL; @@ -256,7 +250,7 @@ void __init iop_preinit(void) } else { iop_base[IOP_NUM_ISM] = (struct mac_iop *) ISM_IOP_BASE_QUADRA; } - iop_base[IOP_NUM_ISM]->status_ctrl = 0; + iop_stop(iop_base[IOP_NUM_ISM]); iop_ism_present = 1; } else { iop_base[IOP_NUM_ISM] = NULL; @@ -416,7 +410,8 @@ static void iop_handle_send(uint iop_num, uint chan) msg->status = IOP_MSGSTATUS_UNUSED; msg = msg->next; iop_send_queue[iop_num][chan] = msg; - if (msg) iop_do_send(msg); + if (msg && iop_readb(iop, IOP_ADDR_SEND_STATE + chan) == IOP_MSG_IDLE) + iop_do_send(msg); } /* @@ -490,16 +485,12 @@ int iop_send_message(uint iop_num, uint chan, void *privdata, if (!(q = iop_send_queue[iop_num][chan])) { iop_send_queue[iop_num][chan] = msg; + iop_do_send(msg); } else { while (q->next) q = q->next; q->next = msg; } - if (iop_readb(iop_base[iop_num], - IOP_ADDR_SEND_STATE + chan) == IOP_MSG_IDLE) { - iop_do_send(msg); - } - return 0; } diff --git a/arch/m68k/mac/via.c b/arch/m68k/mac/via.c index 3c2cfcb74982..1f0fad2a98a0 100644 --- a/arch/m68k/mac/via.c +++ b/arch/m68k/mac/via.c @@ -294,10 +294,14 @@ void via_debug_dump(void) * the system into 24-bit mode for an instant. 
*/ -void via_flush_cache(void) +void via_l2_flush(int writeback) { + unsigned long flags; + + local_irq_save(flags); via2[gBufB] &= ~VIA2B_vMode32; via2[gBufB] |= VIA2B_vMode32; + local_irq_restore(flags); } /* diff --git a/arch/m68k/mm/mcfmmu.c b/arch/m68k/mm/mcfmmu.c index 6cb1e41d58d0..70a5f55ea664 100644 --- a/arch/m68k/mm/mcfmmu.c +++ b/arch/m68k/mm/mcfmmu.c @@ -164,7 +164,7 @@ void __init cf_bootmem_alloc(void) m68k_memory[0].addr = _rambase; m68k_memory[0].size = _ramend - _rambase; - memblock_add(m68k_memory[0].addr, m68k_memory[0].size); + memblock_add_node(m68k_memory[0].addr, m68k_memory[0].size, 0); /* compute total pages in system */ num_pages = PFN_DOWN(_ramend - _rambase); diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index e5c2d47608fe..6ecdc690f733 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -862,6 +862,7 @@ config SNI_RM select I8253 select I8259 select ISA + select MIPS_L1_CACHE_SHIFT_6 select SWAP_IO_SPACE if CPU_BIG_ENDIAN select SYS_HAS_CPU_R4X00 select SYS_HAS_CPU_R5000 diff --git a/arch/mips/Makefile b/arch/mips/Makefile index cdc09b71febe..5403a91ce098 100644 --- a/arch/mips/Makefile +++ b/arch/mips/Makefile @@ -285,12 +285,23 @@ ifdef CONFIG_64BIT endif endif +# When linking a 32-bit executable the LLVM linker cannot cope with a +# 32-bit load address that has been sign-extended to 64 bits. Simply +# remove the upper 32 bits then, as it is safe to do so with other +# linkers. +ifdef CONFIG_64BIT + load-ld = $(load-y) +else + load-ld = $(subst 0xffffffff,0x,$(load-y)) +endif + KBUILD_AFLAGS += $(cflags-y) KBUILD_CFLAGS += $(cflags-y) -KBUILD_CPPFLAGS += -DVMLINUX_LOAD_ADDRESS=$(load-y) +KBUILD_CPPFLAGS += -DVMLINUX_LOAD_ADDRESS=$(load-y) -DLINKER_LOAD_ADDRESS=$(load-ld) KBUILD_CPPFLAGS += -DDATAOFFSET=$(if $(dataoffset-y),$(dataoffset-y),0) bootvars-y = VMLINUX_LOAD_ADDRESS=$(load-y) \ + LINKER_LOAD_ADDRESS=$(load-ld) \ VMLINUX_ENTRY_ADDRESS=$(entry-y) \ PLATFORM="$(platform-y)" \ ITS_INPUTS="$(its-y)" diff --git a/arch/mips/boot/compressed/Makefile b/arch/mips/boot/compressed/Makefile index d859f079b771..378cbfb31ee7 100644 --- a/arch/mips/boot/compressed/Makefile +++ b/arch/mips/boot/compressed/Makefile @@ -90,7 +90,7 @@ ifneq ($(zload-y),) VMLINUZ_LOAD_ADDRESS := $(zload-y) else VMLINUZ_LOAD_ADDRESS = $(shell $(obj)/calc_vmlinuz_load_addr \ - $(obj)/vmlinux.bin $(VMLINUX_LOAD_ADDRESS)) + $(obj)/vmlinux.bin $(LINKER_LOAD_ADDRESS)) endif UIMAGE_LOADADDR = $(VMLINUZ_LOAD_ADDRESS) diff --git a/arch/mips/boot/dts/ingenic/qi_lb60.dts b/arch/mips/boot/dts/ingenic/qi_lb60.dts index 7a371d9c5a33..eda37fb516f0 100644 --- a/arch/mips/boot/dts/ingenic/qi_lb60.dts +++ b/arch/mips/boot/dts/ingenic/qi_lb60.dts @@ -69,7 +69,7 @@ "Speaker", "OUTL", "Speaker", "OUTR", "INL", "LOUT", - "INL", "ROUT"; + "INR", "ROUT"; simple-audio-card,aux-devs = <&>; diff --git a/arch/mips/cavium-octeon/octeon-usb.c b/arch/mips/cavium-octeon/octeon-usb.c index cc88a08bc1f7..4017398519cf 100644 --- a/arch/mips/cavium-octeon/octeon-usb.c +++ b/arch/mips/cavium-octeon/octeon-usb.c @@ -518,6 +518,7 @@ static int __init dwc3_octeon_device_init(void) res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (res == NULL) { + put_device(&pdev->dev); dev_err(&pdev->dev, "No memory resources\n"); return -ENXIO; } @@ -529,8 +530,10 @@ static int __init dwc3_octeon_device_init(void) * know the difference. 
*/ base = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(base)) + if (IS_ERR(base)) { + put_device(&pdev->dev); return PTR_ERR(base); + } mutex_lock(&dwc3_octeon_clocks_mutex); dwc3_octeon_clocks_start(&pdev->dev, (u64)base); diff --git a/arch/mips/configs/bigsur_defconfig b/arch/mips/configs/bigsur_defconfig index f14ad0538f4e..eea9b613bb74 100644 --- a/arch/mips/configs/bigsur_defconfig +++ b/arch/mips/configs/bigsur_defconfig @@ -112,7 +112,6 @@ CONFIG_BLK_DEV_TC86C001=m CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=y CONFIG_BLK_DEV_SR=y -CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_CHR_DEV_SG=m CONFIG_CHR_DEV_SCH=m CONFIG_ATA=y diff --git a/arch/mips/configs/fuloong2e_defconfig b/arch/mips/configs/fuloong2e_defconfig index 7a7af706e898..c5f66b7f2b22 100644 --- a/arch/mips/configs/fuloong2e_defconfig +++ b/arch/mips/configs/fuloong2e_defconfig @@ -99,7 +99,6 @@ CONFIG_CDROM_PKTCDVD=m CONFIG_ATA_OVER_ETH=m CONFIG_BLK_DEV_SD=y CONFIG_BLK_DEV_SR=y -CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_CHR_DEV_SG=y CONFIG_SCSI_CONSTANTS=y # CONFIG_SCSI_LOWLEVEL is not set diff --git a/arch/mips/configs/ip27_defconfig b/arch/mips/configs/ip27_defconfig index 82d942a6026e..638d7cf5ef01 100644 --- a/arch/mips/configs/ip27_defconfig +++ b/arch/mips/configs/ip27_defconfig @@ -99,7 +99,6 @@ CONFIG_SCSI=y CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=y CONFIG_BLK_DEV_SR=m -CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_CHR_DEV_SG=m CONFIG_CHR_DEV_SCH=m CONFIG_SCSI_CONSTANTS=y diff --git a/arch/mips/configs/ip32_defconfig b/arch/mips/configs/ip32_defconfig index 370884018aad..7b1fab518317 100644 --- a/arch/mips/configs/ip32_defconfig +++ b/arch/mips/configs/ip32_defconfig @@ -50,7 +50,6 @@ CONFIG_RAID_ATTRS=y CONFIG_SCSI=y CONFIG_BLK_DEV_SD=y CONFIG_BLK_DEV_SR=y -CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_CHR_DEV_SG=m CONFIG_SCSI_CONSTANTS=y CONFIG_SCSI_LOGGING=y diff --git a/arch/mips/configs/jazz_defconfig b/arch/mips/configs/jazz_defconfig index 328d4dfeb4cb..982b990469af 100644 --- a/arch/mips/configs/jazz_defconfig +++ b/arch/mips/configs/jazz_defconfig @@ -191,7 +191,6 @@ CONFIG_SCSI=y CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m CONFIG_BLK_DEV_SR=m -CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_SCSI_CONSTANTS=y CONFIG_SCSI_SCAN_ASYNC=y CONFIG_SCSI_FC_ATTRS=y diff --git a/arch/mips/configs/loongson3_defconfig b/arch/mips/configs/loongson3_defconfig index 90ee0084d786..e41f4841cb4d 100644 --- a/arch/mips/configs/loongson3_defconfig +++ b/arch/mips/configs/loongson3_defconfig @@ -231,7 +231,7 @@ CONFIG_MEDIA_CAMERA_SUPPORT=y CONFIG_MEDIA_USB_SUPPORT=y CONFIG_USB_VIDEO_CLASS=m CONFIG_DRM=y -CONFIG_DRM_RADEON=y +CONFIG_DRM_RADEON=m CONFIG_FB_RADEON=y CONFIG_LCD_CLASS_DEVICE=y CONFIG_LCD_PLATFORM=m diff --git a/arch/mips/configs/malta_defconfig b/arch/mips/configs/malta_defconfig index 59eedf55419d..211bd3d6e6cb 100644 --- a/arch/mips/configs/malta_defconfig +++ b/arch/mips/configs/malta_defconfig @@ -239,7 +239,6 @@ CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m CONFIG_CHR_DEV_OSST=m CONFIG_BLK_DEV_SR=y -CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_CHR_DEV_SG=m CONFIG_SCSI_CONSTANTS=y CONFIG_SCSI_LOGGING=y diff --git a/arch/mips/configs/malta_kvm_defconfig b/arch/mips/configs/malta_kvm_defconfig index 8ef612552a19..62b1969b4f55 100644 --- a/arch/mips/configs/malta_kvm_defconfig +++ b/arch/mips/configs/malta_kvm_defconfig @@ -247,7 +247,6 @@ CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m CONFIG_CHR_DEV_OSST=m CONFIG_BLK_DEV_SR=y -CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_CHR_DEV_SG=m CONFIG_SCSI_CONSTANTS=y CONFIG_SCSI_LOGGING=y diff --git a/arch/mips/configs/malta_kvm_guest_defconfig 
b/arch/mips/configs/malta_kvm_guest_defconfig index d2a008c9907c..9185e0a0aa45 100644 --- a/arch/mips/configs/malta_kvm_guest_defconfig +++ b/arch/mips/configs/malta_kvm_guest_defconfig @@ -245,7 +245,6 @@ CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m CONFIG_CHR_DEV_OSST=m CONFIG_BLK_DEV_SR=y -CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_CHR_DEV_SG=m CONFIG_SCSI_CONSTANTS=y CONFIG_SCSI_LOGGING=y diff --git a/arch/mips/configs/maltaup_xpa_defconfig b/arch/mips/configs/maltaup_xpa_defconfig index 970df6d42728..636311d67a53 100644 --- a/arch/mips/configs/maltaup_xpa_defconfig +++ b/arch/mips/configs/maltaup_xpa_defconfig @@ -245,7 +245,6 @@ CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m CONFIG_CHR_DEV_OSST=m CONFIG_BLK_DEV_SR=y -CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_CHR_DEV_SG=m CONFIG_SCSI_CONSTANTS=y CONFIG_SCSI_LOGGING=y diff --git a/arch/mips/configs/rm200_defconfig b/arch/mips/configs/rm200_defconfig index 2c7adea7638f..30d7c3db884e 100644 --- a/arch/mips/configs/rm200_defconfig +++ b/arch/mips/configs/rm200_defconfig @@ -203,7 +203,6 @@ CONFIG_SCSI=y CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m CONFIG_BLK_DEV_SR=m -CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_SCSI_CONSTANTS=y CONFIG_SCSI_SCAN_ASYNC=y CONFIG_SCSI_FC_ATTRS=y diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h index 983a6a7f43a1..3e26b0c7391b 100644 --- a/arch/mips/include/asm/cpu-features.h +++ b/arch/mips/include/asm/cpu-features.h @@ -288,10 +288,12 @@ # define cpu_has_mips32r6 __isa_ge_or_flag(6, MIPS_CPU_ISA_M32R6) #endif #ifndef cpu_has_mips64r1 -# define cpu_has_mips64r1 __isa_range_or_flag(1, 6, MIPS_CPU_ISA_M64R1) +# define cpu_has_mips64r1 (cpu_has_64bits && \ + __isa_range_or_flag(1, 6, MIPS_CPU_ISA_M64R1)) #endif #ifndef cpu_has_mips64r2 -# define cpu_has_mips64r2 __isa_range_or_flag(2, 6, MIPS_CPU_ISA_M64R2) +# define cpu_has_mips64r2 (cpu_has_64bits && \ + __isa_range_or_flag(2, 6, MIPS_CPU_ISA_M64R2)) #endif #ifndef cpu_has_mips64r6 # define cpu_has_mips64r6 __isa_ge_and_flag(6, MIPS_CPU_ISA_M64R6) diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h index 7b47a323dc23..356c61074d13 100644 --- a/arch/mips/include/asm/kvm_host.h +++ b/arch/mips/include/asm/kvm_host.h @@ -939,7 +939,7 @@ enum kvm_mips_fault_result kvm_trap_emul_gva_fault(struct kvm_vcpu *vcpu, #define KVM_ARCH_WANT_MMU_NOTIFIER int kvm_unmap_hva_range(struct kvm *kvm, - unsigned long start, unsigned long end); + unsigned long start, unsigned long end, unsigned flags); int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end); int kvm_test_age_hva(struct kvm *kvm, unsigned long hva); diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h index bdbdc19a2b8f..3afdb39d092a 100644 --- a/arch/mips/include/asm/mipsregs.h +++ b/arch/mips/include/asm/mipsregs.h @@ -750,7 +750,7 @@ /* MAAR bit definitions */ #define MIPS_MAAR_VH (_U64CAST_(1) << 63) -#define MIPS_MAAR_ADDR ((BIT_ULL(BITS_PER_LONG - 12) - 1) << 12) +#define MIPS_MAAR_ADDR GENMASK_ULL(55, 12) #define MIPS_MAAR_ADDR_SHIFT 12 #define MIPS_MAAR_S (_ULCAST_(1) << 1) #define MIPS_MAAR_VL (_ULCAST_(1) << 0) diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S index efde27c99414..9c5f8a5d097f 100644 --- a/arch/mips/kernel/genex.S +++ b/arch/mips/kernel/genex.S @@ -474,20 +474,20 @@ NESTED(nmi_handler, PT_SIZE, sp) .endm .macro __build_clear_fpe + CLI + TRACE_IRQS_OFF .set push /* gas fails to assemble cfc1 for some archs (octeon).*/ \ .set mips1 
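The MIPS_MAAR_ADDR change above replaces a BITS_PER_LONG-dependent expression with a fixed bits 55..12 field. A userspace re-statement of the GENMASK_ULL() arithmetic (not the kernel header itself, only the same computation) shows the resulting mask:

#include <stdio.h>

/* same arithmetic as the kernel's GENMASK_ULL() for valid h >= l */
#define GENMASK_ULL(h, l)	((~0ULL << (l)) & (~0ULL >> (63 - (h))))

int main(void)
{
	/* bits 55..12, the MAAR address field */
	printf("0x%016llx\n", GENMASK_ULL(55, 12));	/* 0x00fffffffffff000 */
	return 0;
}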
SET_HARDFLOAT cfc1 a1, fcr31 .set pop - CLI - TRACE_IRQS_OFF .endm .macro __build_clear_msa_fpe - _cfcmsa a1, MSA_CSR CLI TRACE_IRQS_OFF + _cfcmsa a1, MSA_CSR .endm .macro __build_clear_ade diff --git a/arch/mips/kernel/mips-cm.c b/arch/mips/kernel/mips-cm.c index e5ea3db23d6b..a9eab83d9148 100644 --- a/arch/mips/kernel/mips-cm.c +++ b/arch/mips/kernel/mips-cm.c @@ -119,9 +119,9 @@ static char *cm2_causes[32] = { "COH_RD_ERR", "MMIO_WR_ERR", "MMIO_RD_ERR", "0x07", "0x08", "0x09", "0x0a", "0x0b", "0x0c", "0x0d", "0x0e", "0x0f", - "0x10", "0x11", "0x12", "0x13", - "0x14", "0x15", "0x16", "INTVN_WR_ERR", - "INTVN_RD_ERR", "0x19", "0x1a", "0x1b", + "0x10", "INTVN_WR_ERR", "INTVN_RD_ERR", "0x13", + "0x14", "0x15", "0x16", "0x17", + "0x18", "0x19", "0x1a", "0x1b", "0x1c", "0x1d", "0x1e", "0x1f" }; diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c index 5eec13b8d222..b8884de89c81 100644 --- a/arch/mips/kernel/setup.c +++ b/arch/mips/kernel/setup.c @@ -494,7 +494,7 @@ static void __init mips_parse_crashkernel(void) if (ret != 0 || crash_size <= 0) return; - if (!memblock_find_in_range(crash_base, crash_base + crash_size, crash_size, 0)) { + if (!memblock_find_in_range(crash_base, crash_base + crash_size, crash_size, 1)) { pr_warn("Invalid memory region reserved for crash kernel\n"); return; } @@ -653,7 +653,17 @@ static void __init arch_mem_init(char **cmdline_p) crashk_res.end - crashk_res.start + 1); #endif device_tree_init(); + + /* + * In order to reduce the possibility of kernel panic when failed to + * get IO TLB memory under CONFIG_SWIOTLB, it is better to allocate + * low memory as small as possible before plat_swiotlb_setup(), so + * make sparse_init() using top-down allocation. + */ + memblock_set_bottom_up(false); sparse_init(); + memblock_set_bottom_up(true); + plat_swiotlb_setup(); dma_contiguous_reserve(PFN_PHYS(max_low_pfn)); diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c index 712c15de6ab9..6b304acf506f 100644 --- a/arch/mips/kernel/smp-bmips.c +++ b/arch/mips/kernel/smp-bmips.c @@ -241,6 +241,8 @@ static int bmips_boot_secondary(int cpu, struct task_struct *idle) */ static void bmips_init_secondary(void) { + bmips_cpu_setup(); + switch (current_cpu_type()) { case CPU_BMIPS4350: case CPU_BMIPS4380: diff --git a/arch/mips/kernel/time.c b/arch/mips/kernel/time.c index 37e9413a393d..caa01457dce6 100644 --- a/arch/mips/kernel/time.c +++ b/arch/mips/kernel/time.c @@ -18,12 +18,82 @@ #include #include #include +#include +#include #include #include #include #include +#ifdef CONFIG_CPU_FREQ + +static DEFINE_PER_CPU(unsigned long, pcp_lpj_ref); +static DEFINE_PER_CPU(unsigned long, pcp_lpj_ref_freq); +static unsigned long glb_lpj_ref; +static unsigned long glb_lpj_ref_freq; + +static int cpufreq_callback(struct notifier_block *nb, + unsigned long val, void *data) +{ + struct cpufreq_freqs *freq = data; + struct cpumask *cpus = freq->policy->cpus; + unsigned long lpj; + int cpu; + + /* + * Skip lpj numbers adjustment if the CPU-freq transition is safe for + * the loops delay. (Is this possible?) + */ + if (freq->flags & CPUFREQ_CONST_LOOPS) + return NOTIFY_OK; + + /* Save the initial values of the lpjes for future scaling. 
*/ + if (!glb_lpj_ref) { + glb_lpj_ref = boot_cpu_data.udelay_val; + glb_lpj_ref_freq = freq->old; + + for_each_online_cpu(cpu) { + per_cpu(pcp_lpj_ref, cpu) = + cpu_data[cpu].udelay_val; + per_cpu(pcp_lpj_ref_freq, cpu) = freq->old; + } + } + + /* + * Adjust global lpj variable and per-CPU udelay_val number in + * accordance with the new CPU frequency. + */ + if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) || + (val == CPUFREQ_POSTCHANGE && freq->old > freq->new)) { + loops_per_jiffy = cpufreq_scale(glb_lpj_ref, + glb_lpj_ref_freq, + freq->new); + + for_each_cpu(cpu, cpus) { + lpj = cpufreq_scale(per_cpu(pcp_lpj_ref, cpu), + per_cpu(pcp_lpj_ref_freq, cpu), + freq->new); + cpu_data[cpu].udelay_val = (unsigned int)lpj; + } + } + + return NOTIFY_OK; +} + +static struct notifier_block cpufreq_notifier = { + .notifier_call = cpufreq_callback, +}; + +static int __init register_cpufreq_notifier(void) +{ + return cpufreq_register_notifier(&cpufreq_notifier, + CPUFREQ_TRANSITION_NOTIFIER); +} +core_initcall(register_cpufreq_notifier); + +#endif /* CONFIG_CPU_FREQ */ + /* * forward reference */ diff --git a/arch/mips/kernel/topology.c b/arch/mips/kernel/topology.c index cd3e1f82e1a5..08ad6371fbe0 100644 --- a/arch/mips/kernel/topology.c +++ b/arch/mips/kernel/topology.c @@ -20,7 +20,7 @@ static int __init topology_init(void) for_each_present_cpu(i) { struct cpu *c = &per_cpu(cpu_devices, i); - c->hotpluggable = 1; + c->hotpluggable = !!i; ret = register_cpu(c, i); if (ret) printk(KERN_WARNING "topology_init: register_cpu %d " diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index 342e41de9d64..8282d0feb0b2 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -1240,6 +1240,18 @@ static int enable_restore_fp_context(int msa) err = own_fpu_inatomic(1); if (msa && !err) { enable_msa(); + /* + * with MSA enabled, userspace can see MSACSR + * and MSA regs, but the values in them are from + * other task before current task, restore them + * from saved fp/msa context + */ + write_msa_csr(current->thread.fpu.msacsr); + /* + * own_fpu_inatomic(1) just restore low 64bit, + * fix the high 64bit + */ + init_msa_upper(); set_thread_flag(TIF_USEDMSA); set_thread_flag(TIF_MSA_CTX_LIVE); } @@ -2126,6 +2138,7 @@ static void configure_status(void) change_c0_status(ST0_CU|ST0_MX|ST0_RE|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX, status_set); + back_to_back_c0_hazard(); } unsigned int hwrena; diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S index 33ee0d18fb0a..eb9d7af93836 100644 --- a/arch/mips/kernel/vmlinux.lds.S +++ b/arch/mips/kernel/vmlinux.lds.S @@ -50,7 +50,7 @@ SECTIONS /* . = 0xa800000000300000; */ . = 0xffffffff80300000; #endif - . = VMLINUX_LOAD_ADDRESS; + . 
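The notifier above rescales loops_per_jiffy and each CPU's udelay_val with cpufreq_scale(ref_lpj, ref_freq, new_freq), i.e. lpj_new = lpj_ref * freq_new / freq_ref. A small userspace model of that scaling, with made-up calibration numbers:

#include <stdio.h>

/* lpj_ref * freq_new / freq_ref, split the way the kernel helper splits it
 * to avoid intermediate overflow; all values below are illustrative only */
static unsigned long scale_lpj(unsigned long lpj_ref,
			       unsigned long freq_ref_khz,
			       unsigned long freq_new_khz)
{
	return lpj_ref / freq_ref_khz * freq_new_khz +
	       lpj_ref % freq_ref_khz * freq_new_khz / freq_ref_khz;
}

int main(void)
{
	/* calibrated at 1 GHz with lpj 4980736, then throttled to 500 MHz */
	printf("%lu\n", scale_lpj(4980736UL, 1000000UL, 500000UL));	/* 2490368 */
	return 0;
}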
= LINKER_LOAD_ADDRESS; /* read-only */ _text = .; /* Text and read-only data */ .text : { diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c index 1109924560d8..b22a3565e133 100644 --- a/arch/mips/kvm/mips.c +++ b/arch/mips/kvm/mips.c @@ -131,6 +131,8 @@ int kvm_arch_check_processor_compat(void) int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) { switch (type) { + case KVM_VM_MIPS_AUTO: + break; #ifdef CONFIG_KVM_MIPS_VZ case KVM_VM_MIPS_VZ: #else diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c index 97e538a8c1be..97f63a84aa51 100644 --- a/arch/mips/kvm/mmu.c +++ b/arch/mips/kvm/mmu.c @@ -512,7 +512,8 @@ static int kvm_unmap_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end, return 1; } -int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end) +int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end, + unsigned flags) { handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL); diff --git a/arch/mips/lantiq/xway/sysctrl.c b/arch/mips/lantiq/xway/sysctrl.c index 156a95ac5c72..2ee68d6e8bb9 100644 --- a/arch/mips/lantiq/xway/sysctrl.c +++ b/arch/mips/lantiq/xway/sysctrl.c @@ -514,8 +514,8 @@ void __init ltq_soc_init(void) clkdev_add_pmu("1e10b308.eth", NULL, 0, 0, PMU_SWITCH | PMU_PPE_DP | PMU_PPE_TC); clkdev_add_pmu("1da00000.usif", "NULL", 1, 0, PMU_USIF); - clkdev_add_pmu("1e108000.gswip", "gphy0", 0, 0, PMU_GPHY); - clkdev_add_pmu("1e108000.gswip", "gphy1", 0, 0, PMU_GPHY); + clkdev_add_pmu("1e108000.switch", "gphy0", 0, 0, PMU_GPHY); + clkdev_add_pmu("1e108000.switch", "gphy1", 0, 0, PMU_GPHY); clkdev_add_pmu("1e103100.deu", NULL, 1, 0, PMU_DEU); clkdev_add_pmu("1e116000.mei", "afe", 1, 2, PMU_ANALOG_DSL_AFE); clkdev_add_pmu("1e116000.mei", "dfe", 1, 0, PMU_DFE); @@ -538,8 +538,8 @@ void __init ltq_soc_init(void) PMU_SWITCH | PMU_PPE_DPLUS | PMU_PPE_DPLUM | PMU_PPE_EMA | PMU_PPE_TC | PMU_PPE_SLL01 | PMU_PPE_QSB | PMU_PPE_TOP); - clkdev_add_pmu("1e108000.gswip", "gphy0", 0, 0, PMU_GPHY); - clkdev_add_pmu("1e108000.gswip", "gphy1", 0, 0, PMU_GPHY); + clkdev_add_pmu("1e108000.switch", "gphy0", 0, 0, PMU_GPHY); + clkdev_add_pmu("1e108000.switch", "gphy1", 0, 0, PMU_GPHY); clkdev_add_pmu("1e103000.sdio", NULL, 1, 0, PMU_SDIO); clkdev_add_pmu("1e103100.deu", NULL, 1, 0, PMU_DEU); clkdev_add_pmu("1e116000.mei", "dfe", 1, 0, PMU_DFE); diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c index 89b9c851d822..c4785a456ded 100644 --- a/arch/mips/mm/c-r4k.c +++ b/arch/mips/mm/c-r4k.c @@ -1676,7 +1676,11 @@ static void setup_scache(void) printk("MIPS secondary cache %ldkB, %s, linesize %d bytes.\n", scache_size >> 10, way_string[c->scache.ways], c->scache.linesz); + + if (current_cpu_type() == CPU_BMIPS5000) + c->options |= MIPS_CPU_INCLUSIVE_CACHES; } + #else if (!(c->scache.flags & MIPS_CACHE_NOT_PRESENT)) panic("Dunno how to handle MIPS32 / MIPS64 second level cache"); diff --git a/arch/mips/pci/pci-xtalk-bridge.c b/arch/mips/pci/pci-xtalk-bridge.c index 30017d5945bc..c4b1c6cf2660 100644 --- a/arch/mips/pci/pci-xtalk-bridge.c +++ b/arch/mips/pci/pci-xtalk-bridge.c @@ -444,9 +444,10 @@ static int bridge_probe(struct platform_device *pdev) return -ENOMEM; domain = irq_domain_create_hierarchy(parent, 0, 8, fn, &bridge_domain_ops, NULL); - irq_domain_free_fwnode(fn); - if (!domain) + if (!domain) { + irq_domain_free_fwnode(fn); return -ENOMEM; + } pci_set_flags(PCI_PROBE_ONLY); @@ -538,6 +539,7 @@ err_free_resource: pci_free_resource_list(&host->windows); err_remove_domain: irq_domain_remove(domain); + 
irq_domain_free_fwnode(fn); return err; } @@ -545,8 +547,10 @@ static int bridge_remove(struct platform_device *pdev) { struct pci_bus *bus = platform_get_drvdata(pdev); struct bridge_controller *bc = BRIDGE_CONTROLLER(bus); + struct fwnode_handle *fn = bc->domain->fwnode; irq_domain_remove(bc->domain); + irq_domain_free_fwnode(fn); pci_lock_rescan_remove(); pci_stop_root_bus(bus); pci_remove_root_bus(bus); diff --git a/arch/mips/sni/a20r.c b/arch/mips/sni/a20r.c index f9407e170476..c6af7047eb0d 100644 --- a/arch/mips/sni/a20r.c +++ b/arch/mips/sni/a20r.c @@ -143,7 +143,10 @@ static struct platform_device sc26xx_pdev = { }, }; -static u32 a20r_ack_hwint(void) +/* + * Trigger chipset to update CPU's CAUSE IP field + */ +static u32 a20r_update_cause_ip(void) { u32 status = read_c0_status(); @@ -205,12 +208,14 @@ static void a20r_hwint(void) int irq; clear_c0_status(IE_IRQ0); - status = a20r_ack_hwint(); + status = a20r_update_cause_ip(); cause = read_c0_cause(); irq = ffs(((cause & status) >> 8) & 0xf8); if (likely(irq > 0)) do_IRQ(SNI_A20R_IRQ_BASE + irq - 1); + + a20r_update_cause_ip(); set_c0_status(IE_IRQ0); } diff --git a/arch/mips/tools/elf-entry.c b/arch/mips/tools/elf-entry.c index adde79ce7fc0..dbd14ff05b4c 100644 --- a/arch/mips/tools/elf-entry.c +++ b/arch/mips/tools/elf-entry.c @@ -51,11 +51,14 @@ int main(int argc, const char *argv[]) nread = fread(&hdr, 1, sizeof(hdr), file); if (nread != sizeof(hdr)) { perror("Unable to read input file"); + fclose(file); return EXIT_FAILURE; } - if (memcmp(hdr.ehdr32.e_ident, ELFMAG, SELFMAG)) + if (memcmp(hdr.ehdr32.e_ident, ELFMAG, SELFMAG)) { + fclose(file); die("Input is not an ELF\n"); + } switch (hdr.ehdr32.e_ident[EI_CLASS]) { case ELFCLASS32: @@ -67,6 +70,7 @@ int main(int argc, const char *argv[]) entry = be32toh(hdr.ehdr32.e_entry); break; default: + fclose(file); die("Invalid ELF encoding\n"); } @@ -83,14 +87,17 @@ int main(int argc, const char *argv[]) entry = be64toh(hdr.ehdr64.e_entry); break; default: + fclose(file); die("Invalid ELF encoding\n"); } break; default: + fclose(file); die("Invalid ELF class\n"); } printf("0x%016" PRIx64 "\n", entry); + fclose(file); return EXIT_SUCCESS; } diff --git a/arch/mips/vdso/genvdso.c b/arch/mips/vdso/genvdso.c index b66b6b1c4aeb..8f581a2c8578 100644 --- a/arch/mips/vdso/genvdso.c +++ b/arch/mips/vdso/genvdso.c @@ -122,6 +122,7 @@ static void *map_vdso(const char *path, size_t *_size) if (fstat(fd, &stat) != 0) { fprintf(stderr, "%s: Failed to stat '%s': %s\n", program_name, path, strerror(errno)); + close(fd); return NULL; } @@ -130,6 +131,7 @@ static void *map_vdso(const char *path, size_t *_size) if (addr == MAP_FAILED) { fprintf(stderr, "%s: Failed to map '%s': %s\n", program_name, path, strerror(errno)); + close(fd); return NULL; } @@ -139,6 +141,7 @@ static void *map_vdso(const char *path, size_t *_size) if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG) != 0) { fprintf(stderr, "%s: '%s' is not an ELF file\n", program_name, path); + close(fd); return NULL; } @@ -150,6 +153,7 @@ static void *map_vdso(const char *path, size_t *_size) default: fprintf(stderr, "%s: '%s' has invalid ELF class\n", program_name, path); + close(fd); return NULL; } @@ -161,6 +165,7 @@ static void *map_vdso(const char *path, size_t *_size) default: fprintf(stderr, "%s: '%s' has invalid ELF data order\n", program_name, path); + close(fd); return NULL; } @@ -168,15 +173,18 @@ static void *map_vdso(const char *path, size_t *_size) fprintf(stderr, "%s: '%s' has invalid ELF machine (expected EM_MIPS)\n", program_name, path); 
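The elf-entry.c and genvdso.c hunks in this area add the missing fclose()/close() calls on every early-exit path of these host tools. The patch keeps the existing structure and simply inserts the calls; an equivalent way to keep such exits from multiplying, shown here only as a sketch, is a single goto-based cleanup label:

#include <stdio.h>
#include <stdlib.h>

static int read_header(const char *path, unsigned char hdr[16])
{
	FILE *f = fopen(path, "rb");
	int ret = EXIT_FAILURE;

	if (!f)
		return ret;
	if (fread(hdr, 1, 16, f) != 16)
		goto out;		/* short read: file still closed below */
	ret = EXIT_SUCCESS;
out:
	fclose(f);
	return ret;
}

int main(int argc, char **argv)
{
	unsigned char hdr[16];

	return argc > 1 ? read_header(argv[1], hdr) : EXIT_FAILURE;
}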
+ close(fd); return NULL; } else if (swap_uint16(ehdr->e_type) != ET_DYN) { fprintf(stderr, "%s: '%s' has invalid ELF type (expected ET_DYN)\n", program_name, path); + close(fd); return NULL; } *_size = stat.st_size; + close(fd); return addr; } @@ -280,10 +288,12 @@ int main(int argc, char **argv) /* Calculate and write symbol offsets to */ if (!get_symbols(dbg_vdso_path, dbg_vdso)) { unlink(out_path); + fclose(out_file); return EXIT_FAILURE; } fprintf(out_file, "};\n"); + fclose(out_file); return EXIT_SUCCESS; } diff --git a/arch/openrisc/kernel/entry.S b/arch/openrisc/kernel/entry.S index e4a78571f883..c6481cfc5220 100644 --- a/arch/openrisc/kernel/entry.S +++ b/arch/openrisc/kernel/entry.S @@ -1166,13 +1166,13 @@ ENTRY(__sys_clone) l.movhi r29,hi(sys_clone) l.ori r29,r29,lo(sys_clone) l.j _fork_save_extra_regs_and_call - l.addi r7,r1,0 + l.nop ENTRY(__sys_fork) l.movhi r29,hi(sys_fork) l.ori r29,r29,lo(sys_fork) l.j _fork_save_extra_regs_and_call - l.addi r3,r1,0 + l.nop ENTRY(sys_rt_sigreturn) l.jal _sys_rt_sigreturn diff --git a/arch/openrisc/kernel/stacktrace.c b/arch/openrisc/kernel/stacktrace.c index 43f140a28bc7..54d38809e22c 100644 --- a/arch/openrisc/kernel/stacktrace.c +++ b/arch/openrisc/kernel/stacktrace.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include @@ -68,12 +69,25 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) { unsigned long *sp = NULL; + if (!try_get_task_stack(tsk)) + return; + if (tsk == current) sp = (unsigned long *) &sp; - else - sp = (unsigned long *) KSTK_ESP(tsk); + else { + unsigned long ksp; + + /* Locate stack from kernel context */ + ksp = task_thread_info(tsk)->ksp; + ksp += STACK_FRAME_OVERHEAD; /* redzone */ + ksp += sizeof(struct pt_regs); + + sp = (unsigned long *) ksp; + } unwind_stack(trace, sp, save_stack_address_nosched); + + put_task_stack(tsk); } EXPORT_SYMBOL_GPL(save_stack_trace_tsk); diff --git a/arch/openrisc/mm/cache.c b/arch/openrisc/mm/cache.c index 08f56af387ac..534a52ec5e66 100644 --- a/arch/openrisc/mm/cache.c +++ b/arch/openrisc/mm/cache.c @@ -16,7 +16,7 @@ #include #include -static void cache_loop(struct page *page, const unsigned int reg) +static __always_inline void cache_loop(struct page *page, const unsigned int reg) { unsigned long paddr = page_to_pfn(page) << PAGE_SHIFT; unsigned long line = paddr & ~(L1_CACHE_BYTES - 1); diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile index 36b834f1c933..53f974817aff 100644 --- a/arch/parisc/Makefile +++ b/arch/parisc/Makefile @@ -156,7 +156,7 @@ vmlinuz: bzImage $(OBJCOPY) $(boot)/bzImage $@ else vmlinuz: vmlinux - @gzip -cf -9 $< > $@ + @$(KGZIP) -cf -9 $< > $@ endif install: diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h index 118953d41763..6dd4171c9530 100644 --- a/arch/parisc/include/asm/atomic.h +++ b/arch/parisc/include/asm/atomic.h @@ -212,6 +212,8 @@ atomic64_set(atomic64_t *v, s64 i) _atomic_spin_unlock_irqrestore(v, flags); } +#define atomic64_set_release(v, i) atomic64_set((v), (i)) + static __inline__ s64 atomic64_read(const atomic64_t *v) { diff --git a/arch/parisc/include/asm/barrier.h b/arch/parisc/include/asm/barrier.h index dbaaca84f27f..640d46edf32e 100644 --- a/arch/parisc/include/asm/barrier.h +++ b/arch/parisc/include/asm/barrier.h @@ -26,6 +26,67 @@ #define __smp_rmb() mb() #define __smp_wmb() mb() +#define __smp_store_release(p, v) \ +do { \ + typeof(p) __p = (p); \ + union { typeof(*p) __val; char __c[1]; } __u = \ + { .__val = (__force typeof(*p)) (v) }; \ + 
compiletime_assert_atomic_type(*p); \ + switch (sizeof(*p)) { \ + case 1: \ + asm volatile("stb,ma %0,0(%1)" \ + : : "r"(*(__u8 *)__u.__c), "r"(__p) \ + : "memory"); \ + break; \ + case 2: \ + asm volatile("sth,ma %0,0(%1)" \ + : : "r"(*(__u16 *)__u.__c), "r"(__p) \ + : "memory"); \ + break; \ + case 4: \ + asm volatile("stw,ma %0,0(%1)" \ + : : "r"(*(__u32 *)__u.__c), "r"(__p) \ + : "memory"); \ + break; \ + case 8: \ + if (IS_ENABLED(CONFIG_64BIT)) \ + asm volatile("std,ma %0,0(%1)" \ + : : "r"(*(__u64 *)__u.__c), "r"(__p) \ + : "memory"); \ + break; \ + } \ +} while (0) + +#define __smp_load_acquire(p) \ +({ \ + union { typeof(*p) __val; char __c[1]; } __u; \ + typeof(p) __p = (p); \ + compiletime_assert_atomic_type(*p); \ + switch (sizeof(*p)) { \ + case 1: \ + asm volatile("ldb,ma 0(%1),%0" \ + : "=r"(*(__u8 *)__u.__c) : "r"(__p) \ + : "memory"); \ + break; \ + case 2: \ + asm volatile("ldh,ma 0(%1),%0" \ + : "=r"(*(__u16 *)__u.__c) : "r"(__p) \ + : "memory"); \ + break; \ + case 4: \ + asm volatile("ldw,ma 0(%1),%0" \ + : "=r"(*(__u32 *)__u.__c) : "r"(__p) \ + : "memory"); \ + break; \ + case 8: \ + if (IS_ENABLED(CONFIG_64BIT)) \ + asm volatile("ldd,ma 0(%1),%0" \ + : "=r"(*(__u64 *)__u.__c) : "r"(__p) \ + : "memory"); \ + break; \ + } \ + __u.__val; \ +}) #include #endif /* !__ASSEMBLY__ */ diff --git a/arch/parisc/include/asm/cmpxchg.h b/arch/parisc/include/asm/cmpxchg.h index ab5c215cf46c..068958575871 100644 --- a/arch/parisc/include/asm/cmpxchg.h +++ b/arch/parisc/include/asm/cmpxchg.h @@ -60,6 +60,7 @@ extern void __cmpxchg_called_with_bad_pointer(void); extern unsigned long __cmpxchg_u32(volatile unsigned int *m, unsigned int old, unsigned int new_); extern u64 __cmpxchg_u64(volatile u64 *ptr, u64 old, u64 new_); +extern u8 __cmpxchg_u8(volatile u8 *ptr, u8 old, u8 new_); /* don't worry...optimizer will get rid of most of this */ static inline unsigned long @@ -71,6 +72,7 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size) #endif case 4: return __cmpxchg_u32((unsigned int *)ptr, (unsigned int)old, (unsigned int)new_); + case 1: return __cmpxchg_u8((u8 *)ptr, (u8)old, (u8)new_); } __cmpxchg_called_with_bad_pointer(); return old; diff --git a/arch/parisc/include/asm/spinlock.h b/arch/parisc/include/asm/spinlock.h index 197d2247e4db..16aec9ba2580 100644 --- a/arch/parisc/include/asm/spinlock.h +++ b/arch/parisc/include/asm/spinlock.h @@ -37,12 +37,8 @@ static inline void arch_spin_unlock(arch_spinlock_t *x) volatile unsigned int *a; a = __ldcw_align(x); -#ifdef CONFIG_SMP - (void) __ldcw(a); -#else - mb(); -#endif - *a = 1; + /* Release with ordered store. */ + __asm__ __volatile__("stw,ma %0,0(%1)" : : "r"(1), "r"(a) : "memory"); } static inline int arch_spin_trylock(arch_spinlock_t *x) diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S index b96d74496977..873bf3434da9 100644 --- a/arch/parisc/kernel/entry.S +++ b/arch/parisc/kernel/entry.S @@ -454,7 +454,6 @@ nop LDREG 0(\ptp),\pte bb,<,n \pte,_PAGE_PRESENT_BIT,3f - LDCW 0(\tmp),\tmp1 b \fault stw \spc,0(\tmp) 99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP) @@ -464,23 +463,26 @@ 3: .endm - /* Release pa_tlb_lock lock without reloading lock address. */ - .macro tlb_unlock0 spc,tmp,tmp1 + /* Release pa_tlb_lock lock without reloading lock address. + Note that the values in the register spc are limited to + NR_SPACE_IDS (262144). Thus, the stw instruction always + stores a nonzero value even when register spc is 64 bits. 
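The new parisc __smp_store_release()/__smp_load_acquire() above, and the ordered-store spinlock release that follows, implement the usual publish/consume contract: every store issued before the release is visible to any reader whose acquire load observes the released value. The same contract expressed with C11 atomics, purely as a userspace illustration:

#include <stdatomic.h>
#include <stdio.h>

static int payload;
static atomic_int ready;

static void producer(void)
{
	payload = 42;					/* plain store */
	atomic_store_explicit(&ready, 1, memory_order_release);
}

static void consumer(void)
{
	if (atomic_load_explicit(&ready, memory_order_acquire))
		printf("%d\n", payload);		/* guaranteed to read 42 */
}

int main(void)
{
	producer();
	consumer();
	return 0;
}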
+ We use an ordered store to ensure all prior accesses are + performed prior to releasing the lock. */ + .macro tlb_unlock0 spc,tmp #ifdef CONFIG_SMP 98: or,COND(=) %r0,\spc,%r0 - LDCW 0(\tmp),\tmp1 - or,COND(=) %r0,\spc,%r0 - stw \spc,0(\tmp) + stw,ma \spc,0(\tmp) 99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP) #endif .endm /* Release pa_tlb_lock lock. */ - .macro tlb_unlock1 spc,tmp,tmp1 + .macro tlb_unlock1 spc,tmp #ifdef CONFIG_SMP 98: load_pa_tlb_lock \tmp 99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP) - tlb_unlock0 \spc,\tmp,\tmp1 + tlb_unlock0 \spc,\tmp #endif .endm @@ -1163,7 +1165,7 @@ dtlb_miss_20w: idtlbt pte,prot - tlb_unlock1 spc,t0,t1 + tlb_unlock1 spc,t0 rfir nop @@ -1189,7 +1191,7 @@ nadtlb_miss_20w: idtlbt pte,prot - tlb_unlock1 spc,t0,t1 + tlb_unlock1 spc,t0 rfir nop @@ -1223,7 +1225,7 @@ dtlb_miss_11: mtsp t1, %sr1 /* Restore sr1 */ - tlb_unlock1 spc,t0,t1 + tlb_unlock1 spc,t0 rfir nop @@ -1256,7 +1258,7 @@ nadtlb_miss_11: mtsp t1, %sr1 /* Restore sr1 */ - tlb_unlock1 spc,t0,t1 + tlb_unlock1 spc,t0 rfir nop @@ -1285,7 +1287,7 @@ dtlb_miss_20: idtlbt pte,prot - tlb_unlock1 spc,t0,t1 + tlb_unlock1 spc,t0 rfir nop @@ -1313,7 +1315,7 @@ nadtlb_miss_20: idtlbt pte,prot - tlb_unlock1 spc,t0,t1 + tlb_unlock1 spc,t0 rfir nop @@ -1420,7 +1422,7 @@ itlb_miss_20w: iitlbt pte,prot - tlb_unlock1 spc,t0,t1 + tlb_unlock1 spc,t0 rfir nop @@ -1444,7 +1446,7 @@ naitlb_miss_20w: iitlbt pte,prot - tlb_unlock1 spc,t0,t1 + tlb_unlock1 spc,t0 rfir nop @@ -1478,7 +1480,7 @@ itlb_miss_11: mtsp t1, %sr1 /* Restore sr1 */ - tlb_unlock1 spc,t0,t1 + tlb_unlock1 spc,t0 rfir nop @@ -1502,7 +1504,7 @@ naitlb_miss_11: mtsp t1, %sr1 /* Restore sr1 */ - tlb_unlock1 spc,t0,t1 + tlb_unlock1 spc,t0 rfir nop @@ -1532,7 +1534,7 @@ itlb_miss_20: iitlbt pte,prot - tlb_unlock1 spc,t0,t1 + tlb_unlock1 spc,t0 rfir nop @@ -1552,7 +1554,7 @@ naitlb_miss_20: iitlbt pte,prot - tlb_unlock1 spc,t0,t1 + tlb_unlock1 spc,t0 rfir nop @@ -1582,7 +1584,7 @@ dbit_trap_20w: idtlbt pte,prot - tlb_unlock0 spc,t0,t1 + tlb_unlock0 spc,t0 rfir nop #else @@ -1608,7 +1610,7 @@ dbit_trap_11: mtsp t1, %sr1 /* Restore sr1 */ - tlb_unlock0 spc,t0,t1 + tlb_unlock0 spc,t0 rfir nop @@ -1628,7 +1630,7 @@ dbit_trap_20: idtlbt pte,prot - tlb_unlock0 spc,t0,t1 + tlb_unlock0 spc,t0 rfir nop #endif diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S index 97ac707c6bff..a37814cb66c7 100644 --- a/arch/parisc/kernel/syscall.S +++ b/arch/parisc/kernel/syscall.S @@ -640,11 +640,7 @@ cas_action: sub,<> %r28, %r25, %r0 2: stw %r24, 0(%r26) /* Free lock */ -#ifdef CONFIG_SMP -98: LDCW 0(%sr2,%r20), %r1 /* Barrier */ -99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP) -#endif - stw %r20, 0(%sr2,%r20) + stw,ma %r20, 0(%sr2,%r20) #if ENABLE_LWS_DEBUG /* Clear thread register indicator */ stw %r0, 4(%sr2,%r20) @@ -658,11 +654,7 @@ cas_action: 3: /* Error occurred on load or store */ /* Free lock */ -#ifdef CONFIG_SMP -98: LDCW 0(%sr2,%r20), %r1 /* Barrier */ -99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP) -#endif - stw %r20, 0(%sr2,%r20) + stw,ma %r20, 0(%sr2,%r20) #if ENABLE_LWS_DEBUG stw %r0, 4(%sr2,%r20) #endif @@ -863,11 +855,7 @@ cas2_action: cas2_end: /* Free lock */ -#ifdef CONFIG_SMP -98: LDCW 0(%sr2,%r20), %r1 /* Barrier */ -99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP) -#endif - stw %r20, 0(%sr2,%r20) + stw,ma %r20, 0(%sr2,%r20) /* Enable interrupts */ ssm PSW_SM_I, %r0 /* Return to userspace, set no error */ @@ -877,11 +865,7 @@ cas2_end: 22: /* Error occurred on load or store */ /* Free lock */ -#ifdef 
CONFIG_SMP -98: LDCW 0(%sr2,%r20), %r1 /* Barrier */ -99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP) -#endif - stw %r20, 0(%sr2,%r20) + stw,ma %r20, 0(%sr2,%r20) ssm PSW_SM_I, %r0 ldo 1(%r0),%r28 b lws_exit diff --git a/arch/parisc/lib/bitops.c b/arch/parisc/lib/bitops.c index 70ffbcf889b8..2e4d1f05a926 100644 --- a/arch/parisc/lib/bitops.c +++ b/arch/parisc/lib/bitops.c @@ -79,3 +79,15 @@ unsigned long __cmpxchg_u32(volatile unsigned int *ptr, unsigned int old, unsign _atomic_spin_unlock_irqrestore(ptr, flags); return (unsigned long)prev; } + +u8 __cmpxchg_u8(volatile u8 *ptr, u8 old, u8 new) +{ + unsigned long flags; + u8 prev; + + _atomic_spin_lock_irqsave(ptr, flags); + if ((prev = *ptr) == old) + *ptr = new; + _atomic_spin_unlock_irqrestore(ptr, flags); + return prev; +} diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 03c1b076b51b..e924ddd54618 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -171,7 +171,7 @@ config PPC select HAVE_ARCH_AUDITSYSCALL select HAVE_ARCH_HUGE_VMAP if PPC_BOOK3S_64 && PPC_RADIX_MMU select HAVE_ARCH_JUMP_LABEL - select HAVE_ARCH_KASAN if PPC32 + select HAVE_ARCH_KASAN if PPC32 && PPC_PAGE_SHIFT <= 14 select HAVE_ARCH_KGDB select HAVE_ARCH_MMAP_RND_BITS select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT @@ -747,6 +747,7 @@ config THREAD_SHIFT range 13 15 default "15" if PPC_256K_PAGES default "14" if PPC64 + default "14" if KASAN default "13" help Used to define the stack size. The default is almost always what you diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile index dfbd7f22eef5..8c69bd07ada6 100644 --- a/arch/powerpc/boot/Makefile +++ b/arch/powerpc/boot/Makefile @@ -119,7 +119,7 @@ src-wlib-y := string.S crt0.S stdio.c decompress.c main.c \ elf_util.c $(zlib-y) devtree.c stdlib.c \ oflib.c ofconsole.c cuboot.c -src-wlib-$(CONFIG_PPC_MPC52XX) += mpc52xx-psc.c +src-wlib-$(CONFIG_PPC_MPC52xx) += mpc52xx-psc.c src-wlib-$(CONFIG_PPC64_BOOT_WRAPPER) += opal-calls.S opal.c ifndef CONFIG_PPC64_BOOT_WRAPPER src-wlib-y += crtsavres.S diff --git a/arch/powerpc/boot/serial.c b/arch/powerpc/boot/serial.c index 9457863147f9..00179cd6bdd0 100644 --- a/arch/powerpc/boot/serial.c +++ b/arch/powerpc/boot/serial.c @@ -128,7 +128,7 @@ int serial_console_init(void) dt_is_compatible(devp, "fsl,cpm2-smc-uart")) rc = cpm_console_init(devp, &serial_cd); #endif -#ifdef CONFIG_PPC_MPC52XX +#ifdef CONFIG_PPC_MPC52xx else if (dt_is_compatible(devp, "fsl,mpc5200-psc-uart")) rc = mpc5200_psc_console_init(devp, &serial_cd); #endif diff --git a/arch/powerpc/configs/85xx-hw.config b/arch/powerpc/configs/85xx-hw.config index 9575a38c9155..b507df6ac69f 100644 --- a/arch/powerpc/configs/85xx-hw.config +++ b/arch/powerpc/configs/85xx-hw.config @@ -2,7 +2,6 @@ CONFIG_AQUANTIA_PHY=y CONFIG_AT803X_PHY=y CONFIG_ATA=y CONFIG_BLK_DEV_SD=y -CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_BLK_DEV_SR=y CONFIG_BROADCOM_PHY=y CONFIG_C293_PCIE=y diff --git a/arch/powerpc/configs/amigaone_defconfig b/arch/powerpc/configs/amigaone_defconfig index cf94d28d0e31..340140160c7b 100644 --- a/arch/powerpc/configs/amigaone_defconfig +++ b/arch/powerpc/configs/amigaone_defconfig @@ -47,7 +47,6 @@ CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=y CONFIG_BLK_DEV_SR=y -CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_CHR_DEV_SG=y CONFIG_SCSI_CONSTANTS=y CONFIG_SCSI_SYM53C8XX_2=y diff --git a/arch/powerpc/configs/chrp32_defconfig b/arch/powerpc/configs/chrp32_defconfig index 9ff493dd8439..6c5a4414e9ee 100644 --- a/arch/powerpc/configs/chrp32_defconfig +++ 
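__cmpxchg_u8() above gives parisc a byte-sized compare-and-swap, emulated under the hashed spinlocks like the 32- and 64-bit variants, so cmpxchg() on a u8 no longer falls through to __cmpxchg_called_with_bad_pointer(). Its semantics match the generic compare-and-swap shown below (C11 userspace analogue, for illustration only):

#include <stdatomic.h>
#include <stdio.h>

int main(void)
{
	_Atomic unsigned char byte = 0;
	unsigned char expected = 0;

	/* succeeds: byte was 0, becomes 1, expected is left unchanged */
	atomic_compare_exchange_strong(&byte, &expected, 1);
	printf("old=%u cur=%u\n", expected, (unsigned)byte);	/* old=0 cur=1 */

	/* fails: byte is already 1; expected is updated to the current value */
	expected = 0;
	atomic_compare_exchange_strong(&byte, &expected, 2);
	printf("old=%u cur=%u\n", expected, (unsigned)byte);	/* old=1 cur=1 */
	return 0;
}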
b/arch/powerpc/configs/chrp32_defconfig @@ -45,7 +45,6 @@ CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=y CONFIG_BLK_DEV_SR=y -CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_CHR_DEV_SG=y CONFIG_SCSI_CONSTANTS=y CONFIG_SCSI_SYM53C8XX_2=y diff --git a/arch/powerpc/configs/g5_defconfig b/arch/powerpc/configs/g5_defconfig index fbfcc85e4dc0..a68c7f3af10e 100644 --- a/arch/powerpc/configs/g5_defconfig +++ b/arch/powerpc/configs/g5_defconfig @@ -62,7 +62,6 @@ CONFIG_CDROM_PKTCDVD=m CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=y CONFIG_BLK_DEV_SR=y -CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_CHR_DEV_SG=y CONFIG_SCSI_CONSTANTS=y CONFIG_SCSI_SPI_ATTRS=y diff --git a/arch/powerpc/configs/maple_defconfig b/arch/powerpc/configs/maple_defconfig index 2975e64629aa..161351a18517 100644 --- a/arch/powerpc/configs/maple_defconfig +++ b/arch/powerpc/configs/maple_defconfig @@ -41,7 +41,6 @@ CONFIG_BLK_DEV_RAM_SIZE=8192 # CONFIG_SCSI_PROC_FS is not set CONFIG_BLK_DEV_SD=y CONFIG_BLK_DEV_SR=y -CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_CHR_DEV_SG=y CONFIG_SCSI_IPR=y CONFIG_ATA=y diff --git a/arch/powerpc/configs/pasemi_defconfig b/arch/powerpc/configs/pasemi_defconfig index 4b6d31d4474e..ddf5e97877e2 100644 --- a/arch/powerpc/configs/pasemi_defconfig +++ b/arch/powerpc/configs/pasemi_defconfig @@ -60,7 +60,6 @@ CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=y CONFIG_CHR_DEV_OSST=y CONFIG_BLK_DEV_SR=y -CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_CHR_DEV_SG=y CONFIG_CHR_DEV_SCH=y CONFIG_SCSI_CONSTANTS=y @@ -110,7 +109,6 @@ CONFIG_FB_NVIDIA=y CONFIG_FB_NVIDIA_I2C=y CONFIG_FB_RADEON=y # CONFIG_LCD_CLASS_DEVICE is not set -CONFIG_VGACON_SOFT_SCROLLBACK=y CONFIG_LOGO=y CONFIG_SOUND=y CONFIG_SND=y diff --git a/arch/powerpc/configs/pmac32_defconfig b/arch/powerpc/configs/pmac32_defconfig index 4e6e95f92646..5cad09f93562 100644 --- a/arch/powerpc/configs/pmac32_defconfig +++ b/arch/powerpc/configs/pmac32_defconfig @@ -119,7 +119,6 @@ CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=y CONFIG_BLK_DEV_SR=y -CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_CHR_DEV_SG=y CONFIG_SCSI_CONSTANTS=y CONFIG_SCSI_FC_ATTRS=y diff --git a/arch/powerpc/configs/powernv_defconfig b/arch/powerpc/configs/powernv_defconfig index 6658cceb928c..2a7c53cc2f83 100644 --- a/arch/powerpc/configs/powernv_defconfig +++ b/arch/powerpc/configs/powernv_defconfig @@ -111,7 +111,6 @@ CONFIG_BLK_DEV_NVME=y CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m CONFIG_BLK_DEV_SR=m -CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_CHR_DEV_SG=m CONFIG_SCSI_CONSTANTS=y CONFIG_SCSI_SCAN_ASYNC=y diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig index b250e6f5a7ca..5569d36066dc 100644 --- a/arch/powerpc/configs/ppc64_defconfig +++ b/arch/powerpc/configs/ppc64_defconfig @@ -110,7 +110,6 @@ CONFIG_VIRTIO_BLK=m CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m CONFIG_BLK_DEV_SR=y -CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_CHR_DEV_SG=y CONFIG_SCSI_CONSTANTS=y CONFIG_SCSI_FC_ATTRS=y diff --git a/arch/powerpc/configs/ppc64e_defconfig b/arch/powerpc/configs/ppc64e_defconfig index 0d746774c2bd..33a01a9e86be 100644 --- a/arch/powerpc/configs/ppc64e_defconfig +++ b/arch/powerpc/configs/ppc64e_defconfig @@ -60,7 +60,6 @@ CONFIG_BLK_DEV_RAM_SIZE=65536 CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=y CONFIG_BLK_DEV_SR=y -CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_CHR_DEV_SG=y CONFIG_SCSI_CONSTANTS=y CONFIG_SCSI_FC_ATTRS=y diff --git a/arch/powerpc/configs/ppc6xx_defconfig b/arch/powerpc/configs/ppc6xx_defconfig index 9dca4cffa623..682d68f39c2b 100644 --- a/arch/powerpc/configs/ppc6xx_defconfig +++ b/arch/powerpc/configs/ppc6xx_defconfig @@ -372,7 
+372,6 @@ CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m CONFIG_CHR_DEV_OSST=m CONFIG_BLK_DEV_SR=m -CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_CHR_DEV_SG=y CONFIG_CHR_DEV_SCH=m CONFIG_SCSI_ENCLOSURE=m @@ -778,7 +777,6 @@ CONFIG_FB_TRIDENT=m CONFIG_FB_SM501=m CONFIG_FB_IBM_GXT4500=y CONFIG_LCD_PLATFORM=m -CONFIG_VGACON_SOFT_SCROLLBACK=y CONFIG_FRAMEBUFFER_CONSOLE=y CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y CONFIG_LOGO=y diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig index 26126b4d4de3..d58686a66388 100644 --- a/arch/powerpc/configs/pseries_defconfig +++ b/arch/powerpc/configs/pseries_defconfig @@ -97,7 +97,6 @@ CONFIG_VIRTIO_BLK=m CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m CONFIG_BLK_DEV_SR=y -CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_CHR_DEV_SG=y CONFIG_SCSI_CONSTANTS=y CONFIG_SCSI_FC_ATTRS=y diff --git a/arch/powerpc/configs/skiroot_defconfig b/arch/powerpc/configs/skiroot_defconfig index 1253482a67c0..2e25b264f70f 100644 --- a/arch/powerpc/configs/skiroot_defconfig +++ b/arch/powerpc/configs/skiroot_defconfig @@ -83,7 +83,6 @@ CONFIG_EEPROM_AT24=m # CONFIG_OCXL is not set CONFIG_BLK_DEV_SD=m CONFIG_BLK_DEV_SR=m -CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_CHR_DEV_SG=m CONFIG_SCSI_CONSTANTS=y CONFIG_SCSI_SCAN_ASYNC=y diff --git a/arch/powerpc/include/asm/book3s/32/kup.h b/arch/powerpc/include/asm/book3s/32/kup.h index 1a2c80e8be84..6eb311eb818b 100644 --- a/arch/powerpc/include/asm/book3s/32/kup.h +++ b/arch/powerpc/include/asm/book3s/32/kup.h @@ -2,6 +2,7 @@ #ifndef _ASM_POWERPC_BOOK3S_32_KUP_H #define _ASM_POWERPC_BOOK3S_32_KUP_H +#include #include #ifdef __ASSEMBLY__ @@ -75,7 +76,7 @@ .macro kuap_check current, gpr #ifdef CONFIG_PPC_KUAP_DEBUG - lwz \gpr, KUAP(thread) + lwz \gpr, THREAD + KUAP(\current) 999: twnei \gpr, 0 EMIT_BUG_ENTRY 999b, __FILE__, __LINE__, (BUGFLAG_WARNING | BUGFLAG_ONCE) #endif diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h index bb3deb76c951..2f4ddc802fe9 100644 --- a/arch/powerpc/include/asm/book3s/64/mmu.h +++ b/arch/powerpc/include/asm/book3s/64/mmu.h @@ -225,14 +225,14 @@ static inline void early_init_mmu_secondary(void) extern void hash__setup_initial_memory_limit(phys_addr_t first_memblock_base, phys_addr_t first_memblock_size); -extern void radix__setup_initial_memory_limit(phys_addr_t first_memblock_base, - phys_addr_t first_memblock_size); static inline void setup_initial_memory_limit(phys_addr_t first_memblock_base, phys_addr_t first_memblock_size) { - if (early_radix_enabled()) - return radix__setup_initial_memory_limit(first_memblock_base, - first_memblock_size); + /* + * Hash has more strict restrictions. At this point we don't + * know which translations we will pick. Hence go with hash + * restrictions. 
+ */ return hash__setup_initial_memory_limit(first_memblock_base, first_memblock_size); } diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h index a143d394ff46..e1eb8aa9cfbb 100644 --- a/arch/powerpc/include/asm/book3s/64/pgtable.h +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h @@ -998,10 +998,25 @@ extern struct page *pgd_page(pgd_t pgd); #define pud_page_vaddr(pud) __va(pud_val(pud) & ~PUD_MASKED_BITS) #define pgd_page_vaddr(pgd) __va(pgd_val(pgd) & ~PGD_MASKED_BITS) -#define pgd_index(address) (((address) >> (PGDIR_SHIFT)) & (PTRS_PER_PGD - 1)) -#define pud_index(address) (((address) >> (PUD_SHIFT)) & (PTRS_PER_PUD - 1)) -#define pmd_index(address) (((address) >> (PMD_SHIFT)) & (PTRS_PER_PMD - 1)) -#define pte_index(address) (((address) >> (PAGE_SHIFT)) & (PTRS_PER_PTE - 1)) +static inline unsigned long pgd_index(unsigned long address) +{ + return (address >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1); +} + +static inline unsigned long pud_index(unsigned long address) +{ + return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1); +} + +static inline unsigned long pmd_index(unsigned long address) +{ + return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1); +} + +static inline unsigned long pte_index(unsigned long address) +{ + return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1); +} /* * Find an entry in a page-table-directory. We combine the address region diff --git a/arch/powerpc/include/asm/fadump-internal.h b/arch/powerpc/include/asm/fadump-internal.h index c814a2b55389..8d61c8f3fec4 100644 --- a/arch/powerpc/include/asm/fadump-internal.h +++ b/arch/powerpc/include/asm/fadump-internal.h @@ -64,12 +64,14 @@ struct fadump_memory_range { }; /* fadump memory ranges info */ +#define RNG_NAME_SZ 16 struct fadump_mrange_info { - char name[16]; + char name[RNG_NAME_SZ]; struct fadump_memory_range *mem_ranges; u32 mem_ranges_sz; u32 mem_range_cnt; u32 max_mem_ranges; + bool is_static; }; /* Platform specific callback functions */ diff --git a/arch/powerpc/include/asm/kasan.h b/arch/powerpc/include/asm/kasan.h index 296e51c2f066..6db06f58deed 100644 --- a/arch/powerpc/include/asm/kasan.h +++ b/arch/powerpc/include/asm/kasan.h @@ -23,9 +23,7 @@ #define KASAN_SHADOW_OFFSET ASM_CONST(CONFIG_KASAN_SHADOW_OFFSET) -#define KASAN_SHADOW_END 0UL - -#define KASAN_SHADOW_SIZE (KASAN_SHADOW_END - KASAN_SHADOW_START) +#define KASAN_SHADOW_END (-(-KASAN_SHADOW_START >> KASAN_SHADOW_SCALE_SHIFT)) #ifdef CONFIG_KASAN void kasan_early_init(void); diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index 6fe6ad64cba5..740b52ec3509 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -58,7 +58,8 @@ #define KVM_ARCH_WANT_MMU_NOTIFIER extern int kvm_unmap_hva_range(struct kvm *kvm, - unsigned long start, unsigned long end); + unsigned long start, unsigned long end, + unsigned flags); extern int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end); extern int kvm_test_age_hva(struct kvm *kvm, unsigned long hva); extern int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); diff --git a/arch/powerpc/include/asm/percpu.h b/arch/powerpc/include/asm/percpu.h index dce863a7635c..8e5b7d0b851c 100644 --- a/arch/powerpc/include/asm/percpu.h +++ b/arch/powerpc/include/asm/percpu.h @@ -10,8 +10,6 @@ #ifdef CONFIG_SMP -#include - #define __my_cpu_offset local_paca->data_offset #endif /* CONFIG_SMP */ @@ -19,4 +17,6 @@ #include +#include + #endif /* _ASM_POWERPC_PERCPU_H_ */ diff 
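Converting pgd_index()/pud_index()/pmd_index()/pte_index() above from macros to static inlines keeps the generated code identical while giving each helper a real prototype, so the address argument is type-checked instead of being pasted in textually. A minimal userspace contrast (shift and table size are made-up values):

#include <stdio.h>

#define SHIFT	12
#define PTRS	512

#define IDX_MACRO(addr)	(((addr) >> SHIFT) & (PTRS - 1))

static inline unsigned long idx_inline(unsigned long addr)
{
	return (addr >> SHIFT) & (PTRS - 1);
}

int main(void)
{
	unsigned long addr = 0x12345678UL;

	/* both print 325 (0x145); the inline form type-checks its argument */
	printf("%lu %lu\n", IDX_MACRO(addr), idx_inline(addr));
	return 0;
}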
--git a/arch/powerpc/include/asm/perf_event.h b/arch/powerpc/include/asm/perf_event.h index 7426d7a90e1e..7aba3c7ea25c 100644 --- a/arch/powerpc/include/asm/perf_event.h +++ b/arch/powerpc/include/asm/perf_event.h @@ -12,6 +12,8 @@ #ifdef CONFIG_PPC_PERF_CTRS #include +#else +static inline bool is_sier_available(void) { return false; } #endif #ifdef CONFIG_FSL_EMB_PERF_EVENT diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h index a9993e7a443b..64b998db9d3e 100644 --- a/arch/powerpc/include/asm/processor.h +++ b/arch/powerpc/include/asm/processor.h @@ -291,7 +291,6 @@ struct thread_struct { #else #define INIT_THREAD { \ .ksp = INIT_SP, \ - .regs = (struct pt_regs *)INIT_SP - 1, /* XXX bogus, I think */ \ .addr_limit = KERNEL_DS, \ .fpexc_mode = 0, \ .fscr = FSCR_TAR | FSCR_EBB \ diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h index ee3ada66deb5..c41220f4aad9 100644 --- a/arch/powerpc/include/asm/ptrace.h +++ b/arch/powerpc/include/asm/ptrace.h @@ -203,7 +203,7 @@ do { \ #endif /* __powerpc64__ */ #define arch_has_single_step() (1) -#ifndef CONFIG_BOOK3S_601 +#ifndef CONFIG_PPC_BOOK3S_601 #define arch_has_block_step() (true) #else #define arch_has_block_step() (false) diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h index 3c1887351c71..bd227e0eab07 100644 --- a/arch/powerpc/include/asm/rtas.h +++ b/arch/powerpc/include/asm/rtas.h @@ -368,8 +368,6 @@ extern int rtas_set_indicator_fast(int indicator, int index, int new_value); extern void rtas_progress(char *s, unsigned short hex); extern int rtas_suspend_cpu(struct rtas_suspend_me_data *data); extern int rtas_suspend_last_cpu(struct rtas_suspend_me_data *data); -extern int rtas_online_cpus_mask(cpumask_var_t cpus); -extern int rtas_offline_cpus_mask(cpumask_var_t cpus); extern int rtas_ibm_suspend_me(u64 handle); struct rtc_time; diff --git a/arch/powerpc/include/asm/timex.h b/arch/powerpc/include/asm/timex.h index d2d2c4bd8435..6047402b0a4d 100644 --- a/arch/powerpc/include/asm/timex.h +++ b/arch/powerpc/include/asm/timex.h @@ -17,7 +17,7 @@ typedef unsigned long cycles_t; static inline cycles_t get_cycles(void) { - if (IS_ENABLED(CONFIG_BOOK3S_601)) + if (IS_ENABLED(CONFIG_PPC_BOOK3S_601)) return 0; return mftb(); diff --git a/arch/powerpc/kernel/cpu_setup_power.S b/arch/powerpc/kernel/cpu_setup_power.S index a460298c7ddb..f91ecb10d0ae 100644 --- a/arch/powerpc/kernel/cpu_setup_power.S +++ b/arch/powerpc/kernel/cpu_setup_power.S @@ -184,7 +184,7 @@ __init_LPCR_ISA300: __init_FSCR: mfspr r3,SPRN_FSCR - ori r3,r3,FSCR_TAR|FSCR_DSCR|FSCR_EBB + ori r3,r3,FSCR_TAR|FSCR_EBB mtspr SPRN_FSCR,r3 blr diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c index e486d1d78de2..f4cb2c546adb 100644 --- a/arch/powerpc/kernel/dma-iommu.c +++ b/arch/powerpc/kernel/dma-iommu.c @@ -160,7 +160,8 @@ u64 dma_iommu_get_required_mask(struct device *dev) return bypass_mask; } - mask = 1ULL < (fls_long(tbl->it_offset + tbl->it_size) - 1); + mask = 1ULL << (fls_long(tbl->it_offset + tbl->it_size) + + tbl->it_page_shift - 1); mask += mask - 1; return mask; diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c index 05606025a131..3551f11accf0 100644 --- a/arch/powerpc/kernel/dt_cpu_ftrs.c +++ b/arch/powerpc/kernel/dt_cpu_ftrs.c @@ -346,6 +346,14 @@ static int __init feat_enable_dscr(struct dt_cpu_feature *f) { u64 lpcr; + /* + * Linux relies on FSCR[DSCR] being clear, so that we can take the + * facility unavailable 
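The dma-iommu.c hunk above fixes two problems in one line: the old code used '<' where a shift was intended, and it sized the mask in IOMMU table entries rather than bytes. Re-running the corrected arithmetic in userspace with a made-up table geometry (0x180000 pages of 4 KiB starting at offset 0):

#include <stdio.h>

/* fls_long() equivalent for an LP64 userspace build */
static int fls_long(unsigned long x)
{
	return x ? 64 - __builtin_clzl(x) : 0;
}

int main(void)
{
	unsigned long it_offset = 0, it_size = 0x180000, it_page_shift = 12;
	unsigned long long mask;

	mask = 1ULL << (fls_long(it_offset + it_size) + it_page_shift - 1);
	mask += mask - 1;
	printf("0x%llx\n", mask);	/* 0x1ffffffff, covering the 6 GiB DMA window */
	return 0;
}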
interrupt and track the task's usage of DSCR. + * See facility_unavailable_exception(). + * Clear the bit here so that feat_enable() doesn't set it. + */ + f->fscr_bit_nr = -1; + feat_enable(f); lpcr = mfspr(SPRN_LPCR); diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index d0018dd17e0a..70ac8a6ba0c1 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -1090,17 +1090,19 @@ EXC_COMMON_BEGIN(machine_check_idle_common) bl machine_check_queue_event /* - * We have not used any non-volatile GPRs here, and as a rule - * most exception code including machine check does not. - * Therefore PACA_NAPSTATELOST does not need to be set. Idle - * wakeup will restore volatile registers. + * GPR-loss wakeups are relatively straightforward, because the + * idle sleep code has saved all non-volatile registers on its + * own stack, and r1 in PACAR1. * - * Load the original SRR1 into r3 for pnv_powersave_wakeup_mce. + * For no-loss wakeups the r1 and lr registers used by the + * early machine check handler have to be restored first. r2 is + * the kernel TOC, so no need to restore it. * * Then decrement MCE nesting after finishing with the stack. */ ld r3,_MSR(r1) ld r4,_LINK(r1) + ld r1,GPR1(r1) lhz r11,PACA_IN_MCE(r13) subi r11,r11,1 @@ -1109,7 +1111,7 @@ EXC_COMMON_BEGIN(machine_check_idle_common) mtlr r4 rlwinm r10,r3,47-31,30,31 cmpwi cr1,r10,2 - bltlr cr1 /* no state loss, return to idle caller */ + bltlr cr1 /* no state loss, return to idle caller with r3=SRR1 */ b idle_return_gpr_loss #endif diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c index ed59855430b9..9b522152d8f0 100644 --- a/arch/powerpc/kernel/fadump.c +++ b/arch/powerpc/kernel/fadump.c @@ -38,8 +38,17 @@ static void __init fadump_reserve_crash_area(u64 base); #ifndef CONFIG_PRESERVE_FA_DUMP static DEFINE_MUTEX(fadump_mutex); -struct fadump_mrange_info crash_mrange_info = { "crash", NULL, 0, 0, 0 }; -struct fadump_mrange_info reserved_mrange_info = { "reserved", NULL, 0, 0, 0 }; +struct fadump_mrange_info crash_mrange_info = { "crash", NULL, 0, 0, 0, false }; + +#define RESERVED_RNGS_SZ 16384 /* 16K - 128 entries */ +#define RESERVED_RNGS_CNT (RESERVED_RNGS_SZ / \ + sizeof(struct fadump_memory_range)) +static struct fadump_memory_range rngs[RESERVED_RNGS_CNT]; +struct fadump_mrange_info reserved_mrange_info = { "reserved", rngs, + RESERVED_RNGS_SZ, 0, + RESERVED_RNGS_CNT, true }; + +static void __init early_init_dt_scan_reserved_ranges(unsigned long node); #ifdef CONFIG_CMA static struct cma *fadump_cma; @@ -108,6 +117,11 @@ static int __init fadump_cma_init(void) { return 1; } int __init early_init_dt_scan_fw_dump(unsigned long node, const char *uname, int depth, void *data) { + if (depth == 0) { + early_init_dt_scan_reserved_ranges(node); + return 0; + } + if (depth != 1) return 0; @@ -429,10 +443,72 @@ static int __init fadump_get_boot_mem_regions(void) return ret; } +/* + * Returns true, if the given range overlaps with reserved memory ranges + * starting at idx. Also, updates idx to index of overlapping memory range + * with the given memory range. + * False, otherwise. 
+ */ +static bool overlaps_reserved_ranges(u64 base, u64 end, int *idx) +{ + bool ret = false; + int i; + + for (i = *idx; i < reserved_mrange_info.mem_range_cnt; i++) { + u64 rbase = reserved_mrange_info.mem_ranges[i].base; + u64 rend = rbase + reserved_mrange_info.mem_ranges[i].size; + + if (end <= rbase) + break; + + if ((end > rbase) && (base < rend)) { + *idx = i; + ret = true; + break; + } + } + + return ret; +} + +/* + * Locate a suitable memory area to reserve memory for FADump. While at it, + * lookup reserved-ranges & avoid overlap with them, as they are used by F/W. + */ +static u64 __init fadump_locate_reserve_mem(u64 base, u64 size) +{ + struct fadump_memory_range *mrngs; + phys_addr_t mstart, mend; + int idx = 0; + u64 i, ret = 0; + + mrngs = reserved_mrange_info.mem_ranges; + for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, + &mstart, &mend, NULL) { + pr_debug("%llu) mstart: %llx, mend: %llx, base: %llx\n", + i, mstart, mend, base); + + if (mstart > base) + base = PAGE_ALIGN(mstart); + + while ((mend > base) && ((mend - base) >= size)) { + if (!overlaps_reserved_ranges(base, base+size, &idx)) { + ret = base; + goto out; + } + + base = mrngs[idx].base + mrngs[idx].size; + base = PAGE_ALIGN(base); + } + } + +out: + return ret; +} + int __init fadump_reserve_mem(void) { - u64 base, size, mem_boundary, bootmem_min, align = PAGE_SIZE; - bool is_memblock_bottom_up = memblock_bottom_up(); + u64 base, size, mem_boundary, bootmem_min; int ret = 1; if (!fw_dump.fadump_enabled) @@ -453,9 +529,9 @@ int __init fadump_reserve_mem(void) PAGE_ALIGN(fadump_calculate_reserve_size()); #ifdef CONFIG_CMA if (!fw_dump.nocma) { - align = FADUMP_CMA_ALIGNMENT; fw_dump.boot_memory_size = - ALIGN(fw_dump.boot_memory_size, align); + ALIGN(fw_dump.boot_memory_size, + FADUMP_CMA_ALIGNMENT); } #endif @@ -523,13 +599,9 @@ int __init fadump_reserve_mem(void) * Reserve memory at an offset closer to bottom of the RAM to * minimize the impact of memory hot-remove operation. */ - memblock_set_bottom_up(true); - base = memblock_find_in_range(base, mem_boundary, size, align); + base = fadump_locate_reserve_mem(base, size); - /* Restore the previous allocation mode */ - memblock_set_bottom_up(is_memblock_bottom_up); - - if (!base) { + if (!base || (base + size > mem_boundary)) { pr_err("Failed to find memory chunk for reservation!\n"); goto error_out; } @@ -726,10 +798,14 @@ void fadump_free_cpu_notes_buf(void) static void fadump_free_mem_ranges(struct fadump_mrange_info *mrange_info) { + if (mrange_info->is_static) { + mrange_info->mem_range_cnt = 0; + return; + } + kfree(mrange_info->mem_ranges); - mrange_info->mem_ranges = NULL; - mrange_info->mem_ranges_sz = 0; - mrange_info->max_mem_ranges = 0; + memset((void *)((u64)mrange_info + RNG_NAME_SZ), 0, + (sizeof(struct fadump_mrange_info) - RNG_NAME_SZ)); } /* @@ -786,6 +862,12 @@ static inline int fadump_add_mem_range(struct fadump_mrange_info *mrange_info, if (mrange_info->mem_range_cnt == mrange_info->max_mem_ranges) { int ret; + if (mrange_info->is_static) { + pr_err("Reached array size limit for %s memory ranges\n", + mrange_info->name); + return -ENOSPC; + } + ret = fadump_alloc_mem_ranges(mrange_info); if (ret) return ret; @@ -1202,20 +1284,19 @@ static void sort_and_merge_mem_ranges(struct fadump_mrange_info *mrange_info) * Scan reserved-ranges to consider them while reserving/releasing * memory for FADump. 
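overlaps_reserved_ranges() below uses the standard half-open interval test: [base, end) intersects [rbase, rend) exactly when end > rbase and base < rend, and on a hit fadump_locate_reserve_mem() resumes the search just past the reserved range. The predicate in isolation, with arbitrary example addresses:

#include <stdio.h>
#include <stdbool.h>

static bool overlaps(unsigned long long base, unsigned long long end,
		     unsigned long long rbase, unsigned long long rend)
{
	return end > rbase && base < rend;
}

int main(void)
{
	/* candidate 0x40000000-0x60000000 vs reserved 0x50000000-0x58000000 */
	printf("%d\n", overlaps(0x40000000ULL, 0x60000000ULL,
				0x50000000ULL, 0x58000000ULL));	/* 1: overlap */
	/* retried just past the reserved range */
	printf("%d\n", overlaps(0x58000000ULL, 0x78000000ULL,
				0x50000000ULL, 0x58000000ULL));	/* 0: usable */
	return 0;
}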
*/ -static inline int fadump_scan_reserved_mem_ranges(void) +static void __init early_init_dt_scan_reserved_ranges(unsigned long node) { - struct device_node *root; const __be32 *prop; int len, ret = -1; unsigned long i; - root = of_find_node_by_path("/"); - if (!root) - return ret; + /* reserved-ranges already scanned */ + if (reserved_mrange_info.mem_range_cnt != 0) + return; - prop = of_get_property(root, "reserved-ranges", &len); + prop = of_get_flat_dt_prop(node, "reserved-ranges", &len); if (!prop) - return ret; + return; /* * Each reserved range is an (address,size) pair, 2 cells each, @@ -1237,7 +1318,8 @@ static inline int fadump_scan_reserved_mem_ranges(void) } } - return ret; + /* Compact reserved ranges */ + sort_and_merge_mem_ranges(&reserved_mrange_info); } /* @@ -1251,32 +1333,21 @@ static void fadump_release_memory(u64 begin, u64 end) u64 ra_start, ra_end, tstart; int i, ret; - fadump_scan_reserved_mem_ranges(); - ra_start = fw_dump.reserve_dump_area_start; ra_end = ra_start + fw_dump.reserve_dump_area_size; /* - * Add reserved dump area to reserved ranges list - * and exclude all these ranges while releasing memory. + * If reserved ranges array limit is hit, overwrite the last reserved + * memory range with reserved dump area to ensure it is excluded from + * the memory being released (reused for next FADump registration). */ - ret = fadump_add_mem_range(&reserved_mrange_info, ra_start, ra_end); - if (ret != 0) { - /* - * Not enough memory to setup reserved ranges but the system is - * running shortage of memory. So, release all the memory except - * Reserved dump area (reused for next fadump registration). - */ - if (begin < ra_end && end > ra_start) { - if (begin < ra_start) - fadump_release_reserved_area(begin, ra_start); - if (end > ra_end) - fadump_release_reserved_area(ra_end, end); - } else - fadump_release_reserved_area(begin, end); + if (reserved_mrange_info.mem_range_cnt == + reserved_mrange_info.max_mem_ranges) + reserved_mrange_info.mem_range_cnt--; + ret = fadump_add_mem_range(&reserved_mrange_info, ra_start, ra_end); + if (ret != 0) return; - } /* Get the reserved ranges list in order first. */ sort_and_merge_mem_ranges(&reserved_mrange_info); diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S index a7f356d121ae..53aa8f23e09f 100644 --- a/arch/powerpc/kernel/head_64.S +++ b/arch/powerpc/kernel/head_64.S @@ -945,15 +945,8 @@ start_here_multiplatform: std r0,0(r4) #endif - /* The following gets the stack set up with the regs */ - /* pointing to the real addr of the kernel stack. This is */ - /* all done to support the C function call below which sets */ - /* up the htab. This is done because we have relocated the */ - /* kernel but are still running in real mode. */ - - LOAD_REG_ADDR(r3,init_thread_union) - /* set up a stack pointer */ + LOAD_REG_ADDR(r3,init_thread_union) LOAD_REG_IMMEDIATE(r1,THREAD_SIZE) add r1,r3,r1 li r0,0 diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c index c4ed328a7b96..7a1c11a7cba5 100644 --- a/arch/powerpc/kernel/machine_kexec.c +++ b/arch/powerpc/kernel/machine_kexec.c @@ -114,11 +114,12 @@ void machine_kexec(struct kimage *image) void __init reserve_crashkernel(void) { - unsigned long long crash_size, crash_base; + unsigned long long crash_size, crash_base, total_mem_sz; int ret; + total_mem_sz = memory_limit ? 
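As the comment in the hunk above notes, each reserved range in the flat device tree is an (address, size) pair of big-endian cells, two cells apiece. A userspace sketch of decoding one such pair (the property contents are invented for the example):

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

int main(void)
{
	/* one range: base 0x30000000, size 0x10000000, as four 32-bit BE cells */
	uint32_t prop[] = {
		htonl(0x0), htonl(0x30000000),	/* address cells */
		htonl(0x0), htonl(0x10000000),	/* size cells */
	};
	uint64_t base = ((uint64_t)ntohl(prop[0]) << 32) | ntohl(prop[1]);
	uint64_t size = ((uint64_t)ntohl(prop[2]) << 32) | ntohl(prop[3]);

	printf("base=0x%llx size=0x%llx\n",
	       (unsigned long long)base, (unsigned long long)size);
	return 0;
}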
memory_limit : memblock_phys_mem_size(); /* use common parsing */ - ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(), + ret = parse_crashkernel(boot_command_line, total_mem_sz, &crash_size, &crash_base); if (ret == 0 && crash_size > 0) { crashk_res.start = crash_base; @@ -177,6 +178,7 @@ void __init reserve_crashkernel(void) /* Crash kernel trumps memory limit */ if (memory_limit && memory_limit <= crashk_res.end) { memory_limit = crashk_res.end + 1; + total_mem_sz = memory_limit; printk("Adjusted memory limit for crashkernel, now 0x%llx\n", memory_limit); } @@ -185,7 +187,7 @@ void __init reserve_crashkernel(void) "for crashkernel (System RAM: %ldMB)\n", (unsigned long)(crash_size >> 20), (unsigned long)(crashk_res.start >> 20), - (unsigned long)(memblock_phys_mem_size() >> 20)); + (unsigned long)(total_mem_sz >> 20)); if (!memblock_is_region_memory(crashk_res.start, crash_size) || memblock_reserve(crashk_res.start, crash_size)) { diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c index 3f91ccaa9c74..4ea0cca52e16 100644 --- a/arch/powerpc/kernel/paca.c +++ b/arch/powerpc/kernel/paca.c @@ -86,7 +86,7 @@ static void *__init alloc_shared_lppaca(unsigned long size, unsigned long align, * This is very early in boot, so no harm done if the kernel crashes at * this point. */ - BUG_ON(shared_lppaca_size >= shared_lppaca_total_size); + BUG_ON(shared_lppaca_size > shared_lppaca_total_size); return ptr; } diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 639ceae7da9d..bd0c258a1d5d 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -1218,29 +1218,31 @@ struct task_struct *__switch_to(struct task_struct *prev, static void show_instructions(struct pt_regs *regs) { int i; + unsigned long nip = regs->nip; unsigned long pc = regs->nip - (NR_INSN_TO_PRINT * 3 / 4 * sizeof(int)); printk("Instruction dump:"); + /* + * If we were executing with the MMU off for instructions, adjust pc + * rather than printing XXXXXXXX. + */ + if (!IS_ENABLED(CONFIG_BOOKE) && !(regs->msr & MSR_IR)) { + pc = (unsigned long)phys_to_virt(pc); + nip = (unsigned long)phys_to_virt(regs->nip); + } + for (i = 0; i < NR_INSN_TO_PRINT; i++) { int instr; if (!(i % 8)) pr_cont("\n"); -#if !defined(CONFIG_BOOKE) - /* If executing with the IMMU off, adjust pc rather - * than print XXXXXXXX. - */ - if (!(regs->msr & MSR_IR)) - pc = (unsigned long)phys_to_virt(pc); -#endif - if (!__kernel_text_address(pc) || probe_kernel_address((const void *)pc, instr)) { pr_cont("XXXXXXXX "); } else { - if (regs->nip == pc) + if (nip == pc) pr_cont("<%08x> ", instr); else pr_cont("%08x ", instr); diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index 6620f37abe73..e13e96e665e0 100644 --- a/arch/powerpc/kernel/prom.c +++ b/arch/powerpc/kernel/prom.c @@ -685,6 +685,23 @@ static void __init tm_init(void) static void tm_init(void) { } #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ +#ifdef CONFIG_PPC64 +static void __init save_fscr_to_task(void) +{ + /* + * Ensure the init_task (pid 0, aka swapper) uses the value of FSCR we + * have configured via the device tree features or via __init_FSCR(). + * That value will then be propagated to pid 1 (init) and all future + * processes. 
+ */ + if (early_cpu_has_feature(CPU_FTR_ARCH_207S)) + init_task.thread.fscr = mfspr(SPRN_FSCR); +} +#else +static inline void save_fscr_to_task(void) {}; +#endif + + void __init early_init_devtree(void *params) { phys_addr_t limit; @@ -773,6 +790,8 @@ void __init early_init_devtree(void *params) BUG(); } + save_fscr_to_task(); + #if defined(CONFIG_SMP) && defined(CONFIG_PPC64) /* We'll later wait for secondaries to check in; there are * NCPUS-1 non-boot CPUs :-) diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c index c5fa251b8950..01210593d60c 100644 --- a/arch/powerpc/kernel/rtas.c +++ b/arch/powerpc/kernel/rtas.c @@ -842,96 +842,6 @@ static void rtas_percpu_suspend_me(void *info) __rtas_suspend_cpu((struct rtas_suspend_me_data *)info, 1); } -enum rtas_cpu_state { - DOWN, - UP, -}; - -#ifndef CONFIG_SMP -static int rtas_cpu_state_change_mask(enum rtas_cpu_state state, - cpumask_var_t cpus) -{ - if (!cpumask_empty(cpus)) { - cpumask_clear(cpus); - return -EINVAL; - } else - return 0; -} -#else -/* On return cpumask will be altered to indicate CPUs changed. - * CPUs with states changed will be set in the mask, - * CPUs with status unchanged will be unset in the mask. */ -static int rtas_cpu_state_change_mask(enum rtas_cpu_state state, - cpumask_var_t cpus) -{ - int cpu; - int cpuret = 0; - int ret = 0; - - if (cpumask_empty(cpus)) - return 0; - - for_each_cpu(cpu, cpus) { - struct device *dev = get_cpu_device(cpu); - - switch (state) { - case DOWN: - cpuret = device_offline(dev); - break; - case UP: - cpuret = device_online(dev); - break; - } - if (cpuret < 0) { - pr_debug("%s: cpu_%s for cpu#%d returned %d.\n", - __func__, - ((state == UP) ? "up" : "down"), - cpu, cpuret); - if (!ret) - ret = cpuret; - if (state == UP) { - /* clear bits for unchanged cpus, return */ - cpumask_shift_right(cpus, cpus, cpu); - cpumask_shift_left(cpus, cpus, cpu); - break; - } else { - /* clear bit for unchanged cpu, continue */ - cpumask_clear_cpu(cpu, cpus); - } - } - cond_resched(); - } - - return ret; -} -#endif - -int rtas_online_cpus_mask(cpumask_var_t cpus) -{ - int ret; - - ret = rtas_cpu_state_change_mask(UP, cpus); - - if (ret) { - cpumask_var_t tmp_mask; - - if (!alloc_cpumask_var(&tmp_mask, GFP_KERNEL)) - return ret; - - /* Use tmp_mask to preserve cpus mask from first failure */ - cpumask_copy(tmp_mask, cpus); - rtas_offline_cpus_mask(tmp_mask); - free_cpumask_var(tmp_mask); - } - - return ret; -} - -int rtas_offline_cpus_mask(cpumask_var_t cpus) -{ - return rtas_cpu_state_change_mask(DOWN, cpus); -} - int rtas_ibm_suspend_me(u64 handle) { long state; @@ -939,8 +849,6 @@ int rtas_ibm_suspend_me(u64 handle) unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; struct rtas_suspend_me_data data; DECLARE_COMPLETION_ONSTACK(done); - cpumask_var_t offline_mask; - int cpuret; if (!rtas_service_present("ibm,suspend-me")) return -ENOSYS; @@ -961,9 +869,6 @@ int rtas_ibm_suspend_me(u64 handle) return -EIO; } - if (!alloc_cpumask_var(&offline_mask, GFP_KERNEL)) - return -ENOMEM; - atomic_set(&data.working, 0); atomic_set(&data.done, 0); atomic_set(&data.error, 0); @@ -972,24 +877,8 @@ int rtas_ibm_suspend_me(u64 handle) lock_device_hotplug(); - /* All present CPUs must be online */ - cpumask_andnot(offline_mask, cpu_present_mask, cpu_online_mask); - cpuret = rtas_online_cpus_mask(offline_mask); - if (cpuret) { - pr_err("%s: Could not bring present CPUs online.\n", __func__); - atomic_set(&data.error, cpuret); - goto out; - } - cpu_hotplug_disable(); - /* Check if we raced with a CPU-Offline 
Operation */ - if (!cpumask_equal(cpu_present_mask, cpu_online_mask)) { - pr_info("%s: Raced against a concurrent CPU-Offline\n", __func__); - atomic_set(&data.error, -EAGAIN); - goto out_hotplug_enable; - } - /* Call function on all CPUs. One of us will make the * rtas call */ @@ -1000,18 +889,11 @@ int rtas_ibm_suspend_me(u64 handle) if (atomic_read(&data.error) != 0) printk(KERN_ERR "Error doing global join\n"); -out_hotplug_enable: + cpu_hotplug_enable(); - /* Take down CPUs not online prior to suspend */ - cpuret = rtas_offline_cpus_mask(offline_mask); - if (cpuret) - pr_warn("%s: Could not restore CPUs to offline state.\n", - __func__); - -out: unlock_device_hotplug(); - free_cpumask_var(offline_mask); + return atomic_read(&data.error); } #else /* CONFIG_PPC_PSERIES */ diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c index eae9ddaecbcf..efb1ba40274a 100644 --- a/arch/powerpc/kernel/vdso.c +++ b/arch/powerpc/kernel/vdso.c @@ -682,7 +682,7 @@ int vdso_getcpu_init(void) node = cpu_to_node(cpu); WARN_ON_ONCE(node > 0xffff); - val = (cpu & 0xfff) | ((node & 0xffff) << 16); + val = (cpu & 0xffff) | ((node & 0xffff) << 16); mtspr(SPRN_SPRG_VDSO_WRITE, val); get_paca()->sprg_vdso = val; diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c index ec2547cc5ecb..1ff971f3b06f 100644 --- a/arch/powerpc/kvm/book3s.c +++ b/arch/powerpc/kvm/book3s.c @@ -867,7 +867,8 @@ void kvmppc_core_commit_memory_region(struct kvm *kvm, kvm->arch.kvm_ops->commit_memory_region(kvm, mem, old, new, change); } -int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end) +int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end, + unsigned flags) { return kvm->arch.kvm_ops->unmap_hva_range(kvm, start, end); } diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c index 2d415c36a61d..da8375437d16 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_radix.c +++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c @@ -38,7 +38,8 @@ unsigned long __kvmhv_copy_tofrom_guest_radix(int lpid, int pid, /* Can't access quadrants 1 or 2 in non-HV mode, call the HV to do it */ if (kvmhv_on_pseries()) return plpar_hcall_norets(H_COPY_TOFROM_GUEST, lpid, pid, eaddr, - __pa(to), __pa(from), n); + (to != NULL) ? __pa(to): 0, + (from != NULL) ? __pa(from): 0, n); quadrant = 1; if (!pid) @@ -353,7 +354,13 @@ static struct kmem_cache *kvm_pmd_cache; static pte_t *kvmppc_pte_alloc(void) { - return kmem_cache_alloc(kvm_pte_cache, GFP_KERNEL); + pte_t *pte; + + pte = kmem_cache_alloc(kvm_pte_cache, GFP_KERNEL); + /* pmd_populate() will only reference _pa(pte). */ + kmemleak_ignore(pte); + + return pte; } static void kvmppc_pte_free(pte_t *ptep) @@ -363,7 +370,13 @@ static void kvmppc_pte_free(pte_t *ptep) static pmd_t *kvmppc_pmd_alloc(void) { - return kmem_cache_alloc(kvm_pmd_cache, GFP_KERNEL); + pmd_t *pmd; + + pmd = kmem_cache_alloc(kvm_pmd_cache, GFP_KERNEL); + /* pud_populate() will only reference _pa(pmd). 
*/ + kmemleak_ignore(pmd); + + return pmd; } static void kvmppc_pmd_free(pmd_t *pmdp) diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c index 5834db0a54c6..03b947429e4d 100644 --- a/arch/powerpc/kvm/book3s_64_vio.c +++ b/arch/powerpc/kvm/book3s_64_vio.c @@ -74,6 +74,7 @@ extern void kvm_spapr_tce_release_iommu_group(struct kvm *kvm, struct kvmppc_spapr_tce_iommu_table *stit, *tmp; struct iommu_table_group *table_group = NULL; + rcu_read_lock(); list_for_each_entry_rcu(stt, &kvm->arch.spapr_tce_tables, list) { table_group = iommu_group_get_iommudata(grp); @@ -88,7 +89,9 @@ extern void kvm_spapr_tce_release_iommu_group(struct kvm *kvm, kref_put(&stit->kref, kvm_spapr_tce_liobn_put); } } + cond_resched_rcu(); } + rcu_read_unlock(); } extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd, @@ -106,12 +109,14 @@ extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd, if (!f.file) return -EBADF; + rcu_read_lock(); list_for_each_entry_rcu(stt, &kvm->arch.spapr_tce_tables, list) { if (stt == f.file->private_data) { found = true; break; } } + rcu_read_unlock(); fdput(f); @@ -144,6 +149,7 @@ extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd, if (!tbl) return -EINVAL; + rcu_read_lock(); list_for_each_entry_rcu(stit, &stt->iommu_tables, next) { if (tbl != stit->tbl) continue; @@ -151,14 +157,17 @@ extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd, if (!kref_get_unless_zero(&stit->kref)) { /* stit is being destroyed */ iommu_tce_table_put(tbl); + rcu_read_unlock(); return -ENOTTY; } /* * The table is already known to this KVM, we just increased * its KVM reference counter and can return. */ + rcu_read_unlock(); return 0; } + rcu_read_unlock(); stit = kzalloc(sizeof(*stit), GFP_KERNEL); if (!stit) { @@ -364,18 +373,19 @@ static long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *stt, if (kvmppc_tce_to_ua(stt->kvm, tce, &ua)) return H_TOO_HARD; + rcu_read_lock(); list_for_each_entry_rcu(stit, &stt->iommu_tables, next) { unsigned long hpa = 0; struct mm_iommu_table_group_mem_t *mem; long shift = stit->tbl->it_page_shift; mem = mm_iommu_lookup(stt->kvm->mm, ua, 1ULL << shift); - if (!mem) - return H_TOO_HARD; - - if (mm_iommu_ua_to_hpa(mem, ua, shift, &hpa)) + if (!mem || mm_iommu_ua_to_hpa(mem, ua, shift, &hpa)) { + rcu_read_unlock(); return H_TOO_HARD; + } } + rcu_read_unlock(); return H_SUCCESS; } diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c index 321db0fdb9db..7154bd424d24 100644 --- a/arch/powerpc/kvm/e500_mmu_host.c +++ b/arch/powerpc/kvm/e500_mmu_host.c @@ -734,7 +734,8 @@ static int kvm_unmap_hva(struct kvm *kvm, unsigned long hva) return 0; } -int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end) +int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end, + unsigned flags) { /* kvm_unmap_hva flushes everything anyways */ kvm_unmap_hva(kvm, start); diff --git a/arch/powerpc/mm/book3s32/mmu.c b/arch/powerpc/mm/book3s32/mmu.c index 84d5fab94f8f..1424a120710e 100644 --- a/arch/powerpc/mm/book3s32/mmu.c +++ b/arch/powerpc/mm/book3s32/mmu.c @@ -187,6 +187,7 @@ void mmu_mark_initmem_nx(void) int i; unsigned long base = (unsigned long)_stext - PAGE_OFFSET; unsigned long top = (unsigned long)_etext - PAGE_OFFSET; + unsigned long border = (unsigned long)__init_begin - PAGE_OFFSET; unsigned long size; if (IS_ENABLED(CONFIG_PPC_BOOK3S_601)) @@ -201,9 +202,10 @@ void mmu_mark_initmem_nx(void) size = 
block_size(base, top); size = max(size, 128UL << 10); if ((top - base) > size) { - if (strict_kernel_rwx_enabled()) - pr_warn("Kernel _etext not properly aligned\n"); size <<= 1; + if (strict_kernel_rwx_enabled() && base + size > border) + pr_warn("Some RW data is getting mapped X. " + "Adjust CONFIG_DATA_SHIFT to avoid that.\n"); } setibat(i++, PAGE_OFFSET + base, base, size, PAGE_KERNEL_TEXT); base += size; diff --git a/arch/powerpc/mm/book3s64/pkeys.c b/arch/powerpc/mm/book3s64/pkeys.c index ae7fca40e5b3..432fd9fa8c3f 100644 --- a/arch/powerpc/mm/book3s64/pkeys.c +++ b/arch/powerpc/mm/book3s64/pkeys.c @@ -83,13 +83,17 @@ static int pkey_initialize(void) scan_pkey_feature(); /* - * Let's assume 32 pkeys on P8 bare metal, if its not defined by device - * tree. We make this exception since skiboot forgot to expose this - * property on power8. + * Let's assume 32 pkeys on P8/P9 bare metal, if it's not defined by device + * tree. We make this exception since some versions of skiboot forgot to + * expose this property on power8/9. */ - if (!pkeys_devtree_defined && !firmware_has_feature(FW_FEATURE_LPAR) && - cpu_has_feature(CPU_FTRS_POWER8)) - pkeys_total = 32; + if (!pkeys_devtree_defined && !firmware_has_feature(FW_FEATURE_LPAR)) { + unsigned long pvr = mfspr(SPRN_PVR); + + if (PVR_VER(pvr) == PVR_POWER8 || PVR_VER(pvr) == PVR_POWER8E || + PVR_VER(pvr) == PVR_POWER8NVL || PVR_VER(pvr) == PVR_POWER9) + pkeys_total = 32; + } /* * Adjust the upper limit, based on the number of bits supported by @@ -367,12 +371,14 @@ static bool pkey_access_permitted(int pkey, bool write, bool execute) return true; pkey_shift = pkeyshift(pkey); - if (execute && !(read_iamr() & (IAMR_EX_BIT << pkey_shift))) - return true; + if (execute) + return !(read_iamr() & (IAMR_EX_BIT << pkey_shift)); - amr = read_amr(); /* Delay reading amr until absolutely needed */ - return ((!write && !(amr & (AMR_RD_BIT << pkey_shift))) || - (write && !(amr & (AMR_WR_BIT << pkey_shift)))); + amr = read_amr(); + if (write) + return !(amr & (AMR_WR_BIT << pkey_shift)); + + return !(amr & (AMR_RD_BIT << pkey_shift)); } bool arch_pte_access_permitted(u64 pte, bool write, bool execute) diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c index 6ee17d09649c..770542ccdb46 100644 --- a/arch/powerpc/mm/book3s64/radix_pgtable.c +++ b/arch/powerpc/mm/book3s64/radix_pgtable.c @@ -643,21 +643,6 @@ void radix__mmu_cleanup_all(void) } } -void radix__setup_initial_memory_limit(phys_addr_t first_memblock_base, - phys_addr_t first_memblock_size) -{ - /* - * We don't currently support the first MEMBLOCK not mapping 0 - * physical on those processors - */ - BUG_ON(first_memblock_base != 0); - - /* - * Radix mode is not limited by RMA / VRMA addressing. - */ - ppc64_rma_size = ULONG_MAX; -} - #ifdef CONFIG_MEMORY_HOTPLUG static void free_pte_table(pte_t *pte_start, pmd_t *pmd) { diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c index 881a026a603a..187047592d53 100644 --- a/arch/powerpc/mm/fault.c +++ b/arch/powerpc/mm/fault.c @@ -241,6 +241,9 @@ static bool bad_kernel_fault(struct pt_regs *regs, unsigned long error_code, return false; } +// This comes from 64-bit struct rt_sigframe + __SIGNAL_FRAMESIZE +#define SIGFRAME_MAX_SIZE (4096 + 128) + static bool bad_stack_expansion(struct pt_regs *regs, unsigned long address, struct vm_area_struct *vma, unsigned int flags, bool *must_retry) @@ -248,7 +251,7 @@ static bool bad_stack_expansion(struct pt_regs *regs, unsigned long address, /* * N.B. 
The POWER/Open ABI allows programs to access up to * 288 bytes below the stack pointer. - * The kernel signal delivery code writes up to about 1.5kB + * The kernel signal delivery code writes a bit over 4KB * below the stack pointer (r1) before decrementing it. * The exec code can write slightly over 640kB to the stack * before setting the user r1. Thus we allow the stack to @@ -273,7 +276,7 @@ static bool bad_stack_expansion(struct pt_regs *regs, unsigned long address, * between the last mapped region and the stack will * expand the stack rather than segfaulting. */ - if (address + 2048 >= uregs->gpr[1]) + if (address + SIGFRAME_MAX_SIZE >= uregs->gpr[1]) return false; if ((flags & FAULT_FLAG_WRITE) && (flags & FAULT_FLAG_USER) && diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c index 4e08246acd79..210f1c28b8e4 100644 --- a/arch/powerpc/mm/init_64.c +++ b/arch/powerpc/mm/init_64.c @@ -415,9 +415,16 @@ void __init mmu_early_init_devtree(void) if (!(mfmsr() & MSR_HV)) early_check_vec5(); - if (early_radix_enabled()) + if (early_radix_enabled()) { radix__early_init_devtree(); - else + /* + * We have finalized the translation we are going to use by now. + * Radix mode is not limited by RMA / VRMA addressing. + * Hence don't limit memblock allocations. + */ + ppc64_rma_size = ULONG_MAX; + memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE); + } else hash__early_init_devtree(); } #endif /* CONFIG_PPC_BOOK3S_64 */ diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c index 784cae9f5697..da9f722d9f16 100644 --- a/arch/powerpc/mm/pgtable_32.c +++ b/arch/powerpc/mm/pgtable_32.c @@ -207,7 +207,7 @@ void mark_initmem_nx(void) unsigned long numpages = PFN_UP((unsigned long)_einittext) - PFN_DOWN((unsigned long)_sinittext); - if (v_block_mapped((unsigned long)_stext + 1)) + if (v_block_mapped((unsigned long)_sinittext)) mmu_mark_initmem_nx(); else change_page_attr(page, numpages, PAGE_KERNEL); @@ -219,7 +219,7 @@ void mark_rodata_ro(void) struct page *page; unsigned long numpages; - if (v_block_mapped((unsigned long)_sinittext)) { + if (v_block_mapped((unsigned long)_stext + 1)) { mmu_mark_rodata_ro(); ptdump_check_wx(); return; diff --git a/arch/powerpc/mm/ptdump/hashpagetable.c b/arch/powerpc/mm/ptdump/hashpagetable.c index a07278027c6f..a2e8c3b2cf35 100644 --- a/arch/powerpc/mm/ptdump/hashpagetable.c +++ b/arch/powerpc/mm/ptdump/hashpagetable.c @@ -259,7 +259,7 @@ static int pseries_find(unsigned long ea, int psize, bool primary, u64 *v, u64 * for (i = 0; i < HPTES_PER_GROUP; i += 4, hpte_group += 4) { lpar_rc = plpar_pte_read_4(0, hpte_group, (void *)ptes); - if (lpar_rc != H_SUCCESS) + if (lpar_rc) continue; for (j = 0; j < 4; j++) { if (HPTE_V_COMPARE(ptes[j].v, want_v) && diff --git a/arch/powerpc/mm/ptdump/shared.c b/arch/powerpc/mm/ptdump/shared.c index f7ed2f187cb0..784f8df17f73 100644 --- a/arch/powerpc/mm/ptdump/shared.c +++ b/arch/powerpc/mm/ptdump/shared.c @@ -30,6 +30,11 @@ static const struct flag_info flag_array[] = { .val = _PAGE_PRESENT, .set = "present", .clear = " ", + }, { + .mask = _PAGE_COHERENT, + .val = _PAGE_COHERENT, + .set = "coherent", + .clear = " ", }, { .mask = _PAGE_GUARDED, .val = _PAGE_GUARDED, diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c index ca92e01d0bd1..f582aa2d9807 100644 --- a/arch/powerpc/perf/core-book3s.c +++ b/arch/powerpc/perf/core-book3s.c @@ -1522,9 +1522,16 @@ nocheck: ret = 0; out: if (has_branch_stack(event)) { - power_pmu_bhrb_enable(event); - cpuhw->bhrb_filter = 
ppmu->bhrb_filter_map( - event->attr.branch_sample_type); + u64 bhrb_filter = -1; + + if (ppmu->bhrb_filter_map) + bhrb_filter = ppmu->bhrb_filter_map( + event->attr.branch_sample_type); + + if (bhrb_filter != -1) { + cpuhw->bhrb_filter = bhrb_filter; + power_pmu_bhrb_enable(event); + } } perf_pmu_enable(event->pmu); @@ -1846,7 +1853,6 @@ static int power_pmu_event_init(struct perf_event *event) int n; int err; struct cpu_hw_events *cpuhw; - u64 bhrb_filter; if (!ppmu) return -ENOENT; @@ -1952,7 +1958,10 @@ static int power_pmu_event_init(struct perf_event *event) err = power_check_constraints(cpuhw, events, cflags, n + 1); if (has_branch_stack(event)) { - bhrb_filter = ppmu->bhrb_filter_map( + u64 bhrb_filter = -1; + + if (ppmu->bhrb_filter_map) + bhrb_filter = ppmu->bhrb_filter_map( event->attr.branch_sample_type); if (bhrb_filter == -1) { @@ -2106,6 +2115,10 @@ static void record_and_restart(struct perf_event *event, unsigned long val, if (perf_event_overflow(event, &data, regs)) power_pmu_stop(event, 0); + } else if (period) { + /* Account for interrupt in case of invalid SIAR */ + if (perf_event_account_interrupt(event)) + power_pmu_stop(event, 0); } } diff --git a/arch/powerpc/perf/hv-24x7.c b/arch/powerpc/perf/hv-24x7.c index 573e0b309c0c..48e8f4b17b91 100644 --- a/arch/powerpc/perf/hv-24x7.c +++ b/arch/powerpc/perf/hv-24x7.c @@ -1400,16 +1400,6 @@ static void h_24x7_event_read(struct perf_event *event) h24x7hw = &get_cpu_var(hv_24x7_hw); h24x7hw->events[i] = event; put_cpu_var(h24x7hw); - /* - * Clear the event count so we can compute the _change_ - * in the 24x7 raw counter value at the end of the txn. - * - * Note that we could alternatively read the 24x7 value - * now and save its value in event->hw.prev_count. But - * that would require issuing a hcall, which would then - * defeat the purpose of using the txn interface. 
- */ - local64_set(&event->count, 0); } put_cpu_var(hv_24x7_reqb); diff --git a/arch/powerpc/platforms/4xx/pci.c b/arch/powerpc/platforms/4xx/pci.c index e6e2adcc7b64..c13d64c3b019 100644 --- a/arch/powerpc/platforms/4xx/pci.c +++ b/arch/powerpc/platforms/4xx/pci.c @@ -1242,7 +1242,7 @@ static void __init ppc460sx_pciex_check_link(struct ppc4xx_pciex_port *port) if (mbase == NULL) { printk(KERN_ERR "%pOF: Can't map internal config space !", port->node); - goto done; + return; } while (attempt && (0 == (in_le32(mbase + PECFG_460SX_DLLSTA) @@ -1252,9 +1252,7 @@ static void __init ppc460sx_pciex_check_link(struct ppc4xx_pciex_port *port) } if (attempt) port->link = 1; -done: iounmap(mbase); - } static struct ppc4xx_pciex_hwops ppc460sx_pcie_hwops __initdata = { diff --git a/arch/powerpc/platforms/cell/Kconfig b/arch/powerpc/platforms/cell/Kconfig index 0f7c8241912b..f2ff359041ee 100644 --- a/arch/powerpc/platforms/cell/Kconfig +++ b/arch/powerpc/platforms/cell/Kconfig @@ -44,6 +44,7 @@ config SPU_FS tristate "SPU file system" default m depends on PPC_CELL + depends on COREDUMP select SPU_BASE help The SPU file system is used to access Synergistic Processing diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c index c0f950a3f4e1..f4a4dfb191e7 100644 --- a/arch/powerpc/platforms/cell/spufs/file.c +++ b/arch/powerpc/platforms/cell/spufs/file.c @@ -1978,8 +1978,9 @@ static ssize_t __spufs_mbox_info_read(struct spu_context *ctx, static ssize_t spufs_mbox_info_read(struct file *file, char __user *buf, size_t len, loff_t *pos) { - int ret; struct spu_context *ctx = file->private_data; + u32 stat, data; + int ret; if (!access_ok(buf, len)) return -EFAULT; @@ -1988,11 +1989,16 @@ static ssize_t spufs_mbox_info_read(struct file *file, char __user *buf, if (ret) return ret; spin_lock(&ctx->csa.register_lock); - ret = __spufs_mbox_info_read(ctx, buf, len, pos); + stat = ctx->csa.prob.mb_stat_R; + data = ctx->csa.prob.pu_mb_R; spin_unlock(&ctx->csa.register_lock); spu_release_saved(ctx); - return ret; + /* EOF if there's no entry in the mbox */ + if (!(stat & 0x0000ff)) + return 0; + + return simple_read_from_buffer(buf, len, pos, &data, sizeof(data)); } static const struct file_operations spufs_mbox_info_fops = { @@ -2019,6 +2025,7 @@ static ssize_t spufs_ibox_info_read(struct file *file, char __user *buf, size_t len, loff_t *pos) { struct spu_context *ctx = file->private_data; + u32 stat, data; int ret; if (!access_ok(buf, len)) @@ -2028,11 +2035,16 @@ static ssize_t spufs_ibox_info_read(struct file *file, char __user *buf, if (ret) return ret; spin_lock(&ctx->csa.register_lock); - ret = __spufs_ibox_info_read(ctx, buf, len, pos); + stat = ctx->csa.prob.mb_stat_R; + data = ctx->csa.priv2.puint_mb_R; spin_unlock(&ctx->csa.register_lock); spu_release_saved(ctx); - return ret; + /* EOF if there's no entry in the ibox */ + if (!(stat & 0xff0000)) + return 0; + + return simple_read_from_buffer(buf, len, pos, &data, sizeof(data)); } static const struct file_operations spufs_ibox_info_fops = { @@ -2041,6 +2053,11 @@ static const struct file_operations spufs_ibox_info_fops = { .llseek = generic_file_llseek, }; +static size_t spufs_wbox_info_cnt(struct spu_context *ctx) +{ + return (4 - ((ctx->csa.prob.mb_stat_R & 0x00ff00) >> 8)) * sizeof(u32); +} + static ssize_t __spufs_wbox_info_read(struct spu_context *ctx, char __user *buf, size_t len, loff_t *pos) { @@ -2049,7 +2066,7 @@ static ssize_t __spufs_wbox_info_read(struct spu_context *ctx, u32 wbox_stat; wbox_stat = 
ctx->csa.prob.mb_stat_R; - cnt = 4 - ((wbox_stat & 0x00ff00) >> 8); + cnt = spufs_wbox_info_cnt(ctx); for (i = 0; i < cnt; i++) { data[i] = ctx->csa.spu_mailbox_data[i]; } @@ -2062,7 +2079,8 @@ static ssize_t spufs_wbox_info_read(struct file *file, char __user *buf, size_t len, loff_t *pos) { struct spu_context *ctx = file->private_data; - int ret; + u32 data[ARRAY_SIZE(ctx->csa.spu_mailbox_data)]; + int ret, count; if (!access_ok(buf, len)) return -EFAULT; @@ -2071,11 +2089,13 @@ static ssize_t spufs_wbox_info_read(struct file *file, char __user *buf, if (ret) return ret; spin_lock(&ctx->csa.register_lock); - ret = __spufs_wbox_info_read(ctx, buf, len, pos); + count = spufs_wbox_info_cnt(ctx); + memcpy(&data, &ctx->csa.spu_mailbox_data, sizeof(data)); spin_unlock(&ctx->csa.register_lock); spu_release_saved(ctx); - return ret; + return simple_read_from_buffer(buf, len, pos, &data, + count * sizeof(u32)); } static const struct file_operations spufs_wbox_info_fops = { @@ -2084,27 +2104,33 @@ static const struct file_operations spufs_wbox_info_fops = { .llseek = generic_file_llseek, }; -static ssize_t __spufs_dma_info_read(struct spu_context *ctx, - char __user *buf, size_t len, loff_t *pos) +static void spufs_get_dma_info(struct spu_context *ctx, + struct spu_dma_info *info) { - struct spu_dma_info info; - struct mfc_cq_sr *qp, *spuqp; int i; - info.dma_info_type = ctx->csa.priv2.spu_tag_status_query_RW; - info.dma_info_mask = ctx->csa.lscsa->tag_mask.slot[0]; - info.dma_info_status = ctx->csa.spu_chnldata_RW[24]; - info.dma_info_stall_and_notify = ctx->csa.spu_chnldata_RW[25]; - info.dma_info_atomic_command_status = ctx->csa.spu_chnldata_RW[27]; + info->dma_info_type = ctx->csa.priv2.spu_tag_status_query_RW; + info->dma_info_mask = ctx->csa.lscsa->tag_mask.slot[0]; + info->dma_info_status = ctx->csa.spu_chnldata_RW[24]; + info->dma_info_stall_and_notify = ctx->csa.spu_chnldata_RW[25]; + info->dma_info_atomic_command_status = ctx->csa.spu_chnldata_RW[27]; for (i = 0; i < 16; i++) { - qp = &info.dma_info_command_data[i]; - spuqp = &ctx->csa.priv2.spuq[i]; + struct mfc_cq_sr *qp = &info->dma_info_command_data[i]; + struct mfc_cq_sr *spuqp = &ctx->csa.priv2.spuq[i]; qp->mfc_cq_data0_RW = spuqp->mfc_cq_data0_RW; qp->mfc_cq_data1_RW = spuqp->mfc_cq_data1_RW; qp->mfc_cq_data2_RW = spuqp->mfc_cq_data2_RW; qp->mfc_cq_data3_RW = spuqp->mfc_cq_data3_RW; } +} + +static ssize_t __spufs_dma_info_read(struct spu_context *ctx, + char __user *buf, size_t len, loff_t *pos) +{ + struct spu_dma_info info; + + spufs_get_dma_info(ctx, &info); return simple_read_from_buffer(buf, len, pos, &info, sizeof info); @@ -2114,6 +2140,7 @@ static ssize_t spufs_dma_info_read(struct file *file, char __user *buf, size_t len, loff_t *pos) { struct spu_context *ctx = file->private_data; + struct spu_dma_info info; int ret; if (!access_ok(buf, len)) @@ -2123,11 +2150,12 @@ static ssize_t spufs_dma_info_read(struct file *file, char __user *buf, if (ret) return ret; spin_lock(&ctx->csa.register_lock); - ret = __spufs_dma_info_read(ctx, buf, len, pos); + spufs_get_dma_info(ctx, &info); spin_unlock(&ctx->csa.register_lock); spu_release_saved(ctx); - return ret; + return simple_read_from_buffer(buf, len, pos, &info, + sizeof(info)); } static const struct file_operations spufs_dma_info_fops = { @@ -2136,13 +2164,31 @@ static const struct file_operations spufs_dma_info_fops = { .llseek = no_llseek, }; +static void spufs_get_proxydma_info(struct spu_context *ctx, + struct spu_proxydma_info *info) +{ + int i; + + 
info->proxydma_info_type = ctx->csa.prob.dma_querytype_RW; + info->proxydma_info_mask = ctx->csa.prob.dma_querymask_RW; + info->proxydma_info_status = ctx->csa.prob.dma_tagstatus_R; + + for (i = 0; i < 8; i++) { + struct mfc_cq_sr *qp = &info->proxydma_info_command_data[i]; + struct mfc_cq_sr *puqp = &ctx->csa.priv2.puq[i]; + + qp->mfc_cq_data0_RW = puqp->mfc_cq_data0_RW; + qp->mfc_cq_data1_RW = puqp->mfc_cq_data1_RW; + qp->mfc_cq_data2_RW = puqp->mfc_cq_data2_RW; + qp->mfc_cq_data3_RW = puqp->mfc_cq_data3_RW; + } +} + static ssize_t __spufs_proxydma_info_read(struct spu_context *ctx, char __user *buf, size_t len, loff_t *pos) { struct spu_proxydma_info info; - struct mfc_cq_sr *qp, *puqp; int ret = sizeof info; - int i; if (len < ret) return -EINVAL; @@ -2150,18 +2196,7 @@ static ssize_t __spufs_proxydma_info_read(struct spu_context *ctx, if (!access_ok(buf, len)) return -EFAULT; - info.proxydma_info_type = ctx->csa.prob.dma_querytype_RW; - info.proxydma_info_mask = ctx->csa.prob.dma_querymask_RW; - info.proxydma_info_status = ctx->csa.prob.dma_tagstatus_R; - for (i = 0; i < 8; i++) { - qp = &info.proxydma_info_command_data[i]; - puqp = &ctx->csa.priv2.puq[i]; - - qp->mfc_cq_data0_RW = puqp->mfc_cq_data0_RW; - qp->mfc_cq_data1_RW = puqp->mfc_cq_data1_RW; - qp->mfc_cq_data2_RW = puqp->mfc_cq_data2_RW; - qp->mfc_cq_data3_RW = puqp->mfc_cq_data3_RW; - } + spufs_get_proxydma_info(ctx, &info); return simple_read_from_buffer(buf, len, pos, &info, sizeof info); @@ -2171,17 +2206,19 @@ static ssize_t spufs_proxydma_info_read(struct file *file, char __user *buf, size_t len, loff_t *pos) { struct spu_context *ctx = file->private_data; + struct spu_proxydma_info info; int ret; ret = spu_acquire_saved(ctx); if (ret) return ret; spin_lock(&ctx->csa.register_lock); - ret = __spufs_proxydma_info_read(ctx, buf, len, pos); + spufs_get_proxydma_info(ctx, &info); spin_unlock(&ctx->csa.register_lock); spu_release_saved(ctx); - return ret; + return simple_read_from_buffer(buf, len, pos, &info, + sizeof(info)); } static const struct file_operations spufs_proxydma_info_fops = { diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c index 13e251699346..b2ba3e95bda7 100644 --- a/arch/powerpc/platforms/powernv/smp.c +++ b/arch/powerpc/platforms/powernv/smp.c @@ -167,7 +167,6 @@ static void pnv_smp_cpu_kill_self(void) /* Standard hot unplug procedure */ idle_task_exit(); - current->active_mm = NULL; /* for sanity */ cpu = smp_processor_id(); DBG("CPU%d offline\n", cpu); generic_set_cpu_dead(cpu); diff --git a/arch/powerpc/platforms/ps3/mm.c b/arch/powerpc/platforms/ps3/mm.c index 423be34f0f5f..f42fe4e86ce5 100644 --- a/arch/powerpc/platforms/ps3/mm.c +++ b/arch/powerpc/platforms/ps3/mm.c @@ -200,13 +200,14 @@ void ps3_mm_vas_destroy(void) { int result; - DBG("%s:%d: map.vas_id = %llu\n", __func__, __LINE__, map.vas_id); - if (map.vas_id) { result = lv1_select_virtual_address_space(0); - BUG_ON(result); - result = lv1_destruct_virtual_address_space(map.vas_id); - BUG_ON(result); + result += lv1_destruct_virtual_address_space(map.vas_id); + + if (result) { + lv1_panic(0); + } + map.vas_id = 0; } } @@ -304,19 +305,20 @@ static void ps3_mm_region_destroy(struct mem_region *r) int result; if (!r->destroy) { - pr_info("%s:%d: Not destroying high region: %llxh %llxh\n", - __func__, __LINE__, r->base, r->size); return; } - DBG("%s:%d: r->base = %llxh\n", __func__, __LINE__, r->base); - if (r->base) { result = lv1_release_memory(r->base); - BUG_ON(result); + + if (result) { + lv1_panic(0); 
+ } + r->size = r->base = r->offset = 0; map.total = map.rm.size; } + ps3_mm_set_repository_highmem(NULL); } diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c index f1888352b4e0..e7d23a933a0d 100644 --- a/arch/powerpc/platforms/pseries/hotplug-memory.c +++ b/arch/powerpc/platforms/pseries/hotplug-memory.c @@ -27,7 +27,7 @@ static bool rtas_hp_event; unsigned long pseries_memory_block_size(void) { struct device_node *np; - unsigned int memblock_size = MIN_MEMORY_BLOCK_SIZE; + u64 memblock_size = MIN_MEMORY_BLOCK_SIZE; struct resource r; np = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory"); diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c index 753adeb624f2..b3c4848869e5 100644 --- a/arch/powerpc/platforms/pseries/ras.c +++ b/arch/powerpc/platforms/pseries/ras.c @@ -184,7 +184,6 @@ static void handle_system_shutdown(char event_modifier) case EPOW_SHUTDOWN_ON_UPS: pr_emerg("Loss of system power detected. System is running on" " UPS/battery. Check RTAS error log for details\n"); - orderly_poweroff(true); break; case EPOW_SHUTDOWN_LOSS_OF_CRITICAL_FUNCTIONS: @@ -395,10 +394,11 @@ static irqreturn_t ras_error_interrupt(int irq, void *dev_id) /* * Some versions of FWNMI place the buffer inside the 4kB page starting at * 0x7000. Other versions place it inside the rtas buffer. We check both. + * Minimum size of the buffer is 16 bytes. */ #define VALID_FWNMI_BUFFER(A) \ - ((((A) >= 0x7000) && ((A) < 0x7ff0)) || \ - (((A) >= rtas.base) && ((A) < (rtas.base + rtas.size - 16)))) + ((((A) >= 0x7000) && ((A) <= 0x8000 - 16)) || \ + (((A) >= rtas.base) && ((A) <= (rtas.base + rtas.size - 16)))) static inline struct rtas_error_log *fwnmi_get_errlog(void) { diff --git a/arch/powerpc/platforms/pseries/suspend.c b/arch/powerpc/platforms/pseries/suspend.c index 0a24a5a185f0..f789693f61f4 100644 --- a/arch/powerpc/platforms/pseries/suspend.c +++ b/arch/powerpc/platforms/pseries/suspend.c @@ -132,15 +132,11 @@ static ssize_t store_hibernate(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - cpumask_var_t offline_mask; int rc; if (!capable(CAP_SYS_ADMIN)) return -EPERM; - if (!alloc_cpumask_var(&offline_mask, GFP_KERNEL)) - return -ENOMEM; - stream_id = simple_strtoul(buf, NULL, 16); do { @@ -150,32 +146,16 @@ static ssize_t store_hibernate(struct device *dev, } while (rc == -EAGAIN); if (!rc) { - /* All present CPUs must be online */ - cpumask_andnot(offline_mask, cpu_present_mask, - cpu_online_mask); - rc = rtas_online_cpus_mask(offline_mask); - if (rc) { - pr_err("%s: Could not bring present CPUs online.\n", - __func__); - goto out; - } - stop_topology_update(); rc = pm_suspend(PM_SUSPEND_MEM); start_topology_update(); - - /* Take down CPUs not online prior to suspend */ - if (!rtas_offline_cpus_mask(offline_mask)) - pr_warn("%s: Could not restore CPUs to offline " - "state.\n", __func__); } stream_id = 0; if (!rc) rc = count; -out: - free_cpumask_var(offline_mask); + return rc; } diff --git a/arch/powerpc/sysdev/xive/native.c b/arch/powerpc/sysdev/xive/native.c index 50e1a8e02497..3fd086533dcf 100644 --- a/arch/powerpc/sysdev/xive/native.c +++ b/arch/powerpc/sysdev/xive/native.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include @@ -646,6 +647,7 @@ static bool xive_native_provision_pages(void) pr_err("Failed to allocate provisioning page\n"); return false; } + kmemleak_ignore(p); opal_xive_donate_page(chip, __pa(p)); } return true; diff --git 
a/arch/riscv/include/asm/barrier.h b/arch/riscv/include/asm/barrier.h index 3f1737f301cc..d0e24aaa2aa0 100644 --- a/arch/riscv/include/asm/barrier.h +++ b/arch/riscv/include/asm/barrier.h @@ -58,8 +58,16 @@ do { \ * The AQ/RL pair provides a RCpc critical section, but there's not really any * way we can take advantage of that here because the ordering is only enforced * on that one lock. Thus, we're just doing a full fence. + * + * Since we allow writeX to be called from preemptive regions we need at least + * an "o" in the predecessor set to ensure device writes are visible before the + * task is marked as available for scheduling on a new hart. While I don't see + * any concrete reason we need a full IO fence, it seems safer to just upgrade + * this in order to avoid any IO crossing a scheduling boundary. In both + * instances the scheduler pairs this with an mb(), so nothing is necessary on + * the new hart. */ -#define smp_mb__after_spinlock() RISCV_FENCE(rw,rw) +#define smp_mb__after_spinlock() RISCV_FENCE(iorw,iorw) #include diff --git a/arch/riscv/include/asm/cmpxchg.h b/arch/riscv/include/asm/cmpxchg.h index d969bab4a26b..262e5bbb2776 100644 --- a/arch/riscv/include/asm/cmpxchg.h +++ b/arch/riscv/include/asm/cmpxchg.h @@ -179,7 +179,7 @@ " bnez %1, 0b\n" \ "1:\n" \ : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \ - : "rJ" (__old), "rJ" (__new) \ + : "rJ" ((long)__old), "rJ" (__new) \ : "memory"); \ break; \ case 8: \ @@ -224,7 +224,7 @@ RISCV_ACQUIRE_BARRIER \ "1:\n" \ : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \ - : "rJ" (__old), "rJ" (__new) \ + : "rJ" ((long)__old), "rJ" (__new) \ : "memory"); \ break; \ case 8: \ @@ -270,7 +270,7 @@ " bnez %1, 0b\n" \ "1:\n" \ : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \ - : "rJ" (__old), "rJ" (__new) \ + : "rJ" ((long)__old), "rJ" (__new) \ : "memory"); \ break; \ case 8: \ @@ -316,7 +316,7 @@ " fence rw, rw\n" \ "1:\n" \ : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \ - : "rJ" (__old), "rJ" (__new) \ + : "rJ" ((long)__old), "rJ" (__new) \ : "memory"); \ break; \ case 8: \ diff --git a/arch/riscv/include/asm/thread_info.h b/arch/riscv/include/asm/thread_info.h index 905372d7eeb8..3df9a82bdbfd 100644 --- a/arch/riscv/include/asm/thread_info.h +++ b/arch/riscv/include/asm/thread_info.h @@ -12,7 +12,11 @@ #include /* thread information allocation */ +#ifdef CONFIG_64BIT +#define THREAD_SIZE_ORDER (2) +#else #define THREAD_SIZE_ORDER (1) +#endif #define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER) #ifndef __ASSEMBLY__ diff --git a/arch/riscv/kernel/sys_riscv.c b/arch/riscv/kernel/sys_riscv.c index f3619f59d85c..12f8a7fce78b 100644 --- a/arch/riscv/kernel/sys_riscv.c +++ b/arch/riscv/kernel/sys_riscv.c @@ -8,6 +8,7 @@ #include #include #include +#include static long riscv_sys_mmap(unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, @@ -16,6 +17,11 @@ static long riscv_sys_mmap(unsigned long addr, unsigned long len, { if (unlikely(offset & (~PAGE_MASK >> page_shift_offset))) return -EINVAL; + + if ((prot & PROT_WRITE) && (prot & PROT_EXEC)) + if (unlikely(!(prot & PROT_READ))) + return -EINVAL; + return ksys_mmap_pgoff(addr, len, prot, flags, fd, offset >> (PAGE_SHIFT - page_shift_offset)); } diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c index 319812923012..d49e334071d4 100644 --- a/arch/riscv/mm/init.c +++ b/arch/riscv/mm/init.c @@ -115,9 +115,9 @@ void __init setup_bootmem(void) /* Reserve from the start of the kernel to the end of the kernel */ memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start); - 
set_max_mapnr(PFN_DOWN(mem_size)); max_pfn = PFN_DOWN(memblock_end_of_DRAM()); max_low_pfn = max_pfn; + set_max_mapnr(max_low_pfn); #ifdef CONFIG_BLK_DEV_INITRD setup_initrd(); @@ -167,12 +167,11 @@ void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot) ptep = &fixmap_pte[pte_index(addr)]; - if (pgprot_val(prot)) { + if (pgprot_val(prot)) set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, prot)); - } else { + else pte_clear(&init_mm, addr, ptep); - local_flush_tlb_page(addr); - } + local_flush_tlb_page(addr); } static pte_t *__init get_pte_virt(phys_addr_t pa) diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index abe60268335d..0fe5600a037e 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h @@ -31,12 +31,12 @@ #define KVM_USER_MEM_SLOTS 32 /* - * These seem to be used for allocating ->chip in the routing table, - * which we don't use. 4096 is an out-of-thin-air value. If we need - * to look at ->chip later on, we'll need to revisit this. + * These seem to be used for allocating ->chip in the routing table, which we + * don't use. 1 is as small as we can get to reduce the needed memory. If we + * need to look at ->chip later on, we'll need to revisit this. */ #define KVM_NR_IRQCHIPS 1 -#define KVM_IRQCHIP_NUM_PINS 4096 +#define KVM_IRQCHIP_NUM_PINS 1 #define KVM_HALT_POLL_NS_DEFAULT 50000 /* s390-specific vcpu->requests bit members */ diff --git a/arch/s390/include/asm/numa.h b/arch/s390/include/asm/numa.h index 35f8cbe7e5bb..c759dcffa9ea 100644 --- a/arch/s390/include/asm/numa.h +++ b/arch/s390/include/asm/numa.h @@ -17,7 +17,6 @@ void numa_setup(void); int numa_pfn_to_nid(unsigned long pfn); -int __node_distance(int a, int b); void numa_update_cpu_topology(void); extern cpumask_t node_to_cpumask_map[MAX_NUMNODES]; diff --git a/arch/s390/include/asm/percpu.h b/arch/s390/include/asm/percpu.h index 50b4ce8cddfd..918f0ba4f4d2 100644 --- a/arch/s390/include/asm/percpu.h +++ b/arch/s390/include/asm/percpu.h @@ -29,7 +29,7 @@ typedef typeof(pcp) pcp_op_T__; \ pcp_op_T__ old__, new__, prev__; \ pcp_op_T__ *ptr__; \ - preempt_disable(); \ + preempt_disable_notrace(); \ ptr__ = raw_cpu_ptr(&(pcp)); \ prev__ = *ptr__; \ do { \ @@ -37,7 +37,7 @@ new__ = old__ op (val); \ prev__ = cmpxchg(ptr__, old__, new__); \ } while (prev__ != old__); \ - preempt_enable(); \ + preempt_enable_notrace(); \ new__; \ }) @@ -68,7 +68,7 @@ typedef typeof(pcp) pcp_op_T__; \ pcp_op_T__ val__ = (val); \ pcp_op_T__ old__, *ptr__; \ - preempt_disable(); \ + preempt_disable_notrace(); \ ptr__ = raw_cpu_ptr(&(pcp)); \ if (__builtin_constant_p(val__) && \ ((szcast)val__ > -129) && ((szcast)val__ < 128)) { \ @@ -84,7 +84,7 @@ : [val__] "d" (val__) \ : "cc"); \ } \ - preempt_enable(); \ + preempt_enable_notrace(); \ } #define this_cpu_add_4(pcp, val) arch_this_cpu_add(pcp, val, "laa", "asi", int) @@ -95,14 +95,14 @@ typedef typeof(pcp) pcp_op_T__; \ pcp_op_T__ val__ = (val); \ pcp_op_T__ old__, *ptr__; \ - preempt_disable(); \ + preempt_disable_notrace(); \ ptr__ = raw_cpu_ptr(&(pcp)); \ asm volatile( \ op " %[old__],%[val__],%[ptr__]\n" \ : [old__] "=d" (old__), [ptr__] "+Q" (*ptr__) \ : [val__] "d" (val__) \ : "cc"); \ - preempt_enable(); \ + preempt_enable_notrace(); \ old__ + val__; \ }) @@ -114,14 +114,14 @@ typedef typeof(pcp) pcp_op_T__; \ pcp_op_T__ val__ = (val); \ pcp_op_T__ old__, *ptr__; \ - preempt_disable(); \ + preempt_disable_notrace(); \ ptr__ = raw_cpu_ptr(&(pcp)); \ asm volatile( \ op " %[old__],%[val__],%[ptr__]\n" \ : [old__] "=d" 
(old__), [ptr__] "+Q" (*ptr__) \ : [val__] "d" (val__) \ : "cc"); \ - preempt_enable(); \ + preempt_enable_notrace(); \ } #define this_cpu_and_4(pcp, val) arch_this_cpu_to_op(pcp, val, "lan") @@ -136,10 +136,10 @@ typedef typeof(pcp) pcp_op_T__; \ pcp_op_T__ ret__; \ pcp_op_T__ *ptr__; \ - preempt_disable(); \ + preempt_disable_notrace(); \ ptr__ = raw_cpu_ptr(&(pcp)); \ ret__ = cmpxchg(ptr__, oval, nval); \ - preempt_enable(); \ + preempt_enable_notrace(); \ ret__; \ }) @@ -152,10 +152,10 @@ ({ \ typeof(pcp) *ptr__; \ typeof(pcp) ret__; \ - preempt_disable(); \ + preempt_disable_notrace(); \ ptr__ = raw_cpu_ptr(&(pcp)); \ ret__ = xchg(ptr__, nval); \ - preempt_enable(); \ + preempt_enable_notrace(); \ ret__; \ }) @@ -171,11 +171,11 @@ typeof(pcp1) *p1__; \ typeof(pcp2) *p2__; \ int ret__; \ - preempt_disable(); \ + preempt_disable_notrace(); \ p1__ = raw_cpu_ptr(&(pcp1)); \ p2__ = raw_cpu_ptr(&(pcp2)); \ ret__ = __cmpxchg_double(p1__, p2__, o1__, o2__, n1__, n2__); \ - preempt_enable(); \ + preempt_enable_notrace(); \ ret__; \ }) diff --git a/arch/s390/include/asm/syscall.h b/arch/s390/include/asm/syscall.h index f073292e9fdb..d9d5de0f67ff 100644 --- a/arch/s390/include/asm/syscall.h +++ b/arch/s390/include/asm/syscall.h @@ -33,7 +33,17 @@ static inline void syscall_rollback(struct task_struct *task, static inline long syscall_get_error(struct task_struct *task, struct pt_regs *regs) { - return IS_ERR_VALUE(regs->gprs[2]) ? regs->gprs[2] : 0; + unsigned long error = regs->gprs[2]; +#ifdef CONFIG_COMPAT + if (test_tsk_thread_flag(task, TIF_31BIT)) { + /* + * Sign-extend the value so (int)-EFOO becomes (long)-EFOO + * and will match correctly in comparisons. + */ + error = (long)(int)error; + } +#endif + return IS_ERR_VALUE(error) ? error : 0; } static inline long syscall_get_return_value(struct task_struct *task, diff --git a/arch/s390/include/asm/topology.h b/arch/s390/include/asm/topology.h index cca406fdbe51..ef9dd253dfad 100644 --- a/arch/s390/include/asm/topology.h +++ b/arch/s390/include/asm/topology.h @@ -83,8 +83,6 @@ static inline const struct cpumask *cpumask_of_node(int node) #define pcibus_to_node(bus) __pcibus_to_node(bus) -#define node_distance(a, b) __node_distance(a, b) - #else /* !CONFIG_NUMA */ #define numa_node_id numa_node_id diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h index a470f1fa9f2a..324438889fe1 100644 --- a/arch/s390/include/asm/uaccess.h +++ b/arch/s390/include/asm/uaccess.h @@ -276,6 +276,6 @@ static inline unsigned long __must_check clear_user(void __user *to, unsigned lo } int copy_to_user_real(void __user *dest, void *src, unsigned long count); -void s390_kernel_write(void *dst, const void *src, size_t size); +void *s390_kernel_write(void *dst, const void *src, size_t size); #endif /* __S390_UACCESS_H */ diff --git a/arch/s390/include/asm/vdso.h b/arch/s390/include/asm/vdso.h index 169d7604eb80..f3ba84fa9bd1 100644 --- a/arch/s390/include/asm/vdso.h +++ b/arch/s390/include/asm/vdso.h @@ -36,6 +36,7 @@ struct vdso_data { __u32 tk_shift; /* Shift used for xtime_nsec 0x60 */ __u32 ts_dir; /* TOD steering direction 0x64 */ __u64 ts_end; /* TOD steering end 0x68 */ + __u32 hrtimer_res; /* hrtimer resolution 0x70 */ }; struct vdso_per_cpu_data { diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c index b6628586ab70..a65cb4924bdb 100644 --- a/arch/s390/kernel/asm-offsets.c +++ b/arch/s390/kernel/asm-offsets.c @@ -76,6 +76,7 @@ int main(void) OFFSET(__VDSO_TK_SHIFT, vdso_data, tk_shift); 
OFFSET(__VDSO_TS_DIR, vdso_data, ts_dir); OFFSET(__VDSO_TS_END, vdso_data, ts_end); + OFFSET(__VDSO_CLOCK_REALTIME_RES, vdso_data, hrtimer_res); OFFSET(__VDSO_ECTG_BASE, vdso_per_cpu_data, ectg_timer_base); OFFSET(__VDSO_ECTG_USER, vdso_per_cpu_data, ectg_user_time); OFFSET(__VDSO_CPU_NR, vdso_per_cpu_data, cpu_nr); @@ -87,7 +88,6 @@ int main(void) DEFINE(__CLOCK_REALTIME_COARSE, CLOCK_REALTIME_COARSE); DEFINE(__CLOCK_MONOTONIC_COARSE, CLOCK_MONOTONIC_COARSE); DEFINE(__CLOCK_THREAD_CPUTIME_ID, CLOCK_THREAD_CPUTIME_ID); - DEFINE(__CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC); DEFINE(__CLOCK_COARSE_RES, LOW_RES_NSEC); BLANK(); /* idle data offsets */ diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c index 6d321f5f101d..7184d55d87aa 100644 --- a/arch/s390/kernel/debug.c +++ b/arch/s390/kernel/debug.c @@ -198,9 +198,10 @@ static debug_entry_t ***debug_areas_alloc(int pages_per_area, int nr_areas) if (!areas) goto fail_malloc_areas; for (i = 0; i < nr_areas; i++) { + /* GFP_NOWARN to avoid user triggerable WARN, we handle fails */ areas[i] = kmalloc_array(pages_per_area, sizeof(debug_entry_t *), - GFP_KERNEL); + GFP_KERNEL | __GFP_NOWARN); if (!areas[i]) goto fail_malloc_areas2; for (j = 0; j < pages_per_area; j++) { diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c index b432d63d0b37..2531776cf6cf 100644 --- a/arch/s390/kernel/early.c +++ b/arch/s390/kernel/early.c @@ -169,6 +169,8 @@ static noinline __init void setup_lowcore_early(void) psw_t psw; psw.mask = PSW_MASK_BASE | PSW_DEFAULT_KEY | PSW_MASK_EA | PSW_MASK_BA; + if (IS_ENABLED(CONFIG_KASAN)) + psw.mask |= PSW_MASK_DAT; psw.addr = (unsigned long) s390_base_ext_handler; S390_lowcore.external_new_psw = psw; psw.addr = (unsigned long) s390_base_pgm_handler; diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S index bc85987727f0..c544b7a11ebb 100644 --- a/arch/s390/kernel/entry.S +++ b/arch/s390/kernel/entry.S @@ -368,9 +368,9 @@ ENTRY(system_call) jnz .Lsysc_nr_ok # svc 0: system call number in %r1 llgfr %r1,%r1 # clear high word in r1 + sth %r1,__PT_INT_CODE+2(%r11) cghi %r1,NR_syscalls jnl .Lsysc_nr_ok - sth %r1,__PT_INT_CODE+2(%r11) slag %r8,%r1,3 .Lsysc_nr_ok: xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c index ad71132374f0..c6aef2ecf289 100644 --- a/arch/s390/kernel/ptrace.c +++ b/arch/s390/kernel/ptrace.c @@ -324,6 +324,25 @@ static inline void __poke_user_per(struct task_struct *child, child->thread.per_user.end = data; } +static void fixup_int_code(struct task_struct *child, addr_t data) +{ + struct pt_regs *regs = task_pt_regs(child); + int ilc = regs->int_code >> 16; + u16 insn; + + if (ilc > 6) + return; + + if (ptrace_access_vm(child, regs->psw.addr - (regs->int_code >> 16), + &insn, sizeof(insn), FOLL_FORCE) != sizeof(insn)) + return; + + /* double check that tracee stopped on svc instruction */ + if ((insn >> 8) != 0xa) + return; + + regs->int_code = 0x20000 | (data & 0xffff); +} /* * Write a word to the user area of a process at location addr. This * operation does have an additional problem compared to peek_user. 
@@ -335,7 +354,9 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data) struct user *dummy = NULL; addr_t offset; + if (addr < (addr_t) &dummy->regs.acrs) { + struct pt_regs *regs = task_pt_regs(child); /* * psw and gprs are stored on the stack */ @@ -353,7 +374,11 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data) /* Invalid addressing mode bits */ return -EINVAL; } - *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr) = data; + + if (test_pt_regs_flag(regs, PIF_SYSCALL) && + addr == offsetof(struct user, regs.gprs[2])) + fixup_int_code(child, data); + *(addr_t *)((addr_t) &regs->psw + addr) = data; } else if (addr < (addr_t) (&dummy->regs.orig_gpr2)) { /* @@ -719,6 +744,10 @@ static int __poke_user_compat(struct task_struct *child, regs->psw.mask = (regs->psw.mask & ~PSW_MASK_BA) | (__u64)(tmp & PSW32_ADDR_AMODE); } else { + + if (test_pt_regs_flag(regs, PIF_SYSCALL) && + addr == offsetof(struct compat_user, regs.gprs[2])) + fixup_int_code(child, data); /* gpr 0-15 */ *(__u32*)((addr_t) &regs->psw + addr*2 + 4) = tmp; } @@ -844,11 +873,9 @@ asmlinkage long do_syscall_trace_enter(struct pt_regs *regs) * call number to gprs[2]. */ if (test_thread_flag(TIF_SYSCALL_TRACE) && - (tracehook_report_syscall_entry(regs) || - regs->gprs[2] >= NR_syscalls)) { + tracehook_report_syscall_entry(regs)) { /* - * Tracing decided this syscall should not happen or the - * debugger stored an invalid system call number. Skip + * Tracing decided this syscall should not happen. Skip * the system call and the system call restart handling. */ clear_pt_regs_flag(regs, PIF_SYSCALL); @@ -1256,7 +1283,6 @@ static bool is_ri_cb_valid(struct runtime_instr_cb *cb) cb->pc == 1 && cb->qc == 0 && cb->reserved2 == 0 && - cb->key == PAGE_DEFAULT_KEY && cb->reserved3 == 0 && cb->reserved4 == 0 && cb->reserved5 == 0 && @@ -1320,7 +1346,11 @@ static int s390_runtime_instr_set(struct task_struct *target, kfree(data); return -EINVAL; } - + /* + * Override access key in any case, since user space should + * not be able to set it, nor should it care about it. 
+ */ + ri_cb.key = PAGE_DEFAULT_KEY >> 4; preempt_disable(); if (!target->thread.ri_cb) target->thread.ri_cb = data; diff --git a/arch/s390/kernel/runtime_instr.c b/arch/s390/kernel/runtime_instr.c index 125c7f6e8715..1788a5454b6f 100644 --- a/arch/s390/kernel/runtime_instr.c +++ b/arch/s390/kernel/runtime_instr.c @@ -57,7 +57,7 @@ static void init_runtime_instr_cb(struct runtime_instr_cb *cb) cb->k = 1; cb->ps = 1; cb->pc = 1; - cb->key = PAGE_DEFAULT_KEY; + cb->key = PAGE_DEFAULT_KEY >> 4; cb->v = 1; } diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index 4366962f4930..07b2b61a0289 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c @@ -1120,6 +1120,7 @@ void __init setup_arch(char **cmdline_p) if (IS_ENABLED(CONFIG_EXPOLINE_AUTO)) nospec_auto_detect(); + jump_label_init(); parse_early_param(); #ifdef CONFIG_CRASH_DUMP /* Deactivate elfcorehdr= kernel parameter */ diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c index e8766beee5ad..8ea9db599d38 100644 --- a/arch/s390/kernel/time.c +++ b/arch/s390/kernel/time.c @@ -310,6 +310,7 @@ void update_vsyscall(struct timekeeper *tk) vdso_data->tk_mult = tk->tkr_mono.mult; vdso_data->tk_shift = tk->tkr_mono.shift; + vdso_data->hrtimer_res = hrtimer_resolution; smp_wmb(); ++vdso_data->tb_update_count; } diff --git a/arch/s390/kernel/vdso64/Makefile b/arch/s390/kernel/vdso64/Makefile index bec19e7e6e1c..4a66a1cb919b 100644 --- a/arch/s390/kernel/vdso64/Makefile +++ b/arch/s390/kernel/vdso64/Makefile @@ -18,8 +18,8 @@ KBUILD_AFLAGS_64 += -m64 -s KBUILD_CFLAGS_64 := $(filter-out -m64,$(KBUILD_CFLAGS)) KBUILD_CFLAGS_64 += -m64 -fPIC -shared -fno-common -fno-builtin -KBUILD_CFLAGS_64 += -nostdlib -Wl,-soname=linux-vdso64.so.1 \ - -Wl,--hash-style=both +ldflags-y := -fPIC -shared -nostdlib -soname=linux-vdso64.so.1 \ + --hash-style=both --build-id -T $(targets:%=$(obj)/%.dbg): KBUILD_CFLAGS = $(KBUILD_CFLAGS_64) $(targets:%=$(obj)/%.dbg): KBUILD_AFLAGS = $(KBUILD_AFLAGS_64) @@ -37,8 +37,8 @@ KASAN_SANITIZE := n $(obj)/vdso64_wrapper.o : $(obj)/vdso64.so # link rule for the .so file, .lds has to be first -$(obj)/vdso64.so.dbg: $(src)/vdso64.lds $(obj-vdso64) FORCE - $(call if_changed,vdso64ld) +$(obj)/vdso64.so.dbg: $(obj)/vdso64.lds $(obj-vdso64) FORCE + $(call if_changed,ld) # strip rule for the .so file $(obj)/%.so: OBJCOPYFLAGS := -S @@ -50,8 +50,6 @@ $(obj-vdso64): %.o: %.S FORCE $(call if_changed_dep,vdso64as) # actual build commands -quiet_cmd_vdso64ld = VDSO64L $@ - cmd_vdso64ld = $(CC) $(c_flags) -Wl,-T $(filter %.lds %.o,$^) -o $@ quiet_cmd_vdso64as = VDSO64A $@ cmd_vdso64as = $(CC) $(a_flags) -c -o $@ $< diff --git a/arch/s390/kernel/vdso64/clock_getres.S b/arch/s390/kernel/vdso64/clock_getres.S index 081435398e0a..0c79caa32b59 100644 --- a/arch/s390/kernel/vdso64/clock_getres.S +++ b/arch/s390/kernel/vdso64/clock_getres.S @@ -17,12 +17,14 @@ .type __kernel_clock_getres,@function __kernel_clock_getres: CFI_STARTPROC - larl %r1,4f + larl %r1,3f + lg %r0,0(%r1) cghi %r2,__CLOCK_REALTIME_COARSE je 0f cghi %r2,__CLOCK_MONOTONIC_COARSE je 0f - larl %r1,3f + larl %r1,_vdso_data + llgf %r0,__VDSO_CLOCK_REALTIME_RES(%r1) cghi %r2,__CLOCK_REALTIME je 0f cghi %r2,__CLOCK_MONOTONIC @@ -36,7 +38,6 @@ __kernel_clock_getres: jz 2f 0: ltgr %r3,%r3 jz 1f /* res == NULL */ - lg %r0,0(%r1) xc 0(8,%r3),0(%r3) /* set tp->tv_sec to zero */ stg %r0,8(%r3) /* store tp->tv_usec */ 1: lghi %r2,0 @@ -45,6 +46,5 @@ __kernel_clock_getres: svc 0 br %r14 CFI_ENDPROC -3: .quad __CLOCK_REALTIME_RES -4: .quad __CLOCK_COARSE_RES 
+3: .quad __CLOCK_COARSE_RES .size __kernel_clock_getres,.-__kernel_clock_getres diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c index 364e3a89c096..4fa7a562c6fc 100644 --- a/arch/s390/mm/gmap.c +++ b/arch/s390/mm/gmap.c @@ -2485,23 +2485,36 @@ void gmap_sync_dirty_log_pmd(struct gmap *gmap, unsigned long bitmap[4], } EXPORT_SYMBOL_GPL(gmap_sync_dirty_log_pmd); +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +static int thp_split_walk_pmd_entry(pmd_t *pmd, unsigned long addr, + unsigned long end, struct mm_walk *walk) +{ + struct vm_area_struct *vma = walk->vma; + + split_huge_pmd(vma, pmd, addr); + return 0; +} + +static const struct mm_walk_ops thp_split_walk_ops = { + .pmd_entry = thp_split_walk_pmd_entry, +}; + static inline void thp_split_mm(struct mm_struct *mm) { -#ifdef CONFIG_TRANSPARENT_HUGEPAGE struct vm_area_struct *vma; - unsigned long addr; for (vma = mm->mmap; vma != NULL; vma = vma->vm_next) { - for (addr = vma->vm_start; - addr < vma->vm_end; - addr += PAGE_SIZE) - follow_page(vma, addr, FOLL_SPLIT); vma->vm_flags &= ~VM_HUGEPAGE; vma->vm_flags |= VM_NOHUGEPAGE; + walk_page_vma(vma, &thp_split_walk_ops, NULL); } mm->def_flags |= VM_NOHUGEPAGE; -#endif } +#else +static inline void thp_split_mm(struct mm_struct *mm) +{ +} +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ /* * Remove all empty zero pages from the mapping for lazy refaulting diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c index 7dfae86afa47..ff8234bca56c 100644 --- a/arch/s390/mm/hugetlbpage.c +++ b/arch/s390/mm/hugetlbpage.c @@ -117,7 +117,7 @@ static inline pte_t __rste_to_pte(unsigned long rste) _PAGE_YOUNG); #ifdef CONFIG_MEM_SOFT_DIRTY pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_SOFT_DIRTY, - _PAGE_DIRTY); + _PAGE_SOFT_DIRTY); #endif pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_NOEXEC, _PAGE_NOEXEC); diff --git a/arch/s390/mm/maccess.c b/arch/s390/mm/maccess.c index de7ca4b6718f..1d17413b319a 100644 --- a/arch/s390/mm/maccess.c +++ b/arch/s390/mm/maccess.c @@ -55,19 +55,26 @@ static notrace long s390_kernel_write_odd(void *dst, const void *src, size_t siz */ static DEFINE_SPINLOCK(s390_kernel_write_lock); -void notrace s390_kernel_write(void *dst, const void *src, size_t size) +notrace void *s390_kernel_write(void *dst, const void *src, size_t size) { + void *tmp = dst; unsigned long flags; long copied; spin_lock_irqsave(&s390_kernel_write_lock, flags); - while (size) { - copied = s390_kernel_write_odd(dst, src, size); - dst += copied; - src += copied; - size -= copied; + if (!(flags & PSW_MASK_DAT)) { + memcpy(dst, src, size); + } else { + while (size) { + copied = s390_kernel_write_odd(tmp, src, size); + tmp += copied; + src += copied; + size -= copied; + } } spin_unlock_irqrestore(&s390_kernel_write_lock, flags); + + return dst; } static int __no_sanitize_address __memcpy_real(void *dest, void *src, size_t count) diff --git a/arch/s390/numa/numa.c b/arch/s390/numa/numa.c index d2910fa834c8..8386c58fdb3a 100644 --- a/arch/s390/numa/numa.c +++ b/arch/s390/numa/numa.c @@ -49,12 +49,6 @@ void numa_update_cpu_topology(void) mode->update_cpu_topology(); } -int __node_distance(int a, int b) -{ - return mode->distance ? 
mode->distance(a, b) : 0; -} -EXPORT_SYMBOL(__node_distance); - int numa_debug_enabled; /* diff --git a/arch/sh/boards/mach-landisk/setup.c b/arch/sh/boards/mach-landisk/setup.c index 16b4d8b0bb85..2c44b94f82fb 100644 --- a/arch/sh/boards/mach-landisk/setup.c +++ b/arch/sh/boards/mach-landisk/setup.c @@ -82,6 +82,9 @@ device_initcall(landisk_devices_setup); static void __init landisk_setup(char **cmdline_p) { + /* I/O port identity mapping */ + __set_io_port_base(0); + /* LED ON */ __raw_writeb(__raw_readb(PA_LED) | 0x03, PA_LED); diff --git a/arch/sh/configs/sh03_defconfig b/arch/sh/configs/sh03_defconfig index e5beb625ab88..87db9a84b5ec 100644 --- a/arch/sh/configs/sh03_defconfig +++ b/arch/sh/configs/sh03_defconfig @@ -46,7 +46,6 @@ CONFIG_BLK_DEV_IDETAPE=m CONFIG_SCSI=m CONFIG_BLK_DEV_SD=m CONFIG_BLK_DEV_SR=m -CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_CHR_DEV_SG=m CONFIG_NETDEVICES=y CONFIG_NET_ETHERNET=y diff --git a/arch/sh/include/asm/pgalloc.h b/arch/sh/include/asm/pgalloc.h index 22d968bfe9bb..d770da3f8b6f 100644 --- a/arch/sh/include/asm/pgalloc.h +++ b/arch/sh/include/asm/pgalloc.h @@ -12,6 +12,7 @@ extern void pgd_free(struct mm_struct *mm, pgd_t *pgd); extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd); extern pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address); extern void pmd_free(struct mm_struct *mm, pmd_t *pmd); +#define __pmd_free_tlb(tlb, pmdp, addr) pmd_free((tlb)->mm, (pmdp)) #endif static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, @@ -33,13 +34,4 @@ do { \ tlb_remove_page((tlb), (pte)); \ } while (0) -#if CONFIG_PGTABLE_LEVELS > 2 -#define __pmd_free_tlb(tlb, pmdp, addr) \ -do { \ - struct page *page = virt_to_page(pmdp); \ - pgtable_pmd_page_dtor(page); \ - tlb_remove_page((tlb), page); \ -} while (0); -#endif - #endif /* __ASM_SH_PGALLOC_H */ diff --git a/arch/sh/kernel/entry-common.S b/arch/sh/kernel/entry-common.S index d31f66e82ce5..4a8ec9e40cc2 100644 --- a/arch/sh/kernel/entry-common.S +++ b/arch/sh/kernel/entry-common.S @@ -199,7 +199,7 @@ syscall_trace_entry: mov.l @(OFF_R7,r15), r7 ! arg3 mov.l @(OFF_R3,r15), r3 ! syscall_nr ! - mov.l 2f, r10 ! Number of syscalls + mov.l 6f, r10 ! Number of syscalls cmp/hs r10, r3 bf syscall_call mov #-ENOSYS, r0 @@ -353,7 +353,7 @@ ENTRY(system_call) tst r9, r8 bf syscall_trace_entry ! - mov.l 2f, r8 ! Number of syscalls + mov.l 6f, r8 ! Number of syscalls cmp/hs r8, r3 bt syscall_badsys ! 
@@ -392,7 +392,7 @@ syscall_exit: #if !defined(CONFIG_CPU_SH2) 1: .long TRA #endif -2: .long NR_syscalls +6: .long NR_syscalls 3: .long sys_call_table 7: .long do_syscall_trace_enter 8: .long do_syscall_trace_leave diff --git a/arch/sparc/configs/sparc64_defconfig b/arch/sparc/configs/sparc64_defconfig index 6c325d53a20a..bde4d21a8ac8 100644 --- a/arch/sparc/configs/sparc64_defconfig +++ b/arch/sparc/configs/sparc64_defconfig @@ -73,7 +73,6 @@ CONFIG_RAID_ATTRS=m CONFIG_SCSI=y CONFIG_BLK_DEV_SD=y CONFIG_BLK_DEV_SR=m -CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_CHR_DEV_SG=m CONFIG_SCSI_MULTI_LUN=y CONFIG_SCSI_CONSTANTS=y diff --git a/arch/sparc/kernel/ptrace_32.c b/arch/sparc/kernel/ptrace_32.c index 16b50afe7b52..646dd58169ec 100644 --- a/arch/sparc/kernel/ptrace_32.c +++ b/arch/sparc/kernel/ptrace_32.c @@ -46,82 +46,79 @@ enum sparc_regset { REGSET_FP, }; +static int regwindow32_get(struct task_struct *target, + const struct pt_regs *regs, + u32 *uregs) +{ + unsigned long reg_window = regs->u_regs[UREG_I6]; + int size = 16 * sizeof(u32); + + if (target == current) { + if (copy_from_user(uregs, (void __user *)reg_window, size)) + return -EFAULT; + } else { + if (access_process_vm(target, reg_window, uregs, size, + FOLL_FORCE) != size) + return -EFAULT; + } + return 0; +} + +static int regwindow32_set(struct task_struct *target, + const struct pt_regs *regs, + u32 *uregs) +{ + unsigned long reg_window = regs->u_regs[UREG_I6]; + int size = 16 * sizeof(u32); + + if (target == current) { + if (copy_to_user((void __user *)reg_window, uregs, size)) + return -EFAULT; + } else { + if (access_process_vm(target, reg_window, uregs, size, + FOLL_FORCE | FOLL_WRITE) != size) + return -EFAULT; + } + return 0; +} + static int genregs32_get(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf) { const struct pt_regs *regs = target->thread.kregs; - unsigned long __user *reg_window; - unsigned long *k = kbuf; - unsigned long __user *u = ubuf; - unsigned long reg; + u32 uregs[16]; + int ret; if (target == current) flush_user_windows(); - pos /= sizeof(reg); - count /= sizeof(reg); + ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, + regs->u_regs, + 0, 16 * sizeof(u32)); + if (ret || !count) + return ret; - if (kbuf) { - for (; count > 0 && pos < 16; count--) - *k++ = regs->u_regs[pos++]; - - reg_window = (unsigned long __user *) regs->u_regs[UREG_I6]; - reg_window -= 16; - for (; count > 0 && pos < 32; count--) { - if (get_user(*k++, &reg_window[pos++])) - return -EFAULT; - } - } else { - for (; count > 0 && pos < 16; count--) { - if (put_user(regs->u_regs[pos++], u++)) - return -EFAULT; - } - - reg_window = (unsigned long __user *) regs->u_regs[UREG_I6]; - reg_window -= 16; - for (; count > 0 && pos < 32; count--) { - if (get_user(reg, &reg_window[pos++]) || - put_user(reg, u++)) - return -EFAULT; - } - } - while (count > 0) { - switch (pos) { - case 32: /* PSR */ - reg = regs->psr; - break; - case 33: /* PC */ - reg = regs->pc; - break; - case 34: /* NPC */ - reg = regs->npc; - break; - case 35: /* Y */ - reg = regs->y; - break; - case 36: /* WIM */ - case 37: /* TBR */ - reg = 0; - break; - default: - goto finish; - } - - if (kbuf) - *k++ = reg; - else if (put_user(reg, u++)) + if (pos < 32 * sizeof(u32)) { + if (regwindow32_get(target, regs, uregs)) return -EFAULT; - pos++; - count--; + ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, + uregs, + 16 * sizeof(u32), 32 * sizeof(u32)); + if (ret || !count) + return ret; } -finish: - pos
*= sizeof(reg); - count *= sizeof(reg); - return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf, - 38 * sizeof(reg), -1); + uregs[0] = regs->psr; + uregs[1] = regs->pc; + uregs[2] = regs->npc; + uregs[3] = regs->y; + uregs[4] = 0; /* WIM */ + uregs[5] = 0; /* TBR */ + return user_regset_copyout(&pos, &count, &kbuf, &ubuf, + uregs, + 32 * sizeof(u32), 38 * sizeof(u32)); } static int genregs32_set(struct task_struct *target, @@ -130,82 +127,58 @@ static int genregs32_set(struct task_struct *target, const void *kbuf, const void __user *ubuf) { struct pt_regs *regs = target->thread.kregs; - unsigned long __user *reg_window; - const unsigned long *k = kbuf; - const unsigned long __user *u = ubuf; - unsigned long reg; + u32 uregs[16]; + u32 psr; + int ret; if (target == current) flush_user_windows(); - pos /= sizeof(reg); - count /= sizeof(reg); + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, + regs->u_regs, + 0, 16 * sizeof(u32)); + if (ret || !count) + return ret; - if (kbuf) { - for (; count > 0 && pos < 16; count--) - regs->u_regs[pos++] = *k++; - - reg_window = (unsigned long __user *) regs->u_regs[UREG_I6]; - reg_window -= 16; - for (; count > 0 && pos < 32; count--) { - if (put_user(*k++, &reg_window[pos++])) - return -EFAULT; - } - } else { - for (; count > 0 && pos < 16; count--) { - if (get_user(reg, u++)) - return -EFAULT; - regs->u_regs[pos++] = reg; - } - - reg_window = (unsigned long __user *) regs->u_regs[UREG_I6]; - reg_window -= 16; - for (; count > 0 && pos < 32; count--) { - if (get_user(reg, u++) || - put_user(reg, &reg_window[pos++])) - return -EFAULT; - } - } - while (count > 0) { - unsigned long psr; - - if (kbuf) - reg = *k++; - else if (get_user(reg, u++)) + if (pos < 32 * sizeof(u32)) { + if (regwindow32_get(target, regs, uregs)) return -EFAULT; - - switch (pos) { - case 32: /* PSR */ - psr = regs->psr; - psr &= ~(PSR_ICC | PSR_SYSCALL); - psr |= (reg & (PSR_ICC | PSR_SYSCALL)); - regs->psr = psr; - break; - case 33: /* PC */ - regs->pc = reg; - break; - case 34: /* NPC */ - regs->npc = reg; - break; - case 35: /* Y */ - regs->y = reg; - break; - case 36: /* WIM */ - case 37: /* TBR */ - break; - default: - goto finish; - } - - pos++; - count--; + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, + uregs, + 16 * sizeof(u32), 32 * sizeof(u32)); + if (ret) + return ret; + if (regwindow32_set(target, regs, uregs)) + return -EFAULT; + if (!count) + return 0; } -finish: - pos *= sizeof(reg); - count *= sizeof(reg); - + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, + &psr, + 32 * sizeof(u32), 33 * sizeof(u32)); + if (ret) + return ret; + regs->psr = (regs->psr & ~(PSR_ICC | PSR_SYSCALL)) | + (psr & (PSR_ICC | PSR_SYSCALL)); + if (!count) + return 0; + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, + &regs->pc, + 33 * sizeof(u32), 34 * sizeof(u32)); + if (ret || !count) + return ret; + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, + &regs->npc, + 34 * sizeof(u32), 35 * sizeof(u32)); + if (ret || !count) + return ret; + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, + &regs->y, + 35 * sizeof(u32), 36 * sizeof(u32)); + if (ret || !count) + return ret; return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, - 38 * sizeof(reg), -1); + 36 * sizeof(u32), 38 * sizeof(u32)); } static int fpregs32_get(struct task_struct *target, diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c index c9d41a96468f..3f5930bfab06 100644 --- a/arch/sparc/kernel/ptrace_64.c +++ b/arch/sparc/kernel/ptrace_64.c @@ -572,19 +572,13 @@ static int
genregs32_get(struct task_struct *target, for (; count > 0 && pos < 32; count--) { if (access_process_vm(target, (unsigned long) - &reg_window[pos], + &reg_window[pos++], &reg, sizeof(reg), FOLL_FORCE) != sizeof(reg)) return -EFAULT; - if (access_process_vm(target, - (unsigned long) u, - &reg, sizeof(reg), - FOLL_FORCE | FOLL_WRITE) - != sizeof(reg)) + if (put_user(reg, u++)) return -EFAULT; - pos++; - u++; } } } @@ -684,12 +678,7 @@ static int genregs32_set(struct task_struct *target, } } else { for (; count > 0 && pos < 32; count--) { - if (access_process_vm(target, - (unsigned long) - u, - &reg, sizeof(reg), - FOLL_FORCE) - != sizeof(reg)) + if (get_user(reg, u++)) return -EFAULT; if (access_process_vm(target, (unsigned long) diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c index cc3ad64479ac..9e256d4d1f4c 100644 --- a/arch/sparc/mm/srmmu.c +++ b/arch/sparc/mm/srmmu.c @@ -379,7 +379,6 @@ pgtable_t pte_alloc_one(struct mm_struct *mm) return NULL; page = pfn_to_page(__nocache_pa(pte) >> PAGE_SHIFT); if (!pgtable_pte_page_ctor(page)) { - __free_page(page); return NULL; } return page; } diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile index e2839b5c246c..6539c50fb9aa 100644 --- a/arch/x86/boot/Makefile +++ b/arch/x86/boot/Makefile @@ -87,7 +87,7 @@ $(obj)/vmlinux.bin: $(obj)/compressed/vmlinux FORCE SETUP_OBJS = $(addprefix $(obj)/,$(setup-y)) -sed-zoffset := -e 's/^\([0-9a-fA-F]*\) [ABCDGRSTVW] \(startup_32\|startup_64\|efi32_stub_entry\|efi64_stub_entry\|efi_pe_entry\|input_data\|_end\|_ehead\|_text\|z_.*\)$$/\#define ZO_\2 0x\1/p' +sed-zoffset := -e 's/^\([0-9a-fA-F]*\) [a-zA-Z] \(startup_32\|startup_64\|efi32_stub_entry\|efi64_stub_entry\|efi_pe_entry\|input_data\|_end\|_ehead\|_text\|z_.*\)$$/\#define ZO_\2 0x\1/p' quiet_cmd_zoffset = ZOFFSET $@ cmd_zoffset = $(NM) $< | sed -n $(sed-zoffset) > $@ diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile index 6b84afdd7538..292b5bc6e3a3 100644 --- a/arch/x86/boot/compressed/Makefile +++ b/arch/x86/boot/compressed/Makefile @@ -38,6 +38,8 @@ KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector) KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member) KBUILD_CFLAGS += $(call cc-disable-warning, gnu) KBUILD_CFLAGS += -Wno-pointer-sign +# Disable relocation relaxation in case the link is not PIE. +KBUILD_CFLAGS += $(call as-option,-Wa$(comma)-mrelax-relocations=no) KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__ GCOV_PROFILE := n @@ -102,7 +104,7 @@ vmlinux-objs-$(CONFIG_EFI_MIXED) += $(obj)/efi_thunk_$(BITS).o quiet_cmd_check_data_rel = DATAREL $@ define cmd_check_data_rel for obj in $(filter %.o,$^); do \ - ${CROSS_COMPILE}readelf -S $$obj | grep -qF .rel.local && { \ + $(READELF) -S $$obj | grep -qF .rel.local && { \ echo "error: $$obj has data relocations!" >&2; \ exit 1; \ } || true; \ diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S index 70ffce98c568..d7c0fcc1dbf9 100644 --- a/arch/x86/boot/compressed/head_32.S +++ b/arch/x86/boot/compressed/head_32.S @@ -49,16 +49,17 @@ * Position Independent Executable (PIE) so that linker won't optimize * R_386_GOT32X relocation to its fixed symbol address. Older * linkers generate R_386_32 relocations against locally defined symbols, - * _bss, _ebss, _got and _egot, in PIE. It isn't wrong, just less + * _bss, _ebss, _got, _egot and _end, in PIE. It isn't wrong, just less * optimal than R_386_RELATIVE. But the x86 kernel fails to properly handle * R_386_32 relocations when relocating the kernel.
To generate - * R_386_RELATIVE relocations, we mark _bss, _ebss, _got and _egot as + * R_386_RELATIVE relocations, we mark _bss, _ebss, _got, _egot and _end as * hidden: */ .hidden _bss .hidden _ebss .hidden _got .hidden _egot + .hidden _end __HEAD ENTRY(startup_32) diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S index 07d2002da642..50c9eeb36f0d 100644 --- a/arch/x86/boot/compressed/head_64.S +++ b/arch/x86/boot/compressed/head_64.S @@ -42,6 +42,7 @@ .hidden _ebss .hidden _got .hidden _egot + .hidden _end __HEAD .code32 diff --git a/arch/x86/configs/i386_defconfig b/arch/x86/configs/i386_defconfig index 59ce9ed58430..088709089e9b 100644 --- a/arch/x86/configs/i386_defconfig +++ b/arch/x86/configs/i386_defconfig @@ -137,7 +137,6 @@ CONFIG_CONNECTOR=y CONFIG_BLK_DEV_LOOP=y CONFIG_BLK_DEV_SD=y CONFIG_BLK_DEV_SR=y -CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_CHR_DEV_SG=y CONFIG_SCSI_CONSTANTS=y CONFIG_SCSI_SPI_ATTRS=y @@ -205,7 +204,6 @@ CONFIG_FB_MODE_HELPERS=y CONFIG_FB_TILEBLITTING=y CONFIG_FB_EFI=y # CONFIG_LCD_CLASS_DEVICE is not set -CONFIG_VGACON_SOFT_SCROLLBACK=y CONFIG_LOGO=y # CONFIG_LOGO_LINUX_MONO is not set # CONFIG_LOGO_LINUX_VGA16 is not set diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig index d0a5ffeae8df..8092d7baf8b5 100644 --- a/arch/x86/configs/x86_64_defconfig +++ b/arch/x86/configs/x86_64_defconfig @@ -136,7 +136,6 @@ CONFIG_CONNECTOR=y CONFIG_BLK_DEV_LOOP=y CONFIG_BLK_DEV_SD=y CONFIG_BLK_DEV_SR=y -CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_CHR_DEV_SG=y CONFIG_SCSI_CONSTANTS=y CONFIG_SCSI_SPI_ATTRS=y @@ -201,7 +200,6 @@ CONFIG_FB_MODE_HELPERS=y CONFIG_FB_TILEBLITTING=y CONFIG_FB_EFI=y # CONFIG_LCD_CLASS_DEVICE is not set -CONFIG_VGACON_SOFT_SCROLLBACK=y CONFIG_LOGO=y # CONFIG_LOGO_LINUX_MONO is not set # CONFIG_LOGO_LINUX_VGA16 is not set diff --git a/arch/x86/crypto/aes_ctrby8_avx-x86_64.S b/arch/x86/crypto/aes_ctrby8_avx-x86_64.S index 5f6a5af9c489..77043a82da51 100644 --- a/arch/x86/crypto/aes_ctrby8_avx-x86_64.S +++ b/arch/x86/crypto/aes_ctrby8_avx-x86_64.S @@ -127,10 +127,6 @@ ddq_add_8: /* generate a unique variable for ddq_add_x */ -.macro setddq n - var_ddq_add = ddq_add_\n -.endm - /* generate a unique variable for xmm register */ .macro setxdata n var_xdata = %xmm\n @@ -140,9 +136,7 @@ ddq_add_8: .macro club name, id .altmacro - .if \name == DDQ_DATA - setddq %\id - .elseif \name == XDATA + .if \name == XDATA setxdata %\id .endif .noaltmacro @@ -165,9 +159,8 @@ ddq_add_8: .set i, 1 .rept (by - 1) - club DDQ_DATA, i club XDATA, i - vpaddq var_ddq_add(%rip), xcounter, var_xdata + vpaddq (ddq_add_1 + 16 * (i - 1))(%rip), xcounter, var_xdata vptest ddq_low_msk(%rip), var_xdata jnz 1f vpaddq ddq_high_add_1(%rip), var_xdata, var_xdata @@ -180,8 +173,7 @@ ddq_add_8: vmovdqa 1*16(p_keys), xkeyA vpxor xkey0, xdata0, xdata0 - club DDQ_DATA, by - vpaddq var_ddq_add(%rip), xcounter, xcounter + vpaddq (ddq_add_1 + 16 * (by - 1))(%rip), xcounter, xcounter vptest ddq_low_msk(%rip), xcounter jnz 1f vpaddq ddq_high_add_1(%rip), xcounter, xcounter diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S index e40bdf024ba7..9afeb58c910e 100644 --- a/arch/x86/crypto/aesni-intel_asm.S +++ b/arch/x86/crypto/aesni-intel_asm.S @@ -266,7 +266,7 @@ ALL_F: .octa 0xffffffffffffffffffffffffffffffff PSHUFB_XMM %xmm2, %xmm0 movdqu %xmm0, CurCount(%arg2) # ctx_data.current_counter = iv - PRECOMPUTE \SUBKEY, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, + PRECOMPUTE \SUBKEY, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, 
%xmm6, %xmm7 movdqu HashKey(%arg2), %xmm13 CALC_AAD_HASH %xmm13, \AAD, \AADLEN, %xmm0, %xmm1, %xmm2, %xmm3, \ @@ -978,7 +978,7 @@ _initial_blocks_done\@: * arg1, %arg3, %arg4 are used as pointers only, not modified * %r11 is the data offset value */ -.macro GHASH_4_ENCRYPT_4_PARALLEL_ENC TMP1 TMP2 TMP3 TMP4 TMP5 \ +.macro GHASH_4_ENCRYPT_4_PARALLEL_enc TMP1 TMP2 TMP3 TMP4 TMP5 \ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation movdqa \XMM1, \XMM5 @@ -1186,7 +1186,7 @@ aes_loop_par_enc_done\@: * arg1, %arg3, %arg4 are used as pointers only, not modified * %r11 is the data offset value */ -.macro GHASH_4_ENCRYPT_4_PARALLEL_DEC TMP1 TMP2 TMP3 TMP4 TMP5 \ +.macro GHASH_4_ENCRYPT_4_PARALLEL_dec TMP1 TMP2 TMP3 TMP4 TMP5 \ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation movdqa \XMM1, \XMM5 diff --git a/arch/x86/events/Kconfig b/arch/x86/events/Kconfig index 9a7a1446cb3a..4a809c6cbd2f 100644 --- a/arch/x86/events/Kconfig +++ b/arch/x86/events/Kconfig @@ -10,11 +10,11 @@ config PERF_EVENTS_INTEL_UNCORE available on NehalemEX and more modern processors. config PERF_EVENTS_INTEL_RAPL - tristate "Intel rapl performance events" - depends on PERF_EVENTS && CPU_SUP_INTEL && PCI + tristate "Intel/AMD rapl performance events" + depends on PERF_EVENTS && (CPU_SUP_INTEL || CPU_SUP_AMD) && PCI default y ---help--- - Include support for Intel rapl performance events for power + Include support for Intel and AMD rapl performance events for power monitoring on modern processors. config PERF_EVENTS_INTEL_CSTATE diff --git a/arch/x86/events/Makefile b/arch/x86/events/Makefile index 9e07f554333f..726e83c0a31a 100644 --- a/arch/x86/events/Makefile +++ b/arch/x86/events/Makefile @@ -1,5 +1,6 @@ # SPDX-License-Identifier: GPL-2.0-only obj-y += core.o probe.o +obj-$(CONFIG_PERF_EVENTS_INTEL_RAPL) += rapl.o obj-y += amd/ obj-$(CONFIG_X86_LOCAL_APIC) += msr.o obj-$(CONFIG_CPU_SUP_INTEL) += intel/ diff --git a/arch/x86/events/intel/Makefile b/arch/x86/events/intel/Makefile index 3468b0c1dc7c..e67a5886336c 100644 --- a/arch/x86/events/intel/Makefile +++ b/arch/x86/events/intel/Makefile @@ -2,8 +2,6 @@ obj-$(CONFIG_CPU_SUP_INTEL) += core.o bts.o obj-$(CONFIG_CPU_SUP_INTEL) += ds.o knc.o obj-$(CONFIG_CPU_SUP_INTEL) += lbr.o p4.o p6.o pt.o -obj-$(CONFIG_PERF_EVENTS_INTEL_RAPL) += intel-rapl-perf.o -intel-rapl-perf-objs := rapl.o obj-$(CONFIG_PERF_EVENTS_INTEL_UNCORE) += intel-uncore.o intel-uncore-objs := uncore.o uncore_nhmex.o uncore_snb.o uncore_snbep.o obj-$(CONFIG_PERF_EVENTS_INTEL_CSTATE) += intel-cstate.o diff --git a/arch/x86/events/intel/rapl.c b/arch/x86/events/rapl.c similarity index 98% rename from arch/x86/events/intel/rapl.c rename to arch/x86/events/rapl.c index 5053a403e4ae..187c72a58e69 100644 --- a/arch/x86/events/intel/rapl.c +++ b/arch/x86/events/rapl.c @@ -1,11 +1,14 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Support Intel RAPL energy consumption counters + * Support Intel/AMD RAPL energy consumption counters * Copyright (C) 2013 Google, Inc., Stephane Eranian * * Intel RAPL interface is specified in the IA-32 Manual Vol3b * section 14.7.1 (September 2013) * + * AMD RAPL interface for Fam17h is described in the public PPR: + * https://bugzilla.kernel.org/show_bug.cgi?id=206537 + * * RAPL provides more controls than just reporting energy consumption * however here we only expose the 3 energy consumption free running * counters (pp0, pkg, dram). 
@@ -58,8 +61,8 @@ #include #include #include -#include "../perf_event.h" -#include "../probe.h" +#include "perf_event.h" +#include "probe.h" MODULE_LICENSE("GPL"); @@ -639,7 +642,7 @@ static const struct attribute_group *rapl_attr_update[] = { &rapl_events_pkg_group, &rapl_events_ram_group, &rapl_events_gpu_group, - &rapl_events_gpu_group, + &rapl_events_psys_group, NULL, }; diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h index 44c48e34d799..00eac7f1529b 100644 --- a/arch/x86/include/asm/fpu/internal.h +++ b/arch/x86/include/asm/fpu/internal.h @@ -619,6 +619,11 @@ static inline void switch_fpu_finish(struct fpu *new_fpu) * MXCSR and XCR definitions: */ +static inline void ldmxcsr(u32 mxcsr) +{ + asm volatile("ldmxcsr %0" :: "m" (mxcsr)); +} + extern unsigned int mxcsr_feature_mask; #define XCR_XFEATURE_ENABLED_MASK 0x00000000 diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 7d91a3f5b26a..c41686641c3f 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1160,7 +1160,7 @@ struct kvm_x86_ops { void (*enable_log_dirty_pt_masked)(struct kvm *kvm, struct kvm_memory_slot *slot, gfn_t offset, unsigned long mask); - int (*write_log_dirty)(struct kvm_vcpu *vcpu); + int (*write_log_dirty)(struct kvm_vcpu *vcpu, gpa_t l2_gpa); /* pmu operations of sub-arch */ const struct kvm_pmu_ops *pmu_ops; @@ -1553,7 +1553,8 @@ asmlinkage void kvm_spurious_fault(void); _ASM_EXTABLE(666b, 667b) #define KVM_ARCH_WANT_MMU_NOTIFIER -int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end); +int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end, + unsigned flags); int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end); int kvm_test_age_hva(struct kvm *kvm, unsigned long hva); int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index 54f5d54280f6..a07dfdf7759e 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -334,7 +334,7 @@ struct x86_hw_tss { #define INVALID_IO_BITMAP_OFFSET 0x8000 struct entry_stack { - unsigned long words[64]; + char stack[PAGE_SIZE]; }; struct entry_stack_page { diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h index 332eb3525867..902be2e6e96c 100644 --- a/arch/x86/include/asm/ptrace.h +++ b/arch/x86/include/asm/ptrace.h @@ -309,8 +309,8 @@ static inline unsigned long regs_get_kernel_argument(struct pt_regs *regs, static const unsigned int argument_offs[] = { #ifdef __i386__ offsetof(struct pt_regs, ax), - offsetof(struct pt_regs, cx), offsetof(struct pt_regs, dx), + offsetof(struct pt_regs, cx), #define NR_REG_ARGUMENTS 3 #else offsetof(struct pt_regs, di), diff --git a/arch/x86/include/asm/smap.h b/arch/x86/include/asm/smap.h index 27c47d183f4b..8b58d6975d5d 100644 --- a/arch/x86/include/asm/smap.h +++ b/arch/x86/include/asm/smap.h @@ -57,8 +57,10 @@ static __always_inline unsigned long smap_save(void) { unsigned long flags; - asm volatile (ALTERNATIVE("", "pushf; pop %0; " __ASM_CLAC, - X86_FEATURE_SMAP) + asm volatile ("# smap_save\n\t" + ALTERNATIVE("jmp 1f", "", X86_FEATURE_SMAP) + "pushf; pop %0; " __ASM_CLAC "\n\t" + "1:" : "=rm" (flags) : : "memory", "cc"); return flags; @@ -66,7 +68,10 @@ static __always_inline unsigned long smap_save(void) static __always_inline void smap_restore(unsigned long flags) { - asm volatile (ALTERNATIVE("", "push %0; popf", 
X86_FEATURE_SMAP) + asm volatile ("# smap_restore\n\t" + ALTERNATIVE("jmp 1f", "", X86_FEATURE_SMAP) + "push %0; popf\n\t" + "1:" : : "g" (flags) : "memory", "cc"); } diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c index 251c795b4eb3..c4bc01da820e 100644 --- a/arch/x86/kernel/amd_nb.c +++ b/arch/x86/kernel/amd_nb.c @@ -18,10 +18,13 @@ #define PCI_DEVICE_ID_AMD_17H_ROOT 0x1450 #define PCI_DEVICE_ID_AMD_17H_M10H_ROOT 0x15d0 #define PCI_DEVICE_ID_AMD_17H_M30H_ROOT 0x1480 +#define PCI_DEVICE_ID_AMD_17H_M60H_ROOT 0x1630 #define PCI_DEVICE_ID_AMD_17H_DF_F4 0x1464 #define PCI_DEVICE_ID_AMD_17H_M10H_DF_F4 0x15ec #define PCI_DEVICE_ID_AMD_17H_M30H_DF_F4 0x1494 +#define PCI_DEVICE_ID_AMD_17H_M60H_DF_F4 0x144c #define PCI_DEVICE_ID_AMD_17H_M70H_DF_F4 0x1444 +#define PCI_DEVICE_ID_AMD_19H_DF_F4 0x1654 /* Protect the PCI config register pairs used for SMN and DF indirect access. */ static DEFINE_MUTEX(smn_mutex); @@ -32,6 +35,7 @@ static const struct pci_device_id amd_root_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_ROOT) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_ROOT) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_ROOT) }, + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_ROOT) }, {} }; @@ -50,8 +54,10 @@ const struct pci_device_id amd_nb_misc_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F3) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F3) }, + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_DF_F3) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F3) }, + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_DF_F3) }, {} }; EXPORT_SYMBOL_GPL(amd_nb_misc_ids); @@ -65,7 +71,9 @@ static const struct pci_device_id amd_nb_link_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F4) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F4) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F4) }, + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_DF_F4) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F4) }, + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_DF_F4) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F4) }, {} }; diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index 25b8c45467fc..fce94c799f01 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c @@ -2099,7 +2099,7 @@ void __init init_apic_mappings(void) unsigned int new_apicid; if (apic_validate_deadline_timer()) - pr_debug("TSC deadline timer available\n"); + pr_info("TSC deadline timer available\n"); if (x2apic_mode) { boot_cpu_physical_apicid = read_apic_id(); diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index f0262cb5657a..ea6d9da9b094 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c @@ -2329,12 +2329,12 @@ static int mp_irqdomain_create(int ioapic) ip->irqdomain = irq_domain_create_linear(fn, hwirqs, cfg->ops, (void *)(long)ioapic); - /* Release fw handle if it was allocated above */ - if (!cfg->dev) - irq_domain_free_fwnode(fn); - - if (!ip->irqdomain) + if (!ip->irqdomain) { + /* Release fw handle if it was allocated above */ + if (!cfg->dev) + irq_domain_free_fwnode(fn); return -ENOMEM; + } ip->irqdomain->parent = parent; @@ -2348,8 +2348,13 @@ static int 
mp_irqdomain_create(int ioapic) static void ioapic_destroy_irqdomain(int idx) { + struct ioapic_domain_cfg *cfg = &ioapics[idx].irqdomain_cfg; + struct fwnode_handle *fn = ioapics[idx].irqdomain->fwnode; + if (ioapics[idx].irqdomain) { irq_domain_remove(ioapics[idx].irqdomain); + if (!cfg->dev) + irq_domain_free_fwnode(fn); ioapics[idx].irqdomain = NULL; } } diff --git a/arch/x86/kernel/apic/msi.c b/arch/x86/kernel/apic/msi.c index 159bd0cb8548..a20873bbbed6 100644 --- a/arch/x86/kernel/apic/msi.c +++ b/arch/x86/kernel/apic/msi.c @@ -262,12 +262,13 @@ void __init arch_init_msi_domain(struct irq_domain *parent) msi_default_domain = pci_msi_create_irq_domain(fn, &pci_msi_domain_info, parent); - irq_domain_free_fwnode(fn); } - if (!msi_default_domain) + if (!msi_default_domain) { + irq_domain_free_fwnode(fn); pr_warn("failed to initialize irqdomain for MSI/MSI-x.\n"); - else + } else { msi_default_domain->flags |= IRQ_DOMAIN_MSI_NOMASK_QUIRK; + } } #ifdef CONFIG_IRQ_REMAP @@ -300,7 +301,8 @@ struct irq_domain *arch_create_remap_msi_irq_domain(struct irq_domain *parent, if (!fn) return NULL; d = pci_msi_create_irq_domain(fn, &pci_msi_ir_domain_info, parent); - irq_domain_free_fwnode(fn); + if (!d) + irq_domain_free_fwnode(fn); return d; } #endif @@ -363,7 +365,8 @@ static struct irq_domain *dmar_get_irq_domain(void) if (fn) { dmar_domain = msi_create_irq_domain(fn, &dmar_msi_domain_info, x86_vector_domain); - irq_domain_free_fwnode(fn); + if (!dmar_domain) + irq_domain_free_fwnode(fn); } out: mutex_unlock(&dmar_lock); @@ -488,7 +491,10 @@ struct irq_domain *hpet_create_irq_domain(int hpet_id) } d = msi_create_irq_domain(fn, domain_info, parent); - irq_domain_free_fwnode(fn); + if (!d) { + irq_domain_free_fwnode(fn); + kfree(domain_info); + } return d; } diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c index 2c5676b0a6e7..c8203694d9ce 100644 --- a/arch/x86/kernel/apic/vector.c +++ b/arch/x86/kernel/apic/vector.c @@ -446,12 +446,10 @@ static int x86_vector_activate(struct irq_domain *dom, struct irq_data *irqd, trace_vector_activate(irqd->irq, apicd->is_managed, apicd->can_reserve, reserve); - /* Nothing to do for fixed assigned vectors */ - if (!apicd->can_reserve && !apicd->is_managed) - return 0; - raw_spin_lock_irqsave(&vector_lock, flags); - if (reserve || irqd_is_managed_and_shutdown(irqd)) + if (!apicd->can_reserve && !apicd->is_managed) + assign_irq_vector_any_locked(irqd); + else if (reserve || irqd_is_managed_and_shutdown(irqd)) vector_assign_managed_shutdown(irqd); else if (apicd->is_managed) ret = activate_managed(irqd); @@ -556,6 +554,10 @@ static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq, irqd->chip_data = apicd; irqd->hwirq = virq + i; irqd_set_single_target(irqd); + + /* Don't invoke affinity setter on deactivated interrupts */ + irqd_set_affinity_on_activate(irqd); + /* * Legacy vectors are already assigned when the IOAPIC * takes them over. They stay on the same vector. This is @@ -703,7 +705,6 @@ int __init arch_early_irq_init(void) x86_vector_domain = irq_domain_create_tree(fn, &x86_vector_domain_ops, NULL); BUG_ON(x86_vector_domain == NULL); - irq_domain_free_fwnode(fn); irq_set_default_host(x86_vector_domain); arch_init_msi_domain(x86_vector_domain); @@ -769,20 +770,10 @@ void lapic_offline(void) static int apic_set_affinity(struct irq_data *irqd, const struct cpumask *dest, bool force) { - struct apic_chip_data *apicd = apic_chip_data(irqd); int err; - /* - * Core code can call here for inactive interrupts. 
For inactive - * interrupts which use managed or reservation mode there is no - * point in going through the vector assignment right now as the - * activation will assign a vector which fits the destination - * cpumask. Let the core code store the destination mask and be - * done with it. - */ - if (!irqd_is_activated(irqd) && - (apicd->is_managed || apicd->can_reserve)) - return IRQ_SET_MASK_OK; + if (WARN_ON_ONCE(!irqd_is_activated(irqd))) + return -EIO; raw_spin_lock(&vector_lock); cpumask_and(vector_searchmask, dest, cpu_online_mask); diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 650df6d21049..9b3f25e14608 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -366,6 +366,9 @@ out: cr4_clear_bits(X86_CR4_UMIP); } +/* These bits should not change their value after CPU init is finished. */ +static const unsigned long cr4_pinned_mask = + X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_UMIP | X86_CR4_FSGSBASE; static DEFINE_STATIC_KEY_FALSE_RO(cr_pinning); static unsigned long cr4_pinned_bits __ro_after_init; @@ -390,20 +393,20 @@ EXPORT_SYMBOL(native_write_cr0); void native_write_cr4(unsigned long val) { - unsigned long bits_missing = 0; + unsigned long bits_changed = 0; set_register: asm volatile("mov %0,%%cr4": "+r" (val), "+m" (cr4_pinned_bits)); if (static_branch_likely(&cr_pinning)) { - if (unlikely((val & cr4_pinned_bits) != cr4_pinned_bits)) { - bits_missing = ~val & cr4_pinned_bits; - val |= bits_missing; + if (unlikely((val & cr4_pinned_mask) != cr4_pinned_bits)) { + bits_changed = (val & cr4_pinned_mask) ^ cr4_pinned_bits; + val = (val & ~cr4_pinned_mask) | cr4_pinned_bits; goto set_register; } - /* Warn after we've set the missing bits. */ - WARN_ONCE(bits_missing, "CR4 bits went missing: %lx!?\n", - bits_missing); + /* Warn after we've corrected the changed bits. 
*/ + WARN_ONCE(bits_changed, "pinned CR4 bits changed: 0x%lx!?\n", + bits_changed); } } EXPORT_SYMBOL(native_write_cr4); @@ -415,7 +418,7 @@ void cr4_init(void) if (boot_cpu_has(X86_FEATURE_PCID)) cr4 |= X86_CR4_PCIDE; if (static_branch_likely(&cr_pinning)) - cr4 |= cr4_pinned_bits; + cr4 = (cr4 & ~cr4_pinned_mask) | cr4_pinned_bits; __write_cr4(cr4); @@ -430,10 +433,7 @@ void cr4_init(void) */ static void __init setup_cr_pinning(void) { - unsigned long mask; - - mask = (X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_UMIP); - cr4_pinned_bits = this_cpu_read(cpu_tlbstate.cr4) & mask; + cr4_pinned_bits = this_cpu_read(cpu_tlbstate.cr4) & cr4_pinned_mask; static_key_enable(&cr_pinning.key); } diff --git a/arch/x86/kernel/cpu/mce/inject.c b/arch/x86/kernel/cpu/mce/inject.c index 1f30117b24ba..eb2d41c1816d 100644 --- a/arch/x86/kernel/cpu/mce/inject.c +++ b/arch/x86/kernel/cpu/mce/inject.c @@ -511,7 +511,7 @@ static void do_inject(void) */ if (inj_type == DFR_INT_INJ) { i_mce.status |= MCI_STATUS_DEFERRED; - i_mce.status |= (i_mce.status & ~MCI_STATUS_UC); + i_mce.status &= ~MCI_STATUS_UC; } /* diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c index d8cc5223b7ce..87a34b6e06a2 100644 --- a/arch/x86/kernel/cpu/resctrl/core.c +++ b/arch/x86/kernel/cpu/resctrl/core.c @@ -260,6 +260,7 @@ static bool __get_mem_config_intel(struct rdt_resource *r) r->num_closid = edx.split.cos_max + 1; r->membw.max_delay = eax.split.max_delay + 1; r->default_ctrl = MAX_MBA_BW; + r->membw.mbm_width = MBM_CNTR_WIDTH; if (ecx & MBA_IS_LINEAR) { r->membw.delay_linear = true; r->membw.min_bw = MAX_MBA_BW - r->membw.max_delay; @@ -289,6 +290,7 @@ static bool __rdt_get_mem_config_amd(struct rdt_resource *r) /* AMD does not use delay */ r->membw.delay_linear = false; + r->membw.mbm_width = MBM_CNTR_WIDTH_AMD; r->membw.min_bw = 0; r->membw.bw_gran = 1; /* Max value is 2048, Data width should be 4 in decimal */ diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h index 3dd13f3a8b23..17095435c875 100644 --- a/arch/x86/kernel/cpu/resctrl/internal.h +++ b/arch/x86/kernel/cpu/resctrl/internal.h @@ -32,6 +32,7 @@ #define CQM_LIMBOCHECK_INTERVAL 1000 #define MBM_CNTR_WIDTH 24 +#define MBM_CNTR_WIDTH_AMD 44 #define MBM_OVERFLOW_INTERVAL 1000 #define MAX_MBA_BW 100u #define MBA_IS_LINEAR 0x4 @@ -368,6 +369,7 @@ struct rdt_cache { * @min_bw: Minimum memory bandwidth percentage user can request * @bw_gran: Granularity at which the memory bandwidth is allocated * @delay_linear: True if memory B/W delay is in linear scale + * @mbm_width: memory B/W monitor counter width * @mba_sc: True if MBA software controller(mba_sc) is enabled * @mb_map: Mapping of memory B/W percentage to memory B/W delay */ @@ -376,6 +378,7 @@ struct rdt_membw { u32 min_bw; u32 bw_gran; u32 delay_linear; + u32 mbm_width; bool mba_sc; u32 *mb_map; }; diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c index 773124b0e18a..0cf4f87f6012 100644 --- a/arch/x86/kernel/cpu/resctrl/monitor.c +++ b/arch/x86/kernel/cpu/resctrl/monitor.c @@ -216,8 +216,9 @@ void free_rmid(u32 rmid) static u64 mbm_overflow_count(u64 prev_msr, u64 cur_msr) { - u64 shift = 64 - MBM_CNTR_WIDTH, chunks; + u64 shift, chunks; + shift = 64 - rdt_resources_all[RDT_RESOURCE_MBA].membw.mbm_width; chunks = (cur_msr << shift) - (prev_msr << shift); return chunks >>= shift; } diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index 20856d80dce3..54b711bc0607 100644 
--- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -1027,6 +1027,7 @@ static int rdt_cdp_peer_get(struct rdt_resource *r, struct rdt_domain *d, _d_cdp = rdt_find_domain(_r_cdp, d->id, NULL); if (WARN_ON(IS_ERR_OR_NULL(_d_cdp))) { _r_cdp = NULL; + _d_cdp = NULL; ret = -EINVAL; } diff --git a/arch/x86/kernel/cpu/umwait.c b/arch/x86/kernel/cpu/umwait.c index c222f283b456..32b4dc9030aa 100644 --- a/arch/x86/kernel/cpu/umwait.c +++ b/arch/x86/kernel/cpu/umwait.c @@ -17,12 +17,6 @@ */ static u32 umwait_control_cached = UMWAIT_CTRL_VAL(100000, UMWAIT_C02_ENABLE); -u32 get_umwait_control_msr(void) -{ - return umwait_control_cached; -} -EXPORT_SYMBOL_GPL(get_umwait_control_msr); - /* * Cache the original IA32_UMWAIT_CONTROL MSR value which is configured by * hardware or BIOS before kernel boot. diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c index 12c70840980e..cd8839027f66 100644 --- a/arch/x86/kernel/fpu/core.c +++ b/arch/x86/kernel/fpu/core.c @@ -101,6 +101,12 @@ void kernel_fpu_begin(void) copy_fpregs_to_fpstate(¤t->thread.fpu); } __cpu_invalidate_fpregs_state(); + + if (boot_cpu_has(X86_FEATURE_XMM)) + ldmxcsr(MXCSR_DEFAULT); + + if (boot_cpu_has(X86_FEATURE_FPU)) + asm volatile ("fninit"); } EXPORT_SYMBOL_GPL(kernel_fpu_begin); diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c index c94fec268ef2..755eb26cbec0 100644 --- a/arch/x86/kernel/fpu/xstate.c +++ b/arch/x86/kernel/fpu/xstate.c @@ -1017,7 +1017,7 @@ int copy_xstate_to_kernel(void *kbuf, struct xregs_state *xsave, unsigned int of copy_part(offsetof(struct fxregs_state, st_space), 128, &xsave->i387.st_space, &kbuf, &offset_start, &count); if (header.xfeatures & XFEATURE_MASK_SSE) - copy_part(xstate_offsets[XFEATURE_MASK_SSE], 256, + copy_part(xstate_offsets[XFEATURE_SSE], 256, &xsave->i387.xmm_space, &kbuf, &offset_start, &count); /* * Fill xsave->i387.sw_reserved value for ptrace frame: diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c index 519649ddf100..fe522691ac71 100644 --- a/arch/x86/kernel/i8259.c +++ b/arch/x86/kernel/i8259.c @@ -207,7 +207,7 @@ spurious_8259A_irq: * lets ACK and report it. [once per IRQ] */ if (!(spurious_irq_mask & irqmask)) { - printk(KERN_DEBUG + printk_deferred(KERN_DEBUG "spurious 8259A interrupt: IRQ%d.\n", irq); spurious_irq_mask |= irqmask; } diff --git a/arch/x86/kernel/idt.c b/arch/x86/kernel/idt.c index 87ef69a72c52..7bb4c3cbf4dc 100644 --- a/arch/x86/kernel/idt.c +++ b/arch/x86/kernel/idt.c @@ -318,7 +318,11 @@ void __init idt_setup_apic_and_irq_gates(void) #ifdef CONFIG_X86_LOCAL_APIC for_each_clear_bit_from(i, system_vectors, NR_VECTORS) { - set_bit(i, system_vectors); + /* + * Don't set the non assigned system vectors in the + * system_vectors bitmap. Otherwise they show up in + * /proc/interrupts. 
+ */ entry = spurious_entries_start + 8 * (i - FIRST_SYSTEM_VECTOR); set_intr_gate(i, entry); } diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c index 12df3a4abfdd..6b32ab009c19 100644 --- a/arch/x86/kernel/irq_64.c +++ b/arch/x86/kernel/irq_64.c @@ -43,7 +43,7 @@ static int map_irq_stack(unsigned int cpu) pages[i] = pfn_to_page(pa >> PAGE_SHIFT); } - va = vmap(pages, IRQ_STACK_SIZE / PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL); + va = vmap(pages, IRQ_STACK_SIZE / PAGE_SIZE, VM_MAP, PAGE_KERNEL); if (!va) return -ENOMEM; diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c index 43fc13c831af..62c39baea39e 100644 --- a/arch/x86/kernel/kprobes/core.c +++ b/arch/x86/kernel/kprobes/core.c @@ -746,16 +746,11 @@ asm( NOKPROBE_SYMBOL(kretprobe_trampoline); STACK_FRAME_NON_STANDARD(kretprobe_trampoline); -static struct kprobe kretprobe_kprobe = { - .addr = (void *)kretprobe_trampoline, -}; - /* * Called from kretprobe_trampoline */ __used __visible void *trampoline_handler(struct pt_regs *regs) { - struct kprobe_ctlblk *kcb; struct kretprobe_instance *ri = NULL; struct hlist_head *head, empty_rp; struct hlist_node *tmp; @@ -765,16 +760,12 @@ __used __visible void *trampoline_handler(struct pt_regs *regs) void *frame_pointer; bool skipped = false; - preempt_disable(); - /* * Set a dummy kprobe for avoiding kretprobe recursion. * Since kretprobe never run in kprobe handler, kprobe must not * be running at this point. */ - kcb = get_kprobe_ctlblk(); - __this_cpu_write(current_kprobe, &kretprobe_kprobe); - kcb->kprobe_status = KPROBE_HIT_ACTIVE; + kprobe_busy_begin(); INIT_HLIST_HEAD(&empty_rp); kretprobe_hash_lock(current, &head, &flags); @@ -850,7 +841,7 @@ __used __visible void *trampoline_handler(struct pt_regs *regs) __this_cpu_write(current_kprobe, &ri->rp->kp); ri->ret_addr = correct_ret_addr; ri->rp->handler(ri, regs); - __this_cpu_write(current_kprobe, &kretprobe_kprobe); + __this_cpu_write(current_kprobe, &kprobe_busy); } recycle_rp_inst(ri, &empty_rp); @@ -866,8 +857,7 @@ __used __visible void *trampoline_handler(struct pt_regs *regs) kretprobe_hash_unlock(current, &flags); - __this_cpu_write(current_kprobe, NULL); - preempt_enable(); + kprobe_busy_end(); hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) { hlist_del(&ri->hlist); diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index af64519b2695..da3cc3a10d63 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -316,7 +316,7 @@ static unsigned long x86_fsgsbase_read_task(struct task_struct *task, */ mutex_lock(&task->mm->context.lock); ldt = task->mm->context.ldt; - if (unlikely(idx >= ldt->nr_entries)) + if (unlikely(!ldt || idx >= ldt->nr_entries)) base = 0; else base = get_desc_base(ldt->entries + idx); diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 9674321ce3a3..8367bd7a9a81 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -1599,14 +1599,28 @@ int native_cpu_disable(void) if (ret) return ret; - /* - * Disable the local APIC. Otherwise IPI broadcasts will reach - * it. It still responds normally to INIT, NMI, SMI, and SIPI - * messages. - */ - apic_soft_disable(); cpu_disable_common(); + /* + * Disable the local APIC. Otherwise IPI broadcasts will reach + * it. It still responds normally to INIT, NMI, SMI, and SIPI + * messages. + * + * Disabling the APIC must happen after cpu_disable_common() + * which invokes fixup_irqs(). 
+ * + * Disabling the APIC preserves already set bits in IRR, but + * an interrupt arriving after disabling the local APIC does not + * set the corresponding IRR bit. + * + * fixup_irqs() scans IRR for set bits so it can raise a not + * yet handled interrupt on the new destination CPU via an IPI + * but obviously it can't do so for IRR bits which are not set. + * IOW, interrupts arriving after disabling the local APIC will + * be lost. + */ + apic_soft_disable(); + return 0; } diff --git a/arch/x86/kernel/stacktrace.c b/arch/x86/kernel/stacktrace.c index 2d6898c2cb64..6d83b4b857e6 100644 --- a/arch/x86/kernel/stacktrace.c +++ b/arch/x86/kernel/stacktrace.c @@ -58,7 +58,6 @@ int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry, * or a page fault), which can make frame pointers * unreliable. */ - if (IS_ENABLED(CONFIG_FRAME_POINTER)) return -EINVAL; } @@ -81,10 +80,6 @@ int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry, if (unwind_error(&state)) return -EINVAL; - /* Success path for non-user tasks, i.e. kthreads and idle tasks */ - if (!(task->flags & (PF_KTHREAD | PF_IDLE))) - return -EINVAL; - return 0; } diff --git a/arch/x86/kernel/tsc_msr.c b/arch/x86/kernel/tsc_msr.c index c65adaf81384..41200706e6da 100644 --- a/arch/x86/kernel/tsc_msr.c +++ b/arch/x86/kernel/tsc_msr.c @@ -133,10 +133,15 @@ static const struct freq_desc freq_desc_ann = { .mask = 0x0f, }; -/* 24 MHz crystal? : 24 * 13 / 4 = 78 MHz */ +/* + * 24 MHz crystal? : 24 * 13 / 4 = 78 MHz + * Frequency step for Lightning Mountain SoC is fixed to 78 MHz, + * so all the frequency entries are 78000. + */ static const struct freq_desc freq_desc_lgm = { .use_msr_plat = true, - .freqs = { 78000, 78000, 78000, 78000, 78000, 78000, 78000, 78000 }, + .freqs = { 78000, 78000, 78000, 78000, 78000, 78000, 78000, 78000, + 78000, 78000, 78000, 78000, 78000, 78000, 78000, 78000 }, .mask = 0x0f, }; diff --git a/arch/x86/kernel/unwind_orc.c b/arch/x86/kernel/unwind_orc.c index aa0f39dc8129..187a86e0e753 100644 --- a/arch/x86/kernel/unwind_orc.c +++ b/arch/x86/kernel/unwind_orc.c @@ -431,8 +431,11 @@ bool unwind_next_frame(struct unwind_state *state) /* * Find the orc_entry associated with the text address. * - * Decrement call return addresses by one so they work for sibling - * calls and calls to noreturn functions. + * For a call frame (as opposed to a signal frame), state->ip points to + * the instruction after the call. That instruction's stack layout + * could be different from the call instruction's layout, for example + * if the call was to a noreturn function. So get the ORC data for the + * call instruction itself. */ orc = orc_find(state->signal ? state->ip : state->ip - 1); if (!orc) { @@ -653,6 +656,7 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task, state->sp = task->thread.sp; state->bp = READ_ONCE_NOCHECK(frame->bp); state->ip = READ_ONCE_NOCHECK(frame->ret_addr); + state->signal = (void *)state->ip == ret_from_fork; } if (get_stack_info((unsigned long *)state->sp, state->task, diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S index bac1a65a9d39..1afe211d7a7c 100644 --- a/arch/x86/kernel/vmlinux.lds.S +++ b/arch/x86/kernel/vmlinux.lds.S @@ -362,6 +362,7 @@ SECTIONS .bss : AT(ADDR(.bss) - LOAD_OFFSET) { __bss_start = .; *(.bss..page_aligned) + . = ALIGN(PAGE_SIZE); *(BSS_MAIN) BSS_DECRYPTED . 
= ALIGN(PAGE_SIZE); diff --git a/arch/x86/kvm/kvm_cache_regs.h b/arch/x86/kvm/kvm_cache_regs.h index 1cc6c47dc77e..341f58a01de0 100644 --- a/arch/x86/kvm/kvm_cache_regs.h +++ b/arch/x86/kvm/kvm_cache_regs.h @@ -7,7 +7,7 @@ #define KVM_POSSIBLE_CR0_GUEST_BITS X86_CR0_TS #define KVM_POSSIBLE_CR4_GUEST_BITS \ (X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR \ - | X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_PGE) + | X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_PGE | X86_CR4_TSD) #define BUILD_KVM_GPR_ACCESSORS(lname, uname) \ static __always_inline unsigned long kvm_##lname##_read(struct kvm_vcpu *vcpu)\ diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 5d2587005d0e..6920f1d3b66f 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -2085,7 +2085,7 @@ void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data) { struct kvm_lapic *apic = vcpu->arch.apic; - if (!lapic_in_kernel(vcpu) || apic_lvtt_oneshot(apic) || + if (!kvm_apic_present(vcpu) || apic_lvtt_oneshot(apic) || apic_lvtt_period(apic)) return; diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index a3824ae9a634..bb743f956c23 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -1819,10 +1819,10 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm, * Emulate arch specific page modification logging for the * nested hypervisor */ -int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu) +int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu, gpa_t l2_gpa) { if (kvm_x86_ops->write_log_dirty) - return kvm_x86_ops->write_log_dirty(vcpu); + return kvm_x86_ops->write_log_dirty(vcpu, l2_gpa); return 0; } @@ -2045,7 +2045,8 @@ static int kvm_handle_hva(struct kvm *kvm, unsigned long hva, return kvm_handle_hva_range(kvm, hva, hva + 1, data, handler); } -int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end) +int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end, + unsigned flags) { return kvm_handle_hva_range(kvm, start, end, 0, kvm_unmap_rmapp); } @@ -4580,7 +4581,7 @@ __reset_rsvds_bits_mask(struct kvm_vcpu *vcpu, nonleaf_bit8_rsvd | rsvd_bits(7, 7) | rsvd_bits(maxphyaddr, 51); rsvd_check->rsvd_bits_mask[0][2] = exb_bit_rsvd | - nonleaf_bit8_rsvd | gbpages_bit_rsvd | + gbpages_bit_rsvd | rsvd_bits(maxphyaddr, 51); rsvd_check->rsvd_bits_mask[0][1] = exb_bit_rsvd | rsvd_bits(maxphyaddr, 51); diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h index d55674f44a18..6f2208cf30df 100644 --- a/arch/x86/kvm/mmu.h +++ b/arch/x86/kvm/mmu.h @@ -209,7 +209,7 @@ void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn); void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn); bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm, struct kvm_memory_slot *slot, u64 gfn); -int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu); +int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu, gpa_t l2_gpa); int kvm_mmu_post_init_vm(struct kvm *kvm); void kvm_mmu_pre_destroy_vm(struct kvm *kvm); diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h index 4e3f137ffa8c..a20fc1ba607f 100644 --- a/arch/x86/kvm/paging_tmpl.h +++ b/arch/x86/kvm/paging_tmpl.h @@ -220,7 +220,7 @@ static inline unsigned FNAME(gpte_access)(u64 gpte) static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, struct guest_walker *walker, - int write_fault) + gpa_t addr, int write_fault) { unsigned level, index; pt_element_t pte, orig_pte; @@ -245,7 +245,7 @@ static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu, !(pte & 
PT_GUEST_DIRTY_MASK)) { trace_kvm_mmu_set_dirty_bit(table_gfn, index, sizeof(pte)); #if PTTYPE == PTTYPE_EPT - if (kvm_arch_write_log_dirty(vcpu)) + if (kvm_arch_write_log_dirty(vcpu, addr)) return -EINVAL; #endif pte |= PT_GUEST_DIRTY_MASK; @@ -442,7 +442,8 @@ retry_walk: (PT_GUEST_DIRTY_SHIFT - PT_GUEST_ACCESSED_SHIFT); if (unlikely(!accessed_dirty)) { - ret = FNAME(update_accessed_dirty_bits)(vcpu, mmu, walker, write_fault); + ret = FNAME(update_accessed_dirty_bits)(vcpu, mmu, walker, + addr, write_fault); if (unlikely(ret < 0)) goto error; else if (ret) diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 5fac01865a2d..a1e62dda5607 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -3913,6 +3913,8 @@ void vmx_set_constant_host_state(struct vcpu_vmx *vmx) void set_cr4_guest_host_mask(struct vcpu_vmx *vmx) { + BUILD_BUG_ON(KVM_CR4_GUEST_OWNED_BITS & ~KVM_POSSIBLE_CR4_GUEST_BITS); + vmx->vcpu.arch.cr4_guest_owned_bits = KVM_CR4_GUEST_OWNED_BITS; if (enable_ept) vmx->vcpu.arch.cr4_guest_owned_bits |= X86_CR4_PGE; @@ -5893,6 +5895,7 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu) (exit_reason != EXIT_REASON_EXCEPTION_NMI && exit_reason != EXIT_REASON_EPT_VIOLATION && exit_reason != EXIT_REASON_PML_FULL && + exit_reason != EXIT_REASON_APIC_ACCESS && exit_reason != EXIT_REASON_TASK_SWITCH)) { vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_DELIVERY_EV; @@ -6427,23 +6430,6 @@ static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx) msrs[i].host, false); } -static void atomic_switch_umwait_control_msr(struct vcpu_vmx *vmx) -{ - u32 host_umwait_control; - - if (!vmx_has_waitpkg(vmx)) - return; - - host_umwait_control = get_umwait_control_msr(); - - if (vmx->msr_ia32_umwait_control != host_umwait_control) - add_atomic_switch_msr(vmx, MSR_IA32_UMWAIT_CONTROL, - vmx->msr_ia32_umwait_control, - host_umwait_control, false); - else - clear_atomic_switch_msr(vmx, MSR_IA32_UMWAIT_CONTROL); -} - static void vmx_update_hv_timer(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); @@ -6533,7 +6519,6 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu) pt_guest_enter(vmx); atomic_switch_perf_msrs(vmx); - atomic_switch_umwait_control_msr(vmx); if (enable_preemption_timer) vmx_update_hv_timer(vcpu); @@ -7272,11 +7257,11 @@ static void vmx_flush_log_dirty(struct kvm *kvm) kvm_flush_pml_buffers(kvm); } -static int vmx_write_pml_buffer(struct kvm_vcpu *vcpu) +static int vmx_write_pml_buffer(struct kvm_vcpu *vcpu, gpa_t gpa) { struct vmcs12 *vmcs12; struct vcpu_vmx *vmx = to_vmx(vcpu); - gpa_t gpa, dst; + gpa_t dst; if (is_guest_mode(vcpu)) { WARN_ON_ONCE(vmx->nested.pml_full); @@ -7295,7 +7280,7 @@ static int vmx_write_pml_buffer(struct kvm_vcpu *vcpu) return 1; } - gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS) & ~0xFFFull; + gpa &= ~0xFFFull; dst = vmcs12->pml_address + sizeof(u64) * vmcs12->guest_pml_index; if (kvm_write_guest_page(vcpu->kvm, gpa_to_gfn(dst), &gpa, diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h index 295c5f83842e..a1919ec7fd10 100644 --- a/arch/x86/kvm/vmx/vmx.h +++ b/arch/x86/kvm/vmx/vmx.h @@ -14,8 +14,6 @@ extern const u32 vmx_msr_index[]; extern u64 host_efer; -extern u32 get_umwait_control_msr(void); - #define MSR_TYPE_R 1 #define MSR_TYPE_W 2 #define MSR_TYPE_RW 3 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index fff279fb173b..8920ee7b2881 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -972,7 +972,7 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) { 
unsigned long old_cr4 = kvm_read_cr4(vcpu); unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE | - X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE; + X86_CR4_SMEP; if (kvm_valid_cr4(vcpu, cr4)) return 1; @@ -980,6 +980,8 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) if (is_long_mode(vcpu)) { if (!(cr4 & X86_CR4_PAE)) return 1; + if ((cr4 ^ old_cr4) & X86_CR4_LA57) + return 1; } else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE) && ((cr4 ^ old_cr4) & pdptr_bits) && !load_pdptrs(vcpu, vcpu->arch.walk_mmu, @@ -2753,7 +2755,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) return kvm_mtrr_set_msr(vcpu, msr, data); case MSR_IA32_APICBASE: return kvm_set_apic_base(vcpu, msr_info); - case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff: + case APIC_BASE_MSR ... APIC_BASE_MSR + 0xff: return kvm_x2apic_msr_write(vcpu, msr, data); case MSR_IA32_TSCDEADLINE: kvm_set_lapic_tscdeadline_msr(vcpu, data); @@ -3057,7 +3059,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) case MSR_IA32_APICBASE: msr_info->data = kvm_get_apic_base(vcpu); break; - case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff: + case APIC_BASE_MSR ... APIC_BASE_MSR + 0xff: return kvm_x2apic_msr_read(vcpu, msr_info->index, &msr_info->data); break; case MSR_IA32_TSCDEADLINE: diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c index fff28c6f73a2..b0dfac3d3df7 100644 --- a/arch/x86/lib/usercopy_64.c +++ b/arch/x86/lib/usercopy_64.c @@ -24,6 +24,7 @@ unsigned long __clear_user(void __user *addr, unsigned long size) asm volatile( " testq %[size8],%[size8]\n" " jz 4f\n" + " .align 16\n" "0: movq $0,(%[dst])\n" " addq $8,%[dst]\n" " decl %%ecx ; jnz 0b\n" diff --git a/arch/x86/math-emu/wm_sqrt.S b/arch/x86/math-emu/wm_sqrt.S index f031c0e19356..515cdee90df7 100644 --- a/arch/x86/math-emu/wm_sqrt.S +++ b/arch/x86/math-emu/wm_sqrt.S @@ -209,7 +209,7 @@ sqrt_stage_2_finish: #ifdef PARANOID /* It should be possible to get here only if the arg is ffff....ffff */ - cmp $0xffffffff,FPU_fsqrt_arg_1 + cmpl $0xffffffff,FPU_fsqrt_arg_1 jnz sqrt_stage_2_error #endif /* PARANOID */ diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c index fd10d91a6115..af352e228fa2 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c @@ -121,8 +121,6 @@ __ref void *alloc_low_pages(unsigned int num) } else { pfn = pgt_buf_end; pgt_buf_end += num; - printk(KERN_DEBUG "BRK [%#010lx, %#010lx] PGTABLE\n", - pfn << PAGE_SHIFT, (pgt_buf_end << PAGE_SHIFT) - 1); } for (i = 0; i < num; i++) { diff --git a/arch/x86/mm/numa_emulation.c b/arch/x86/mm/numa_emulation.c index abffa0be80da..87282258d5be 100644 --- a/arch/x86/mm/numa_emulation.c +++ b/arch/x86/mm/numa_emulation.c @@ -321,7 +321,7 @@ static int __init split_nodes_size_interleave(struct numa_meminfo *ei, u64 addr, u64 max_addr, u64 size) { return split_nodes_size_interleave_uniform(ei, pi, addr, max_addr, size, - 0, NULL, NUMA_NO_NODE); + 0, NULL, 0); } int __init setup_emu2phys_nid(int *dfl_phys_nid) diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c index 91220cc25854..5c11ae66b5d8 100644 --- a/arch/x86/pci/xen.c +++ b/arch/x86/pci/xen.c @@ -26,6 +26,7 @@ #include #include #include +#include #include static int xen_pcifront_enable_irq(struct pci_dev *dev) diff --git a/arch/x86/platform/uv/uv_irq.c b/arch/x86/platform/uv/uv_irq.c index fc13cbbb2dce..abb6075397f0 100644 --- a/arch/x86/platform/uv/uv_irq.c +++ b/arch/x86/platform/uv/uv_irq.c @@ -167,9 +167,10 @@ static struct irq_domain *uv_get_irq_domain(void) goto out; uv_domain = 
irq_domain_create_tree(fn, &uv_domain_ops, NULL); - irq_domain_free_fwnode(fn); if (uv_domain) uv_domain->parent = x86_vector_domain; + else + irq_domain_free_fwnode(fn); out: mutex_unlock(&uv_lock); diff --git a/arch/x86/purgatory/Makefile b/arch/x86/purgatory/Makefile index fb4ee5444379..9733d1cc791d 100644 --- a/arch/x86/purgatory/Makefile +++ b/arch/x86/purgatory/Makefile @@ -17,7 +17,10 @@ CFLAGS_sha256.o := -D__DISABLE_EXPORTS LDFLAGS_purgatory.ro := -e purgatory_start -r --no-undefined -nostdlib -z nodefaultlib targets += purgatory.ro +# Sanitizer, etc. runtimes are unavailable and cannot be linked here. +GCOV_PROFILE := n KASAN_SANITIZE := n +UBSAN_SANITIZE := n KCOV_INSTRUMENT := n # These are adjustments to the compiler flags used for objects that @@ -25,7 +28,7 @@ KCOV_INSTRUMENT := n PURGATORY_CFLAGS_REMOVE := -mcmodel=kernel PURGATORY_CFLAGS := -mcmodel=large -ffreestanding -fno-zero-initialized-in-bss -PURGATORY_CFLAGS += $(DISABLE_STACKLEAK_PLUGIN) +PURGATORY_CFLAGS += $(DISABLE_STACKLEAK_PLUGIN) -DDISABLE_BRANCH_PROFILING # Default KBUILD_CFLAGS can have -pg option set when FTRACE is enabled. That # in turn leaves some undefined symbols like __fentry__ in purgatory and not diff --git a/arch/xtensa/include/asm/thread_info.h b/arch/xtensa/include/asm/thread_info.h index f092cc3f4e66..956d4d47c6cd 100644 --- a/arch/xtensa/include/asm/thread_info.h +++ b/arch/xtensa/include/asm/thread_info.h @@ -55,6 +55,10 @@ struct thread_info { mm_segment_t addr_limit; /* thread address space */ unsigned long cpenable; +#if XCHAL_HAVE_EXCLUSIVE + /* result of the most recent exclusive store */ + unsigned long atomctl8; +#endif /* Allocate storage for extra user states and coprocessor states. */ #if XTENSA_HAVE_COPROCESSORS diff --git a/arch/xtensa/kernel/asm-offsets.c b/arch/xtensa/kernel/asm-offsets.c index 33a257b33723..dc5c83cad9be 100644 --- a/arch/xtensa/kernel/asm-offsets.c +++ b/arch/xtensa/kernel/asm-offsets.c @@ -93,6 +93,9 @@ int main(void) DEFINE(THREAD_RA, offsetof (struct task_struct, thread.ra)); DEFINE(THREAD_SP, offsetof (struct task_struct, thread.sp)); DEFINE(THREAD_CPENABLE, offsetof (struct thread_info, cpenable)); +#if XCHAL_HAVE_EXCLUSIVE + DEFINE(THREAD_ATOMCTL8, offsetof (struct thread_info, atomctl8)); +#endif #if XTENSA_HAVE_COPROCESSORS DEFINE(THREAD_XTREGS_CP0, offsetof(struct thread_info, xtregs_cp.cp0)); DEFINE(THREAD_XTREGS_CP1, offsetof(struct thread_info, xtregs_cp.cp1)); diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S index 9e3676879168..59671603c9c6 100644 --- a/arch/xtensa/kernel/entry.S +++ b/arch/xtensa/kernel/entry.S @@ -374,6 +374,11 @@ common_exception: s32i a2, a1, PT_LCOUNT #endif +#if XCHAL_HAVE_EXCLUSIVE + /* Clear exclusive access monitor set by interrupted code */ + clrex +#endif + /* It is now save to restore the EXC_TABLE_FIXUP variable. */ rsr a2, exccause @@ -2024,6 +2029,12 @@ ENTRY(_switch_to) s32i a3, a4, THREAD_CPENABLE #endif +#if XCHAL_HAVE_EXCLUSIVE + l32i a3, a5, THREAD_ATOMCTL8 + getex a3 + s32i a3, a4, THREAD_ATOMCTL8 +#endif + /* Flush register file. 
*/ spill_registers_kernel diff --git a/arch/xtensa/kernel/perf_event.c b/arch/xtensa/kernel/perf_event.c index 9bae79f70301..86c9ba963155 100644 --- a/arch/xtensa/kernel/perf_event.c +++ b/arch/xtensa/kernel/perf_event.c @@ -401,7 +401,7 @@ static struct pmu xtensa_pmu = { .read = xtensa_pmu_read, }; -static int xtensa_pmu_setup(int cpu) +static int xtensa_pmu_setup(unsigned int cpu) { unsigned i; diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c index e0e1e1892b86..d08172138369 100644 --- a/arch/xtensa/kernel/setup.c +++ b/arch/xtensa/kernel/setup.c @@ -716,7 +716,8 @@ c_start(struct seq_file *f, loff_t *pos) static void * c_next(struct seq_file *f, void *v, loff_t *pos) { - return NULL; + ++*pos; + return c_start(f, pos); } static void diff --git a/arch/xtensa/kernel/xtensa_ksyms.c b/arch/xtensa/kernel/xtensa_ksyms.c index 4092555828b1..24cf6972eace 100644 --- a/arch/xtensa/kernel/xtensa_ksyms.c +++ b/arch/xtensa/kernel/xtensa_ksyms.c @@ -87,13 +87,13 @@ void __xtensa_libgcc_window_spill(void) } EXPORT_SYMBOL(__xtensa_libgcc_window_spill); -unsigned long __sync_fetch_and_and_4(unsigned long *p, unsigned long v) +unsigned int __sync_fetch_and_and_4(volatile void *p, unsigned int v) { BUG(); } EXPORT_SYMBOL(__sync_fetch_and_and_4); -unsigned long __sync_fetch_and_or_4(unsigned long *p, unsigned long v) +unsigned int __sync_fetch_and_or_4(volatile void *p, unsigned int v) { BUG(); } diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c index 12b707a4e52f..342a1cfa48c5 100644 --- a/block/bfq-cgroup.c +++ b/block/bfq-cgroup.c @@ -332,7 +332,7 @@ static void bfqg_put(struct bfq_group *bfqg) kfree(bfqg); } -void bfqg_and_blkg_get(struct bfq_group *bfqg) +static void bfqg_and_blkg_get(struct bfq_group *bfqg) { /* see comments in bfq_bic_update_cgroup for why refcounting bfqg */ bfqg_get(bfqg); diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c index 88497bff1135..ba32adaeefdd 100644 --- a/block/bfq-iosched.c +++ b/block/bfq-iosched.c @@ -5890,18 +5890,6 @@ static void bfq_finish_requeue_request(struct request *rq) struct bfq_queue *bfqq = RQ_BFQQ(rq); struct bfq_data *bfqd; - /* - * Requeue and finish hooks are invoked in blk-mq without - * checking whether the involved request is actually still - * referenced in the scheduler. To handle this fact, the - * following two checks make this function exit in case of - * spurious invocations, for which there is nothing to do. - * - * First, check whether rq has nothing to do with an elevator. 
- */ - if (unlikely(!(rq->rq_flags & RQF_ELVPRIV))) - return; - /* * rq either is not associated with any icq, or is an already * requeued request that has not (yet) been re-inserted into diff --git a/block/bfq-iosched.h b/block/bfq-iosched.h index c0232975075d..de98fdfe9ea1 100644 --- a/block/bfq-iosched.h +++ b/block/bfq-iosched.h @@ -980,7 +980,6 @@ struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd, struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg); struct bfq_group *bfqq_group(struct bfq_queue *bfqq); struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node); -void bfqg_and_blkg_get(struct bfq_group *bfqg); void bfqg_and_blkg_put(struct bfq_group *bfqg); #ifdef CONFIG_BFQ_GROUP_IOSCHED diff --git a/block/bfq-wf2q.c b/block/bfq-wf2q.c index 44079147e396..05f0bf4a1144 100644 --- a/block/bfq-wf2q.c +++ b/block/bfq-wf2q.c @@ -536,9 +536,7 @@ static void bfq_get_entity(struct bfq_entity *entity) bfqq->ref++; bfq_log_bfqq(bfqq->bfqd, bfqq, "get_entity: %p %d", bfqq, bfqq->ref); - } else - bfqg_and_blkg_get(container_of(entity, struct bfq_group, - entity)); + } } /** @@ -652,14 +650,8 @@ static void bfq_forget_entity(struct bfq_service_tree *st, entity->on_st = false; st->wsum -= entity->weight; - if (is_in_service) - return; - - if (bfqq) + if (bfqq && !is_in_service) bfq_put_queue(bfqq); - else - bfqg_and_blkg_put(container_of(entity, struct bfq_group, - entity)); } /** diff --git a/block/bio-integrity.c b/block/bio-integrity.c index bf62c25cde8f..c9dc2b17ce25 100644 --- a/block/bio-integrity.c +++ b/block/bio-integrity.c @@ -24,6 +24,18 @@ void blk_flush_integrity(void) flush_workqueue(kintegrityd_wq); } +void __bio_integrity_free(struct bio_set *bs, struct bio_integrity_payload *bip) +{ + if (bs && mempool_initialized(&bs->bio_integrity_pool)) { + if (bip->bip_vec) + bvec_free(&bs->bvec_integrity_pool, bip->bip_vec, + bip->bip_slab); + mempool_free(bip, &bs->bio_integrity_pool); + } else { + kfree(bip); + } +} + /** * bio_integrity_alloc - Allocate integrity payload and attach it to bio * @bio: bio to attach integrity metadata to @@ -75,7 +87,7 @@ struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio, return bip; err: - mempool_free(bip, &bs->bio_integrity_pool); + __bio_integrity_free(bs, bip); return ERR_PTR(-ENOMEM); } EXPORT_SYMBOL(bio_integrity_alloc); @@ -96,14 +108,7 @@ void bio_integrity_free(struct bio *bio) kfree(page_address(bip->bip_vec->bv_page) + bip->bip_vec->bv_offset); - if (bs && mempool_initialized(&bs->bio_integrity_pool)) { - bvec_free(&bs->bvec_integrity_pool, bip->bip_vec, bip->bip_slab); - - mempool_free(bip, &bs->bio_integrity_pool); - } else { - kfree(bip); - } - + __bio_integrity_free(bs, bip); bio->bi_integrity = NULL; bio->bi_opf &= ~REQ_INTEGRITY; } @@ -278,7 +283,6 @@ bool bio_integrity_prep(struct bio *bio) if (ret == 0) { printk(KERN_ERR "could not attach integrity payload\n"); - kfree(buf); status = BLK_STS_RESOURCE; goto err_end_io; } diff --git a/block/bio.c b/block/bio.c index 94d697217887..f07739300dfe 100644 --- a/block/bio.c +++ b/block/bio.c @@ -683,8 +683,8 @@ static inline bool page_is_mergeable(const struct bio_vec *bv, struct page *page, unsigned int len, unsigned int off, bool *same_page) { - phys_addr_t vec_end_addr = page_to_phys(bv->bv_page) + - bv->bv_offset + bv->bv_len - 1; + size_t bv_end = bv->bv_offset + bv->bv_len; + phys_addr_t vec_end_addr = page_to_phys(bv->bv_page) + bv_end - 1; phys_addr_t page_addr = page_to_phys(page); if (vec_end_addr + 1 != page_addr + off) @@ -693,9 +693,9 @@ 
static inline bool page_is_mergeable(const struct bio_vec *bv, return false; *same_page = ((vec_end_addr & PAGE_MASK) == page_addr); - if (!*same_page && pfn_to_page(PFN_DOWN(vec_end_addr)) + 1 != page) - return false; - return true; + if (*same_page) + return true; + return (bv->bv_page + bv_end / PAGE_SIZE) == (page + off / PAGE_SIZE); } static bool bio_try_merge_pc_page(struct request_queue *q, struct bio *bio, @@ -807,8 +807,10 @@ bool __bio_try_merge_page(struct bio *bio, struct page *page, struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1]; if (page_is_mergeable(bv, page, len, off, same_page)) { - if (bio->bi_iter.bi_size > UINT_MAX - len) + if (bio->bi_iter.bi_size > UINT_MAX - len) { + *same_page = false; return false; + } bv->bv_len += len; bio->bi_iter.bi_size += len; return true; diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index 1eb8895be4c6..0c7addcd1985 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c @@ -1219,13 +1219,15 @@ int blkcg_init_queue(struct request_queue *q) if (preloaded) radix_tree_preload_end(); - ret = blk_iolatency_init(q); - if (ret) - goto err_destroy_all; - ret = blk_throtl_init(q); if (ret) goto err_destroy_all; + + ret = blk_iolatency_init(q); + if (ret) { + blk_throtl_exit(q); + goto err_destroy_all; + } return 0; err_destroy_all: diff --git a/block/blk-core.c b/block/blk-core.c index d5e668ec751b..ca6b67735686 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -502,6 +502,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id) goto fail_stats; q->backing_dev_info->ra_pages = VM_READAHEAD_PAGES; + q->backing_dev_info->io_pages = VM_READAHEAD_PAGES; q->backing_dev_info->capabilities = BDI_CAP_CGROUP_WRITEBACK; q->backing_dev_info->name = "block"; q->node = node_id; diff --git a/block/blk-iocost.c b/block/blk-iocost.c index d083f7704082..ef287c33d6d9 100644 --- a/block/blk-iocost.c +++ b/block/blk-iocost.c @@ -1377,7 +1377,7 @@ static void ioc_timer_fn(struct timer_list *timer) * should have woken up in the last period and expire idle iocgs. */ list_for_each_entry_safe(iocg, tiocg, &ioc->active_iocgs, active_list) { - if (!waitqueue_active(&iocg->waitq) && iocg->abs_vdebt && + if (!waitqueue_active(&iocg->waitq) && !iocg->abs_vdebt && !iocg_is_idle(iocg)) continue; @@ -1546,19 +1546,39 @@ skip_surplus_transfers: if (rq_wait_pct > RQ_WAIT_BUSY_PCT || missed_ppm[READ] > ppm_rthr || missed_ppm[WRITE] > ppm_wthr) { + /* clearly missing QoS targets, slow down vrate */ ioc->busy_level = max(ioc->busy_level, 0); ioc->busy_level++; } else if (rq_wait_pct <= RQ_WAIT_BUSY_PCT * UNBUSY_THR_PCT / 100 && missed_ppm[READ] <= ppm_rthr * UNBUSY_THR_PCT / 100 && missed_ppm[WRITE] <= ppm_wthr * UNBUSY_THR_PCT / 100) { - /* take action iff there is contention */ - if (nr_shortages && !nr_lagging) { + /* QoS targets are being met with >25% margin */ + if (nr_shortages) { + /* + * We're throttling while the device has spare + * capacity. If vrate was being slowed down, stop. + */ ioc->busy_level = min(ioc->busy_level, 0); - /* redistribute surpluses first */ - if (!nr_surpluses) + + /* + * If there are IOs spanning multiple periods, wait + * them out before pushing the device harder. If + * there are surpluses, let redistribution work it + * out first. + */ + if (!nr_lagging && !nr_surpluses) ioc->busy_level--; + } else { + /* + * Nobody is being throttled and the users aren't + * issuing enough IOs to saturate the device. We + * simply don't know how close the device is to + * saturation. Coast. 
+ */ + ioc->busy_level = 0; } } else { + /* inside the hysteresis margin, we're good */ ioc->busy_level = 0; } @@ -2054,14 +2074,15 @@ static void ioc_pd_free(struct blkg_policy_data *pd) { struct ioc_gq *iocg = pd_to_iocg(pd); struct ioc *ioc = iocg->ioc; + unsigned long flags; if (ioc) { - spin_lock(&ioc->lock); + spin_lock_irqsave(&ioc->lock, flags); if (!list_empty(&iocg->active_list)) { propagate_active_weight(iocg, 0, 0); list_del_init(&iocg->active_list); } - spin_unlock(&ioc->lock); + spin_unlock_irqrestore(&ioc->lock, flags); hrtimer_cancel(&iocg->waitq_timer); hrtimer_cancel(&iocg->delay_timer); diff --git a/block/blk-merge.c b/block/blk-merge.c index 48e6725b32ee..86c4c1ef8742 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c @@ -154,20 +154,23 @@ static inline unsigned get_max_io_size(struct request_queue *q, if (max_sectors > start_offset) return max_sectors - start_offset; - return sectors & (lbs - 1); + return sectors & ~(lbs - 1); } -static unsigned get_max_segment_size(const struct request_queue *q, - unsigned offset) +static inline unsigned get_max_segment_size(const struct request_queue *q, + struct page *start_page, + unsigned long offset) { unsigned long mask = queue_segment_boundary(q); - /* default segment boundary mask means no boundary limit */ - if (mask == BLK_SEG_BOUNDARY_MASK) - return queue_max_segment_size(q); + offset = mask & (page_to_phys(start_page) + offset); - return min_t(unsigned long, mask - (mask & offset) + 1, - queue_max_segment_size(q)); + /* + * overflow may be triggered in case of zero page physical address + * on 32bit arch, use queue's max segment size when that happens. + */ + return min_not_zero(mask - offset + 1, + (unsigned long)queue_max_segment_size(q)); } /** @@ -201,7 +204,8 @@ static bool bvec_split_segs(const struct request_queue *q, unsigned seg_size = 0; while (len && *nsegs < max_segs) { - seg_size = get_max_segment_size(q, bv->bv_offset + total_len); + seg_size = get_max_segment_size(q, bv->bv_page, + bv->bv_offset + total_len); seg_size = min(seg_size, len); (*nsegs)++; @@ -404,7 +408,8 @@ static unsigned blk_bvec_map_sg(struct request_queue *q, while (nbytes > 0) { unsigned offset = bvec->bv_offset + total; - unsigned len = min(get_max_segment_size(q, offset), nbytes); + unsigned len = min(get_max_segment_size(q, bvec->bv_page, + offset), nbytes); struct page *page = bvec->bv_page; /* @@ -548,10 +553,17 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq, } EXPORT_SYMBOL(blk_rq_map_sg); +static inline unsigned int blk_rq_get_max_segments(struct request *rq) +{ + if (req_op(rq) == REQ_OP_DISCARD) + return queue_max_discard_segments(rq->q); + return queue_max_segments(rq->q); +} + static inline int ll_new_hw_segment(struct request *req, struct bio *bio, unsigned int nr_phys_segs) { - if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(req->q)) + if (req->nr_phys_segments + nr_phys_segs > blk_rq_get_max_segments(req)) goto no_merge; if (blk_integrity_merge_bio(req->q, req, bio) == false) @@ -635,7 +647,7 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req, return 0; total_phys_segments = req->nr_phys_segments + next->nr_phys_segments; - if (total_phys_segments > queue_max_segments(q)) + if (total_phys_segments > blk_rq_get_max_segments(req)) return 0; if (blk_integrity_merge_rq(q, req, next) == false) diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c index b3f2ba483992..121f4c1e0697 100644 --- a/block/blk-mq-debugfs.c +++ b/block/blk-mq-debugfs.c @@ -125,6 +125,9
@@ static const char *const blk_queue_flag_name[] = { QUEUE_FLAG_NAME(REGISTERED), QUEUE_FLAG_NAME(SCSI_PASSTHROUGH), QUEUE_FLAG_NAME(QUIESCED), + QUEUE_FLAG_NAME(PCI_P2PDMA), + QUEUE_FLAG_NAME(ZONE_RESETALL), + QUEUE_FLAG_NAME(RQ_ALLOC_TIME), }; #undef QUEUE_FLAG_NAME diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c index 74cedea56034..7620734d5542 100644 --- a/block/blk-mq-sched.c +++ b/block/blk-mq-sched.c @@ -77,6 +77,15 @@ void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx) return; clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state); + /* + * Order clearing SCHED_RESTART and list_empty_careful(&hctx->dispatch) + * in blk_mq_run_hw_queue(). Its pair is the barrier in + * blk_mq_dispatch_rq_list(). So dispatch code won't see SCHED_RESTART, + * meantime new request added to hctx->dispatch is missed to check in + * blk_mq_run_hw_queue(). + */ + smp_mb(); + blk_mq_run_hw_queue(hctx, true); } diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h index 126021fc3a11..e81ca1bf6e10 100644 --- a/block/blk-mq-sched.h +++ b/block/blk-mq-sched.h @@ -66,7 +66,7 @@ static inline void blk_mq_sched_requeue_request(struct request *rq) struct request_queue *q = rq->q; struct elevator_queue *e = q->elevator; - if (e && e->type->ops.requeue_request) + if ((rq->rq_flags & RQF_ELVPRIV) && e && e->type->ops.requeue_request) e->type->ops.requeue_request(rq); } diff --git a/block/blk-mq.c b/block/blk-mq.c index 757c0fd9f0cc..b748d1e63f9c 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -829,10 +829,10 @@ static bool blk_mq_rq_inflight(struct blk_mq_hw_ctx *hctx, struct request *rq, void *priv, bool reserved) { /* - * If we find a request that is inflight and the queue matches, + * If we find a request that isn't idle and the queue matches, * we know the queue is busy. Return false to stop the iteration. */ - if (rq->state == MQ_RQ_IN_FLIGHT && rq->q == hctx->queue) { + if (blk_mq_request_started(rq) && rq->q == hctx->queue) { bool *busy = priv; *busy = true; @@ -1318,6 +1318,15 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list, list_splice_tail_init(list, &hctx->dispatch); spin_unlock(&hctx->lock); + /* + * Order adding requests to hctx->dispatch and checking + * SCHED_RESTART flag. The pair of this smp_mb() is the one + * in blk_mq_sched_restart(). Avoid restart code path to + * miss the new added requests to hctx->dispatch, meantime + * SCHED_RESTART is observed here. + */ + smp_mb(); + /* * If SCHED_RESTART was set by the caller of this function and * it is no longer set that means that it was cleared by another @@ -1869,7 +1878,8 @@ insert: if (bypass_insert) return BLK_STS_RESOURCE; - blk_mq_request_bypass_insert(rq, false, run_queue); + blk_mq_sched_insert_request(rq, false, run_queue, false); + return BLK_STS_OK; } @@ -2493,18 +2503,6 @@ static void blk_mq_map_swqueue(struct request_queue *q) * If the cpu isn't present, the cpu is mapped to first hctx. */ for_each_possible_cpu(i) { - hctx_idx = set->map[HCTX_TYPE_DEFAULT].mq_map[i]; - /* unmapped hw queue can be remapped after CPU topo changed */ - if (!set->tags[hctx_idx] && - !__blk_mq_alloc_rq_map(set, hctx_idx)) { - /* - * If tags initialization fail for some hctx, - * that hctx won't be brought online. 
In this - * case, remap the current ctx to hctx[0] which - * is guaranteed to always have tags allocated - */ - set->map[HCTX_TYPE_DEFAULT].mq_map[i] = 0; - } ctx = per_cpu_ptr(q->queue_ctx, i); for (j = 0; j < set->nr_maps; j++) { @@ -2513,6 +2511,18 @@ static void blk_mq_map_swqueue(struct request_queue *q) HCTX_TYPE_DEFAULT, i); continue; } + hctx_idx = set->map[j].mq_map[i]; + /* unmapped hw queue can be remapped after CPU topo changed */ + if (!set->tags[hctx_idx] && + !__blk_mq_alloc_rq_map(set, hctx_idx)) { + /* + * If tags initialization fail for some hctx, + * that hctx won't be brought online. In this + * case, remap the current ctx to hctx[0] which + * is guaranteed to always have tags allocated + */ + set->map[j].mq_map[i] = 0; + } hctx = blk_mq_map_queue_type(q, j, i); ctx->hctxs[j] = hctx; @@ -3279,7 +3289,9 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, if (set->nr_maps == 1 && nr_hw_queues > nr_cpu_ids) nr_hw_queues = nr_cpu_ids; - if (nr_hw_queues < 1 || nr_hw_queues == set->nr_hw_queues) + if (nr_hw_queues < 1) + return; + if (set->nr_maps == 1 && nr_hw_queues == set->nr_hw_queues) return; list_for_each_entry(q, &set->tag_list, tag_set_list) @@ -3304,8 +3316,8 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, prev_nr_hw_queues = set->nr_hw_queues; set->nr_hw_queues = nr_hw_queues; - blk_mq_update_queue_map(set); fallback: + blk_mq_update_queue_map(set); list_for_each_entry(q, &set->tag_list, tag_set_list) { blk_mq_realloc_hw_ctxs(set, q); if (q->nr_hw_queues != set->nr_hw_queues) { diff --git a/crypto/af_alg.c b/crypto/af_alg.c index 3d8e53010cda..1d4b0157ee5d 100644 --- a/crypto/af_alg.c +++ b/crypto/af_alg.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include @@ -128,21 +129,15 @@ EXPORT_SYMBOL_GPL(af_alg_release); void af_alg_release_parent(struct sock *sk) { struct alg_sock *ask = alg_sk(sk); - unsigned int nokey = ask->nokey_refcnt; - bool last = nokey && !ask->refcnt; + unsigned int nokey = atomic_read(&ask->nokey_refcnt); sk = ask->parent; ask = alg_sk(sk); - local_bh_disable(); - bh_lock_sock(sk); - ask->nokey_refcnt -= nokey; - if (!last) - last = !--ask->refcnt; - bh_unlock_sock(sk); - local_bh_enable(); + if (nokey) + atomic_dec(&ask->nokey_refcnt); - if (last) + if (atomic_dec_and_test(&ask->refcnt)) sock_put(sk); } EXPORT_SYMBOL_GPL(af_alg_release_parent); @@ -187,7 +182,7 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) err = -EBUSY; lock_sock(sk); - if (ask->refcnt | ask->nokey_refcnt) + if (atomic_read(&ask->refcnt)) goto unlock; swap(ask->type, type); @@ -236,7 +231,7 @@ static int alg_setsockopt(struct socket *sock, int level, int optname, int err = -EBUSY; lock_sock(sk); - if (ask->refcnt) + if (atomic_read(&ask->refcnt) != atomic_read(&ask->nokey_refcnt)) goto unlock; type = ask->type; @@ -301,12 +296,14 @@ int af_alg_accept(struct sock *sk, struct socket *newsock, bool kern) if (err) goto unlock; - if (nokey || !ask->refcnt++) + if (atomic_inc_return_relaxed(&ask->refcnt) == 1) sock_hold(sk); - ask->nokey_refcnt += nokey; + if (nokey) { + atomic_inc(&ask->nokey_refcnt); + atomic_set(&alg_sk(sk2)->nokey_refcnt, 1); + } alg_sk(sk2)->parent = sk; alg_sk(sk2)->type = type; - alg_sk(sk2)->nokey_refcnt = nokey; newsock->ops = type->ops; newsock->state = SS_CONNECTED; @@ -639,6 +636,7 @@ void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst, if (!ctx->used) ctx->merge = 0; + ctx->init = ctx->more; } EXPORT_SYMBOL_GPL(af_alg_pull_tsgl); 
@@ -738,9 +736,10 @@ EXPORT_SYMBOL_GPL(af_alg_wmem_wakeup); * * @sk socket of connection to user space * @flags If MSG_DONTWAIT is set, then only report if function would sleep + * @min Set to minimum request size if partial requests are allowed. * @return 0 when writable memory is available, < 0 upon error */ -int af_alg_wait_for_data(struct sock *sk, unsigned flags) +int af_alg_wait_for_data(struct sock *sk, unsigned flags, unsigned min) { DEFINE_WAIT_FUNC(wait, woken_wake_function); struct alg_sock *ask = alg_sk(sk); @@ -758,7 +757,9 @@ int af_alg_wait_for_data(struct sock *sk, unsigned flags) if (signal_pending(current)) break; timeout = MAX_SCHEDULE_TIMEOUT; - if (sk_wait_event(sk, &timeout, (ctx->used || !ctx->more), + if (sk_wait_event(sk, &timeout, + ctx->init && (!ctx->more || + (min && ctx->used >= min)), &wait)) { err = 0; break; @@ -847,10 +848,17 @@ int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size, } lock_sock(sk); - if (!ctx->more && ctx->used) { - err = -EINVAL; - goto unlock; + if (ctx->init && !ctx->more) { + if (ctx->used) { + err = -EINVAL; + goto unlock; + } + + pr_info_once( + "%s sent an empty control message without MSG_MORE.\n", + current->comm); } + ctx->init = true; if (init) { ctx->enc = enc; diff --git a/crypto/algboss.c b/crypto/algboss.c index a62149d6c839..2d41e67532c0 100644 --- a/crypto/algboss.c +++ b/crypto/algboss.c @@ -188,8 +188,6 @@ static int cryptomgr_schedule_probe(struct crypto_larval *larval) if (IS_ERR(thread)) goto err_put_larval; - wait_for_completion_interruptible(&larval->completion); - return NOTIFY_STOP; err_put_larval: diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c index eb1910b6d434..43c6aa784858 100644 --- a/crypto/algif_aead.c +++ b/crypto/algif_aead.c @@ -106,8 +106,8 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg, size_t usedpages = 0; /* [in] RX bufs to be used from user */ size_t processed = 0; /* [in] TX bufs to be consumed */ - if (!ctx->used) { - err = af_alg_wait_for_data(sk, flags); + if (!ctx->init || ctx->more) { + err = af_alg_wait_for_data(sk, flags, 0); if (err) return err; } @@ -384,7 +384,7 @@ static int aead_check_key(struct socket *sock) struct alg_sock *ask = alg_sk(sk); lock_sock(sk); - if (ask->refcnt) + if (!atomic_read(&ask->nokey_refcnt)) goto unlock_child; psk = ask->parent; @@ -396,11 +396,8 @@ static int aead_check_key(struct socket *sock) if (crypto_aead_get_flags(tfm->aead) & CRYPTO_TFM_NEED_KEY) goto unlock; - if (!pask->refcnt++) - sock_hold(psk); - - ask->refcnt = 1; - sock_put(psk); + atomic_dec(&pask->nokey_refcnt); + atomic_set(&ask->nokey_refcnt, 0); err = 0; @@ -561,12 +558,6 @@ static int aead_accept_parent_nokey(void *private, struct sock *sk) INIT_LIST_HEAD(&ctx->tsgl_list); ctx->len = len; - ctx->used = 0; - atomic_set(&ctx->rcvused, 0); - ctx->more = 0; - ctx->merge = 0; - ctx->enc = 0; - ctx->aead_assoclen = 0; crypto_init_wait(&ctx->wait); ask->private = ctx; diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c index 178f4cd75ef1..8673ac8828e9 100644 --- a/crypto/algif_hash.c +++ b/crypto/algif_hash.c @@ -301,7 +301,7 @@ static int hash_check_key(struct socket *sock) struct alg_sock *ask = alg_sk(sk); lock_sock(sk); - if (ask->refcnt) + if (!atomic_read(&ask->nokey_refcnt)) goto unlock_child; psk = ask->parent; @@ -313,11 +313,8 @@ static int hash_check_key(struct socket *sock) if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) goto unlock; - if (!pask->refcnt++) - sock_hold(psk); - - ask->refcnt = 1; - sock_put(psk); + 
atomic_dec(&pask->nokey_refcnt); + atomic_set(&ask->nokey_refcnt, 0); err = 0; diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c index e2c8ab408bed..81c4022285a7 100644 --- a/crypto/algif_skcipher.c +++ b/crypto/algif_skcipher.c @@ -61,8 +61,8 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg, int err = 0; size_t len = 0; - if (!ctx->used) { - err = af_alg_wait_for_data(sk, flags); + if (!ctx->init || (ctx->more && ctx->used < bs)) { + err = af_alg_wait_for_data(sk, flags, bs); if (err) return err; } @@ -74,14 +74,10 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg, return PTR_ERR(areq); /* convert iovecs of output buffers into RX SGL */ - err = af_alg_get_rsgl(sk, msg, flags, areq, -1, &len); + err = af_alg_get_rsgl(sk, msg, flags, areq, ctx->used, &len); if (err) goto free; - /* Process only as much RX buffers for which we have TX data */ - if (len > ctx->used) - len = ctx->used; - /* * If more buffers are to be expected to be processed, process only * full block size buffers. @@ -215,7 +211,7 @@ static int skcipher_check_key(struct socket *sock) struct alg_sock *ask = alg_sk(sk); lock_sock(sk); - if (ask->refcnt) + if (!atomic_read(&ask->nokey_refcnt)) goto unlock_child; psk = ask->parent; @@ -227,11 +223,8 @@ static int skcipher_check_key(struct socket *sock) if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) goto unlock; - if (!pask->refcnt++) - sock_hold(psk); - - ask->refcnt = 1; - sock_put(psk); + atomic_dec(&pask->nokey_refcnt); + atomic_set(&ask->nokey_refcnt, 0); err = 0; @@ -340,6 +333,7 @@ static int skcipher_accept_parent_nokey(void *private, struct sock *sk) ctx = sock_kmalloc(sk, len, GFP_KERNEL); if (!ctx) return -ENOMEM; + memset(ctx, 0, len); ctx->iv = sock_kmalloc(sk, crypto_skcipher_ivsize(tfm), GFP_KERNEL); @@ -347,16 +341,10 @@ static int skcipher_accept_parent_nokey(void *private, struct sock *sk) sock_kfree_s(sk, ctx, len); return -ENOMEM; } - memset(ctx->iv, 0, crypto_skcipher_ivsize(tfm)); INIT_LIST_HEAD(&ctx->tsgl_list); ctx->len = len; - ctx->used = 0; - atomic_set(&ctx->rcvused, 0); - ctx->more = 0; - ctx->merge = 0; - ctx->enc = 0; crypto_init_wait(&ctx->wait); ask->private = ctx; diff --git a/crypto/asymmetric_keys/public_key.c b/crypto/asymmetric_keys/public_key.c index d7f43d4ea925..e5fae4e838c0 100644 --- a/crypto/asymmetric_keys/public_key.c +++ b/crypto/asymmetric_keys/public_key.c @@ -119,6 +119,7 @@ static int software_key_query(const struct kernel_pkey_params *params, if (IS_ERR(tfm)) return PTR_ERR(tfm); + ret = -ENOMEM; key = kmalloc(pkey->keylen + sizeof(u32) * 2 + pkey->paramlen, GFP_KERNEL); if (!key) diff --git a/drivers/acpi/acpi_configfs.c b/drivers/acpi/acpi_configfs.c index 57d9d574d4dd..01738d8e888e 100644 --- a/drivers/acpi/acpi_configfs.c +++ b/drivers/acpi/acpi_configfs.c @@ -11,6 +11,7 @@ #include #include #include +#include #include "acpica/accommon.h" #include "acpica/actables.h" @@ -28,7 +29,10 @@ static ssize_t acpi_table_aml_write(struct config_item *cfg, { const struct acpi_table_header *header = data; struct acpi_table *table; - int ret; + int ret = security_locked_down(LOCKDOWN_ACPI_TABLES); + + if (ret) + return ret; table = container_of(cfg, struct acpi_table, cfg); diff --git a/drivers/acpi/acpica/dsfield.c b/drivers/acpi/acpica/dsfield.c index 8438e33aa447..fd9028a6bc20 100644 --- a/drivers/acpi/acpica/dsfield.c +++ b/drivers/acpi/acpica/dsfield.c @@ -518,13 +518,20 @@ acpi_ds_create_field(union acpi_parse_object *op, info.region_node = region_node; status = 
acpi_ds_get_field_names(&info, walk_state, arg->common.next); - if (info.region_node->object->region.space_id == - ACPI_ADR_SPACE_PLATFORM_COMM - && !(region_node->object->field.internal_pcc_buffer = - ACPI_ALLOCATE_ZEROED(info.region_node->object->region. - length))) { - return_ACPI_STATUS(AE_NO_MEMORY); + if (ACPI_FAILURE(status)) { + return_ACPI_STATUS(status); } + + if (info.region_node->object->region.space_id == + ACPI_ADR_SPACE_PLATFORM_COMM) { + region_node->object->field.internal_pcc_buffer = + ACPI_ALLOCATE_ZEROED(info.region_node->object->region. + length); + if (!region_node->object->field.internal_pcc_buffer) { + return_ACPI_STATUS(AE_NO_MEMORY); + } + } + return_ACPI_STATUS(status); } diff --git a/drivers/acpi/acpica/exprep.c b/drivers/acpi/acpica/exprep.c index 728d752f7adc..85f799c9c25c 100644 --- a/drivers/acpi/acpica/exprep.c +++ b/drivers/acpi/acpica/exprep.c @@ -473,10 +473,6 @@ acpi_status acpi_ex_prep_field_value(struct acpi_create_field_info *info) (u8)access_byte_width; } } - /* An additional reference for the container */ - - acpi_ut_add_reference(obj_desc->field.region_obj); - ACPI_DEBUG_PRINT((ACPI_DB_BFIELD, "RegionField: BitOff %X, Off %X, Gran %X, Region %p\n", obj_desc->field.start_field_bit_offset, diff --git a/drivers/acpi/acpica/utdelete.c b/drivers/acpi/acpica/utdelete.c index c365faf4e6cd..4c0d4e434196 100644 --- a/drivers/acpi/acpica/utdelete.c +++ b/drivers/acpi/acpica/utdelete.c @@ -568,11 +568,6 @@ acpi_ut_update_object_reference(union acpi_operand_object *object, u16 action) next_object = object->buffer_field.buffer_obj; break; - case ACPI_TYPE_LOCAL_REGION_FIELD: - - next_object = object->field.region_obj; - break; - case ACPI_TYPE_LOCAL_BANK_FIELD: next_object = object->bank_field.bank_obj; @@ -613,6 +608,7 @@ acpi_ut_update_object_reference(union acpi_operand_object *object, u16 action) } break; + case ACPI_TYPE_LOCAL_REGION_FIELD: case ACPI_TYPE_REGION: default: diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c index 5a7551d060f2..bc95a5eebd13 100644 --- a/drivers/acpi/arm64/iort.c +++ b/drivers/acpi/arm64/iort.c @@ -361,6 +361,7 @@ static struct acpi_iort_node *iort_node_get_id(struct acpi_iort_node *node, static int iort_get_id_mapping_index(struct acpi_iort_node *node) { struct acpi_iort_smmu_v3 *smmu; + struct acpi_iort_pmcg *pmcg; switch (node->type) { case ACPI_IORT_NODE_SMMU_V3: @@ -388,6 +389,10 @@ static int iort_get_id_mapping_index(struct acpi_iort_node *node) return smmu->id_mapping_index; case ACPI_IORT_NODE_PMCG: + pmcg = (struct acpi_iort_pmcg *)node->node_data; + if (pmcg->overflow_gsiv || node->mapping_count == 0) + return -EINVAL; + return 0; default: return -EINVAL; diff --git a/drivers/acpi/evged.c b/drivers/acpi/evged.c index 6d7a522952bf..ccd900690b6f 100644 --- a/drivers/acpi/evged.c +++ b/drivers/acpi/evged.c @@ -94,7 +94,7 @@ static acpi_status acpi_ged_request_interrupt(struct acpi_resource *ares, trigger = p->triggering; } else { gsi = pext->interrupts[0]; - trigger = p->triggering; + trigger = pext->triggering; } irq = r.start; diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c index 3a89909b50a6..76c668c05fa0 100644 --- a/drivers/acpi/sysfs.c +++ b/drivers/acpi/sysfs.c @@ -938,13 +938,13 @@ static void __exit interrupt_stats_exit(void) } static ssize_t -acpi_show_profile(struct device *dev, struct device_attribute *attr, +acpi_show_profile(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sprintf(buf, "%d\n", acpi_gbl_FADT.preferred_profile); } -static const struct 
device_attribute pm_profile_attr = +static const struct kobj_attribute pm_profile_attr = __ATTR(pm_profile, S_IRUGO, acpi_show_profile, NULL); static ssize_t hotplug_enabled_show(struct kobject *kobj, diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c index e63fd7bfd3a5..5bcb4c01ec5f 100644 --- a/drivers/acpi/video_detect.c +++ b/drivers/acpi/video_detect.c @@ -336,6 +336,25 @@ static const struct dmi_system_id video_detect_dmi_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "Precision 7510"), }, }, + { + .callback = video_detect_force_native, + .ident = "Acer Aspire 5738z", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5738"), + DMI_MATCH(DMI_BOARD_NAME, "JV50"), + }, + }, + { + /* https://bugzilla.kernel.org/show_bug.cgi?id=207835 */ + .callback = video_detect_force_native, + .ident = "Acer TravelMate 5735Z", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 5735Z"), + DMI_MATCH(DMI_BOARD_NAME, "BA51_MV"), + }, + }, /* * Desktops which falsely report a backlight and which our heuristics diff --git a/drivers/android/binder.c b/drivers/android/binder.c index 34a6de65aa7e..110dd4c2977f 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c @@ -2984,6 +2984,12 @@ static void binder_transaction(struct binder_proc *proc, goto err_dead_binder; } e->to_node = target_node->debug_id; + if (WARN_ON(proc == target_proc)) { + return_error = BR_FAILED_REPLY; + return_error_param = -EINVAL; + return_error_line = __LINE__; + goto err_invalid_target_handle; + } if (security_binder_transaction(proc->tsk, target_proc->tsk) < 0) { return_error = BR_FAILED_REPLY; @@ -3637,10 +3643,17 @@ static int binder_thread_write(struct binder_proc *proc, struct binder_node *ctx_mgr_node; mutex_lock(&context->context_mgr_node_lock); ctx_mgr_node = context->binder_context_mgr_node; - if (ctx_mgr_node) + if (ctx_mgr_node) { + if (ctx_mgr_node->proc == proc) { + binder_user_error("%d:%d context manager tried to acquire desc 0\n", + proc->pid, thread->pid); + mutex_unlock(&context->context_mgr_node_lock); + return -EINVAL; + } ret = binder_inc_ref_for_node( proc, ctx_mgr_node, strong, NULL, &rdata); + } mutex_unlock(&context->context_mgr_node_lock); } if (ret) @@ -4688,8 +4701,15 @@ static struct binder_thread *binder_get_thread(struct binder_proc *proc) static void binder_free_proc(struct binder_proc *proc) { + struct binder_device *device; + BUG_ON(!list_empty(&proc->todo)); BUG_ON(!list_empty(&proc->delivered_death)); + device = container_of(proc->context, struct binder_device, context); + if (refcount_dec_and_test(&device->ref)) { + kfree(proc->context->name); + kfree(device); + } binder_alloc_deferred_release(&proc->alloc); put_task_struct(proc->tsk); binder_stats_deleted(BINDER_STAT_PROC); @@ -5408,7 +5428,6 @@ static int binder_node_release(struct binder_node *node, int refs) static void binder_deferred_release(struct binder_proc *proc) { struct binder_context *context = proc->context; - struct binder_device *device; struct rb_node *n; int threads, nodes, incoming_refs, outgoing_refs, active_transactions; @@ -5425,12 +5444,6 @@ static void binder_deferred_release(struct binder_proc *proc) context->binder_context_mgr_node = NULL; } mutex_unlock(&context->context_mgr_node_lock); - device = container_of(proc->context, struct binder_device, context); - if (refcount_dec_and_test(&device->ref)) { - kfree(context->name); - kfree(device); - } - proc->context = NULL; binder_inner_proc_lock(proc); /* * Make sure 
proc stays alive after we diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c index 7067d5542a82..2048ba6c8b08 100644 --- a/drivers/android/binder_alloc.c +++ b/drivers/android/binder_alloc.c @@ -948,7 +948,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item, trace_binder_unmap_user_end(alloc, index); } up_read(&mm->mmap_sem); - mmput(mm); + mmput_async(mm); trace_binder_unmap_kernel_start(alloc, index); diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 581595b35573..066b37963ad5 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -41,7 +41,6 @@ #include #include #include -#include #include #include #include @@ -4475,9 +4474,8 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { /* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */ { "C300-CTFDDAC128MAG", "0001", ATA_HORKAGE_NONCQ, }, - /* Some Sandisk SSDs lock up hard with NCQ enabled. Reported on - SD7SN6S256G and SD8SN8U256G */ - { "SanDisk SD[78]SN*G", NULL, ATA_HORKAGE_NONCQ, }, + /* Sandisk SD7/8/9s lock up hard on large trims */ + { "SanDisk SD[789]*", NULL, ATA_HORKAGE_MAX_TRIM_128M, }, /* devices which puke on READ_NATIVE_MAX */ { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, }, @@ -6592,7 +6590,7 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht) /* perform each probe asynchronously */ for (i = 0; i < host->n_ports; i++) { struct ata_port *ap = host->ports[i]; - async_schedule(async_port_probe, ap); + ap->cookie = async_schedule(async_port_probe, ap); } return 0; @@ -6732,11 +6730,11 @@ void ata_host_detach(struct ata_host *host) { int i; - /* Ensure ata_port probe has completed */ - async_synchronize_full(); - - for (i = 0; i < host->n_ports; i++) + for (i = 0; i < host->n_ports; i++) { + /* Ensure ata_port probe has completed */ + async_synchronize_cookie(host->ports[i]->cookie + 1); ata_port_detach(host->ports[i]); + } /* the host is dead now, dissociate ACPI */ ata_acpi_dissociate(host); diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index 5af34a3201ed..464efedc778b 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -2374,6 +2374,7 @@ static unsigned int ata_scsiop_inq_89(struct ata_scsi_args *args, u8 *rbuf) static unsigned int ata_scsiop_inq_b0(struct ata_scsi_args *args, u8 *rbuf) { + struct ata_device *dev = args->dev; u16 min_io_sectors; rbuf[1] = 0xb0; @@ -2399,7 +2400,12 @@ static unsigned int ata_scsiop_inq_b0(struct ata_scsi_args *args, u8 *rbuf) * with the unmap bit set. */ if (ata_id_has_trim(args->id)) { - put_unaligned_be64(65535 * ATA_MAX_TRIM_RNUM, &rbuf[36]); + u64 max_blocks = 65535 * ATA_MAX_TRIM_RNUM; + + if (dev->horkage & ATA_HORKAGE_MAX_TRIM_128M) + max_blocks = 128 << (20 - SECTOR_SHIFT); + + put_unaligned_be64(max_blocks, &rbuf[36]); put_unaligned_be32(1, &rbuf[28]); } @@ -3978,12 +3984,13 @@ static unsigned int ata_scsi_mode_select_xlat(struct ata_queued_cmd *qc) { struct scsi_cmnd *scmd = qc->scsicmd; const u8 *cdb = scmd->cmnd; - const u8 *p; u8 pg, spg; unsigned six_byte, pg_len, hdr_len, bd_len; int len; u16 fp = (u16)-1; u8 bp = 0xff; + u8 buffer[64]; + const u8 *p = buffer; VPRINTK("ENTER\n"); @@ -4017,12 +4024,14 @@ static unsigned int ata_scsi_mode_select_xlat(struct ata_queued_cmd *qc) if (!scsi_sg_count(scmd) || scsi_sglist(scmd)->length < len) goto invalid_param_len; - p = page_address(sg_page(scsi_sglist(scmd))); - /* Move past header and block descriptors. 
*/ if (len < hdr_len) goto invalid_param_len; + if (!sg_copy_to_buffer(scsi_sglist(scmd), scsi_sg_count(scmd), + buffer, sizeof(buffer))) + goto invalid_param_len; + if (six_byte) bd_len = p[3]; else diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c index 3495e1733a8e..c35b7b993133 100644 --- a/drivers/ata/sata_rcar.c +++ b/drivers/ata/sata_rcar.c @@ -905,7 +905,7 @@ static int sata_rcar_probe(struct platform_device *pdev) pm_runtime_enable(dev); ret = pm_runtime_get_sync(dev); if (ret < 0) - goto err_pm_disable; + goto err_pm_put; host = ata_host_alloc(dev, 1); if (!host) { @@ -935,7 +935,6 @@ static int sata_rcar_probe(struct platform_device *pdev) err_pm_put: pm_runtime_put(dev); -err_pm_disable: pm_runtime_disable(dev); return ret; } @@ -989,8 +988,10 @@ static int sata_rcar_resume(struct device *dev) int ret; ret = pm_runtime_get_sync(dev); - if (ret < 0) + if (ret < 0) { + pm_runtime_put(dev); return ret; + } if (priv->type == RCAR_GEN3_SATA) { sata_rcar_init_module(priv); @@ -1015,8 +1016,10 @@ static int sata_rcar_restore(struct device *dev) int ret; ret = pm_runtime_get_sync(dev); - if (ret < 0) + if (ret < 0) { + pm_runtime_put(dev); return ret; + } sata_rcar_setup_port(host); diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c index d9fd70280482..7f814da3c2d0 100644 --- a/drivers/atm/atmtcp.c +++ b/drivers/atm/atmtcp.c @@ -433,9 +433,15 @@ static int atmtcp_remove_persistent(int itf) return -EMEDIUMTYPE; } dev_data = PRIV(dev); - if (!dev_data->persist) return 0; + if (!dev_data->persist) { + atm_dev_put(dev); + return 0; + } dev_data->persist = 0; - if (PRIV(dev)->vcc) return 0; + if (PRIV(dev)->vcc) { + atm_dev_put(dev); + return 0; + } kfree(dev_data); atm_dev_put(dev); atm_dev_deregister(dev); diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c index d287837ed755..5acb45985675 100644 --- a/drivers/atm/firestream.c +++ b/drivers/atm/firestream.c @@ -998,6 +998,7 @@ static int fs_open(struct atm_vcc *atm_vcc) error = make_rate (pcr, r, &tmc0, NULL); if (error) { kfree(tc); + kfree(vcc); return error; } } diff --git a/drivers/base/core.c b/drivers/base/core.c index ee420f0fdb52..8a973958d11a 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -3442,9 +3442,9 @@ static inline bool fwnode_is_primary(struct fwnode_handle *fwnode) */ void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode) { - if (fwnode) { - struct fwnode_handle *fn = dev->fwnode; + struct fwnode_handle *fn = dev->fwnode; + if (fwnode) { if (fwnode_is_primary(fn)) fn = fn->secondary; @@ -3454,8 +3454,12 @@ void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode) } dev->fwnode = fwnode; } else { - dev->fwnode = fwnode_is_primary(dev->fwnode) ? 
- dev->fwnode->secondary : NULL; + if (fwnode_is_primary(fn)) { + dev->fwnode = fn->secondary; + fn->secondary = NULL; + } else { + dev->fwnode = NULL; + } } } EXPORT_SYMBOL_GPL(set_primary_fwnode); diff --git a/drivers/base/dd.c b/drivers/base/dd.c index 635f508f25c7..f8746aa3c14b 100644 --- a/drivers/base/dd.c +++ b/drivers/base/dd.c @@ -872,7 +872,9 @@ static int __device_attach(struct device *dev, bool allow_async) int ret = 0; device_lock(dev); - if (dev->driver) { + if (dev->p->dead) { + goto out_unlock; + } else if (dev->driver) { if (device_is_bound(dev)) { ret = 1; goto out_unlock; diff --git a/drivers/base/firmware_loader/firmware.h b/drivers/base/firmware_loader/firmware.h index 7ecd590e67fe..9bef6c35f344 100644 --- a/drivers/base/firmware_loader/firmware.h +++ b/drivers/base/firmware_loader/firmware.h @@ -139,10 +139,12 @@ int assign_fw(struct firmware *fw, struct device *device, void fw_free_paged_buf(struct fw_priv *fw_priv); int fw_grow_paged_buf(struct fw_priv *fw_priv, int pages_needed); int fw_map_paged_buf(struct fw_priv *fw_priv); +bool fw_is_paged_buf(struct fw_priv *fw_priv); #else static inline void fw_free_paged_buf(struct fw_priv *fw_priv) {} static inline int fw_grow_paged_buf(struct fw_priv *fw_priv, int pages_needed) { return -ENXIO; } static inline int fw_map_paged_buf(struct fw_priv *fw_priv) { return -ENXIO; } +static inline bool fw_is_paged_buf(struct fw_priv *fw_priv) { return false; } #endif #endif /* __FIRMWARE_LOADER_H */ diff --git a/drivers/base/firmware_loader/main.c b/drivers/base/firmware_loader/main.c index bf44c79beae9..95d21b4af904 100644 --- a/drivers/base/firmware_loader/main.c +++ b/drivers/base/firmware_loader/main.c @@ -252,9 +252,11 @@ static void __free_fw_priv(struct kref *ref) list_del(&fw_priv->list); spin_unlock(&fwc->lock); - fw_free_paged_buf(fw_priv); /* free leftover pages */ - if (!fw_priv->allocated_size) + if (fw_is_paged_buf(fw_priv)) + fw_free_paged_buf(fw_priv); + else if (!fw_priv->allocated_size) vfree(fw_priv->data); + kfree_const(fw_priv->fw_name); kfree(fw_priv); } @@ -268,6 +270,11 @@ static void free_fw_priv(struct fw_priv *fw_priv) } #ifdef CONFIG_FW_LOADER_PAGED_BUF +bool fw_is_paged_buf(struct fw_priv *fw_priv) +{ + return fw_priv->is_paged_buf; +} + void fw_free_paged_buf(struct fw_priv *fw_priv) { int i; @@ -275,6 +282,8 @@ void fw_free_paged_buf(struct fw_priv *fw_priv) if (!fw_priv->pages) return; + vunmap(fw_priv->data); + for (i = 0; i < fw_priv->nr_pages; i++) __free_page(fw_priv->pages[i]); kvfree(fw_priv->pages); @@ -328,10 +337,6 @@ int fw_map_paged_buf(struct fw_priv *fw_priv) if (!fw_priv->data) return -ENOMEM; - /* page table is no longer needed after mapping, let's free */ - kvfree(fw_priv->pages); - fw_priv->pages = NULL; - return 0; } #endif diff --git a/drivers/base/platform.c b/drivers/base/platform.c index 604a461848c9..0b67d41bab8f 100644 --- a/drivers/base/platform.c +++ b/drivers/base/platform.c @@ -802,6 +802,8 @@ int __init_or_module __platform_driver_probe(struct platform_driver *drv, /* temporary section violation during probe() */ drv->probe = probe; retval = code = __platform_driver_register(drv, module); + if (retval) + return retval; /* * Fixup that section violation, being paranoid about code scanning diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index 8646147dc194..23af54512053 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -1728,13 +1728,17 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async) } /* - * If a 
device configured to wake up the system from sleep states - * has been suspended at run time and there's a resume request pending - * for it, this is equivalent to the device signaling wakeup, so the - * system suspend operation should be aborted. + * Wait for possible runtime PM transitions of the device in progress + * to complete and if there's a runtime resume request pending for it, + * resume it before proceeding with invoking the system-wide suspend + * callbacks for it. + * + * If the system-wide suspend callbacks below change the configuration + * of the device, they must disable runtime PM for it or otherwise + * ensure that its runtime-resume callbacks will not be confused by that + * change in case they are invoked going forward. */ - if (pm_runtime_barrier(dev) && device_may_wakeup(dev)) - pm_wakeup_event(dev, 0); + pm_runtime_barrier(dev); if (pm_wakeup_pending()) { dev->power.direct_complete = false; diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c index 12389457723f..6dffcb71b86c 100644 --- a/drivers/base/power/wakeup.c +++ b/drivers/base/power/wakeup.c @@ -1127,6 +1127,9 @@ static void *wakeup_sources_stats_seq_next(struct seq_file *m, break; } + if (!next_ws) + print_wakeup_source_stats(m, &deleted_ws); + return next_ws; } diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c index 089e5dc7144a..f58baff2be0a 100644 --- a/drivers/base/regmap/regmap-debugfs.c +++ b/drivers/base/regmap/regmap-debugfs.c @@ -463,29 +463,31 @@ static ssize_t regmap_cache_only_write_file(struct file *file, { struct regmap *map = container_of(file->private_data, struct regmap, cache_only); - ssize_t result; - bool was_enabled, require_sync = false; + bool new_val, require_sync = false; int err; + err = kstrtobool_from_user(user_buf, count, &new_val); + /* Ignore malformed data like debugfs_write_file_bool() */ + if (err) + return count; + + err = debugfs_file_get(file->f_path.dentry); + if (err) + return err; + map->lock(map->lock_arg); - was_enabled = map->cache_only; - - result = debugfs_write_file_bool(file, user_buf, count, ppos); - if (result < 0) { - map->unlock(map->lock_arg); - return result; - } - - if (map->cache_only && !was_enabled) { + if (new_val && !map->cache_only) { dev_warn(map->dev, "debugfs cache_only=Y forced\n"); add_taint(TAINT_USER, LOCKDEP_STILL_OK); - } else if (!map->cache_only && was_enabled) { + } else if (!new_val && map->cache_only) { dev_warn(map->dev, "debugfs cache_only=N forced: syncing cache\n"); require_sync = true; } + map->cache_only = new_val; map->unlock(map->lock_arg); + debugfs_file_put(file->f_path.dentry); if (require_sync) { err = regcache_sync(map); @@ -493,7 +495,7 @@ static ssize_t regmap_cache_only_write_file(struct file *file, dev_err(map->dev, "Failed to sync cache %d\n", err); } - return result; + return count; } static const struct file_operations regmap_cache_only_fops = { @@ -508,28 +510,32 @@ static ssize_t regmap_cache_bypass_write_file(struct file *file, { struct regmap *map = container_of(file->private_data, struct regmap, cache_bypass); - ssize_t result; - bool was_enabled; + bool new_val; + int err; + + err = kstrtobool_from_user(user_buf, count, &new_val); + /* Ignore malformed data like debugfs_write_file_bool() */ + if (err) + return count; + + err = debugfs_file_get(file->f_path.dentry); + if (err) + return err; map->lock(map->lock_arg); - was_enabled = map->cache_bypass; - - result = debugfs_write_file_bool(file, user_buf, count, ppos); - if (result < 0) - goto out; - - if
(map->cache_bypass && !was_enabled) { + if (new_val && !map->cache_bypass) { dev_warn(map->dev, "debugfs cache_bypass=Y forced\n"); add_taint(TAINT_USER, LOCKDEP_STILL_OK); - } else if (!map->cache_bypass && was_enabled) { + } else if (!new_val && map->cache_bypass) { dev_warn(map->dev, "debugfs cache_bypass=N forced\n"); } + map->cache_bypass = new_val; -out: map->unlock(map->lock_arg); + debugfs_file_put(file->f_path.dentry); - return result; + return count; } static const struct file_operations regmap_cache_bypass_fops = { diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c index f76e4e879e50..f4e4bf0b571b 100644 --- a/drivers/base/regmap/regmap.c +++ b/drivers/base/regmap/regmap.c @@ -17,6 +17,7 @@ #include #include #include +#include #define CREATE_TRACE_POINTS #include "trace.h" @@ -249,22 +250,20 @@ static void regmap_format_8(void *buf, unsigned int val, unsigned int shift) static void regmap_format_16_be(void *buf, unsigned int val, unsigned int shift) { - __be16 *b = buf; - - b[0] = cpu_to_be16(val << shift); + put_unaligned_be16(val << shift, buf); } static void regmap_format_16_le(void *buf, unsigned int val, unsigned int shift) { - __le16 *b = buf; - - b[0] = cpu_to_le16(val << shift); + put_unaligned_le16(val << shift, buf); } static void regmap_format_16_native(void *buf, unsigned int val, unsigned int shift) { - *(u16 *)buf = val << shift; + u16 v = val << shift; + + memcpy(buf, &v, sizeof(v)); } static void regmap_format_24(void *buf, unsigned int val, unsigned int shift) @@ -280,43 +279,39 @@ static void regmap_format_24(void *buf, unsigned int val, unsigned int shift) static void regmap_format_32_be(void *buf, unsigned int val, unsigned int shift) { - __be32 *b = buf; - - b[0] = cpu_to_be32(val << shift); + put_unaligned_be32(val << shift, buf); } static void regmap_format_32_le(void *buf, unsigned int val, unsigned int shift) { - __le32 *b = buf; - - b[0] = cpu_to_le32(val << shift); + put_unaligned_le32(val << shift, buf); } static void regmap_format_32_native(void *buf, unsigned int val, unsigned int shift) { - *(u32 *)buf = val << shift; + u32 v = val << shift; + + memcpy(buf, &v, sizeof(v)); } #ifdef CONFIG_64BIT static void regmap_format_64_be(void *buf, unsigned int val, unsigned int shift) { - __be64 *b = buf; - - b[0] = cpu_to_be64((u64)val << shift); + put_unaligned_be64((u64) val << shift, buf); } static void regmap_format_64_le(void *buf, unsigned int val, unsigned int shift) { - __le64 *b = buf; - - b[0] = cpu_to_le64((u64)val << shift); + put_unaligned_le64((u64) val << shift, buf); } static void regmap_format_64_native(void *buf, unsigned int val, unsigned int shift) { - *(u64 *)buf = (u64)val << shift; + u64 v = (u64) val << shift; + + memcpy(buf, &v, sizeof(v)); } #endif @@ -333,35 +328,34 @@ static unsigned int regmap_parse_8(const void *buf) static unsigned int regmap_parse_16_be(const void *buf) { - const __be16 *b = buf; - - return be16_to_cpu(b[0]); + return get_unaligned_be16(buf); } static unsigned int regmap_parse_16_le(const void *buf) { - const __le16 *b = buf; - - return le16_to_cpu(b[0]); + return get_unaligned_le16(buf); } static void regmap_parse_16_be_inplace(void *buf) { - __be16 *b = buf; + u16 v = get_unaligned_be16(buf); - b[0] = be16_to_cpu(b[0]); + memcpy(buf, &v, sizeof(v)); } static void regmap_parse_16_le_inplace(void *buf) { - __le16 *b = buf; + u16 v = get_unaligned_le16(buf); - b[0] = le16_to_cpu(b[0]); + memcpy(buf, &v, sizeof(v)); } static unsigned int regmap_parse_16_native(const void *buf) { - return 
*(u16 *)buf; + u16 v; + + memcpy(&v, buf, sizeof(v)); + return v; } static unsigned int regmap_parse_24(const void *buf) @@ -376,69 +370,67 @@ static unsigned int regmap_parse_24(const void *buf) static unsigned int regmap_parse_32_be(const void *buf) { - const __be32 *b = buf; - - return be32_to_cpu(b[0]); + return get_unaligned_be32(buf); } static unsigned int regmap_parse_32_le(const void *buf) { - const __le32 *b = buf; - - return le32_to_cpu(b[0]); + return get_unaligned_le32(buf); } static void regmap_parse_32_be_inplace(void *buf) { - __be32 *b = buf; + u32 v = get_unaligned_be32(buf); - b[0] = be32_to_cpu(b[0]); + memcpy(buf, &v, sizeof(v)); } static void regmap_parse_32_le_inplace(void *buf) { - __le32 *b = buf; + u32 v = get_unaligned_le32(buf); - b[0] = le32_to_cpu(b[0]); + memcpy(buf, &v, sizeof(v)); } static unsigned int regmap_parse_32_native(const void *buf) { - return *(u32 *)buf; + u32 v; + + memcpy(&v, buf, sizeof(v)); + return v; } #ifdef CONFIG_64BIT static unsigned int regmap_parse_64_be(const void *buf) { - const __be64 *b = buf; - - return be64_to_cpu(b[0]); + return get_unaligned_be64(buf); } static unsigned int regmap_parse_64_le(const void *buf) { - const __le64 *b = buf; - - return le64_to_cpu(b[0]); + return get_unaligned_le64(buf); } static void regmap_parse_64_be_inplace(void *buf) { - __be64 *b = buf; + u64 v = get_unaligned_be64(buf); - b[0] = be64_to_cpu(b[0]); + memcpy(buf, &v, sizeof(v)); } static void regmap_parse_64_le_inplace(void *buf) { - __le64 *b = buf; + u64 v = get_unaligned_le64(buf); - b[0] = le64_to_cpu(b[0]); + memcpy(buf, &v, sizeof(v)); } static unsigned int regmap_parse_64_native(const void *buf) { - return *(u64 *)buf; + u64 v; + + memcpy(&v, buf, sizeof(v)); + return v; } #endif @@ -1354,6 +1346,7 @@ void regmap_exit(struct regmap *map) if (map->hwlock) hwspin_lock_free(map->hwlock); kfree_const(map->name); + kfree(map->patch); kfree(map); } EXPORT_SYMBOL_GPL(regmap_exit); @@ -1368,7 +1361,7 @@ static int dev_get_regmap_match(struct device *dev, void *res, void *data) /* If the user didn't specify a name match any */ if (data) - return (*r)->name == data; + return !strcmp((*r)->name, data); else return 1; } diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 57ed6b70d295..ffbe792410d1 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -863,6 +863,7 @@ static void loop_config_discard(struct loop_device *lo) struct file *file = lo->lo_backing_file; struct inode *inode = file->f_mapping->host; struct request_queue *q = lo->lo_queue; + u32 granularity, max_discard_sectors; /* * If the backing device is a block device, mirror its zeroing @@ -875,11 +876,10 @@ static void loop_config_discard(struct loop_device *lo) struct request_queue *backingq; backingq = bdev_get_queue(inode->i_bdev); - blk_queue_max_discard_sectors(q, - backingq->limits.max_write_zeroes_sectors); - blk_queue_max_write_zeroes_sectors(q, - backingq->limits.max_write_zeroes_sectors); + max_discard_sectors = backingq->limits.max_write_zeroes_sectors; + granularity = backingq->limits.discard_granularity ?: + queue_physical_block_size(backingq); /* * We use punch hole to reclaim the free space used by the @@ -888,23 +888,26 @@ static void loop_config_discard(struct loop_device *lo) * useful information. 
*/ } else if (!file->f_op->fallocate || lo->lo_encrypt_key_size) { - q->limits.discard_granularity = 0; - q->limits.discard_alignment = 0; - blk_queue_max_discard_sectors(q, 0); - blk_queue_max_write_zeroes_sectors(q, 0); + max_discard_sectors = 0; + granularity = 0; } else { - q->limits.discard_granularity = inode->i_sb->s_blocksize; - q->limits.discard_alignment = 0; - - blk_queue_max_discard_sectors(q, UINT_MAX >> 9); - blk_queue_max_write_zeroes_sectors(q, UINT_MAX >> 9); + max_discard_sectors = UINT_MAX >> 9; + granularity = inode->i_sb->s_blocksize; } - if (q->limits.max_write_zeroes_sectors) + if (max_discard_sectors) { + q->limits.discard_granularity = granularity; + blk_queue_max_discard_sectors(q, max_discard_sectors); + blk_queue_max_write_zeroes_sectors(q, max_discard_sectors); blk_queue_flag_set(QUEUE_FLAG_DISCARD, q); - else + } else { + q->limits.discard_granularity = 0; + blk_queue_max_discard_sectors(q, 0); + blk_queue_max_write_zeroes_sectors(q, 0); blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q); + } + q->limits.discard_alignment = 0; } static void loop_unprepare_queue(struct loop_device *lo) @@ -1284,7 +1287,7 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info) if (lo->lo_offset != info->lo_offset || lo->lo_sizelimit != info->lo_sizelimit) { sync_blockdev(lo->lo_device); - kill_bdev(lo->lo_device); + invalidate_bdev(lo->lo_device); } /* I/O need to be drained during transfer transition */ @@ -1558,12 +1561,12 @@ static int loop_set_block_size(struct loop_device *lo, unsigned long arg) if (lo->lo_queue->limits.logical_block_size != arg) { sync_blockdev(lo->lo_device); - kill_bdev(lo->lo_device); + invalidate_bdev(lo->lo_device); } blk_mq_freeze_queue(lo->lo_queue); - /* kill_bdev should have truncated all the pages */ + /* invalidate_bdev should have truncated all the pages */ if (lo->lo_queue->limits.logical_block_size != arg && lo->lo_device->bd_inode->i_mapping->nrpages) { err = -EAGAIN; @@ -2325,6 +2328,8 @@ static void __exit loop_exit(void) range = max_loop ? 
max_loop << part_shift : 1UL << MINORBITS; + mutex_lock(&loop_ctl_mutex); + idr_for_each(&loop_index_idr, &loop_exit_cb, NULL); idr_destroy(&loop_index_idr); @@ -2332,6 +2337,8 @@ static void __exit loop_exit(void) unregister_blkdev(LOOP_MAJOR, "loop"); misc_deregister(&loop_misc); + + mutex_unlock(&loop_ctl_mutex); } module_init(loop_init); diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 78181908f0df..7c577cabb9c3 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -1022,25 +1022,26 @@ static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg, test_bit(NBD_RT_BOUND, &config->runtime_flags))) { dev_err(disk_to_dev(nbd->disk), "Device being setup by another task"); - sockfd_put(sock); - return -EBUSY; + err = -EBUSY; + goto put_socket; + } + + nsock = kzalloc(sizeof(*nsock), GFP_KERNEL); + if (!nsock) { + err = -ENOMEM; + goto put_socket; } socks = krealloc(config->socks, (config->num_connections + 1) * sizeof(struct nbd_sock *), GFP_KERNEL); if (!socks) { - sockfd_put(sock); - return -ENOMEM; + kfree(nsock); + err = -ENOMEM; + goto put_socket; } config->socks = socks; - nsock = kzalloc(sizeof(struct nbd_sock), GFP_KERNEL); - if (!nsock) { - sockfd_put(sock); - return -ENOMEM; - } - nsock->fallback_index = -1; nsock->dead = false; mutex_init(&nsock->tx_lock); @@ -1052,6 +1053,10 @@ static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg, atomic_inc(&config->live_connections); return 0; + +put_socket: + sockfd_put(sock); + return err; } static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg) @@ -1344,6 +1349,8 @@ static void nbd_set_cmd_timeout(struct nbd_device *nbd, u64 timeout) nbd->tag_set.timeout = timeout * HZ; if (timeout) blk_queue_rq_timeout(nbd->disk->queue, timeout * HZ); + else + blk_queue_rq_timeout(nbd->disk->queue, 30 * HZ); } /* Must be called with config_lock held */ diff --git a/drivers/block/null_blk_main.c b/drivers/block/null_blk_main.c index c4454cfc6d53..13eae973eaea 100644 --- a/drivers/block/null_blk_main.c +++ b/drivers/block/null_blk_main.c @@ -1072,7 +1072,7 @@ static int null_handle_rq(struct nullb_cmd *cmd) len = bvec.bv_len; err = null_transfer(nullb, bvec.bv_page, len, bvec.bv_offset, op_is_write(req_op(rq)), sector, - req_op(rq) & REQ_FUA); + rq->cmd_flags & REQ_FUA); if (err) { spin_unlock_irq(&nullb->lock); return err; diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c index c5c6487a19d5..7b55811c2a81 100644 --- a/drivers/block/ps3disk.c +++ b/drivers/block/ps3disk.c @@ -454,7 +454,6 @@ static int ps3disk_probe(struct ps3_system_bus_device *_dev) queue->queuedata = dev; blk_queue_max_hw_sectors(queue, dev->bounce_size >> 9); - blk_queue_segment_boundary(queue, -1UL); blk_queue_dma_alignment(queue, dev->blk_size-1); blk_queue_logical_block_size(queue, dev->blk_size); diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 274beda31c35..bf2f0373a3b2 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -5280,6 +5280,9 @@ static ssize_t rbd_config_info_show(struct device *dev, { struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + return sprintf(buf, "%s\n", rbd_dev->config_info); } @@ -5391,6 +5394,9 @@ static ssize_t rbd_image_refresh(struct device *dev, struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); int ret; + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + ret = rbd_dev_refresh(rbd_dev); if (ret) return ret; @@ -7059,6 +7065,9 @@ static ssize_t do_rbd_add(struct bus_type *bus, struct rbd_client *rbdc; int rc; + if 
(!capable(CAP_SYS_ADMIN)) + return -EPERM; + if (!try_module_get(THIS_MODULE)) return -ENODEV; @@ -7208,6 +7217,9 @@ static ssize_t do_rbd_remove(struct bus_type *bus, bool force = false; int ret; + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + dev_id = -1; opt_buf[0] = '\0'; sscanf(buf, "%d %5s", &dev_id, opt_buf); diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 0cf2fe290230..2eeb2bcb488d 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -205,16 +205,31 @@ static int virtblk_setup_discard_write_zeroes(struct request *req, bool unmap) if (!range) return -ENOMEM; - __rq_for_each_bio(bio, req) { - u64 sector = bio->bi_iter.bi_sector; - u32 num_sectors = bio->bi_iter.bi_size >> SECTOR_SHIFT; + /* + * Single max discard segment means multi-range discard isn't + * supported, and block layer only runs contiguity merge like + * normal RW request. So we can't reply on bio for retrieving + * each range info. + */ + if (queue_max_discard_segments(req->q) == 1) { + range[0].flags = cpu_to_le32(flags); + range[0].num_sectors = cpu_to_le32(blk_rq_sectors(req)); + range[0].sector = cpu_to_le64(blk_rq_pos(req)); + n = 1; + } else { + __rq_for_each_bio(bio, req) { + u64 sector = bio->bi_iter.bi_sector; + u32 num_sectors = bio->bi_iter.bi_size >> SECTOR_SHIFT; - range[n].flags = cpu_to_le32(flags); - range[n].num_sectors = cpu_to_le32(num_sectors); - range[n].sector = cpu_to_le64(sector); - n++; + range[n].flags = cpu_to_le32(flags); + range[n].num_sectors = cpu_to_le32(num_sectors); + range[n].sector = cpu_to_le64(sector); + n++; + } } + WARN_ON_ONCE(n != segments); + req->special_vec.bv_page = virt_to_page(range); req->special_vec.bv_offset = offset_in_page(range); req->special_vec.bv_len = sizeof(*range) * segments; @@ -990,6 +1005,7 @@ out_put_disk: put_disk(vblk->disk); out_free_vq: vdev->config->del_vqs(vdev); + kfree(vblk->vqs); out_free_vblk: kfree(vblk); out_free_index: diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index 1bf4a908a0bd..36d49159140f 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -2023,7 +2023,8 @@ static ssize_t hot_add_show(struct class *class, return ret; return scnprintf(buf, PAGE_SIZE, "%d\n", ret); } -static CLASS_ATTR_RO(hot_add); +static struct class_attribute class_attr_hot_add = + __ATTR(hot_add, 0400, hot_add_show, NULL); static ssize_t hot_remove_store(struct class *class, struct class_attribute *attr, diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c index f02a4bdc0ca7..dd29d687cd38 100644 --- a/drivers/bluetooth/btbcm.c +++ b/drivers/bluetooth/btbcm.c @@ -329,6 +329,7 @@ static const struct bcm_subver_table bcm_uart_subver_table[] = { { 0x410e, "BCM43341B0" }, /* 002.001.014 */ { 0x4204, "BCM2076B1" }, /* 002.002.004 */ { 0x4406, "BCM4324B3" }, /* 002.004.006 */ + { 0x4606, "BCM4324B5" }, /* 002.006.006 */ { 0x6109, "BCM4335C0" }, /* 003.001.009 */ { 0x610c, "BCM4354" }, /* 003.001.012 */ { 0x2122, "BCM4343A0" }, /* 001.001.034 */ @@ -343,6 +344,7 @@ static const struct bcm_subver_table bcm_uart_subver_table[] = { }; static const struct bcm_subver_table bcm_usb_subver_table[] = { + { 0x2105, "BCM20703A1" }, /* 001.001.005 */ { 0x210b, "BCM43142A0" }, /* 001.001.011 */ { 0x2112, "BCM4314A0" }, /* 001.001.018 */ { 0x2118, "BCM20702A0" }, /* 001.001.024 */ diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c index 0f3a020703ab..4c7978cb1786 100644 --- a/drivers/bluetooth/btmrvl_sdio.c +++ 
b/drivers/bluetooth/btmrvl_sdio.c @@ -328,7 +328,7 @@ static const struct btmrvl_sdio_device btmrvl_sdio_sd8897 = { static const struct btmrvl_sdio_device btmrvl_sdio_sd8977 = { .helper = NULL, - .firmware = "mrvl/sd8977_uapsta.bin", + .firmware = "mrvl/sdsd8977_combo_v2.bin", .reg = &btmrvl_reg_8977, .support_pscan_win_report = true, .sd_blksz_fw_dl = 256, @@ -346,7 +346,7 @@ static const struct btmrvl_sdio_device btmrvl_sdio_sd8987 = { static const struct btmrvl_sdio_device btmrvl_sdio_sd8997 = { .helper = NULL, - .firmware = "mrvl/sd8997_uapsta.bin", + .firmware = "mrvl/sdsd8997_combo_v4.bin", .reg = &btmrvl_reg_8997, .support_pscan_win_report = true, .sd_blksz_fw_dl = 256, @@ -1831,6 +1831,6 @@ MODULE_FIRMWARE("mrvl/sd8787_uapsta.bin"); MODULE_FIRMWARE("mrvl/sd8797_uapsta.bin"); MODULE_FIRMWARE("mrvl/sd8887_uapsta.bin"); MODULE_FIRMWARE("mrvl/sd8897_uapsta.bin"); -MODULE_FIRMWARE("mrvl/sd8977_uapsta.bin"); +MODULE_FIRMWARE("mrvl/sdsd8977_combo_v2.bin"); MODULE_FIRMWARE("mrvl/sd8987_uapsta.bin"); -MODULE_FIRMWARE("mrvl/sd8997_uapsta.bin"); +MODULE_FIRMWARE("mrvl/sdsd8997_combo_v4.bin"); diff --git a/drivers/bluetooth/btmtksdio.c b/drivers/bluetooth/btmtksdio.c index 813338288453..b7de7cb8cca9 100644 --- a/drivers/bluetooth/btmtksdio.c +++ b/drivers/bluetooth/btmtksdio.c @@ -684,7 +684,7 @@ static int mtk_setup_firmware(struct hci_dev *hdev, const char *fwname) const u8 *fw_ptr; size_t fw_size; int err, dlen; - u8 flag; + u8 flag, param; err = request_firmware(&fw, fwname, &hdev->dev); if (err < 0) { @@ -692,6 +692,20 @@ static int mtk_setup_firmware(struct hci_dev *hdev, const char *fwname) return err; } + /* Power on data RAM the firmware relies on. */ + param = 1; + wmt_params.op = MTK_WMT_FUNC_CTRL; + wmt_params.flag = 3; + wmt_params.dlen = sizeof(param); + wmt_params.data = ¶m; + wmt_params.status = NULL; + + err = mtk_hci_wmt_sync(hdev, &wmt_params); + if (err < 0) { + bt_dev_err(hdev, "Failed to power on data RAM (%d)", err); + return err; + } + fw_ptr = fw->data; fw_size = fw->size; diff --git a/drivers/bluetooth/btmtkuart.c b/drivers/bluetooth/btmtkuart.c index e11169ad8247..8a81fbca5c9d 100644 --- a/drivers/bluetooth/btmtkuart.c +++ b/drivers/bluetooth/btmtkuart.c @@ -1015,7 +1015,7 @@ static int btmtkuart_probe(struct serdev_device *serdev) if (btmtkuart_is_standalone(bdev)) { err = clk_prepare_enable(bdev->osc); if (err < 0) - return err; + goto err_hci_free_dev; if (bdev->boot) { gpiod_set_value_cansleep(bdev->boot, 1); @@ -1028,10 +1028,8 @@ static int btmtkuart_probe(struct serdev_device *serdev) /* Power on */ err = regulator_enable(bdev->vcc); - if (err < 0) { - clk_disable_unprepare(bdev->osc); - return err; - } + if (err < 0) + goto err_clk_disable_unprepare; /* Reset if the reset-gpios is available otherwise the board * -level design should be guaranteed. 
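The btmtkuart_probe() rework in this hunk funnels every failure after clk_prepare_enable() through labelled cleanup, so the regulator, the clock and the allocated HCI device are each released exactly once, in reverse order of setup. Below is a minimal userspace sketch of that unwind idiom only; alloc_dev(), enable_clk() and enable_regulator() are illustrative stand-ins, not driver symbols.

#include <stdio.h>
#include <stdlib.h>

struct fake_dev { int clk_on; };

static struct fake_dev *alloc_dev(void) { return calloc(1, sizeof(struct fake_dev)); }
static void free_dev(struct fake_dev *d) { free(d); }
static int enable_clk(struct fake_dev *d) { d->clk_on = 1; return 0; }
static void disable_clk(struct fake_dev *d) { d->clk_on = 0; }
static int enable_regulator(struct fake_dev *d) { (void)d; return -1; /* force a failure */ }

static int probe(void)
{
        struct fake_dev *d;
        int err;

        d = alloc_dev();
        if (!d)
                return -12;                     /* -ENOMEM */

        err = enable_clk(d);
        if (err)
                goto err_free_dev;

        err = enable_regulator(d);
        if (err)
                goto err_disable_clk;

        return 0;

        /* Unwind strictly in reverse order of setup. */
err_disable_clk:
        disable_clk(d);
err_free_dev:
        free_dev(d);
        return err;
}

int main(void)
{
        printf("probe() = %d\n", probe());
        return 0;
}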
@@ -1063,7 +1061,6 @@ static int btmtkuart_probe(struct serdev_device *serdev) err = hci_register_dev(hdev); if (err < 0) { dev_err(&serdev->dev, "Can't register HCI device\n"); - hci_free_dev(hdev); goto err_regulator_disable; } @@ -1072,6 +1069,11 @@ static int btmtkuart_probe(struct serdev_device *serdev) err_regulator_disable: if (btmtkuart_is_standalone(bdev)) regulator_disable(bdev->vcc); +err_clk_disable_unprepare: + if (btmtkuart_is_standalone(bdev)) + clk_disable_unprepare(bdev->osc); +err_hci_free_dev: + hci_free_dev(hdev); return err; } diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 9c3b063e1a1f..f3f0529564da 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -2792,7 +2792,7 @@ static int btusb_mtk_setup_firmware(struct hci_dev *hdev, const char *fwname) const u8 *fw_ptr; size_t fw_size; int err, dlen; - u8 flag; + u8 flag, param; err = request_firmware(&fw, fwname, &hdev->dev); if (err < 0) { @@ -2800,6 +2800,20 @@ static int btusb_mtk_setup_firmware(struct hci_dev *hdev, const char *fwname) return err; } + /* Power on data RAM the firmware relies on. */ + param = 1; + wmt_params.op = BTMTK_WMT_FUNC_CTRL; + wmt_params.flag = 3; + wmt_params.dlen = sizeof(param); + wmt_params.data = ¶m; + wmt_params.status = NULL; + + err = btusb_mtk_hci_wmt_sync(hdev, &wmt_params); + if (err < 0) { + bt_dev_err(hdev, "Failed to power on data RAM (%d)", err); + return err; + } + fw_ptr = fw->data; fw_size = fw->size; diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c index 7646636f2d18..94ed734c1d7e 100644 --- a/drivers/bluetooth/hci_bcm.c +++ b/drivers/bluetooth/hci_bcm.c @@ -107,6 +107,7 @@ struct bcm_device { u32 oper_speed; int irq; bool irq_active_low; + bool irq_acquired; #ifdef CONFIG_PM struct hci_uart *hu; @@ -319,6 +320,8 @@ static int bcm_request_irq(struct bcm_data *bcm) goto unlock; } + bdev->irq_acquired = true; + device_init_wakeup(bdev->dev, true); pm_runtime_set_autosuspend_delay(bdev->dev, @@ -487,7 +490,7 @@ static int bcm_close(struct hci_uart *hu) } if (bdev) { - if (IS_ENABLED(CONFIG_PM) && bdev->irq > 0) { + if (IS_ENABLED(CONFIG_PM) && bdev->irq_acquired) { devm_free_irq(bdev->dev, bdev->irq, bdev); device_init_wakeup(bdev->dev, false); pm_runtime_disable(bdev->dev); diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c index dacf297baf59..5df0651b6cd5 100644 --- a/drivers/bluetooth/hci_h5.c +++ b/drivers/bluetooth/hci_h5.c @@ -790,7 +790,7 @@ static int h5_serdev_probe(struct serdev_device *serdev) if (!h5) return -ENOMEM; - set_bit(HCI_UART_RESET_ON_INIT, &h5->serdev_hu.flags); + set_bit(HCI_UART_RESET_ON_INIT, &h5->serdev_hu.hdev_flags); h5->hu = &h5->serdev_hu; h5->serdev_hu.serdev = serdev; diff --git a/drivers/bluetooth/hci_serdev.c b/drivers/bluetooth/hci_serdev.c index 4652896d4990..ad2f26cb2622 100644 --- a/drivers/bluetooth/hci_serdev.c +++ b/drivers/bluetooth/hci_serdev.c @@ -357,7 +357,8 @@ void hci_uart_unregister_device(struct hci_uart *hu) struct hci_dev *hdev = hu->hdev; clear_bit(HCI_UART_PROTO_READY, &hu->flags); - hci_unregister_dev(hdev); + if (test_bit(HCI_UART_REGISTERED, &hu->flags)) + hci_unregister_dev(hdev); hci_free_dev(hdev); cancel_work_sync(&hu->write_work); diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c index f0bc0841cbc4..770a780dfa54 100644 --- a/drivers/bus/ti-sysc.c +++ b/drivers/bus/ti-sysc.c @@ -70,11 +70,13 @@ static const char * const clock_names[SYSC_MAX_CLOCKS] = { * @child_needs_resume: runtime resume needed for child on resume from 
suspend * @disable_on_idle: status flag used for disabling modules with resets * @idle_work: work structure used to perform delayed idle on a module - * @clk_enable_quirk: module specific clock enable quirk - * @clk_disable_quirk: module specific clock disable quirk + * @pre_reset_quirk: module specific pre-reset quirk + * @post_reset_quirk: module specific post-reset quirk * @reset_done_quirk: module specific reset done quirk * @module_enable_quirk: module specific enable quirk * @module_disable_quirk: module specific disable quirk + * @module_unlock_quirk: module specific sysconfig unlock quirk + * @module_lock_quirk: module specific sysconfig lock quirk */ struct sysc { struct device *dev; @@ -97,11 +99,13 @@ struct sysc { unsigned int needs_resume:1; unsigned int child_needs_resume:1; struct delayed_work idle_work; - void (*clk_enable_quirk)(struct sysc *sysc); - void (*clk_disable_quirk)(struct sysc *sysc); + void (*pre_reset_quirk)(struct sysc *sysc); + void (*post_reset_quirk)(struct sysc *sysc); void (*reset_done_quirk)(struct sysc *sysc); void (*module_enable_quirk)(struct sysc *sysc); void (*module_disable_quirk)(struct sysc *sysc); + void (*module_unlock_quirk)(struct sysc *sysc); + void (*module_lock_quirk)(struct sysc *sysc); }; static void sysc_parse_dts_quirks(struct sysc *ddata, struct device_node *np, @@ -182,6 +186,34 @@ static u32 sysc_read_sysstatus(struct sysc *ddata) return sysc_read(ddata, offset); } +/* Poll on reset status */ +static int sysc_wait_softreset(struct sysc *ddata) +{ + u32 sysc_mask, syss_done, rstval; + int syss_offset, error = 0; + + syss_offset = ddata->offsets[SYSC_SYSSTATUS]; + sysc_mask = BIT(ddata->cap->regbits->srst_shift); + + if (ddata->cfg.quirks & SYSS_QUIRK_RESETDONE_INVERTED) + syss_done = 0; + else + syss_done = ddata->cfg.syss_mask; + + if (syss_offset >= 0) { + error = readx_poll_timeout_atomic(sysc_read_sysstatus, ddata, + rstval, (rstval & ddata->cfg.syss_mask) == + syss_done, 100, MAX_MODULE_SOFTRESET_WAIT); + + } else if (ddata->cfg.quirks & SYSC_QUIRK_RESET_STATUS) { + error = readx_poll_timeout_atomic(sysc_read_sysconfig, ddata, + rstval, !(rstval & sysc_mask), + 100, MAX_MODULE_SOFTRESET_WAIT); + } + + return error; +} + static int sysc_add_named_clock_from_child(struct sysc *ddata, const char *name, const char *optfck_name) @@ -863,6 +895,22 @@ static void sysc_show_registers(struct sysc *ddata) buf); } +/** + * sysc_write_sysconfig - handle sysconfig quirks for register write + * @ddata: device driver data + * @value: register value + */ +static void sysc_write_sysconfig(struct sysc *ddata, u32 value) +{ + if (ddata->module_unlock_quirk) + ddata->module_unlock_quirk(ddata); + + sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], value); + + if (ddata->module_lock_quirk) + ddata->module_lock_quirk(ddata); +} + #define SYSC_IDLE_MASK (SYSC_NR_IDLEMODES - 1) #define SYSC_CLOCACT_ICK 2 @@ -872,18 +920,47 @@ static int sysc_enable_module(struct device *dev) struct sysc *ddata; const struct sysc_regbits *regbits; u32 reg, idlemodes, best_mode; + int error; ddata = dev_get_drvdata(dev); + + /* + * Some modules like DSS reset automatically on idle. Enable optional + * reset clocks and wait for OCP softreset to complete. 
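For reference, the new sysc_wait_softreset() helper in this file bounds the wait by polling either the SYSSTATUS reset-done field (with the expected value flipped to 0 for SYSS_QUIRK_RESETDONE_INVERTED modules) or the SOFTRESET bit in SYSCONFIG, using readx_poll_timeout_atomic(). The standalone sketch below models only that bounded-poll shape; read_status(), the mask values and the iteration budget are invented for illustration.

#include <stdio.h>

/* Stand-in for a hardware status read; bit 0 models a RESETDONE flag. */
static unsigned int fake_status;
static unsigned int read_status(void) { return fake_status; }

/* Bounded poll: 0 when (reg & mask) == done within the budget, else -ETIMEDOUT. */
static int poll_timeout(unsigned int (*read_fn)(void), unsigned int mask,
                        unsigned int done, unsigned int max_iters)
{
        while (max_iters--) {
                if ((read_fn() & mask) == done)
                        return 0;
                /* a real implementation would udelay()/usleep() between reads */
        }
        return -110;                            /* -ETIMEDOUT */
}

int main(void)
{
        fake_status = 0x0;                      /* reset still in progress */
        printf("%d\n", poll_timeout(read_status, 0x1, 0x1, 16));
        fake_status = 0x1;                      /* RESETDONE set */
        printf("%d\n", poll_timeout(read_status, 0x1, 0x1, 16));
        return 0;
}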
+ */ + if (ddata->cfg.quirks & SYSC_QUIRK_OPT_CLKS_IN_RESET) { + error = sysc_enable_opt_clocks(ddata); + if (error) { + dev_err(ddata->dev, + "Optional clocks failed for enable: %i\n", + error); + return error; + } + } + error = sysc_wait_softreset(ddata); + if (error) + dev_warn(ddata->dev, "OCP softreset timed out\n"); + if (ddata->cfg.quirks & SYSC_QUIRK_OPT_CLKS_IN_RESET) + sysc_disable_opt_clocks(ddata); + + /* + * Some subsystem private interconnects, like DSS top level module, + * need only the automatic OCP softreset handling with no sysconfig + * register bits to configure. + */ if (ddata->offsets[SYSC_SYSCONFIG] == -ENODEV) return 0; regbits = ddata->cap->regbits; reg = sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]); - /* Set CLOCKACTIVITY, we only use it for ick */ + /* + * Set CLOCKACTIVITY, we only use it for ick. And we only configure it + * based on the SYSC_QUIRK_USE_CLOCKACT flag, not based on the hardware + * capabilities. See the old HWMOD_SET_DEFAULT_CLOCKACT flag. + */ if (regbits->clkact_shift >= 0 && - (ddata->cfg.quirks & SYSC_QUIRK_USE_CLOCKACT || - ddata->cfg.sysc_val & BIT(regbits->clkact_shift))) + (ddata->cfg.quirks & SYSC_QUIRK_USE_CLOCKACT)) reg |= SYSC_CLOCACT_ICK << regbits->clkact_shift; /* Set SIDLE mode */ @@ -909,7 +986,7 @@ static int sysc_enable_module(struct device *dev) reg &= ~(SYSC_IDLE_MASK << regbits->sidle_shift); reg |= best_mode << regbits->sidle_shift; - sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg); + sysc_write_sysconfig(ddata, reg); set_midle: /* Set MIDLE mode */ @@ -928,16 +1005,19 @@ set_midle: reg &= ~(SYSC_IDLE_MASK << regbits->midle_shift); reg |= best_mode << regbits->midle_shift; - sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg); + sysc_write_sysconfig(ddata, reg); set_autoidle: /* Autoidle bit must enabled separately if available */ if (regbits->autoidle_shift >= 0 && ddata->cfg.sysc_val & BIT(regbits->autoidle_shift)) { reg |= 1 << regbits->autoidle_shift; - sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg); + sysc_write_sysconfig(ddata, reg); } + /* Flush posted write */ + sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]); + if (ddata->module_enable_quirk) ddata->module_enable_quirk(ddata); @@ -993,7 +1073,7 @@ static int sysc_disable_module(struct device *dev) reg &= ~(SYSC_IDLE_MASK << regbits->midle_shift); reg |= best_mode << regbits->midle_shift; - sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg); + sysc_write_sysconfig(ddata, reg); set_sidle: /* Set SIDLE mode */ @@ -1016,7 +1096,10 @@ set_sidle: if (regbits->autoidle_shift >= 0 && ddata->cfg.sysc_val & BIT(regbits->autoidle_shift)) reg |= 1 << regbits->autoidle_shift; - sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg); + sysc_write_sysconfig(ddata, reg); + + /* Flush posted write */ + sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]); return 0; } @@ -1162,7 +1245,8 @@ static int __maybe_unused sysc_noirq_suspend(struct device *dev) ddata = dev_get_drvdata(dev); - if (ddata->cfg.quirks & SYSC_QUIRK_LEGACY_IDLE) + if (ddata->cfg.quirks & + (SYSC_QUIRK_LEGACY_IDLE | SYSC_QUIRK_NO_IDLE)) return 0; return pm_runtime_force_suspend(dev); @@ -1174,7 +1258,8 @@ static int __maybe_unused sysc_noirq_resume(struct device *dev) ddata = dev_get_drvdata(dev); - if (ddata->cfg.quirks & SYSC_QUIRK_LEGACY_IDLE) + if (ddata->cfg.quirks & + (SYSC_QUIRK_LEGACY_IDLE | SYSC_QUIRK_NO_IDLE)) return 0; return pm_runtime_force_resume(dev); @@ -1222,16 +1307,16 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = { SYSC_QUIRK_LEGACY_IDLE), 
SYSC_QUIRK("sham", 0, 0x100, 0x110, 0x114, 0x40000c03, 0xffffffff, SYSC_QUIRK_LEGACY_IDLE), - SYSC_QUIRK("smartreflex", 0, -1, 0x24, -1, 0x00000000, 0xffffffff, + SYSC_QUIRK("smartreflex", 0, -ENODEV, 0x24, -ENODEV, 0x00000000, 0xffffffff, SYSC_QUIRK_LEGACY_IDLE), - SYSC_QUIRK("smartreflex", 0, -1, 0x38, -1, 0x00000000, 0xffffffff, + SYSC_QUIRK("smartreflex", 0, -ENODEV, 0x38, -ENODEV, 0x00000000, 0xffffffff, SYSC_QUIRK_LEGACY_IDLE), SYSC_QUIRK("timer", 0, 0, 0x10, 0x14, 0x00000015, 0xffffffff, 0), /* Some timers on omap4 and later */ - SYSC_QUIRK("timer", 0, 0, 0x10, -1, 0x50002100, 0xffffffff, + SYSC_QUIRK("timer", 0, 0, 0x10, -ENODEV, 0x50002100, 0xffffffff, 0), - SYSC_QUIRK("timer", 0, 0, 0x10, -1, 0x4fff1301, 0xffff00ff, + SYSC_QUIRK("timer", 0, 0, 0x10, -ENODEV, 0x4fff1301, 0xffff00ff, 0), SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000046, 0xffffffff, SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE), @@ -1244,19 +1329,27 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = { SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE), /* Quirks that need to be set based on the module address */ - SYSC_QUIRK("mcpdm", 0x40132000, 0, 0x10, -1, 0x50000800, 0xffffffff, + SYSC_QUIRK("mcpdm", 0x40132000, 0, 0x10, -ENODEV, 0x50000800, 0xffffffff, SYSC_QUIRK_EXT_OPT_CLOCK | SYSC_QUIRK_NO_RESET_ON_INIT | SYSC_QUIRK_SWSUP_SIDLE), /* Quirks that need to be set based on detected module */ - SYSC_QUIRK("aess", 0, 0, 0x10, -1, 0x40000000, 0xffffffff, + SYSC_QUIRK("aess", 0, 0, 0x10, -ENODEV, 0x40000000, 0xffffffff, SYSC_MODULE_QUIRK_AESS), - SYSC_QUIRK("dcan", 0x48480000, 0x20, -1, -1, 0xa3170504, 0xffffffff, + SYSC_QUIRK("dcan", 0x48480000, 0x20, -ENODEV, -ENODEV, 0xa3170504, 0xffffffff, SYSC_QUIRK_CLKDM_NOAUTO), - SYSC_QUIRK("dwc3", 0x48880000, 0, 0x10, -1, 0x500a0200, 0xffffffff, + SYSC_QUIRK("dss", 0x4832a000, 0, 0x10, 0x14, 0x00000020, 0xffffffff, + SYSC_QUIRK_OPT_CLKS_IN_RESET), + SYSC_QUIRK("dss", 0x58000000, 0, -ENODEV, 0x14, 0x00000040, 0xffffffff, + SYSC_QUIRK_OPT_CLKS_IN_RESET), + SYSC_QUIRK("dss", 0x58000000, 0, -ENODEV, 0x14, 0x00000061, 0xffffffff, + SYSC_QUIRK_OPT_CLKS_IN_RESET), + SYSC_QUIRK("dwc3", 0x48880000, 0, 0x10, -ENODEV, 0x500a0200, 0xffffffff, SYSC_QUIRK_CLKDM_NOAUTO), - SYSC_QUIRK("dwc3", 0x488c0000, 0, 0x10, -1, 0x500a0200, 0xffffffff, + SYSC_QUIRK("dwc3", 0x488c0000, 0, 0x10, -ENODEV, 0x500a0200, 0xffffffff, SYSC_QUIRK_CLKDM_NOAUTO), + SYSC_QUIRK("hdmi", 0, 0, 0x10, -ENODEV, 0x50030200, 0xffffffff, + SYSC_QUIRK_OPT_CLKS_NEEDED), SYSC_QUIRK("hdq1w", 0, 0, 0x14, 0x18, 0x00000006, 0xffffffff, SYSC_MODULE_QUIRK_HDQ1W), SYSC_QUIRK("hdq1w", 0, 0, 0x14, 0x18, 0x0000000a, 0xffffffff, @@ -1269,12 +1362,22 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = { SYSC_MODULE_QUIRK_I2C), SYSC_QUIRK("i2c", 0, 0, 0x10, 0x90, 0x5040000a, 0xfffff0f0, SYSC_MODULE_QUIRK_I2C), - SYSC_QUIRK("gpu", 0x50000000, 0x14, -1, -1, 0x00010201, 0xffffffff, 0), - SYSC_QUIRK("gpu", 0x50000000, 0xfe00, 0xfe10, -1, 0x40000000 , 0xffffffff, + SYSC_QUIRK("gpu", 0x50000000, 0x14, -ENODEV, -ENODEV, 0x00010201, 0xffffffff, 0), + SYSC_QUIRK("gpu", 0x50000000, 0xfe00, 0xfe10, -ENODEV, 0x40000000 , 0xffffffff, SYSC_MODULE_QUIRK_SGX), + SYSC_QUIRK("rtc", 0, 0x74, 0x78, -ENODEV, 0x4eb01908, 0xffff00f0, + SYSC_MODULE_QUIRK_RTC_UNLOCK), + SYSC_QUIRK("tptc", 0, 0, 0x10, -ENODEV, 0x40006c00, 0xffffefff, + SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY), + SYSC_QUIRK("tptc", 0, 0, -ENODEV, -ENODEV, 0x40007c00, 0xffffffff, + SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY), + 
SYSC_QUIRK("usb_host_hs", 0, 0, 0x10, 0x14, 0x50700100, 0xffffffff, + SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY), + SYSC_QUIRK("usb_host_hs", 0, 0, 0x10, -ENODEV, 0x50700101, 0xffffffff, + SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY), SYSC_QUIRK("usb_otg_hs", 0, 0x400, 0x404, 0x408, 0x00000050, 0xffffffff, SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY), - SYSC_QUIRK("usb_otg_hs", 0, 0, 0x10, -1, 0x4ea2080d, 0xffffffff, + SYSC_QUIRK("usb_otg_hs", 0, 0, 0x10, -ENODEV, 0x4ea2080d, 0xffffffff, SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY), SYSC_QUIRK("wdt", 0, 0, 0x10, 0x14, 0x502a0500, 0xfffff0f0, SYSC_MODULE_QUIRK_WDT), @@ -1283,57 +1386,66 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = { SYSC_MODULE_QUIRK_WDT | SYSC_QUIRK_SWSUP_SIDLE), #ifdef DEBUG - SYSC_QUIRK("adc", 0, 0, 0x10, -1, 0x47300001, 0xffffffff, 0), - SYSC_QUIRK("atl", 0, 0, -1, -1, 0x0a070100, 0xffffffff, 0), - SYSC_QUIRK("cm", 0, 0, -1, -1, 0x40000301, 0xffffffff, 0), - SYSC_QUIRK("control", 0, 0, 0x10, -1, 0x40000900, 0xffffffff, 0), + SYSC_QUIRK("adc", 0, 0, 0x10, -ENODEV, 0x47300001, 0xffffffff, 0), + SYSC_QUIRK("atl", 0, 0, -ENODEV, -ENODEV, 0x0a070100, 0xffffffff, 0), + SYSC_QUIRK("cm", 0, 0, -ENODEV, -ENODEV, 0x40000301, 0xffffffff, 0), + SYSC_QUIRK("control", 0, 0, 0x10, -ENODEV, 0x40000900, 0xffffffff, 0), SYSC_QUIRK("cpgmac", 0, 0x1200, 0x1208, 0x1204, 0x4edb1902, 0xffff00f0, 0), - SYSC_QUIRK("dcan", 0, 0x20, -1, -1, 0xa3170504, 0xffffffff, 0), - SYSC_QUIRK("dcan", 0, 0x20, -1, -1, 0x4edb1902, 0xffffffff, 0), - SYSC_QUIRK("dmic", 0, 0, 0x10, -1, 0x50010000, 0xffffffff, 0), - SYSC_QUIRK("dwc3", 0, 0, 0x10, -1, 0x500a0200, 0xffffffff, 0), + SYSC_QUIRK("dcan", 0, 0x20, -ENODEV, -ENODEV, 0xa3170504, 0xffffffff, 0), + SYSC_QUIRK("dcan", 0, 0x20, -ENODEV, -ENODEV, 0x4edb1902, 0xffffffff, 0), + SYSC_QUIRK("dispc", 0x4832a400, 0, 0x10, 0x14, 0x00000030, 0xffffffff, 0), + SYSC_QUIRK("dispc", 0x58001000, 0, 0x10, 0x14, 0x00000040, 0xffffffff, 0), + SYSC_QUIRK("dispc", 0x58001000, 0, 0x10, 0x14, 0x00000051, 0xffffffff, 0), + SYSC_QUIRK("dmic", 0, 0, 0x10, -ENODEV, 0x50010000, 0xffffffff, 0), + SYSC_QUIRK("dsi", 0x58004000, 0, 0x10, 0x14, 0x00000030, 0xffffffff, 0), + SYSC_QUIRK("dsi", 0x58005000, 0, 0x10, 0x14, 0x00000030, 0xffffffff, 0), + SYSC_QUIRK("dsi", 0x58005000, 0, 0x10, 0x14, 0x00000040, 0xffffffff, 0), + SYSC_QUIRK("dsi", 0x58009000, 0, 0x10, 0x14, 0x00000040, 0xffffffff, 0), + SYSC_QUIRK("dwc3", 0, 0, 0x10, -ENODEV, 0x500a0200, 0xffffffff, 0), SYSC_QUIRK("d2d", 0x4a0b6000, 0, 0x10, 0x14, 0x00000010, 0xffffffff, 0), SYSC_QUIRK("d2d", 0x4a0cd000, 0, 0x10, 0x14, 0x00000010, 0xffffffff, 0), - SYSC_QUIRK("epwmss", 0, 0, 0x4, -1, 0x47400001, 0xffffffff, 0), - SYSC_QUIRK("gpu", 0, 0x1fc00, 0x1fc10, -1, 0, 0, 0), - SYSC_QUIRK("gpu", 0, 0xfe00, 0xfe10, -1, 0x40000000 , 0xffffffff, 0), + SYSC_QUIRK("epwmss", 0, 0, 0x4, -ENODEV, 0x47400001, 0xffffffff, 0), + SYSC_QUIRK("gpu", 0, 0x1fc00, 0x1fc10, -ENODEV, 0, 0, 0), + SYSC_QUIRK("gpu", 0, 0xfe00, 0xfe10, -ENODEV, 0x40000000 , 0xffffffff, 0), + SYSC_QUIRK("hdmi", 0, 0, 0x10, -ENODEV, 0x50031d00, 0xffffffff, 0), SYSC_QUIRK("hsi", 0, 0, 0x10, 0x14, 0x50043101, 0xffffffff, 0), - SYSC_QUIRK("iss", 0, 0, 0x10, -1, 0x40000101, 0xffffffff, 0), - SYSC_QUIRK("lcdc", 0, 0, 0x54, -1, 0x4f201000, 0xffffffff, 0), - SYSC_QUIRK("mcasp", 0, 0, 0x4, -1, 0x44306302, 0xffffffff, 0), - SYSC_QUIRK("mcasp", 0, 0, 0x4, -1, 0x44307b02, 0xffffffff, 0), - SYSC_QUIRK("mcbsp", 0, -1, 0x8c, -1, 0, 0, 0), - SYSC_QUIRK("mcspi", 0, 0, 0x10, -1, 0x40300a0b, 
0xffff00ff, 0), + SYSC_QUIRK("iss", 0, 0, 0x10, -ENODEV, 0x40000101, 0xffffffff, 0), + SYSC_QUIRK("lcdc", 0, 0, 0x54, -ENODEV, 0x4f201000, 0xffffffff, 0), + SYSC_QUIRK("mcasp", 0, 0, 0x4, -ENODEV, 0x44306302, 0xffffffff, 0), + SYSC_QUIRK("mcasp", 0, 0, 0x4, -ENODEV, 0x44307b02, 0xffffffff, 0), + SYSC_QUIRK("mcbsp", 0, -ENODEV, 0x8c, -ENODEV, 0, 0, 0), + SYSC_QUIRK("mcspi", 0, 0, 0x10, -ENODEV, 0x40300a0b, 0xffff00ff, 0), SYSC_QUIRK("mcspi", 0, 0, 0x110, 0x114, 0x40300a0b, 0xffffffff, 0), - SYSC_QUIRK("mailbox", 0, 0, 0x10, -1, 0x00000400, 0xffffffff, 0), - SYSC_QUIRK("m3", 0, 0, -1, -1, 0x5f580105, 0x0fff0f00, 0), + SYSC_QUIRK("mailbox", 0, 0, 0x10, -ENODEV, 0x00000400, 0xffffffff, 0), + SYSC_QUIRK("m3", 0, 0, -ENODEV, -ENODEV, 0x5f580105, 0x0fff0f00, 0), SYSC_QUIRK("ocp2scp", 0, 0, 0x10, 0x14, 0x50060005, 0xfffffff0, 0), - SYSC_QUIRK("ocp2scp", 0, 0, -1, -1, 0x50060007, 0xffffffff, 0), - SYSC_QUIRK("padconf", 0, 0, 0x10, -1, 0x4fff0800, 0xffffffff, 0), - SYSC_QUIRK("padconf", 0, 0, -1, -1, 0x40001100, 0xffffffff, 0), - SYSC_QUIRK("prcm", 0, 0, -1, -1, 0x40000100, 0xffffffff, 0), - SYSC_QUIRK("prcm", 0, 0, -1, -1, 0x00004102, 0xffffffff, 0), - SYSC_QUIRK("prcm", 0, 0, -1, -1, 0x40000400, 0xffffffff, 0), - SYSC_QUIRK("scm", 0, 0, 0x10, -1, 0x40000900, 0xffffffff, 0), - SYSC_QUIRK("scm", 0, 0, -1, -1, 0x4e8b0100, 0xffffffff, 0), - SYSC_QUIRK("scm", 0, 0, -1, -1, 0x4f000100, 0xffffffff, 0), - SYSC_QUIRK("scm", 0, 0, -1, -1, 0x40000900, 0xffffffff, 0), - SYSC_QUIRK("scrm", 0, 0, -1, -1, 0x00000010, 0xffffffff, 0), - SYSC_QUIRK("sdio", 0, 0, 0x10, -1, 0x40202301, 0xffff0ff0, 0), + SYSC_QUIRK("ocp2scp", 0, 0, -ENODEV, -ENODEV, 0x50060007, 0xffffffff, 0), + SYSC_QUIRK("padconf", 0, 0, 0x10, -ENODEV, 0x4fff0800, 0xffffffff, 0), + SYSC_QUIRK("padconf", 0, 0, -ENODEV, -ENODEV, 0x40001100, 0xffffffff, 0), + SYSC_QUIRK("prcm", 0, 0, -ENODEV, -ENODEV, 0x40000100, 0xffffffff, 0), + SYSC_QUIRK("prcm", 0, 0, -ENODEV, -ENODEV, 0x00004102, 0xffffffff, 0), + SYSC_QUIRK("prcm", 0, 0, -ENODEV, -ENODEV, 0x40000400, 0xffffffff, 0), + SYSC_QUIRK("rfbi", 0x4832a800, 0, 0x10, 0x14, 0x00000010, 0xffffffff, 0), + SYSC_QUIRK("rfbi", 0x58002000, 0, 0x10, 0x14, 0x00000010, 0xffffffff, 0), + SYSC_QUIRK("scm", 0, 0, 0x10, -ENODEV, 0x40000900, 0xffffffff, 0), + SYSC_QUIRK("scm", 0, 0, -ENODEV, -ENODEV, 0x4e8b0100, 0xffffffff, 0), + SYSC_QUIRK("scm", 0, 0, -ENODEV, -ENODEV, 0x4f000100, 0xffffffff, 0), + SYSC_QUIRK("scm", 0, 0, -ENODEV, -ENODEV, 0x40000900, 0xffffffff, 0), + SYSC_QUIRK("scrm", 0, 0, -ENODEV, -ENODEV, 0x00000010, 0xffffffff, 0), + SYSC_QUIRK("sdio", 0, 0, 0x10, -ENODEV, 0x40202301, 0xffff0ff0, 0), SYSC_QUIRK("sdio", 0, 0x2fc, 0x110, 0x114, 0x31010000, 0xffffffff, 0), SYSC_QUIRK("sdma", 0, 0, 0x2c, 0x28, 0x00010900, 0xffffffff, 0), - SYSC_QUIRK("slimbus", 0, 0, 0x10, -1, 0x40000902, 0xffffffff, 0), - SYSC_QUIRK("slimbus", 0, 0, 0x10, -1, 0x40002903, 0xffffffff, 0), - SYSC_QUIRK("spinlock", 0, 0, 0x10, -1, 0x50020000, 0xffffffff, 0), - SYSC_QUIRK("rng", 0, 0x1fe0, 0x1fe4, -1, 0x00000020, 0xffffffff, 0), - SYSC_QUIRK("rtc", 0, 0x74, 0x78, -1, 0x4eb01908, 0xffff00f0, 0), - SYSC_QUIRK("timer32k", 0, 0, 0x4, -1, 0x00000060, 0xffffffff, 0), + SYSC_QUIRK("slimbus", 0, 0, 0x10, -ENODEV, 0x40000902, 0xffffffff, 0), + SYSC_QUIRK("slimbus", 0, 0, 0x10, -ENODEV, 0x40002903, 0xffffffff, 0), + SYSC_QUIRK("spinlock", 0, 0, 0x10, -ENODEV, 0x50020000, 0xffffffff, 0), + SYSC_QUIRK("rng", 0, 0x1fe0, 0x1fe4, -ENODEV, 0x00000020, 0xffffffff, 0), + SYSC_QUIRK("timer32k", 0, 0, 0x4, -ENODEV, 0x00000060, 0xffffffff, 0), + 
SYSC_QUIRK("tpcc", 0, 0, -ENODEV, -ENODEV, 0x40014c00, 0xffffffff, 0), SYSC_QUIRK("usbhstll", 0, 0, 0x10, 0x14, 0x00000004, 0xffffffff, 0), SYSC_QUIRK("usbhstll", 0, 0, 0x10, 0x14, 0x00000008, 0xffffffff, 0), - SYSC_QUIRK("usb_host_hs", 0, 0, 0x10, 0x14, 0x50700100, 0xffffffff, 0), - SYSC_QUIRK("usb_host_hs", 0, 0, 0x10, -1, 0x50700101, 0xffffffff, 0), - SYSC_QUIRK("vfpe", 0, 0, 0x104, -1, 0x4d001200, 0xffffffff, 0), + SYSC_QUIRK("venc", 0x58003000, 0, -ENODEV, -ENODEV, 0x00000002, 0xffffffff, 0), + SYSC_QUIRK("vfpe", 0, 0, 0x104, -ENODEV, 0x4d001200, 0xffffffff, 0), #endif }; @@ -1355,16 +1467,13 @@ static void sysc_init_early_quirks(struct sysc *ddata) if (q->base != ddata->module_pa) continue; - if (q->rev_offset >= 0 && - q->rev_offset != ddata->offsets[SYSC_REVISION]) + if (q->rev_offset != ddata->offsets[SYSC_REVISION]) continue; - if (q->sysc_offset >= 0 && - q->sysc_offset != ddata->offsets[SYSC_SYSCONFIG]) + if (q->sysc_offset != ddata->offsets[SYSC_SYSCONFIG]) continue; - if (q->syss_offset >= 0 && - q->syss_offset != ddata->offsets[SYSC_SYSSTATUS]) + if (q->syss_offset != ddata->offsets[SYSC_SYSSTATUS]) continue; ddata->name = q->name; @@ -1384,16 +1493,13 @@ static void sysc_init_revision_quirks(struct sysc *ddata) if (q->base && q->base != ddata->module_pa) continue; - if (q->rev_offset >= 0 && - q->rev_offset != ddata->offsets[SYSC_REVISION]) + if (q->rev_offset != ddata->offsets[SYSC_REVISION]) continue; - if (q->sysc_offset >= 0 && - q->sysc_offset != ddata->offsets[SYSC_SYSCONFIG]) + if (q->sysc_offset != ddata->offsets[SYSC_SYSCONFIG]) continue; - if (q->syss_offset >= 0 && - q->syss_offset != ddata->offsets[SYSC_SYSSTATUS]) + if (q->syss_offset != ddata->offsets[SYSC_SYSSTATUS]) continue; if (q->revision == ddata->revision || @@ -1424,7 +1530,7 @@ static void sysc_module_enable_quirk_aess(struct sysc *ddata) sysc_write(ddata, offset, 1); } -/* I2C needs extra enable bit toggling for reset */ +/* I2C needs to be disabled for reset */ static void sysc_clk_quirk_i2c(struct sysc *ddata, bool enable) { int offset; @@ -1445,14 +1551,48 @@ static void sysc_clk_quirk_i2c(struct sysc *ddata, bool enable) sysc_write(ddata, offset, val); } -static void sysc_clk_enable_quirk_i2c(struct sysc *ddata) +static void sysc_pre_reset_quirk_i2c(struct sysc *ddata) +{ + sysc_clk_quirk_i2c(ddata, false); +} + +static void sysc_post_reset_quirk_i2c(struct sysc *ddata) { sysc_clk_quirk_i2c(ddata, true); } -static void sysc_clk_disable_quirk_i2c(struct sysc *ddata) +/* RTC on am3 and 4 needs to be unlocked and locked for sysconfig */ +static void sysc_quirk_rtc(struct sysc *ddata, bool lock) { - sysc_clk_quirk_i2c(ddata, false); + u32 val, kick0_val = 0, kick1_val = 0; + unsigned long flags; + int error; + + if (!lock) { + kick0_val = 0x83e70b13; + kick1_val = 0x95a4f1e0; + } + + local_irq_save(flags); + /* RTC_STATUS BUSY bit may stay active for 1/32768 seconds (~30 usec) */ + error = readl_poll_timeout_atomic(ddata->module_va + 0x44, val, + !(val & BIT(0)), 100, 50); + if (error) + dev_warn(ddata->dev, "rtc busy timeout\n"); + /* Now we have ~15 microseconds to read/write various registers */ + sysc_write(ddata, 0x6c, kick0_val); + sysc_write(ddata, 0x70, kick1_val); + local_irq_restore(flags); +} + +static void sysc_module_unlock_quirk_rtc(struct sysc *ddata) +{ + sysc_quirk_rtc(ddata, false); +} + +static void sysc_module_lock_quirk_rtc(struct sysc *ddata) +{ + sysc_quirk_rtc(ddata, true); } /* 36xx SGX needs a quirk for to bypass OCP IPG interrupt logic */ @@ -1494,14 +1634,14 @@ static void 
sysc_init_module_quirks(struct sysc *ddata) return; if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_HDQ1W) { - ddata->clk_disable_quirk = sysc_pre_reset_quirk_hdq1w; + ddata->pre_reset_quirk = sysc_pre_reset_quirk_hdq1w; return; } if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_I2C) { - ddata->clk_enable_quirk = sysc_clk_enable_quirk_i2c; - ddata->clk_disable_quirk = sysc_clk_disable_quirk_i2c; + ddata->pre_reset_quirk = sysc_pre_reset_quirk_i2c; + ddata->post_reset_quirk = sysc_post_reset_quirk_i2c; return; } @@ -1509,6 +1649,13 @@ static void sysc_init_module_quirks(struct sysc *ddata) if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_AESS) ddata->module_enable_quirk = sysc_module_enable_quirk_aess; + if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_RTC_UNLOCK) { + ddata->module_unlock_quirk = sysc_module_unlock_quirk_rtc; + ddata->module_lock_quirk = sysc_module_lock_quirk_rtc; + + return; + } + if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_SGX) ddata->module_enable_quirk = sysc_module_enable_quirk_sgx; @@ -1602,11 +1749,10 @@ static int sysc_rstctrl_reset_deassert(struct sysc *ddata, bool reset) */ static int sysc_reset(struct sysc *ddata) { - int sysc_offset, syss_offset, sysc_val, rstval, error = 0; - u32 sysc_mask, syss_done; + int sysc_offset, sysc_val, error; + u32 sysc_mask; sysc_offset = ddata->offsets[SYSC_SYSCONFIG]; - syss_offset = ddata->offsets[SYSC_SYSSTATUS]; if (ddata->legacy_mode || sysc_offset < 0 || ddata->cap->regbits->srst_shift < 0 || @@ -1615,13 +1761,8 @@ static int sysc_reset(struct sysc *ddata) sysc_mask = BIT(ddata->cap->regbits->srst_shift); - if (ddata->cfg.quirks & SYSS_QUIRK_RESETDONE_INVERTED) - syss_done = 0; - else - syss_done = ddata->cfg.syss_mask; - - if (ddata->clk_disable_quirk) - ddata->clk_disable_quirk(ddata); + if (ddata->pre_reset_quirk) + ddata->pre_reset_quirk(ddata); sysc_val = sysc_read_sysconfig(ddata); sysc_val |= sysc_mask; @@ -1631,21 +1772,12 @@ static int sysc_reset(struct sysc *ddata) usleep_range(ddata->cfg.srst_udelay, ddata->cfg.srst_udelay * 2); - if (ddata->clk_enable_quirk) - ddata->clk_enable_quirk(ddata); + if (ddata->post_reset_quirk) + ddata->post_reset_quirk(ddata); - /* Poll on reset status */ - if (syss_offset >= 0) { - error = readx_poll_timeout(sysc_read_sysstatus, ddata, rstval, - (rstval & ddata->cfg.syss_mask) == - syss_done, - 100, MAX_MODULE_SOFTRESET_WAIT); - - } else if (ddata->cfg.quirks & SYSC_QUIRK_RESET_STATUS) { - error = readx_poll_timeout(sysc_read_sysconfig, ddata, rstval, - !(rstval & sysc_mask), - 100, MAX_MODULE_SOFTRESET_WAIT); - } + error = sysc_wait_softreset(ddata); + if (error) + dev_warn(ddata->dev, "OCP softreset timed out\n"); if (ddata->reset_done_quirk) ddata->reset_done_quirk(ddata); diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index b161bdf60000..0941d38b2d32 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c @@ -304,8 +304,10 @@ static int intel_gtt_setup_scratch_page(void) if (intel_private.needs_dmar) { dma_addr = pci_map_page(intel_private.pcidev, page, 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); - if (pci_dma_mapping_error(intel_private.pcidev, dma_addr)) + if (pci_dma_mapping_error(intel_private.pcidev, dma_addr)) { + __free_page(page); return -EINVAL; + } intel_private.scratch_page_dma = dma_addr; } else diff --git a/drivers/char/hw_random/ks-sa-rng.c b/drivers/char/hw_random/ks-sa-rng.c index a67430010aa6..5c7d3dfcfdd0 100644 --- a/drivers/char/hw_random/ks-sa-rng.c +++ b/drivers/char/hw_random/ks-sa-rng.c @@ -208,6 +208,7 @@ static int ks_sa_rng_probe(struct 
platform_device *pdev) ret = pm_runtime_get_sync(dev); if (ret < 0) { dev_err(dev, "Failed to enable SA power-domain\n"); + pm_runtime_put_noidle(dev); pm_runtime_disable(dev); return ret; } diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c index 90f5292e2051..ac656a6d5daf 100644 --- a/drivers/char/ipmi/ipmi_msghandler.c +++ b/drivers/char/ipmi/ipmi_msghandler.c @@ -33,6 +33,7 @@ #include #include #include +#include #define IPMI_DRIVER_VERSION "39.2" @@ -1170,7 +1171,7 @@ static void free_user_work(struct work_struct *work) remove_work); cleanup_srcu_struct(&user->release_barrier); - kfree(user); + vfree(user); } int ipmi_create_user(unsigned int if_num, @@ -1202,7 +1203,7 @@ int ipmi_create_user(unsigned int if_num, if (rv) return rv; - new_user = kmalloc(sizeof(*new_user), GFP_KERNEL); + new_user = vzalloc(sizeof(*new_user)); if (!new_user) return -ENOMEM; @@ -1249,7 +1250,7 @@ int ipmi_create_user(unsigned int if_num, out_kfree: srcu_read_unlock(&ipmi_interfaces_srcu, index); - kfree(new_user); + vfree(new_user); return rv; } EXPORT_SYMBOL(ipmi_create_user); diff --git a/drivers/char/mem.c b/drivers/char/mem.c index 43dd0891ca1e..6b56bff9b68c 100644 --- a/drivers/char/mem.c +++ b/drivers/char/mem.c @@ -31,11 +31,15 @@ #include #include #include +#include +#include +#include #ifdef CONFIG_IA64 # include #endif +#define DEVMEM_MINOR 1 #define DEVPORT_MINOR 4 static inline unsigned long size_inside_page(unsigned long start, @@ -805,12 +809,65 @@ static loff_t memory_lseek(struct file *file, loff_t offset, int orig) return ret; } +static struct inode *devmem_inode; + +#ifdef CONFIG_IO_STRICT_DEVMEM +void revoke_devmem(struct resource *res) +{ + /* pairs with smp_store_release() in devmem_init_inode() */ + struct inode *inode = smp_load_acquire(&devmem_inode); + + /* + * Check that the initialization has completed. Losing the race + * is ok because it means drivers are claiming resources before + * the fs_initcall level of init and prevent /dev/mem from + * establishing mappings. + */ + if (!inode) + return; + + /* + * The expectation is that the driver has successfully marked + * the resource busy by this point, so devmem_is_allowed() + * should start returning false, however for performance this + * does not iterate the entire resource range. + */ + if (devmem_is_allowed(PHYS_PFN(res->start)) && + devmem_is_allowed(PHYS_PFN(res->end))) { + /* + * *cringe* iomem=relaxed says "go ahead, what's the + * worst that can happen?" + */ + return; + } + + unmap_mapping_range(inode->i_mapping, res->start, resource_size(res), 1); +} +#endif + static int open_port(struct inode *inode, struct file *filp) { + int rc; + if (!capable(CAP_SYS_RAWIO)) return -EPERM; - return security_locked_down(LOCKDOWN_DEV_MEM); + rc = security_locked_down(LOCKDOWN_DEV_MEM); + if (rc) + return rc; + + if (iminor(inode) != DEVMEM_MINOR) + return 0; + + /* + * Use a unified address space to have a single point to manage + * revocations when drivers want to take over a /dev/mem mapped + * range. 
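The revoke_devmem()/devmem_init_inode() pair above relies on smp_store_release() to publish devmem_inode only after the inode is fully set up, with smp_load_acquire() on the reader side, so a NULL result simply means initialization has not finished yet. The userspace sketch below shows the same publish/consume pattern with C11 atomics; inode_stub and its field are invented for the example.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct inode_stub { int id; };

/* Shared slot; stays NULL until initialization has completed. */
static _Atomic(struct inode_stub *) devmem_slot;

/* Publisher: fully construct the object, then release-store the pointer. */
static int init_side(void)
{
        struct inode_stub *inode = malloc(sizeof(*inode));

        if (!inode)
                return -12;                     /* -ENOMEM */
        inode->id = 42;                         /* initialization done ...       */
        atomic_store_explicit(&devmem_slot, inode,
                              memory_order_release);   /* ... then publish */
        return 0;
}

/* Consumer: acquire-load; NULL just means "not initialized yet". */
static void revoke_side(void)
{
        struct inode_stub *inode =
                atomic_load_explicit(&devmem_slot, memory_order_acquire);

        if (!inode) {
                puts("not initialized yet, nothing to revoke");
                return;
        }
        printf("revoking mappings on inode %d\n", inode->id);
}

int main(void)
{
        revoke_side();          /* before publication: safe no-op */
        init_side();
        revoke_side();          /* after publication: sees a fully built object */
        return 0;
}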
+ */ + inode->i_mapping = devmem_inode->i_mapping; + filp->f_mapping = inode->i_mapping; + + return 0; } #define zero_lseek null_lseek @@ -885,7 +942,7 @@ static const struct memdev { fmode_t fmode; } devlist[] = { #ifdef CONFIG_DEVMEM - [1] = { "mem", 0, &mem_fops, FMODE_UNSIGNED_OFFSET }, + [DEVMEM_MINOR] = { "mem", 0, &mem_fops, FMODE_UNSIGNED_OFFSET }, #endif #ifdef CONFIG_DEVKMEM [2] = { "kmem", 0, &kmem_fops, FMODE_UNSIGNED_OFFSET }, @@ -939,6 +996,48 @@ static char *mem_devnode(struct device *dev, umode_t *mode) static struct class *mem_class; +static int devmem_fs_init_fs_context(struct fs_context *fc) +{ + return init_pseudo(fc, DEVMEM_MAGIC) ? 0 : -ENOMEM; +} + +static struct file_system_type devmem_fs_type = { + .name = "devmem", + .owner = THIS_MODULE, + .init_fs_context = devmem_fs_init_fs_context, + .kill_sb = kill_anon_super, +}; + +static int devmem_init_inode(void) +{ + static struct vfsmount *devmem_vfs_mount; + static int devmem_fs_cnt; + struct inode *inode; + int rc; + + rc = simple_pin_fs(&devmem_fs_type, &devmem_vfs_mount, &devmem_fs_cnt); + if (rc < 0) { + pr_err("Cannot mount /dev/mem pseudo filesystem: %d\n", rc); + return rc; + } + + inode = alloc_anon_inode(devmem_vfs_mount->mnt_sb); + if (IS_ERR(inode)) { + rc = PTR_ERR(inode); + pr_err("Cannot allocate inode for /dev/mem: %d\n", rc); + simple_release_fs(&devmem_vfs_mount, &devmem_fs_cnt); + return rc; + } + + /* + * Publish /dev/mem initialized. + * Pairs with smp_load_acquire() in revoke_devmem(). + */ + smp_store_release(&devmem_inode, inode); + + return 0; +} + static int __init chr_dev_init(void) { int minor; @@ -960,6 +1059,8 @@ static int __init chr_dev_init(void) */ if ((minor == DEVPORT_MINOR) && !arch_has_dev_port()) continue; + if ((minor == DEVMEM_MINOR) && devmem_init_inode() != 0) + continue; device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor), NULL, devlist[minor].name); diff --git a/drivers/char/random.c b/drivers/char/random.c index 8ff28c14af7e..e877c20e0ee0 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -1330,6 +1330,7 @@ void add_interrupt_randomness(int irq, int irq_flags) fast_mix(fast_pool); add_interrupt_bench(cycles); + this_cpu_add(net_rand_state.s1, fast_pool->pool[cycles & 3]); if (unlikely(crng_init == 0)) { if ((fast_pool->count >= 64) && diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c index 58073836b555..1838039b0333 100644 --- a/drivers/char/tpm/tpm-chip.c +++ b/drivers/char/tpm/tpm-chip.c @@ -386,13 +386,8 @@ struct tpm_chip *tpm_chip_alloc(struct device *pdev, chip->cdev.owner = THIS_MODULE; chip->cdevs.owner = THIS_MODULE; - chip->work_space.context_buf = kzalloc(PAGE_SIZE, GFP_KERNEL); - if (!chip->work_space.context_buf) { - rc = -ENOMEM; - goto out; - } - chip->work_space.session_buf = kzalloc(PAGE_SIZE, GFP_KERNEL); - if (!chip->work_space.session_buf) { + rc = tpm2_init_space(&chip->work_space, TPM2_SPACE_BUFFER_SIZE); + if (rc) { rc = -ENOMEM; goto out; } diff --git a/drivers/char/tpm/tpm-dev-common.c b/drivers/char/tpm/tpm-dev-common.c index 87f449340202..1784530b8387 100644 --- a/drivers/char/tpm/tpm-dev-common.c +++ b/drivers/char/tpm/tpm-dev-common.c @@ -189,15 +189,6 @@ ssize_t tpm_common_write(struct file *file, const char __user *buf, goto out; } - /* atomic tpm command send and result receive. We only hold the ops - * lock during this period so that the tpm can be unregistered even if - * the char dev is held open. 
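The tpm_common_write() change in this hunk narrows the ops-reference scope: the O_NONBLOCK path now queues the work and returns without taking the reference (the queued worker handles its own reference later, not shown in this hunk), while the synchronous path takes it only around the transmit. A rough standalone sketch of that dispatch shape, with invented try_get_ops()/enqueue_async() stubs:

#include <stdbool.h>
#include <stdio.h>

static bool ops_held;

static bool try_get_ops(void) { ops_held = true; return true; }
static void put_ops(void)     { ops_held = false; }

static void enqueue_async(void)
{
        /* The worker that eventually runs takes and drops its own
         * reference; nothing is held across the enqueue itself. */
        printf("queued, ops held now? %s\n", ops_held ? "yes" : "no");
}

static int common_write(bool nonblock)
{
        if (nonblock) {
                enqueue_async();                /* early return, no reference taken */
                return 0;
        }

        if (!try_get_ops())
                return -32;                     /* -EPIPE */

        printf("transmit, ops held? %s\n", ops_held ? "yes" : "no");
        put_ops();                              /* reference scope ends with the I/O */
        return 0;
}

int main(void)
{
        common_write(true);
        common_write(false);
        return 0;
}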
- */ - if (tpm_try_get_ops(priv->chip)) { - ret = -EPIPE; - goto out; - } - priv->response_length = 0; priv->response_read = false; *off = 0; @@ -211,11 +202,19 @@ ssize_t tpm_common_write(struct file *file, const char __user *buf, if (file->f_flags & O_NONBLOCK) { priv->command_enqueued = true; queue_work(tpm_dev_wq, &priv->async_work); - tpm_put_ops(priv->chip); mutex_unlock(&priv->buffer_mutex); return size; } + /* atomic tpm command send and result receive. We only hold the ops + * lock during this period so that the tpm can be unregistered even if + * the char dev is held open. + */ + if (tpm_try_get_ops(priv->chip)) { + ret = -EPIPE; + goto out; + } + ret = tpm_dev_transmit(priv->chip, priv->space, priv->data_buffer, sizeof(priv->data_buffer)); tpm_put_ops(priv->chip); diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h index 218cb496222a..37f010421a36 100644 --- a/drivers/char/tpm/tpm.h +++ b/drivers/char/tpm/tpm.h @@ -177,6 +177,9 @@ struct tpm_header { #define TPM_TAG_RQU_COMMAND 193 +/* TPM2 specific constants. */ +#define TPM2_SPACE_BUFFER_SIZE 16384 /* 16 kB */ + struct stclear_flags_t { __be16 tag; u8 deactivated; @@ -456,7 +459,7 @@ void tpm2_shutdown(struct tpm_chip *chip, u16 shutdown_type); unsigned long tpm2_calc_ordinal_duration(struct tpm_chip *chip, u32 ordinal); int tpm2_probe(struct tpm_chip *chip); int tpm2_find_cc(struct tpm_chip *chip, u32 cc); -int tpm2_init_space(struct tpm_space *space); +int tpm2_init_space(struct tpm_space *space, unsigned int buf_size); void tpm2_del_space(struct tpm_chip *chip, struct tpm_space *space); void tpm2_flush_space(struct tpm_chip *chip); int tpm2_prepare_space(struct tpm_chip *chip, struct tpm_space *space, u8 *cmd, diff --git a/drivers/char/tpm/tpm2-space.c b/drivers/char/tpm/tpm2-space.c index 982d341d8837..784b8b3cb903 100644 --- a/drivers/char/tpm/tpm2-space.c +++ b/drivers/char/tpm/tpm2-space.c @@ -38,18 +38,21 @@ static void tpm2_flush_sessions(struct tpm_chip *chip, struct tpm_space *space) } } -int tpm2_init_space(struct tpm_space *space) +int tpm2_init_space(struct tpm_space *space, unsigned int buf_size) { - space->context_buf = kzalloc(PAGE_SIZE, GFP_KERNEL); + space->context_buf = kzalloc(buf_size, GFP_KERNEL); if (!space->context_buf) return -ENOMEM; - space->session_buf = kzalloc(PAGE_SIZE, GFP_KERNEL); + space->session_buf = kzalloc(buf_size, GFP_KERNEL); if (space->session_buf == NULL) { kfree(space->context_buf); + /* Prevent caller getting a dangling pointer. 
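tpm2_init_space() now also clears context_buf after freeing it when the second allocation fails, so the caller can never dereference or double-free a stale pointer. A small sketch of that allocate-both-or-neither pattern; space_stub and the helper names are illustrative, not the TPM code.

#include <stdio.h>
#include <stdlib.h>

struct space_stub {
        unsigned char *context_buf;
        unsigned char *session_buf;
        size_t buf_size;
};

/* Allocate both buffers or neither; on partial failure free what was
 * allocated and reset the pointer so no dangling reference survives. */
static int space_init(struct space_stub *s, size_t buf_size)
{
        s->context_buf = calloc(1, buf_size);
        if (!s->context_buf)
                return -12;                     /* -ENOMEM */

        s->session_buf = calloc(1, buf_size);
        if (!s->session_buf) {
                free(s->context_buf);
                s->context_buf = NULL;          /* no dangling pointer left behind */
                return -12;
        }

        s->buf_size = buf_size;
        return 0;
}

static void space_free(struct space_stub *s)
{
        /* Safe whether or not space_init() succeeded. */
        free(s->context_buf);
        free(s->session_buf);
        s->context_buf = s->session_buf = NULL;
        s->buf_size = 0;
}

int main(void)
{
        struct space_stub s = { 0 };

        printf("init: %d\n", space_init(&s, 16384));
        space_free(&s);
        return 0;
}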
*/ + space->context_buf = NULL; return -ENOMEM; } + space->buf_size = buf_size; return 0; } @@ -311,8 +314,10 @@ int tpm2_prepare_space(struct tpm_chip *chip, struct tpm_space *space, u8 *cmd, sizeof(space->context_tbl)); memcpy(&chip->work_space.session_tbl, &space->session_tbl, sizeof(space->session_tbl)); - memcpy(chip->work_space.context_buf, space->context_buf, PAGE_SIZE); - memcpy(chip->work_space.session_buf, space->session_buf, PAGE_SIZE); + memcpy(chip->work_space.context_buf, space->context_buf, + space->buf_size); + memcpy(chip->work_space.session_buf, space->session_buf, + space->buf_size); rc = tpm2_load_space(chip); if (rc) { @@ -492,7 +497,7 @@ static int tpm2_save_space(struct tpm_chip *chip) continue; rc = tpm2_save_context(chip, space->context_tbl[i], - space->context_buf, PAGE_SIZE, + space->context_buf, space->buf_size, &offset); if (rc == -ENOENT) { space->context_tbl[i] = 0; @@ -509,9 +514,8 @@ static int tpm2_save_space(struct tpm_chip *chip) continue; rc = tpm2_save_context(chip, space->session_tbl[i], - space->session_buf, PAGE_SIZE, + space->session_buf, space->buf_size, &offset); - if (rc == -ENOENT) { /* handle error saving session, just forget it */ space->session_tbl[i] = 0; @@ -557,8 +561,10 @@ int tpm2_commit_space(struct tpm_chip *chip, struct tpm_space *space, sizeof(space->context_tbl)); memcpy(&space->session_tbl, &chip->work_space.session_tbl, sizeof(space->session_tbl)); - memcpy(space->context_buf, chip->work_space.context_buf, PAGE_SIZE); - memcpy(space->session_buf, chip->work_space.session_buf, PAGE_SIZE); + memcpy(space->context_buf, chip->work_space.context_buf, + space->buf_size); + memcpy(space->session_buf, chip->work_space.session_buf, + space->buf_size); return 0; out: diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c index bdcf8f25cd0d..63f6bed78d89 100644 --- a/drivers/char/tpm/tpm_tis_core.c +++ b/drivers/char/tpm/tpm_tis_core.c @@ -1006,7 +1006,7 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq, return 0; out_err: - if ((chip->ops != NULL) && (chip->ops->clk_enable != NULL)) + if (chip->ops->clk_enable != NULL) chip->ops->clk_enable(chip, false); tpm_tis_remove(chip); diff --git a/drivers/char/tpm/tpmrm-dev.c b/drivers/char/tpm/tpmrm-dev.c index 7a0a7051a06f..eef0fb06ea83 100644 --- a/drivers/char/tpm/tpmrm-dev.c +++ b/drivers/char/tpm/tpmrm-dev.c @@ -21,7 +21,7 @@ static int tpmrm_open(struct inode *inode, struct file *file) if (priv == NULL) return -ENOMEM; - rc = tpm2_init_space(&priv->space); + rc = tpm2_init_space(&priv->space, TPM2_SPACE_BUFFER_SIZE); if (rc) { kfree(priv); return -ENOMEM; diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index 3259426f01dc..9ebce2c12c43 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c @@ -2118,6 +2118,7 @@ static struct virtio_device_id id_table[] = { { VIRTIO_ID_CONSOLE, VIRTIO_DEV_ANY_ID }, { 0 }, }; +MODULE_DEVICE_TABLE(virtio, id_table); static unsigned int features[] = { VIRTIO_CONSOLE_F_SIZE, @@ -2130,6 +2131,7 @@ static struct virtio_device_id rproc_serial_id_table[] = { #endif { 0 }, }; +MODULE_DEVICE_TABLE(virtio, rproc_serial_id_table); static unsigned int rproc_serial_features[] = { }; @@ -2282,6 +2284,5 @@ static void __exit fini(void) module_init(init); module_exit(fini); -MODULE_DEVICE_TABLE(virtio, id_table); MODULE_DESCRIPTION("Virtio console driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/clk/actions/owl-s500.c b/drivers/clk/actions/owl-s500.c index 
e2007ac4d235..0eb83a0b70bc 100644 --- a/drivers/clk/actions/owl-s500.c +++ b/drivers/clk/actions/owl-s500.c @@ -183,7 +183,7 @@ static OWL_GATE(timer_clk, "timer_clk", "hosc", CMU_DEVCLKEN1, 27, 0, 0); static OWL_GATE(hdmi_clk, "hdmi_clk", "hosc", CMU_DEVCLKEN1, 3, 0, 0); /* divider clocks */ -static OWL_DIVIDER(h_clk, "h_clk", "ahbprevdiv_clk", CMU_BUSCLK1, 12, 2, NULL, 0, 0); +static OWL_DIVIDER(h_clk, "h_clk", "ahbprediv_clk", CMU_BUSCLK1, 12, 2, NULL, 0, 0); static OWL_DIVIDER(rmii_ref_clk, "rmii_ref_clk", "ethernet_pll_clk", CMU_ETHERNETPLL, 1, 1, rmii_ref_div_table, 0, 0); /* factor clocks */ diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c index 802e488fd3c3..45420b514149 100644 --- a/drivers/clk/bcm/clk-bcm2835.c +++ b/drivers/clk/bcm/clk-bcm2835.c @@ -314,6 +314,7 @@ struct bcm2835_cprman { struct device *dev; void __iomem *regs; spinlock_t regs_lock; /* spinlock for all clocks */ + unsigned int soc; /* * Real names of cprman clock parents looked up through @@ -525,6 +526,20 @@ static int bcm2835_pll_is_on(struct clk_hw *hw) A2W_PLL_CTRL_PRST_DISABLE; } +static u32 bcm2835_pll_get_prediv_mask(struct bcm2835_cprman *cprman, + const struct bcm2835_pll_data *data) +{ + /* + * On BCM2711 there isn't a pre-divisor available in the PLL feedback + * loop. Bits 13:14 of ANA1 (PLLA,PLLB,PLLC,PLLD) have been re-purposed + * for to for VCO RANGE bits. + */ + if (cprman->soc & SOC_BCM2711) + return 0; + + return data->ana->fb_prediv_mask; +} + static void bcm2835_pll_choose_ndiv_and_fdiv(unsigned long rate, unsigned long parent_rate, u32 *ndiv, u32 *fdiv) @@ -582,7 +597,7 @@ static unsigned long bcm2835_pll_get_rate(struct clk_hw *hw, ndiv = (a2wctrl & A2W_PLL_CTRL_NDIV_MASK) >> A2W_PLL_CTRL_NDIV_SHIFT; pdiv = (a2wctrl & A2W_PLL_CTRL_PDIV_MASK) >> A2W_PLL_CTRL_PDIV_SHIFT; using_prediv = cprman_read(cprman, data->ana_reg_base + 4) & - data->ana->fb_prediv_mask; + bcm2835_pll_get_prediv_mask(cprman, data); if (using_prediv) { ndiv *= 2; @@ -665,6 +680,7 @@ static int bcm2835_pll_set_rate(struct clk_hw *hw, struct bcm2835_pll *pll = container_of(hw, struct bcm2835_pll, hw); struct bcm2835_cprman *cprman = pll->cprman; const struct bcm2835_pll_data *data = pll->data; + u32 prediv_mask = bcm2835_pll_get_prediv_mask(cprman, data); bool was_using_prediv, use_fb_prediv, do_ana_setup_first; u32 ndiv, fdiv, a2w_ctl; u32 ana[4]; @@ -682,7 +698,7 @@ static int bcm2835_pll_set_rate(struct clk_hw *hw, for (i = 3; i >= 0; i--) ana[i] = cprman_read(cprman, data->ana_reg_base + i * 4); - was_using_prediv = ana[1] & data->ana->fb_prediv_mask; + was_using_prediv = ana[1] & prediv_mask; ana[0] &= ~data->ana->mask0; ana[0] |= data->ana->set0; @@ -692,10 +708,10 @@ static int bcm2835_pll_set_rate(struct clk_hw *hw, ana[3] |= data->ana->set3; if (was_using_prediv && !use_fb_prediv) { - ana[1] &= ~data->ana->fb_prediv_mask; + ana[1] &= ~prediv_mask; do_ana_setup_first = true; } else if (!was_using_prediv && use_fb_prediv) { - ana[1] |= data->ana->fb_prediv_mask; + ana[1] |= prediv_mask; do_ana_setup_first = false; } else { do_ana_setup_first = true; @@ -1448,13 +1464,13 @@ static struct clk_hw *bcm2835_register_clock(struct bcm2835_cprman *cprman, return &clock->hw; } -static struct clk *bcm2835_register_gate(struct bcm2835_cprman *cprman, +static struct clk_hw *bcm2835_register_gate(struct bcm2835_cprman *cprman, const struct bcm2835_gate_data *data) { - return clk_register_gate(cprman->dev, data->name, data->parent, - CLK_IGNORE_UNUSED | CLK_SET_RATE_GATE, - cprman->regs + data->ctl_reg, 
- CM_GATE_BIT, 0, &cprman->regs_lock); + return clk_hw_register_gate(cprman->dev, data->name, data->parent, + CLK_IGNORE_UNUSED | CLK_SET_RATE_GATE, + cprman->regs + data->ctl_reg, + CM_GATE_BIT, 0, &cprman->regs_lock); } typedef struct clk_hw *(*bcm2835_clk_register)(struct bcm2835_cprman *cprman, @@ -2234,6 +2250,7 @@ static int bcm2835_clk_probe(struct platform_device *pdev) platform_set_drvdata(pdev, cprman); cprman->onecell.num = asize; + cprman->soc = pdata->soc; hws = cprman->onecell.hws; for (i = 0; i < asize; i++) { diff --git a/drivers/clk/bcm/clk-bcm63xx-gate.c b/drivers/clk/bcm/clk-bcm63xx-gate.c index 98e884957db8..911a29bd744e 100644 --- a/drivers/clk/bcm/clk-bcm63xx-gate.c +++ b/drivers/clk/bcm/clk-bcm63xx-gate.c @@ -155,6 +155,7 @@ static int clk_bcm63xx_probe(struct platform_device *pdev) for (entry = table; entry->name; entry++) maxbit = max_t(u8, maxbit, entry->bit); + maxbit++; hw = devm_kzalloc(&pdev->dev, struct_size(hw, data.hws, maxbit), GFP_KERNEL); diff --git a/drivers/clk/clk-ast2600.c b/drivers/clk/clk-ast2600.c index b1318e6b655b..7015974f24b4 100644 --- a/drivers/clk/clk-ast2600.c +++ b/drivers/clk/clk-ast2600.c @@ -130,6 +130,18 @@ static const struct clk_div_table ast2600_eclk_div_table[] = { { 0 } }; +static const struct clk_div_table ast2600_emmc_extclk_div_table[] = { + { 0x0, 2 }, + { 0x1, 4 }, + { 0x2, 6 }, + { 0x3, 8 }, + { 0x4, 10 }, + { 0x5, 12 }, + { 0x6, 14 }, + { 0x7, 16 }, + { 0 } +}; + static const struct clk_div_table ast2600_mac_div_table[] = { { 0x0, 4 }, { 0x1, 4 }, @@ -389,6 +401,11 @@ static struct clk_hw *aspeed_g6_clk_hw_register_gate(struct device *dev, return hw; } +static const char *const emmc_extclk_parent_names[] = { + "emmc_extclk_hpll_in", + "mpll", +}; + static const char * const vclk_parent_names[] = { "dpll", "d1pll", @@ -458,16 +475,32 @@ static int aspeed_g6_clk_probe(struct platform_device *pdev) return PTR_ERR(hw); aspeed_g6_clk_data->hws[ASPEED_CLK_UARTX] = hw; - /* EMMC ext clock divider */ - hw = clk_hw_register_gate(dev, "emmc_extclk_gate", "hpll", 0, - scu_g6_base + ASPEED_G6_CLK_SELECTION1, 15, 0, - &aspeed_g6_clk_lock); + /* EMMC ext clock */ + hw = clk_hw_register_fixed_factor(dev, "emmc_extclk_hpll_in", "hpll", + 0, 1, 2); if (IS_ERR(hw)) return PTR_ERR(hw); - hw = clk_hw_register_divider_table(dev, "emmc_extclk", "emmc_extclk_gate", 0, - scu_g6_base + ASPEED_G6_CLK_SELECTION1, 12, 3, 0, - ast2600_div_table, - &aspeed_g6_clk_lock); + + hw = clk_hw_register_mux(dev, "emmc_extclk_mux", + emmc_extclk_parent_names, + ARRAY_SIZE(emmc_extclk_parent_names), 0, + scu_g6_base + ASPEED_G6_CLK_SELECTION1, 11, 1, + 0, &aspeed_g6_clk_lock); + if (IS_ERR(hw)) + return PTR_ERR(hw); + + hw = clk_hw_register_gate(dev, "emmc_extclk_gate", "emmc_extclk_mux", + 0, scu_g6_base + ASPEED_G6_CLK_SELECTION1, + 15, 0, &aspeed_g6_clk_lock); + if (IS_ERR(hw)) + return PTR_ERR(hw); + + hw = clk_hw_register_divider_table(dev, "emmc_extclk", + "emmc_extclk_gate", 0, + scu_g6_base + + ASPEED_G6_CLK_SELECTION1, 12, + 3, 0, ast2600_emmc_extclk_div_table, + &aspeed_g6_clk_lock); if (IS_ERR(hw)) return PTR_ERR(hw); aspeed_g6_clk_data->hws[ASPEED_CLK_EMMC] = hw; @@ -599,14 +632,22 @@ static const u32 ast2600_a0_axi_ahb_div_table[] = { 2, 2, 3, 5, }; -static const u32 ast2600_a1_axi_ahb_div_table[] = { - 4, 6, 2, 4, +static const u32 ast2600_a1_axi_ahb_div0_tbl[] = { + 3, 2, 3, 4, +}; + +static const u32 ast2600_a1_axi_ahb_div1_tbl[] = { + 3, 4, 6, 8, +}; + +static const u32 ast2600_a1_axi_ahb200_tbl[] = { + 3, 4, 3, 4, 2, 2, 2, 2, }; static void 
__init aspeed_g6_cc(struct regmap *map) { struct clk_hw *hw; - u32 val, div, chip_id, axi_div, ahb_div; + u32 val, div, divbits, chip_id, axi_div, ahb_div; clk_hw_register_fixed_rate(NULL, "clkin", NULL, 0, 25000000); @@ -636,11 +677,22 @@ static void __init aspeed_g6_cc(struct regmap *map) else axi_div = 2; + divbits = (val >> 11) & 0x3; regmap_read(map, ASPEED_G6_SILICON_REV, &chip_id); - if (chip_id & BIT(16)) - ahb_div = ast2600_a1_axi_ahb_div_table[(val >> 11) & 0x3]; - else + if (chip_id & BIT(16)) { + if (!divbits) { + ahb_div = ast2600_a1_axi_ahb200_tbl[(val >> 8) & 0x3]; + if (val & BIT(16)) + ahb_div *= 2; + } else { + if (val & BIT(16)) + ahb_div = ast2600_a1_axi_ahb_div1_tbl[divbits]; + else + ahb_div = ast2600_a1_axi_ahb_div0_tbl[divbits]; + } + } else { ahb_div = ast2600_a0_axi_ahb_div_table[(val >> 11) & 0x3]; + } hw = clk_hw_register_fixed_factor(NULL, "ahb", "hpll", 0, 1, axi_div * ahb_div); aspeed_g6_clk_data->hws[ASPEED_CLK_AHB] = hw; diff --git a/drivers/clk/clk-scmi.c b/drivers/clk/clk-scmi.c index 886f7c5df51a..e3cdb4a282fe 100644 --- a/drivers/clk/clk-scmi.c +++ b/drivers/clk/clk-scmi.c @@ -103,6 +103,8 @@ static const struct clk_ops scmi_clk_ops = { static int scmi_clk_ops_init(struct device *dev, struct scmi_clk *sclk) { int ret; + unsigned long min_rate, max_rate; + struct clk_init_data init = { .flags = CLK_GET_RATE_NOCACHE, .num_parents = 0, @@ -112,9 +114,23 @@ static int scmi_clk_ops_init(struct device *dev, struct scmi_clk *sclk) sclk->hw.init = &init; ret = devm_clk_hw_register(dev, &sclk->hw); - if (!ret) - clk_hw_set_rate_range(&sclk->hw, sclk->info->range.min_rate, - sclk->info->range.max_rate); + if (ret) + return ret; + + if (sclk->info->rate_discrete) { + int num_rates = sclk->info->list.num_rates; + + if (num_rates <= 0) + return -EINVAL; + + min_rate = sclk->info->list.rates[0]; + max_rate = sclk->info->list.rates[num_rates - 1]; + } else { + min_rate = sclk->info->range.min_rate; + max_rate = sclk->info->range.max_rate; + } + + clk_hw_set_rate_range(&sclk->hw, min_rate, max_rate); return ret; } diff --git a/drivers/clk/davinci/pll.c b/drivers/clk/davinci/pll.c index 1ac11b6a47a3..2ec48d030fda 100644 --- a/drivers/clk/davinci/pll.c +++ b/drivers/clk/davinci/pll.c @@ -491,7 +491,7 @@ struct clk *davinci_pll_clk_register(struct device *dev, parent_name = postdiv_name; } - pllen = kzalloc(sizeof(*pllout), GFP_KERNEL); + pllen = kzalloc(sizeof(*pllen), GFP_KERNEL); if (!pllen) { ret = -ENOMEM; goto err_unregister_postdiv; diff --git a/drivers/clk/mediatek/clk-mux.c b/drivers/clk/mediatek/clk-mux.c index 76f9cd039195..14e127e9a740 100644 --- a/drivers/clk/mediatek/clk-mux.c +++ b/drivers/clk/mediatek/clk-mux.c @@ -160,7 +160,7 @@ struct clk *mtk_clk_register_mux(const struct mtk_mux *mux, spinlock_t *lock) { struct mtk_clk_mux *clk_mux; - struct clk_init_data init; + struct clk_init_data init = {}; struct clk *clk; clk_mux = kzalloc(sizeof(*clk_mux), GFP_KERNEL); diff --git a/drivers/clk/meson/meson8b.c b/drivers/clk/meson/meson8b.c index 8856ce476ccf..082178a0f41a 100644 --- a/drivers/clk/meson/meson8b.c +++ b/drivers/clk/meson/meson8b.c @@ -1071,7 +1071,7 @@ static struct clk_regmap meson8b_vid_pll_in_sel = { * Meson8m2: vid2_pll */ .parent_hws = (const struct clk_hw *[]) { - &meson8b_hdmi_pll_dco.hw + &meson8b_hdmi_pll_lvds_out.hw }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, @@ -1207,7 +1207,7 @@ static struct clk_regmap meson8b_vclk_in_en = { static struct clk_regmap meson8b_vclk_div1_gate = { .data = &(struct clk_regmap_gate_data){ - .offset = 
HHI_VID_CLK_DIV, + .offset = HHI_VID_CLK_CNTL, .bit_idx = 0, }, .hw.init = &(struct clk_init_data){ @@ -1237,7 +1237,7 @@ static struct clk_fixed_factor meson8b_vclk_div2_div = { static struct clk_regmap meson8b_vclk_div2_div_gate = { .data = &(struct clk_regmap_gate_data){ - .offset = HHI_VID_CLK_DIV, + .offset = HHI_VID_CLK_CNTL, .bit_idx = 1, }, .hw.init = &(struct clk_init_data){ @@ -1267,7 +1267,7 @@ static struct clk_fixed_factor meson8b_vclk_div4_div = { static struct clk_regmap meson8b_vclk_div4_div_gate = { .data = &(struct clk_regmap_gate_data){ - .offset = HHI_VID_CLK_DIV, + .offset = HHI_VID_CLK_CNTL, .bit_idx = 2, }, .hw.init = &(struct clk_init_data){ @@ -1297,7 +1297,7 @@ static struct clk_fixed_factor meson8b_vclk_div6_div = { static struct clk_regmap meson8b_vclk_div6_div_gate = { .data = &(struct clk_regmap_gate_data){ - .offset = HHI_VID_CLK_DIV, + .offset = HHI_VID_CLK_CNTL, .bit_idx = 3, }, .hw.init = &(struct clk_init_data){ @@ -1327,7 +1327,7 @@ static struct clk_fixed_factor meson8b_vclk_div12_div = { static struct clk_regmap meson8b_vclk_div12_div_gate = { .data = &(struct clk_regmap_gate_data){ - .offset = HHI_VID_CLK_DIV, + .offset = HHI_VID_CLK_CNTL, .bit_idx = 4, }, .hw.init = &(struct clk_init_data){ @@ -1910,6 +1910,13 @@ static struct clk_regmap meson8b_mali = { }, }; +static const struct reg_sequence meson8m2_gp_pll_init_regs[] = { + { .reg = HHI_GP_PLL_CNTL2, .def = 0x59c88000 }, + { .reg = HHI_GP_PLL_CNTL3, .def = 0xca463823 }, + { .reg = HHI_GP_PLL_CNTL4, .def = 0x0286a027 }, + { .reg = HHI_GP_PLL_CNTL5, .def = 0x00003000 }, +}; + static const struct pll_params_table meson8m2_gp_pll_params_table[] = { PLL_PARAMS(182, 3), { /* sentinel */ }, @@ -1943,6 +1950,8 @@ static struct clk_regmap meson8m2_gp_pll_dco = { .width = 1, }, .table = meson8m2_gp_pll_params_table, + .init_regs = meson8m2_gp_pll_init_regs, + .init_count = ARRAY_SIZE(meson8m2_gp_pll_init_regs), }, .hw.init = &(struct clk_init_data){ .name = "gp_pll_dco", @@ -3491,54 +3500,87 @@ static struct clk_regmap *const meson8b_clk_regmaps[] = { static const struct meson8b_clk_reset_line { u32 reg; u8 bit_idx; + bool active_low; } meson8b_clk_reset_bits[] = { [CLKC_RESET_L2_CACHE_SOFT_RESET] = { - .reg = HHI_SYS_CPU_CLK_CNTL0, .bit_idx = 30 + .reg = HHI_SYS_CPU_CLK_CNTL0, + .bit_idx = 30, + .active_low = false, }, [CLKC_RESET_AXI_64_TO_128_BRIDGE_A5_SOFT_RESET] = { - .reg = HHI_SYS_CPU_CLK_CNTL0, .bit_idx = 29 + .reg = HHI_SYS_CPU_CLK_CNTL0, + .bit_idx = 29, + .active_low = false, }, [CLKC_RESET_SCU_SOFT_RESET] = { - .reg = HHI_SYS_CPU_CLK_CNTL0, .bit_idx = 28 + .reg = HHI_SYS_CPU_CLK_CNTL0, + .bit_idx = 28, + .active_low = false, }, [CLKC_RESET_CPU3_SOFT_RESET] = { - .reg = HHI_SYS_CPU_CLK_CNTL0, .bit_idx = 27 + .reg = HHI_SYS_CPU_CLK_CNTL0, + .bit_idx = 27, + .active_low = false, }, [CLKC_RESET_CPU2_SOFT_RESET] = { - .reg = HHI_SYS_CPU_CLK_CNTL0, .bit_idx = 26 + .reg = HHI_SYS_CPU_CLK_CNTL0, + .bit_idx = 26, + .active_low = false, }, [CLKC_RESET_CPU1_SOFT_RESET] = { - .reg = HHI_SYS_CPU_CLK_CNTL0, .bit_idx = 25 + .reg = HHI_SYS_CPU_CLK_CNTL0, + .bit_idx = 25, + .active_low = false, }, [CLKC_RESET_CPU0_SOFT_RESET] = { - .reg = HHI_SYS_CPU_CLK_CNTL0, .bit_idx = 24 + .reg = HHI_SYS_CPU_CLK_CNTL0, + .bit_idx = 24, + .active_low = false, }, [CLKC_RESET_A5_GLOBAL_RESET] = { - .reg = HHI_SYS_CPU_CLK_CNTL0, .bit_idx = 18 + .reg = HHI_SYS_CPU_CLK_CNTL0, + .bit_idx = 18, + .active_low = false, }, [CLKC_RESET_A5_AXI_SOFT_RESET] = { - .reg = HHI_SYS_CPU_CLK_CNTL0, .bit_idx = 17 + .reg = HHI_SYS_CPU_CLK_CNTL0, + 
.bit_idx = 17, + .active_low = false, }, [CLKC_RESET_A5_ABP_SOFT_RESET] = { - .reg = HHI_SYS_CPU_CLK_CNTL0, .bit_idx = 16 + .reg = HHI_SYS_CPU_CLK_CNTL0, + .bit_idx = 16, + .active_low = false, }, [CLKC_RESET_AXI_64_TO_128_BRIDGE_MMC_SOFT_RESET] = { - .reg = HHI_SYS_CPU_CLK_CNTL1, .bit_idx = 30 + .reg = HHI_SYS_CPU_CLK_CNTL1, + .bit_idx = 30, + .active_low = false, }, [CLKC_RESET_VID_CLK_CNTL_SOFT_RESET] = { - .reg = HHI_VID_CLK_CNTL, .bit_idx = 15 + .reg = HHI_VID_CLK_CNTL, + .bit_idx = 15, + .active_low = false, }, [CLKC_RESET_VID_DIVIDER_CNTL_SOFT_RESET_POST] = { - .reg = HHI_VID_DIVIDER_CNTL, .bit_idx = 7 + .reg = HHI_VID_DIVIDER_CNTL, + .bit_idx = 7, + .active_low = false, }, [CLKC_RESET_VID_DIVIDER_CNTL_SOFT_RESET_PRE] = { - .reg = HHI_VID_DIVIDER_CNTL, .bit_idx = 3 + .reg = HHI_VID_DIVIDER_CNTL, + .bit_idx = 3, + .active_low = false, }, [CLKC_RESET_VID_DIVIDER_CNTL_RESET_N_POST] = { - .reg = HHI_VID_DIVIDER_CNTL, .bit_idx = 1 + .reg = HHI_VID_DIVIDER_CNTL, + .bit_idx = 1, + .active_low = true, }, [CLKC_RESET_VID_DIVIDER_CNTL_RESET_N_PRE] = { - .reg = HHI_VID_DIVIDER_CNTL, .bit_idx = 0 + .reg = HHI_VID_DIVIDER_CNTL, + .bit_idx = 0, + .active_low = true, }, }; @@ -3547,22 +3589,22 @@ static int meson8b_clk_reset_update(struct reset_controller_dev *rcdev, { struct meson8b_clk_reset *meson8b_clk_reset = container_of(rcdev, struct meson8b_clk_reset, reset); - unsigned long flags; const struct meson8b_clk_reset_line *reset; + unsigned int value = 0; + unsigned long flags; if (id >= ARRAY_SIZE(meson8b_clk_reset_bits)) return -EINVAL; reset = &meson8b_clk_reset_bits[id]; + if (assert != reset->active_low) + value = BIT(reset->bit_idx); + spin_lock_irqsave(&meson_clk_lock, flags); - if (assert) - regmap_update_bits(meson8b_clk_reset->regmap, reset->reg, - BIT(reset->bit_idx), BIT(reset->bit_idx)); - else - regmap_update_bits(meson8b_clk_reset->regmap, reset->reg, - BIT(reset->bit_idx), 0); + regmap_update_bits(meson8b_clk_reset->regmap, reset->reg, + BIT(reset->bit_idx), value); spin_unlock_irqrestore(&meson_clk_lock, flags); diff --git a/drivers/clk/meson/meson8b.h b/drivers/clk/meson/meson8b.h index c889fbeec30f..c91fb07fcb65 100644 --- a/drivers/clk/meson/meson8b.h +++ b/drivers/clk/meson/meson8b.h @@ -20,6 +20,10 @@ * [0] http://dn.odroid.com/S805/Datasheet/S805_Datasheet%20V0.8%2020150126.pdf */ #define HHI_GP_PLL_CNTL 0x40 /* 0x10 offset in data sheet */ +#define HHI_GP_PLL_CNTL2 0x44 /* 0x11 offset in data sheet */ +#define HHI_GP_PLL_CNTL3 0x48 /* 0x12 offset in data sheet */ +#define HHI_GP_PLL_CNTL4 0x4C /* 0x13 offset in data sheet */ +#define HHI_GP_PLL_CNTL5 0x50 /* 0x14 offset in data sheet */ #define HHI_VIID_CLK_DIV 0x128 /* 0x4a offset in data sheet */ #define HHI_VIID_CLK_CNTL 0x12c /* 0x4b offset in data sheet */ #define HHI_GCLK_MPEG0 0x140 /* 0x50 offset in data sheet */ diff --git a/drivers/clk/mvebu/Kconfig b/drivers/clk/mvebu/Kconfig index 415e6906a113..76cd06f4ed62 100644 --- a/drivers/clk/mvebu/Kconfig +++ b/drivers/clk/mvebu/Kconfig @@ -42,6 +42,7 @@ config ARMADA_AP806_SYSCON config ARMADA_AP_CPU_CLK bool + select ARMADA_AP_CP_HELPER config ARMADA_CP110_SYSCON bool diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c index 055318f97991..a69f53e435ed 100644 --- a/drivers/clk/qcom/clk-alpha-pll.c +++ b/drivers/clk/qcom/clk-alpha-pll.c @@ -55,7 +55,6 @@ #define PLL_STATUS(p) ((p)->offset + (p)->regs[PLL_OFF_STATUS]) #define PLL_OPMODE(p) ((p)->offset + (p)->regs[PLL_OFF_OPMODE]) #define PLL_FRAC(p) ((p)->offset + (p)->regs[PLL_OFF_FRAC]) 
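The polarity handling added to the meson8b CLKC reset lines above reduces to one expression: the register bit is set when the requested state differs from the line's active-low flag. A small runnable sketch of that truth table, with the regmap write replaced by a printf:

#include <stdbool.h>
#include <stdio.h>

#define BIT(n) (1U << (n))

static unsigned int reset_value(bool assert, bool active_low, unsigned int bit_idx)
{
    /* active-high: assert -> bit set; active-low ("reset_n"): assert -> bit clear */
    return (assert != active_low) ? BIT(bit_idx) : 0;
}

int main(void)
{
    printf("assert   active-high bit0 -> 0x%x\n", reset_value(true,  false, 0));
    printf("deassert active-high bit0 -> 0x%x\n", reset_value(false, false, 0));
    printf("assert   active-low  bit1 -> 0x%x\n", reset_value(true,  true,  1));
    printf("deassert active-low  bit1 -> 0x%x\n", reset_value(false, true,  1));
    return 0;
}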
-#define PLL_CAL_VAL(p) ((p)->offset + (p)->regs[PLL_OFF_CAL_VAL]) const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = { [CLK_ALPHA_PLL_TYPE_DEFAULT] = { @@ -114,7 +113,6 @@ const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = { [PLL_OFF_STATUS] = 0x30, [PLL_OFF_OPMODE] = 0x38, [PLL_OFF_ALPHA_VAL] = 0x40, - [PLL_OFF_CAL_VAL] = 0x44, }, }; EXPORT_SYMBOL_GPL(clk_alpha_pll_regs); diff --git a/drivers/clk/qcom/clk-rpmh.c b/drivers/clk/qcom/clk-rpmh.c index 96a36f6ff667..d7586e26acd8 100644 --- a/drivers/clk/qcom/clk-rpmh.c +++ b/drivers/clk/qcom/clk-rpmh.c @@ -143,12 +143,22 @@ static inline bool has_state_changed(struct clk_rpmh *c, u32 state) != (c->aggr_state & BIT(state)); } +static int clk_rpmh_send(struct clk_rpmh *c, enum rpmh_state state, + struct tcs_cmd *cmd, bool wait) +{ + if (wait) + return rpmh_write(c->dev, state, cmd, 1); + + return rpmh_write_async(c->dev, state, cmd, 1); +} + static int clk_rpmh_send_aggregate_command(struct clk_rpmh *c) { struct tcs_cmd cmd = { 0 }; u32 cmd_state, on_val; enum rpmh_state state = RPMH_SLEEP_STATE; int ret; + bool wait; cmd.addr = c->res_addr; cmd_state = c->aggr_state; @@ -159,7 +169,8 @@ static int clk_rpmh_send_aggregate_command(struct clk_rpmh *c) if (cmd_state & BIT(state)) cmd.data = on_val; - ret = rpmh_write_async(c->dev, state, &cmd, 1); + wait = cmd_state && state == RPMH_ACTIVE_ONLY_STATE; + ret = clk_rpmh_send(c, state, &cmd, wait); if (ret) { dev_err(c->dev, "set %s state of %s failed: (%d)\n", !state ? "sleep" : @@ -267,7 +278,7 @@ static int clk_rpmh_bcm_send_cmd(struct clk_rpmh *c, bool enable) cmd.addr = c->res_addr; cmd.data = BCM_TCS_CMD(1, enable, 0, cmd_state); - ret = rpmh_write_async(c->dev, RPMH_ACTIVE_ONLY_STATE, &cmd, 1); + ret = clk_rpmh_send(c, RPMH_ACTIVE_ONLY_STATE, &cmd, enable); if (ret) { dev_err(c->dev, "set active state of %s failed: (%d)\n", c->res_name, ret); diff --git a/drivers/clk/qcom/gcc-msm8916.c b/drivers/clk/qcom/gcc-msm8916.c index 4e329a7baf2b..17e4a5a2a9fd 100644 --- a/drivers/clk/qcom/gcc-msm8916.c +++ b/drivers/clk/qcom/gcc-msm8916.c @@ -260,7 +260,7 @@ static struct clk_pll gpll0 = { .l_reg = 0x21004, .m_reg = 0x21008, .n_reg = 0x2100c, - .config_reg = 0x21014, + .config_reg = 0x21010, .mode_reg = 0x21000, .status_reg = 0x2101c, .status_bit = 17, @@ -287,7 +287,7 @@ static struct clk_pll gpll1 = { .l_reg = 0x20004, .m_reg = 0x20008, .n_reg = 0x2000c, - .config_reg = 0x20014, + .config_reg = 0x20010, .mode_reg = 0x20000, .status_reg = 0x2001c, .status_bit = 17, @@ -314,7 +314,7 @@ static struct clk_pll gpll2 = { .l_reg = 0x4a004, .m_reg = 0x4a008, .n_reg = 0x4a00c, - .config_reg = 0x4a014, + .config_reg = 0x4a010, .mode_reg = 0x4a000, .status_reg = 0x4a01c, .status_bit = 17, @@ -341,7 +341,7 @@ static struct clk_pll bimc_pll = { .l_reg = 0x23004, .m_reg = 0x23008, .n_reg = 0x2300c, - .config_reg = 0x23014, + .config_reg = 0x23010, .mode_reg = 0x23000, .status_reg = 0x2301c, .status_bit = 17, diff --git a/drivers/clk/qcom/gcc-sdm660.c b/drivers/clk/qcom/gcc-sdm660.c index bf5730832ef3..c6fb57cd576f 100644 --- a/drivers/clk/qcom/gcc-sdm660.c +++ b/drivers/clk/qcom/gcc-sdm660.c @@ -1715,6 +1715,9 @@ static struct clk_branch gcc_mss_cfg_ahb_clk = { static struct clk_branch gcc_mss_mnoc_bimc_axi_clk = { .halt_reg = 0x8a004, + .halt_check = BRANCH_HALT, + .hwcg_reg = 0x8a004, + .hwcg_bit = 1, .clkr = { .enable_reg = 0x8a004, .enable_mask = BIT(0), diff --git a/drivers/clk/qcom/gcc-sm8150.c b/drivers/clk/qcom/gcc-sm8150.c index e3959ff5cb55..ee908fbfeab1 100644 --- a/drivers/clk/qcom/gcc-sm8150.c +++ 
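The clk-rpmh change above only blocks for completion when a clock ends up enabled in the active-only state; sleep/wake votes and disables remain fire-and-forget. A sketch of that decision with hypothetical userspace stand-ins for rpmh_write()/rpmh_write_async():

#include <stdbool.h>
#include <stdio.h>

enum rpmh_state { RPMH_SLEEP_STATE, RPMH_WAKE_ONLY_STATE, RPMH_ACTIVE_ONLY_STATE };

static int send_blocking(enum rpmh_state s) { printf("blocking write, state %d\n", s); return 0; }
static int send_async(enum rpmh_state s)    { printf("async write, state %d\n", s);    return 0; }

static int clk_send(enum rpmh_state state, unsigned int aggr_state)
{
    /* wait only when the clock is being enabled in the active-only state */
    bool wait = aggr_state && state == RPMH_ACTIVE_ONLY_STATE;

    return wait ? send_blocking(state) : send_async(state);
}

int main(void)
{
    clk_send(RPMH_ACTIVE_ONLY_STATE, 0x1);  /* enable: must wait for the vote to land */
    clk_send(RPMH_ACTIVE_ONLY_STATE, 0x0);  /* disable: async is fine */
    clk_send(RPMH_SLEEP_STATE, 0x1);        /* sleep vote: async */
    return 0;
}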
b/drivers/clk/qcom/gcc-sm8150.c @@ -1615,6 +1615,38 @@ static struct clk_branch gcc_gpu_cfg_ahb_clk = { }, }; +static struct clk_branch gcc_gpu_gpll0_clk_src = { + .halt_check = BRANCH_HALT_SKIP, + .clkr = { + .enable_reg = 0x52004, + .enable_mask = BIT(15), + .hw.init = &(struct clk_init_data){ + .name = "gcc_gpu_gpll0_clk_src", + .parent_hws = (const struct clk_hw *[]){ + &gpll0.clkr.hw }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_gpu_gpll0_div_clk_src = { + .halt_check = BRANCH_HALT_SKIP, + .clkr = { + .enable_reg = 0x52004, + .enable_mask = BIT(16), + .hw.init = &(struct clk_init_data){ + .name = "gcc_gpu_gpll0_div_clk_src", + .parent_hws = (const struct clk_hw *[]){ + &gpll0_out_even.clkr.hw }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + static struct clk_branch gcc_gpu_iref_clk = { .halt_reg = 0x8c010, .halt_check = BRANCH_HALT, @@ -1697,6 +1729,38 @@ static struct clk_branch gcc_npu_cfg_ahb_clk = { }, }; +static struct clk_branch gcc_npu_gpll0_clk_src = { + .halt_check = BRANCH_HALT_SKIP, + .clkr = { + .enable_reg = 0x52004, + .enable_mask = BIT(18), + .hw.init = &(struct clk_init_data){ + .name = "gcc_npu_gpll0_clk_src", + .parent_hws = (const struct clk_hw *[]){ + &gpll0.clkr.hw }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_npu_gpll0_div_clk_src = { + .halt_check = BRANCH_HALT_SKIP, + .clkr = { + .enable_reg = 0x52004, + .enable_mask = BIT(19), + .hw.init = &(struct clk_init_data){ + .name = "gcc_npu_gpll0_div_clk_src", + .parent_hws = (const struct clk_hw *[]){ + &gpll0_out_even.clkr.hw }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + static struct clk_branch gcc_npu_trig_clk = { .halt_reg = 0x4d00c, .halt_check = BRANCH_VOTED, @@ -2811,6 +2875,45 @@ static struct clk_branch gcc_ufs_card_phy_aux_hw_ctl_clk = { }, }; +/* external clocks so add BRANCH_HALT_SKIP */ +static struct clk_branch gcc_ufs_card_rx_symbol_0_clk = { + .halt_check = BRANCH_HALT_SKIP, + .clkr = { + .enable_reg = 0x7501c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ufs_card_rx_symbol_0_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +/* external clocks so add BRANCH_HALT_SKIP */ +static struct clk_branch gcc_ufs_card_rx_symbol_1_clk = { + .halt_check = BRANCH_HALT_SKIP, + .clkr = { + .enable_reg = 0x750ac, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ufs_card_rx_symbol_1_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +/* external clocks so add BRANCH_HALT_SKIP */ +static struct clk_branch gcc_ufs_card_tx_symbol_0_clk = { + .halt_check = BRANCH_HALT_SKIP, + .clkr = { + .enable_reg = 0x75018, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ufs_card_tx_symbol_0_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + static struct clk_branch gcc_ufs_card_unipro_core_clk = { .halt_reg = 0x75058, .halt_check = BRANCH_HALT, @@ -2991,6 +3094,45 @@ static struct clk_branch gcc_ufs_phy_phy_aux_hw_ctl_clk = { }, }; +/* external clocks so add BRANCH_HALT_SKIP */ +static struct clk_branch gcc_ufs_phy_rx_symbol_0_clk = { + .halt_check = BRANCH_HALT_SKIP, + .clkr = { + .enable_reg = 0x7701c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ufs_phy_rx_symbol_0_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +/* external clocks so add 
BRANCH_HALT_SKIP */ +static struct clk_branch gcc_ufs_phy_rx_symbol_1_clk = { + .halt_check = BRANCH_HALT_SKIP, + .clkr = { + .enable_reg = 0x770ac, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ufs_phy_rx_symbol_1_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +/* external clocks so add BRANCH_HALT_SKIP */ +static struct clk_branch gcc_ufs_phy_tx_symbol_0_clk = { + .halt_check = BRANCH_HALT_SKIP, + .clkr = { + .enable_reg = 0x77018, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ufs_phy_tx_symbol_0_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + static struct clk_branch gcc_ufs_phy_unipro_core_clk = { .halt_reg = 0x77058, .halt_check = BRANCH_HALT, @@ -3331,12 +3473,16 @@ static struct clk_regmap *gcc_sm8150_clocks[] = { [GCC_GP3_CLK] = &gcc_gp3_clk.clkr, [GCC_GP3_CLK_SRC] = &gcc_gp3_clk_src.clkr, [GCC_GPU_CFG_AHB_CLK] = &gcc_gpu_cfg_ahb_clk.clkr, + [GCC_GPU_GPLL0_CLK_SRC] = &gcc_gpu_gpll0_clk_src.clkr, + [GCC_GPU_GPLL0_DIV_CLK_SRC] = &gcc_gpu_gpll0_div_clk_src.clkr, [GCC_GPU_IREF_CLK] = &gcc_gpu_iref_clk.clkr, [GCC_GPU_MEMNOC_GFX_CLK] = &gcc_gpu_memnoc_gfx_clk.clkr, [GCC_GPU_SNOC_DVM_GFX_CLK] = &gcc_gpu_snoc_dvm_gfx_clk.clkr, [GCC_NPU_AT_CLK] = &gcc_npu_at_clk.clkr, [GCC_NPU_AXI_CLK] = &gcc_npu_axi_clk.clkr, [GCC_NPU_CFG_AHB_CLK] = &gcc_npu_cfg_ahb_clk.clkr, + [GCC_NPU_GPLL0_CLK_SRC] = &gcc_npu_gpll0_clk_src.clkr, + [GCC_NPU_GPLL0_DIV_CLK_SRC] = &gcc_npu_gpll0_div_clk_src.clkr, [GCC_NPU_TRIG_CLK] = &gcc_npu_trig_clk.clkr, [GCC_PCIE0_PHY_REFGEN_CLK] = &gcc_pcie0_phy_refgen_clk.clkr, [GCC_PCIE1_PHY_REFGEN_CLK] = &gcc_pcie1_phy_refgen_clk.clkr, @@ -3441,6 +3587,9 @@ static struct clk_regmap *gcc_sm8150_clocks[] = { [GCC_UFS_CARD_PHY_AUX_CLK_SRC] = &gcc_ufs_card_phy_aux_clk_src.clkr, [GCC_UFS_CARD_PHY_AUX_HW_CTL_CLK] = &gcc_ufs_card_phy_aux_hw_ctl_clk.clkr, + [GCC_UFS_CARD_RX_SYMBOL_0_CLK] = &gcc_ufs_card_rx_symbol_0_clk.clkr, + [GCC_UFS_CARD_RX_SYMBOL_1_CLK] = &gcc_ufs_card_rx_symbol_1_clk.clkr, + [GCC_UFS_CARD_TX_SYMBOL_0_CLK] = &gcc_ufs_card_tx_symbol_0_clk.clkr, [GCC_UFS_CARD_UNIPRO_CORE_CLK] = &gcc_ufs_card_unipro_core_clk.clkr, [GCC_UFS_CARD_UNIPRO_CORE_CLK_SRC] = &gcc_ufs_card_unipro_core_clk_src.clkr, @@ -3458,6 +3607,9 @@ static struct clk_regmap *gcc_sm8150_clocks[] = { [GCC_UFS_PHY_PHY_AUX_CLK] = &gcc_ufs_phy_phy_aux_clk.clkr, [GCC_UFS_PHY_PHY_AUX_CLK_SRC] = &gcc_ufs_phy_phy_aux_clk_src.clkr, [GCC_UFS_PHY_PHY_AUX_HW_CTL_CLK] = &gcc_ufs_phy_phy_aux_hw_ctl_clk.clkr, + [GCC_UFS_PHY_RX_SYMBOL_0_CLK] = &gcc_ufs_phy_rx_symbol_0_clk.clkr, + [GCC_UFS_PHY_RX_SYMBOL_1_CLK] = &gcc_ufs_phy_rx_symbol_1_clk.clkr, + [GCC_UFS_PHY_TX_SYMBOL_0_CLK] = &gcc_ufs_phy_tx_symbol_0_clk.clkr, [GCC_UFS_PHY_UNIPRO_CORE_CLK] = &gcc_ufs_phy_unipro_core_clk.clkr, [GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC] = &gcc_ufs_phy_unipro_core_clk_src.clkr, diff --git a/drivers/clk/renesas/renesas-cpg-mssr.c b/drivers/clk/renesas/renesas-cpg-mssr.c index 132cc96895e3..6f9612c169af 100644 --- a/drivers/clk/renesas/renesas-cpg-mssr.c +++ b/drivers/clk/renesas/renesas-cpg-mssr.c @@ -800,7 +800,8 @@ static int cpg_mssr_suspend_noirq(struct device *dev) /* Save module registers with bits under our control */ for (reg = 0; reg < ARRAY_SIZE(priv->smstpcr_saved); reg++) { if (priv->smstpcr_saved[reg].mask) - priv->smstpcr_saved[reg].val = + priv->smstpcr_saved[reg].val = priv->stbyctrl ? 
+ readb(priv->base + STBCR(reg)) : readl(priv->base + SMSTPCR(reg)); } @@ -860,8 +861,9 @@ static int cpg_mssr_resume_noirq(struct device *dev) } if (!i) - dev_warn(dev, "Failed to enable SMSTP %p[0x%x]\n", - priv->base + SMSTPCR(reg), oldval & mask); + dev_warn(dev, "Failed to enable %s%u[0x%x]\n", + priv->stbyctrl ? "STB" : "SMSTP", reg, + oldval & mask); } return 0; diff --git a/drivers/clk/rockchip/clk-rk3228.c b/drivers/clk/rockchip/clk-rk3228.c index d7243c09cc84..47d6482dda9d 100644 --- a/drivers/clk/rockchip/clk-rk3228.c +++ b/drivers/clk/rockchip/clk-rk3228.c @@ -137,7 +137,7 @@ PNAME(mux_usb480m_p) = { "usb480m_phy", "xin24m" }; PNAME(mux_hdmiphy_p) = { "hdmiphy_phy", "xin24m" }; PNAME(mux_aclk_cpu_src_p) = { "cpll_aclk_cpu", "gpll_aclk_cpu", "hdmiphy_aclk_cpu" }; -PNAME(mux_pll_src_4plls_p) = { "cpll", "gpll", "hdmiphy" "usb480m" }; +PNAME(mux_pll_src_4plls_p) = { "cpll", "gpll", "hdmiphy", "usb480m" }; PNAME(mux_pll_src_3plls_p) = { "cpll", "gpll", "hdmiphy" }; PNAME(mux_pll_src_2plls_p) = { "cpll", "gpll" }; PNAME(mux_sclk_hdmi_cec_p) = { "cpll", "gpll", "xin24m" }; diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c index 27fd274e92f8..dfef5f0833db 100644 --- a/drivers/clk/samsung/clk-exynos5420.c +++ b/drivers/clk/samsung/clk-exynos5420.c @@ -540,7 +540,7 @@ static const struct samsung_div_clock exynos5800_div_clks[] __initconst = { static const struct samsung_gate_clock exynos5800_gate_clks[] __initconst = { GATE(CLK_ACLK550_CAM, "aclk550_cam", "mout_user_aclk550_cam", - GATE_BUS_TOP, 24, 0, 0), + GATE_BUS_TOP, 24, CLK_IS_CRITICAL, 0), GATE(CLK_ACLK432_SCALER, "aclk432_scaler", "mout_user_aclk432_scaler", GATE_BUS_TOP, 27, CLK_IS_CRITICAL, 0), }; @@ -940,25 +940,25 @@ static const struct samsung_gate_clock exynos5x_gate_clks[] __initconst = { GATE(0, "aclk300_jpeg", "mout_user_aclk300_jpeg", GATE_BUS_TOP, 4, CLK_IGNORE_UNUSED, 0), GATE(0, "aclk333_432_isp0", "mout_user_aclk333_432_isp0", - GATE_BUS_TOP, 5, 0, 0), + GATE_BUS_TOP, 5, CLK_IS_CRITICAL, 0), GATE(0, "aclk300_gscl", "mout_user_aclk300_gscl", GATE_BUS_TOP, 6, CLK_IS_CRITICAL, 0), GATE(0, "aclk333_432_gscl", "mout_user_aclk333_432_gscl", GATE_BUS_TOP, 7, CLK_IGNORE_UNUSED, 0), GATE(0, "aclk333_432_isp", "mout_user_aclk333_432_isp", - GATE_BUS_TOP, 8, 0, 0), + GATE_BUS_TOP, 8, CLK_IS_CRITICAL, 0), GATE(CLK_PCLK66_GPIO, "pclk66_gpio", "mout_user_pclk66_gpio", GATE_BUS_TOP, 9, CLK_IGNORE_UNUSED, 0), GATE(0, "aclk66_psgen", "mout_user_aclk66_psgen", GATE_BUS_TOP, 10, CLK_IGNORE_UNUSED, 0), GATE(0, "aclk266_isp", "mout_user_aclk266_isp", - GATE_BUS_TOP, 13, 0, 0), + GATE_BUS_TOP, 13, CLK_IS_CRITICAL, 0), GATE(0, "aclk166", "mout_user_aclk166", GATE_BUS_TOP, 14, CLK_IGNORE_UNUSED, 0), GATE(CLK_ACLK333, "aclk333", "mout_user_aclk333", GATE_BUS_TOP, 15, CLK_IS_CRITICAL, 0), GATE(0, "aclk400_isp", "mout_user_aclk400_isp", - GATE_BUS_TOP, 16, 0, 0), + GATE_BUS_TOP, 16, CLK_IS_CRITICAL, 0), GATE(0, "aclk400_mscl", "mout_user_aclk400_mscl", GATE_BUS_TOP, 17, CLK_IS_CRITICAL, 0), GATE(0, "aclk200_disp1", "mout_user_aclk200_disp1", @@ -1158,8 +1158,10 @@ static const struct samsung_gate_clock exynos5x_gate_clks[] __initconst = { GATE_IP_GSCL1, 3, 0, 0), GATE(CLK_SMMU_FIMCL1, "smmu_fimcl1", "dout_gscl_blk_333", GATE_IP_GSCL1, 4, 0, 0), - GATE(CLK_GSCL_WA, "gscl_wa", "sclk_gscl_wa", GATE_IP_GSCL1, 12, 0, 0), - GATE(CLK_GSCL_WB, "gscl_wb", "sclk_gscl_wb", GATE_IP_GSCL1, 13, 0, 0), + GATE(CLK_GSCL_WA, "gscl_wa", "sclk_gscl_wa", GATE_IP_GSCL1, 12, + CLK_IS_CRITICAL, 0), + GATE(CLK_GSCL_WB, 
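The rk3228 parent-name fix above is a classic C pitfall: a missing comma between two string literals silently concatenates them, so the table compiles with one fewer (and one wrong) entry. A runnable userspace demonstration:

#include <stdio.h>

static const char *buggy[] = { "cpll", "gpll", "hdmiphy" "usb480m" };
static const char *fixed[] = { "cpll", "gpll", "hdmiphy", "usb480m" };

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

int main(void)
{
    printf("buggy has %zu entries, last is \"%s\"\n",
           ARRAY_SIZE(buggy), buggy[ARRAY_SIZE(buggy) - 1]);
    printf("fixed has %zu entries, last is \"%s\"\n",
           ARRAY_SIZE(fixed), fixed[ARRAY_SIZE(fixed) - 1]);
    return 0;
}

The buggy table ends up with three parents, the last being "hdmiphyusb480m", which is why the mux silently lost its fourth input.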
"gscl_wb", "sclk_gscl_wb", GATE_IP_GSCL1, 13, + CLK_IS_CRITICAL, 0), GATE(CLK_SMMU_FIMCL3, "smmu_fimcl3,", "dout_gscl_blk_333", GATE_IP_GSCL1, 16, 0, 0), GATE(CLK_FIMC_LITE3, "fimc_lite3", "aclk333_432_gscl", diff --git a/drivers/clk/samsung/clk-exynos5433.c b/drivers/clk/samsung/clk-exynos5433.c index 4b1aa9382ad2..6f29ecd0442e 100644 --- a/drivers/clk/samsung/clk-exynos5433.c +++ b/drivers/clk/samsung/clk-exynos5433.c @@ -1706,7 +1706,8 @@ static const struct samsung_gate_clock peric_gate_clks[] __initconst = { GATE(CLK_SCLK_PCM1, "sclk_pcm1", "sclk_pcm1_peric", ENABLE_SCLK_PERIC, 7, CLK_SET_RATE_PARENT, 0), GATE(CLK_SCLK_I2S1, "sclk_i2s1", "sclk_i2s1_peric", - ENABLE_SCLK_PERIC, 6, CLK_SET_RATE_PARENT, 0), + ENABLE_SCLK_PERIC, 6, + CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0), GATE(CLK_SCLK_SPI2, "sclk_spi2", "sclk_spi2_peric", ENABLE_SCLK_PERIC, 5, CLK_SET_RATE_PARENT, 0), GATE(CLK_SCLK_SPI1, "sclk_spi1", "sclk_spi1_peric", ENABLE_SCLK_PERIC, diff --git a/drivers/clk/sifive/fu540-prci.c b/drivers/clk/sifive/fu540-prci.c index 6282ee2f361c..a8901f90a61a 100644 --- a/drivers/clk/sifive/fu540-prci.c +++ b/drivers/clk/sifive/fu540-prci.c @@ -586,7 +586,10 @@ static int sifive_fu540_prci_probe(struct platform_device *pdev) struct __prci_data *pd; int r; - pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL); + pd = devm_kzalloc(dev, + struct_size(pd, hw_clks.hws, + ARRAY_SIZE(__prci_init_clocks)), + GFP_KERNEL); if (!pd) return -ENOMEM; diff --git a/drivers/clk/sirf/clk-atlas6.c b/drivers/clk/sirf/clk-atlas6.c index c84d5bab7ac2..b95483bb6a5e 100644 --- a/drivers/clk/sirf/clk-atlas6.c +++ b/drivers/clk/sirf/clk-atlas6.c @@ -135,7 +135,7 @@ static void __init atlas6_clk_init(struct device_node *np) for (i = pll1; i < maxclk; i++) { atlas6_clks[i] = clk_register(NULL, atlas6_clk_hw_array[i]); - BUG_ON(!atlas6_clks[i]); + BUG_ON(IS_ERR(atlas6_clks[i])); } clk_register_clkdev(atlas6_clks[cpu], NULL, "cpu"); clk_register_clkdev(atlas6_clks[io], NULL, "io"); diff --git a/drivers/clk/sprd/pll.c b/drivers/clk/sprd/pll.c index 640270f51aa5..eb8862752c2b 100644 --- a/drivers/clk/sprd/pll.c +++ b/drivers/clk/sprd/pll.c @@ -105,7 +105,7 @@ static unsigned long _sprd_pll_recalc_rate(const struct sprd_pll *pll, cfg = kcalloc(regs_num, sizeof(*cfg), GFP_KERNEL); if (!cfg) - return -ENOMEM; + return parent_rate; for (i = 0; i < regs_num; i++) cfg[i] = sprd_pll_read(pll, i); diff --git a/drivers/clk/st/clk-flexgen.c b/drivers/clk/st/clk-flexgen.c index 4413b6e04a8e..55873d4b7603 100644 --- a/drivers/clk/st/clk-flexgen.c +++ b/drivers/clk/st/clk-flexgen.c @@ -375,6 +375,7 @@ static void __init st_of_flexgen_setup(struct device_node *np) break; } + flex_flags &= ~CLK_IS_CRITICAL; of_clk_detect_critical(np, i, &flex_flags); /* diff --git a/drivers/clk/sunxi/clk-sunxi.c b/drivers/clk/sunxi/clk-sunxi.c index 27201fd26e44..e1aa1fbac48a 100644 --- a/drivers/clk/sunxi/clk-sunxi.c +++ b/drivers/clk/sunxi/clk-sunxi.c @@ -90,7 +90,7 @@ static void sun6i_a31_get_pll1_factors(struct factors_request *req) * Round down the frequency to the closest multiple of either * 6 or 16 */ - u32 round_freq_6 = round_down(freq_mhz, 6); + u32 round_freq_6 = rounddown(freq_mhz, 6); u32 round_freq_16 = round_down(freq_mhz, 16); if (round_freq_6 > round_freq_16) diff --git a/drivers/clk/ti/composite.c b/drivers/clk/ti/composite.c index 6a89936ba03a..eaa43575cfa5 100644 --- a/drivers/clk/ti/composite.c +++ b/drivers/clk/ti/composite.c @@ -196,6 +196,7 @@ cleanup: if (!cclk->comp_clks[i]) continue; list_del(&cclk->comp_clks[i]->link); + 
kfree(cclk->comp_clks[i]->parent_names); kfree(cclk->comp_clks[i]); } diff --git a/drivers/clk/zynqmp/clkc.c b/drivers/clk/zynqmp/clkc.c index a11f93ecbf34..6f057ab9df03 100644 --- a/drivers/clk/zynqmp/clkc.c +++ b/drivers/clk/zynqmp/clkc.c @@ -558,7 +558,7 @@ static struct clk_hw *zynqmp_register_clk_topology(int clk_id, char *clk_name, { int j; u32 num_nodes, clk_dev_id; - char *clk_out = NULL; + char *clk_out[MAX_NODES]; struct clock_topology *nodes; struct clk_hw *hw = NULL; @@ -572,16 +572,16 @@ static struct clk_hw *zynqmp_register_clk_topology(int clk_id, char *clk_name, * Intermediate clock names are postfixed with type of clock. */ if (j != (num_nodes - 1)) { - clk_out = kasprintf(GFP_KERNEL, "%s%s", clk_name, + clk_out[j] = kasprintf(GFP_KERNEL, "%s%s", clk_name, clk_type_postfix[nodes[j].type]); } else { - clk_out = kasprintf(GFP_KERNEL, "%s", clk_name); + clk_out[j] = kasprintf(GFP_KERNEL, "%s", clk_name); } if (!clk_topology[nodes[j].type]) continue; - hw = (*clk_topology[nodes[j].type])(clk_out, clk_dev_id, + hw = (*clk_topology[nodes[j].type])(clk_out[j], clk_dev_id, parent_names, num_parents, &nodes[j]); @@ -590,9 +590,12 @@ static struct clk_hw *zynqmp_register_clk_topology(int clk_id, char *clk_name, __func__, clk_dev_id, clk_name, PTR_ERR(hw)); - parent_names[0] = clk_out; + parent_names[0] = clk_out[j]; } - kfree(clk_out); + + for (j = 0; j < num_nodes; j++) + kfree(clk_out[j]); + return hw; } diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c index 9a5464c625b4..4be83b4de2a0 100644 --- a/drivers/clocksource/arm_arch_timer.c +++ b/drivers/clocksource/arm_arch_timer.c @@ -69,7 +69,7 @@ static enum arch_timer_ppi_nr arch_timer_uses_ppi = ARCH_TIMER_VIRT_PPI; static bool arch_timer_c3stop; static bool arch_timer_mem_use_virtual; static bool arch_counter_suspend_stop; -static bool vdso_default = true; +static enum vdso_arch_clockmode vdso_default = VDSO_CLOCKMODE_ARCHTIMER; static cpumask_t evtstrm_available = CPU_MASK_NONE; static bool evtstrm_enable = IS_ENABLED(CONFIG_ARM_ARCH_TIMER_EVTSTREAM); @@ -476,6 +476,14 @@ static const struct arch_timer_erratum_workaround ool_workarounds[] = { .set_next_event_virt = erratum_set_next_event_tval_virt, }, #endif +#ifdef CONFIG_ARM64_ERRATUM_1418040 + { + .match_type = ate_match_local_cap_id, + .id = (void *)ARM64_WORKAROUND_1418040, + .desc = "ARM erratum 1418040", + .disable_compat_vdso = true, + }, +#endif }; typedef bool (*ate_match_fn_t)(const struct arch_timer_erratum_workaround *, @@ -560,8 +568,11 @@ void arch_timer_enable_workaround(const struct arch_timer_erratum_workaround *wa * change both the default value and the vdso itself. 
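The fu540-prci change above switches to struct_size() because a structure ending in a flexible array must be sized for its trailing elements as well. A plain C sketch with the size computed by hand; the struct and field names are made up for the demo:

#include <stdio.h>
#include <stdlib.h>

struct clk_slot { const char *name; };

struct prci_data {
    int nclks;
    struct clk_slot hws[];    /* flexible array member */
};

int main(void)
{
    size_t n = 4;
    /* sizeof(*pd) alone would leave hws[] with no storage at all */
    struct prci_data *pd = malloc(sizeof(*pd) + n * sizeof(pd->hws[0]));

    if (!pd)
        return 1;
    pd->nclks = (int)n;
    for (size_t i = 0; i < n; i++)
        pd->hws[i].name = "dummy";
    printf("allocated %zu bytes for %d clocks\n",
           sizeof(*pd) + n * sizeof(pd->hws[0]), pd->nclks);
    free(pd);
    return 0;
}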
*/ if (wa->read_cntvct_el0) { - clocksource_counter.archdata.vdso_direct = false; - vdso_default = false; + clocksource_counter.archdata.clock_mode = VDSO_CLOCKMODE_NONE; + vdso_default = VDSO_CLOCKMODE_NONE; + } else if (wa->disable_compat_vdso && vdso_default != VDSO_CLOCKMODE_NONE) { + vdso_default = VDSO_CLOCKMODE_ARCHTIMER_NOCOMPAT; + clocksource_counter.archdata.clock_mode = vdso_default; } } @@ -979,7 +990,7 @@ static void __init arch_counter_register(unsigned type) } arch_timer_read_counter = rd; - clocksource_counter.archdata.vdso_direct = vdso_default; + clocksource_counter.archdata.clock_mode = vdso_default; } else { arch_timer_read_counter = arch_counter_get_cntvct_mem; } diff --git a/drivers/clocksource/dw_apb_timer.c b/drivers/clocksource/dw_apb_timer.c index 654766538f93..10ce69548f1b 100644 --- a/drivers/clocksource/dw_apb_timer.c +++ b/drivers/clocksource/dw_apb_timer.c @@ -222,7 +222,8 @@ static int apbt_next_event(unsigned long delta, /** * dw_apb_clockevent_init() - use an APB timer as a clock_event_device * - * @cpu: The CPU the events will be targeted at. + * @cpu: The CPU the events will be targeted at or -1 if CPU affiliation + * isn't required. * @name: The name used for the timer and the IRQ for it. * @rating: The rating to give the timer. * @base: I/O base for the timer registers. @@ -257,7 +258,7 @@ dw_apb_clockevent_init(int cpu, const char *name, unsigned rating, dw_ced->ced.max_delta_ticks = 0x7fffffff; dw_ced->ced.min_delta_ns = clockevent_delta2ns(5000, &dw_ced->ced); dw_ced->ced.min_delta_ticks = 5000; - dw_ced->ced.cpumask = cpumask_of(cpu); + dw_ced->ced.cpumask = cpu < 0 ? cpu_possible_mask : cpumask_of(cpu); dw_ced->ced.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_DYNIRQ; dw_ced->ced.set_state_shutdown = apbt_shutdown; diff --git a/drivers/clocksource/dw_apb_timer_of.c b/drivers/clocksource/dw_apb_timer_of.c index 8c28b127759f..6921b91b61ef 100644 --- a/drivers/clocksource/dw_apb_timer_of.c +++ b/drivers/clocksource/dw_apb_timer_of.c @@ -147,10 +147,6 @@ static int num_called; static int __init dw_apb_timer_init(struct device_node *timer) { switch (num_called) { - case 0: - pr_debug("%s: found clockevent timer\n", __func__); - add_clockevent(timer); - break; case 1: pr_debug("%s: found clocksource timer\n", __func__); add_clocksource(timer); @@ -161,6 +157,8 @@ static int __init dw_apb_timer_init(struct device_node *timer) #endif break; default: + pr_debug("%s: found clockevent timer\n", __func__); + add_clockevent(timer); break; } diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm index 211a4fce638b..18f8d6a5fd37 100644 --- a/drivers/cpufreq/Kconfig.arm +++ b/drivers/cpufreq/Kconfig.arm @@ -41,6 +41,7 @@ config ARM_ARMADA_37XX_CPUFREQ config ARM_ARMADA_8K_CPUFREQ tristate "Armada 8K CPUFreq driver" depends on ARCH_MVEBU && CPUFREQ_DT + select ARMADA_AP_CPU_CLK help This enables the CPUFreq driver support for Marvell Armada8k SOCs. 
diff --git a/drivers/cpufreq/armada-37xx-cpufreq.c b/drivers/cpufreq/armada-37xx-cpufreq.c index aa0f06dec959..df1c941260d1 100644 --- a/drivers/cpufreq/armada-37xx-cpufreq.c +++ b/drivers/cpufreq/armada-37xx-cpufreq.c @@ -456,6 +456,7 @@ static int __init armada37xx_cpufreq_driver_init(void) /* Now that everything is setup, enable the DVFS at hardware level */ armada37xx_cpufreq_enable_dvfs(nb_pm_base); + memset(&pdata, 0, sizeof(pdata)); pdata.suspend = armada37xx_cpufreq_suspend; pdata.resume = armada37xx_cpufreq_resume; diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index fa988bd1e606..194a6587a1de 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -616,6 +616,24 @@ static struct cpufreq_governor *find_governor(const char *str_governor) return NULL; } +static struct cpufreq_governor *get_governor(const char *str_governor) +{ + struct cpufreq_governor *t; + + mutex_lock(&cpufreq_governor_mutex); + t = find_governor(str_governor); + if (!t) + goto unlock; + + if (!try_module_get(t->owner)) + t = NULL; + +unlock: + mutex_unlock(&cpufreq_governor_mutex); + + return t; +} + static unsigned int cpufreq_parse_policy(char *str_governor) { if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN)) @@ -635,28 +653,14 @@ static struct cpufreq_governor *cpufreq_parse_governor(char *str_governor) { struct cpufreq_governor *t; - mutex_lock(&cpufreq_governor_mutex); + t = get_governor(str_governor); + if (t) + return t; - t = find_governor(str_governor); - if (!t) { - int ret; + if (request_module("cpufreq_%s", str_governor)) + return NULL; - mutex_unlock(&cpufreq_governor_mutex); - - ret = request_module("cpufreq_%s", str_governor); - if (ret) - return NULL; - - mutex_lock(&cpufreq_governor_mutex); - - t = find_governor(str_governor); - } - if (t && !try_module_get(t->owner)) - t = NULL; - - mutex_unlock(&cpufreq_governor_mutex); - - return t; + return get_governor(str_governor); } /** @@ -810,12 +814,14 @@ static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy, goto out; } + mutex_lock(&cpufreq_governor_mutex); for_each_governor(t) { if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char)) - (CPUFREQ_NAME_LEN + 2))) - goto out; + break; i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name); } + mutex_unlock(&cpufreq_governor_mutex); out: i += sprintf(&buf[i], "\n"); return i; @@ -1053,15 +1059,17 @@ static int cpufreq_init_policy(struct cpufreq_policy *policy) struct cpufreq_governor *def_gov = cpufreq_default_governor(); struct cpufreq_governor *gov = NULL; unsigned int pol = CPUFREQ_POLICY_UNKNOWN; + int ret; if (has_target()) { /* Update policy governor to the one used before hotplug. 
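The get_governor() helper introduced above (and used in the policy-init path that continues below) takes the reference while the list lock is still held, so a governor cannot be unregistered between lookup and use. A userspace analogy; the refcount scheme and names here are invented stand-ins for try_module_get()/module_put():

#include <pthread.h>
#include <stdio.h>
#include <string.h>

struct governor { const char *name; int refcount; };

static pthread_mutex_t gov_lock = PTHREAD_MUTEX_INITIALIZER;
static struct governor governors[] = { { "performance", 0 }, { "schedutil", 0 } };

static struct governor *get_governor(const char *name)
{
    struct governor *found = NULL;

    pthread_mutex_lock(&gov_lock);
    for (size_t i = 0; i < sizeof(governors) / sizeof(governors[0]); i++) {
        if (!strcmp(governors[i].name, name)) {
            found = &governors[i];
            found->refcount++;    /* "try_module_get" stand-in, under the lock */
            break;
        }
    }
    pthread_mutex_unlock(&gov_lock);
    return found;
}

static void put_governor(struct governor *gov)
{
    pthread_mutex_lock(&gov_lock);
    gov->refcount--;              /* "module_put" stand-in */
    pthread_mutex_unlock(&gov_lock);
}

int main(void)
{
    struct governor *gov = get_governor("schedutil");

    if (gov) {
        printf("using %s (ref %d)\n", gov->name, gov->refcount);
        put_governor(gov);
    }
    return 0;
}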
*/ - gov = find_governor(policy->last_governor); + gov = get_governor(policy->last_governor); if (gov) { pr_debug("Restoring governor %s for cpu %d\n", policy->governor->name, policy->cpu); } else if (def_gov) { gov = def_gov; + __module_get(gov->owner); } else { return -ENODATA; } @@ -1084,7 +1092,11 @@ static int cpufreq_init_policy(struct cpufreq_policy *policy) return -ENODATA; } - return cpufreq_set_policy(policy, gov, pol); + ret = cpufreq_set_policy(policy, gov, pol); + if (gov) + module_put(gov->owner); + + return ret; } static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu) diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index d3d7c4ef7d04..b9ca89dc75c7 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -649,11 +649,12 @@ static int intel_pstate_set_energy_pref_index(struct cpudata *cpu_data, mutex_lock(&intel_pstate_limits_lock); if (boot_cpu_has(X86_FEATURE_HWP_EPP)) { - u64 value; - - ret = rdmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST, &value); - if (ret) - goto return_pref; + /* + * Use the cached HWP Request MSR value, because the register + * itself may be updated by intel_pstate_hwp_boost_up() or + * intel_pstate_hwp_boost_down() at any time. + */ + u64 value = READ_ONCE(cpu_data->hwp_req_cached); value &= ~GENMASK_ULL(31, 24); @@ -661,13 +662,18 @@ static int intel_pstate_set_energy_pref_index(struct cpudata *cpu_data, epp = epp_values[pref_index - 1]; value |= (u64)epp << 24; + /* + * The only other updater of hwp_req_cached in the active mode, + * intel_pstate_hwp_set(), is called under the same lock as this + * function, so it cannot run in parallel with the update below. + */ + WRITE_ONCE(cpu_data->hwp_req_cached, value); ret = wrmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST, value); } else { if (epp == -EINVAL) epp = (pref_index - 1) << 2; ret = intel_pstate_set_epb(cpu_data->cpu, epp); } -return_pref: mutex_unlock(&intel_pstate_limits_lock); return ret; @@ -756,7 +762,7 @@ static void intel_pstate_get_hwp_max(unsigned int cpu, int *phy_max, rdmsrl_on_cpu(cpu, MSR_HWP_CAPABILITIES, &cap); WRITE_ONCE(all_cpu_data[cpu]->hwp_cap_cached, cap); - if (global.no_turbo) + if (global.no_turbo || global.turbo_disabled) *current_max = HWP_GUARANTEED_PERF(cap); else *current_max = HWP_HIGHEST_PERF(cap); @@ -1571,6 +1577,7 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu) intel_pstate_get_hwp_max(cpu->cpu, &phy_max, ¤t_max); cpu->pstate.turbo_freq = phy_max * cpu->pstate.scaling; + cpu->pstate.turbo_pstate = phy_max; } else { cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * cpu->pstate.scaling; } @@ -2526,9 +2533,15 @@ static int intel_pstate_update_status(const char *buf, size_t size) { int ret; - if (size == 3 && !strncmp(buf, "off", size)) - return intel_pstate_driver ? 
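The intel_pstate change above stops reading back a register that other code may rewrite at any time; instead it keeps an authoritative cached copy, updates the cache first and then writes the hardware from it. A minimal sketch where the "MSR" is just a variable:

#include <stdint.h>
#include <stdio.h>

static uint64_t hwp_req_msr;       /* stand-in for MSR_HWP_REQUEST */
static uint64_t hwp_req_cached;    /* software copy of the last value written */

static void set_epp(unsigned int epp)
{
    uint64_t value = hwp_req_cached;    /* start from the cache, not the register */

    value &= ~(0xffULL << 24);          /* clear bits 31:24 (the EPP field) */
    value |= (uint64_t)epp << 24;

    hwp_req_cached = value;             /* cache first ... */
    hwp_req_msr = value;                /* ... then the register */
}

int main(void)
{
    hwp_req_cached = hwp_req_msr = 0x80002f05ULL;    /* arbitrary starting value */
    set_epp(0x20);
    printf("msr=0x%llx cached=0x%llx\n",
           (unsigned long long)hwp_req_msr,
           (unsigned long long)hwp_req_cached);
    return 0;
}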
- intel_pstate_unregister_driver() : -EINVAL; + if (size == 3 && !strncmp(buf, "off", size)) { + if (!intel_pstate_driver) + return -EINVAL; + + if (hwp_active) + return -EBUSY; + + return intel_pstate_unregister_driver(); + } if (size == 6 && !strncmp(buf, "active", size)) { if (intel_pstate_driver) { diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c index 29d2d7a21bd7..73f08cda21e0 100644 --- a/drivers/cpuidle/cpuidle.c +++ b/drivers/cpuidle/cpuidle.c @@ -148,7 +148,8 @@ static void enter_s2idle_proper(struct cpuidle_driver *drv, */ stop_critical_timings(); drv->states[index].enter_s2idle(dev, drv, index); - WARN_ON(!irqs_disabled()); + if (WARN_ON_ONCE(!irqs_disabled())) + local_irq_disable(); /* * timekeeping_resume() that will be called by tick_unfreeze() for the * first CPU executing it calls functions containing RCU read-side diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c index 2bb2683b493c..f8747322b3c7 100644 --- a/drivers/cpuidle/sysfs.c +++ b/drivers/cpuidle/sysfs.c @@ -480,7 +480,7 @@ static int cpuidle_add_state_sysfs(struct cpuidle_device *device) ret = kobject_init_and_add(&kobj->kobj, &ktype_state_cpuidle, &kdev->kobj, "state%d", i); if (ret) { - kfree(kobj); + kobject_put(&kobj->kobj); goto error_state; } cpuidle_add_s2idle_attr_group(kobj); @@ -611,7 +611,7 @@ static int cpuidle_add_driver_sysfs(struct cpuidle_device *dev) ret = kobject_init_and_add(&kdrv->kobj, &ktype_driver_cpuidle, &kdev->kobj, "driver"); if (ret) { - kfree(kdrv); + kobject_put(&kdrv->kobj); return ret; } @@ -705,7 +705,7 @@ int cpuidle_add_sysfs(struct cpuidle_device *dev) error = kobject_init_and_add(&kdev->kobj, &ktype_cpuidle, &cpu_dev->kobj, "cpuidle"); if (error) { - kfree(kdev); + kobject_put(&kdev->kobj); return error; } diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 06b2b3fa5206..0952f059d967 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -491,11 +491,9 @@ if CRYPTO_DEV_UX500 endif # if CRYPTO_DEV_UX500 config CRYPTO_DEV_ATMEL_AUTHENC - tristate "Support for Atmel IPSEC/SSL hw accelerator" + bool "Support for Atmel IPSEC/SSL hw accelerator" depends on ARCH_AT91 || COMPILE_TEST - select CRYPTO_AUTHENC - select CRYPTO_DEV_ATMEL_AES - select CRYPTO_DEV_ATMEL_SHA + depends on CRYPTO_DEV_ATMEL_AES help Some Atmel processors can combine the AES and SHA hw accelerators to enhance support of IPSEC/SSL. @@ -508,6 +506,8 @@ config CRYPTO_DEV_ATMEL_AES select CRYPTO_AES select CRYPTO_AEAD select CRYPTO_BLKCIPHER + select CRYPTO_AUTHENC if CRYPTO_DEV_ATMEL_AUTHENC + select CRYPTO_DEV_ATMEL_SHA if CRYPTO_DEV_ATMEL_AUTHENC help Some Atmel processors have AES hw accelerator. 
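The cpuidle sysfs fixes above all follow one rule: once an object has been handed to the refcounting framework via kobject_init_and_add(), an error path must drop the reference with the framework's put helper so the release callback frees it, never kfree() directly. A userspace analogy with an invented mini "kobject":

#include <stdio.h>
#include <stdlib.h>

struct mini_kobj {
    int refcount;
    void (*release)(struct mini_kobj *kobj);
};

static void mini_kobj_put(struct mini_kobj *kobj)
{
    if (--kobj->refcount == 0)
        kobj->release(kobj);    /* single, well-defined free path */
}

struct state_obj { struct mini_kobj kobj; char name[16]; };

static void state_release(struct mini_kobj *kobj)
{
    /* kobj is the first member, so this cast mirrors container_of() */
    free((struct state_obj *)kobj);
    printf("released via release callback\n");
}

int main(void)
{
    struct state_obj *obj = calloc(1, sizeof(*obj));

    if (!obj)
        return 1;
    obj->kobj.refcount = 1;
    obj->kobj.release = state_release;

    /* pretend init_and_add() failed: drop the reference, do not free() directly */
    mini_kobj_put(&obj->kobj);
    return 0;
}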
Select this if you want to use the Atmel module for diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c index 0ea621fb7483..c7345161b63d 100644 --- a/drivers/crypto/caam/caamalg.c +++ b/drivers/crypto/caam/caamalg.c @@ -1958,6 +1958,21 @@ static struct caam_skcipher_alg driver_algs[] = { }, .caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, }, + { + .skcipher = { + .base = { + .cra_name = "ecb(arc4)", + .cra_driver_name = "ecb-arc4-caam", + .cra_blocksize = ARC4_BLOCK_SIZE, + }, + .setkey = arc4_skcipher_setkey, + .encrypt = skcipher_encrypt, + .decrypt = skcipher_decrypt, + .min_keysize = ARC4_MIN_KEY_SIZE, + .max_keysize = ARC4_MAX_KEY_SIZE, + }, + .caam.class1_alg_type = OP_ALG_ALGSEL_ARC4 | OP_ALG_AAI_ECB, + }, { .skcipher = { .base = { @@ -2094,21 +2109,6 @@ static struct caam_skcipher_alg driver_algs[] = { }, .caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_ECB, }, - { - .skcipher = { - .base = { - .cra_name = "ecb(arc4)", - .cra_driver_name = "ecb-arc4-caam", - .cra_blocksize = ARC4_BLOCK_SIZE, - }, - .setkey = arc4_skcipher_setkey, - .encrypt = skcipher_encrypt, - .decrypt = skcipher_decrypt, - .min_keysize = ARC4_MIN_KEY_SIZE, - .max_keysize = ARC4_MAX_KEY_SIZE, - }, - .caam.class1_alg_type = OP_ALG_ALGSEL_ARC4 | OP_ALG_AAI_ECB, - }, }; static struct caam_aead_alg driver_aeads[] = { @@ -3652,7 +3652,7 @@ int caam_algapi_init(struct device *ctrldev) /* Skip ARC4 algorithms if not supported by device */ if (!arc4_inst && alg_sel == OP_ALG_ALGSEL_ARC4) - continue; + continue; /* * Check support for AES modes not available diff --git a/drivers/crypto/cavium/cpt/cptvf_algs.c b/drivers/crypto/cavium/cpt/cptvf_algs.c index 596ce28b957d..2410b23aa609 100644 --- a/drivers/crypto/cavium/cpt/cptvf_algs.c +++ b/drivers/crypto/cavium/cpt/cptvf_algs.c @@ -200,6 +200,7 @@ static inline int cvm_enc_dec(struct ablkcipher_request *req, u32 enc) int status; memset(req_info, 0, sizeof(struct cpt_request_info)); + req_info->may_sleep = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) != 0; memset(fctx, 0, sizeof(struct fc_context)); create_input_list(req, enc, enc_iv_len); create_output_list(req, enc_iv_len); diff --git a/drivers/crypto/cavium/cpt/cptvf_reqmanager.c b/drivers/crypto/cavium/cpt/cptvf_reqmanager.c index 7a24019356b5..e343249c8d05 100644 --- a/drivers/crypto/cavium/cpt/cptvf_reqmanager.c +++ b/drivers/crypto/cavium/cpt/cptvf_reqmanager.c @@ -133,7 +133,7 @@ static inline int setup_sgio_list(struct cpt_vf *cptvf, /* Setup gather (input) components */ g_sz_bytes = ((req->incnt + 3) / 4) * sizeof(struct sglist_component); - info->gather_components = kzalloc(g_sz_bytes, GFP_KERNEL); + info->gather_components = kzalloc(g_sz_bytes, req->may_sleep ? GFP_KERNEL : GFP_ATOMIC); if (!info->gather_components) { ret = -ENOMEM; goto scatter_gather_clean; @@ -150,7 +150,7 @@ static inline int setup_sgio_list(struct cpt_vf *cptvf, /* Setup scatter (output) components */ s_sz_bytes = ((req->outcnt + 3) / 4) * sizeof(struct sglist_component); - info->scatter_components = kzalloc(s_sz_bytes, GFP_KERNEL); + info->scatter_components = kzalloc(s_sz_bytes, req->may_sleep ? GFP_KERNEL : GFP_ATOMIC); if (!info->scatter_components) { ret = -ENOMEM; goto scatter_gather_clean; @@ -167,7 +167,7 @@ static inline int setup_sgio_list(struct cpt_vf *cptvf, /* Create and initialize DPTR */ info->dlen = g_sz_bytes + s_sz_bytes + SG_LIST_HDR_SIZE; - info->in_buffer = kzalloc(info->dlen, GFP_KERNEL); + info->in_buffer = kzalloc(info->dlen, req->may_sleep ? 
GFP_KERNEL : GFP_ATOMIC); if (!info->in_buffer) { ret = -ENOMEM; goto scatter_gather_clean; @@ -195,7 +195,7 @@ static inline int setup_sgio_list(struct cpt_vf *cptvf, } /* Create and initialize RPTR */ - info->out_buffer = kzalloc(COMPLETION_CODE_SIZE, GFP_KERNEL); + info->out_buffer = kzalloc(COMPLETION_CODE_SIZE, req->may_sleep ? GFP_KERNEL : GFP_ATOMIC); if (!info->out_buffer) { ret = -ENOMEM; goto scatter_gather_clean; @@ -421,7 +421,7 @@ int process_request(struct cpt_vf *cptvf, struct cpt_request_info *req) struct cpt_vq_command vq_cmd; union cpt_inst_s cptinst; - info = kzalloc(sizeof(*info), GFP_KERNEL); + info = kzalloc(sizeof(*info), req->may_sleep ? GFP_KERNEL : GFP_ATOMIC); if (unlikely(!info)) { dev_err(&pdev->dev, "Unable to allocate memory for info_buffer\n"); return -ENOMEM; @@ -443,7 +443,7 @@ int process_request(struct cpt_vf *cptvf, struct cpt_request_info *req) * Get buffer for union cpt_res_s response * structure and its physical address */ - info->completion_addr = kzalloc(sizeof(union cpt_res_s), GFP_KERNEL); + info->completion_addr = kzalloc(sizeof(union cpt_res_s), req->may_sleep ? GFP_KERNEL : GFP_ATOMIC); if (unlikely(!info->completion_addr)) { dev_err(&pdev->dev, "Unable to allocate memory for completion_addr\n"); ret = -ENOMEM; diff --git a/drivers/crypto/cavium/cpt/request_manager.h b/drivers/crypto/cavium/cpt/request_manager.h index 3514b082eca7..1e8dd9ebcc17 100644 --- a/drivers/crypto/cavium/cpt/request_manager.h +++ b/drivers/crypto/cavium/cpt/request_manager.h @@ -62,6 +62,8 @@ struct cpt_request_info { union ctrl_info ctrl; /* User control information */ struct cptvf_request req; /* Request Information (Core specific) */ + bool may_sleep; + struct buf_ptr in[MAX_BUF_CNT]; struct buf_ptr out[MAX_BUF_CNT]; diff --git a/drivers/crypto/ccp/Kconfig b/drivers/crypto/ccp/Kconfig index 8fec733f567f..63e227adbb13 100644 --- a/drivers/crypto/ccp/Kconfig +++ b/drivers/crypto/ccp/Kconfig @@ -10,10 +10,9 @@ config CRYPTO_DEV_CCP_DD config CRYPTO_DEV_SP_CCP bool "Cryptographic Coprocessor device" default y - depends on CRYPTO_DEV_CCP_DD + depends on CRYPTO_DEV_CCP_DD && DMADEVICES select HW_RANDOM select DMA_ENGINE - select DMADEVICES select CRYPTO_SHA1 select CRYPTO_SHA256 help diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h index 3f68262d9ab4..87a34d91fdf7 100644 --- a/drivers/crypto/ccp/ccp-dev.h +++ b/drivers/crypto/ccp/ccp-dev.h @@ -469,6 +469,7 @@ struct ccp_sg_workarea { unsigned int sg_used; struct scatterlist *dma_sg; + struct scatterlist *dma_sg_head; struct device *dma_dev; unsigned int dma_count; enum dma_data_direction dma_dir; diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c index c8da8eb160da..64112c736810 100644 --- a/drivers/crypto/ccp/ccp-ops.c +++ b/drivers/crypto/ccp/ccp-ops.c @@ -63,7 +63,7 @@ static u32 ccp_gen_jobid(struct ccp_device *ccp) static void ccp_sg_free(struct ccp_sg_workarea *wa) { if (wa->dma_count) - dma_unmap_sg(wa->dma_dev, wa->dma_sg, wa->nents, wa->dma_dir); + dma_unmap_sg(wa->dma_dev, wa->dma_sg_head, wa->nents, wa->dma_dir); wa->dma_count = 0; } @@ -92,6 +92,7 @@ static int ccp_init_sg_workarea(struct ccp_sg_workarea *wa, struct device *dev, return 0; wa->dma_sg = sg; + wa->dma_sg_head = sg; wa->dma_dev = dev; wa->dma_dir = dma_dir; wa->dma_count = dma_map_sg(dev, sg, wa->nents, dma_dir); @@ -104,14 +105,28 @@ static int ccp_init_sg_workarea(struct ccp_sg_workarea *wa, struct device *dev, static void ccp_update_sg_workarea(struct ccp_sg_workarea *wa, unsigned int len) { 
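The cavium CPT changes above (and the hisilicon sec changes further below) share one pattern: derive a "may sleep" flag from the crypto request once, then pick GFP_KERNEL or GFP_ATOMIC at every allocation site from it. A sketch with GFP flags mocked as an enum; the flag value is borrowed from the kernel's CRYPTO_TFM_REQ_MAY_SLEEP:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define CRYPTO_TFM_REQ_MAY_SLEEP 0x200

enum gfp { GFP_ATOMIC, GFP_KERNEL };

static void *mock_alloc(size_t size, enum gfp gfp)
{
    printf("alloc %zu bytes with %s\n", size,
           gfp == GFP_KERNEL ? "GFP_KERNEL" : "GFP_ATOMIC");
    return calloc(1, size);
}

static int handle_request(unsigned int base_flags)
{
    bool may_sleep = (base_flags & CRYPTO_TFM_REQ_MAY_SLEEP) != 0;
    void *sgl = mock_alloc(128, may_sleep ? GFP_KERNEL : GFP_ATOMIC);

    if (!sgl)
        return -1;
    free(sgl);
    return 0;
}

int main(void)
{
    handle_request(CRYPTO_TFM_REQ_MAY_SLEEP);    /* process context */
    handle_request(0);                           /* atomic/softirq context */
    return 0;
}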
unsigned int nbytes = min_t(u64, len, wa->bytes_left); + unsigned int sg_combined_len = 0; if (!wa->sg) return; wa->sg_used += nbytes; wa->bytes_left -= nbytes; - if (wa->sg_used == wa->sg->length) { - wa->sg = sg_next(wa->sg); + if (wa->sg_used == sg_dma_len(wa->dma_sg)) { + /* Advance to the next DMA scatterlist entry */ + wa->dma_sg = sg_next(wa->dma_sg); + + /* In the case that the DMA mapped scatterlist has entries + * that have been merged, the non-DMA mapped scatterlist + * must be advanced multiple times for each merged entry. + * This ensures that the current non-DMA mapped entry + * corresponds to the current DMA mapped entry. + */ + do { + sg_combined_len += wa->sg->length; + wa->sg = sg_next(wa->sg); + } while (wa->sg_used > sg_combined_len); + wa->sg_used = 0; } } @@ -299,7 +314,7 @@ static unsigned int ccp_queue_buf(struct ccp_data *data, unsigned int from) /* Update the structures and generate the count */ buf_count = 0; while (sg_wa->bytes_left && (buf_count < dm_wa->length)) { - nbytes = min(sg_wa->sg->length - sg_wa->sg_used, + nbytes = min(sg_dma_len(sg_wa->dma_sg) - sg_wa->sg_used, dm_wa->length - buf_count); nbytes = min_t(u64, sg_wa->bytes_left, nbytes); @@ -331,11 +346,11 @@ static void ccp_prepare_data(struct ccp_data *src, struct ccp_data *dst, * and destination. The resulting len values will always be <= UINT_MAX * because the dma length is an unsigned int. */ - sg_src_len = sg_dma_len(src->sg_wa.sg) - src->sg_wa.sg_used; + sg_src_len = sg_dma_len(src->sg_wa.dma_sg) - src->sg_wa.sg_used; sg_src_len = min_t(u64, src->sg_wa.bytes_left, sg_src_len); if (dst) { - sg_dst_len = sg_dma_len(dst->sg_wa.sg) - dst->sg_wa.sg_used; + sg_dst_len = sg_dma_len(dst->sg_wa.dma_sg) - dst->sg_wa.sg_used; sg_dst_len = min_t(u64, src->sg_wa.bytes_left, sg_dst_len); op_len = min(sg_src_len, sg_dst_len); } else { @@ -365,7 +380,7 @@ static void ccp_prepare_data(struct ccp_data *src, struct ccp_data *dst, /* Enough data in the sg element, but we need to * adjust for any previously copied data */ - op->src.u.dma.address = sg_dma_address(src->sg_wa.sg); + op->src.u.dma.address = sg_dma_address(src->sg_wa.dma_sg); op->src.u.dma.offset = src->sg_wa.sg_used; op->src.u.dma.length = op_len & ~(block_size - 1); @@ -386,7 +401,7 @@ static void ccp_prepare_data(struct ccp_data *src, struct ccp_data *dst, /* Enough room in the sg element, but we need to * adjust for any previously used area */ - op->dst.u.dma.address = sg_dma_address(dst->sg_wa.sg); + op->dst.u.dma.address = sg_dma_address(dst->sg_wa.dma_sg); op->dst.u.dma.offset = dst->sg_wa.sg_used; op->dst.u.dma.length = op->src.u.dma.length; } @@ -1777,8 +1792,9 @@ ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) LSB_ITEM_SIZE); break; default: + kfree(hmac_buf); ret = -EINVAL; - goto e_ctx; + goto e_data; } memset(&hmac_cmd, 0, sizeof(hmac_cmd)); @@ -2027,7 +2043,7 @@ ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) dst.sg_wa.sg_used = 0; for (i = 1; i <= src.sg_wa.dma_count; i++) { if (!dst.sg_wa.sg || - (dst.sg_wa.sg->length < src.sg_wa.sg->length)) { + (sg_dma_len(dst.sg_wa.sg) < sg_dma_len(src.sg_wa.sg))) { ret = -EINVAL; goto e_dst; } @@ -2053,8 +2069,8 @@ ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) goto e_dst; } - dst.sg_wa.sg_used += src.sg_wa.sg->length; - if (dst.sg_wa.sg_used == dst.sg_wa.sg->length) { + dst.sg_wa.sg_used += sg_dma_len(src.sg_wa.sg); + if (dst.sg_wa.sg_used == sg_dma_len(dst.sg_wa.sg)) { dst.sg_wa.sg = sg_next(dst.sg_wa.sg); dst.sg_wa.sg_used = 0; } 
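The ccp scatterlist fix above exists because DMA mapping may merge adjacent entries: finishing one DMA entry can mean stepping over several entries of the original list. A small userspace model of that walk, with arbitrary demo lengths standing in for sg_dma_len() values:

#include <stdio.h>

int main(void)
{
    /* original scatterlist entry lengths */
    unsigned int sg_len[]  = { 4096, 4096, 4096, 2048 };
    /* DMA-mapped view: the first three entries were merged by the IOMMU */
    unsigned int dma_len[] = { 12288, 2048 };

    unsigned int sg = 0, sg_covered = 0, offset = 0;

    for (unsigned int dma = 0; dma < 2; dma++) {
        offset += dma_len[dma];    /* bytes consumed once this DMA entry is done */

        /* advance the original list until it covers that many bytes */
        while (sg_covered < offset)
            sg_covered += sg_len[sg++];

        printf("DMA entry %u done: next original entry is %u\n", dma, sg);
    }
    return 0;
}

Keeping the two cursors separate (dma_sg vs the original sg) is exactly what the new dma_sg/dma_sg_head bookkeeping provides.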
diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c index cd9c60268bf8..9bf0cce578f0 100644 --- a/drivers/crypto/ccree/cc_cipher.c +++ b/drivers/crypto/ccree/cc_cipher.c @@ -163,7 +163,6 @@ static int cc_cipher_init(struct crypto_tfm *tfm) skcipher_alg.base); struct device *dev = drvdata_to_dev(cc_alg->drvdata); unsigned int max_key_buf_size = cc_alg->skcipher_alg.max_keysize; - int rc = 0; dev_dbg(dev, "Initializing context @%p for %s\n", ctx_p, crypto_tfm_alg_name(tfm)); @@ -175,10 +174,19 @@ static int cc_cipher_init(struct crypto_tfm *tfm) ctx_p->flow_mode = cc_alg->flow_mode; ctx_p->drvdata = cc_alg->drvdata; + if (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) { + /* Alloc hash tfm for essiv */ + ctx_p->shash_tfm = crypto_alloc_shash("sha256-generic", 0, 0); + if (IS_ERR(ctx_p->shash_tfm)) { + dev_err(dev, "Error allocating hash tfm for ESSIV.\n"); + return PTR_ERR(ctx_p->shash_tfm); + } + } + /* Allocate key buffer, cache line aligned */ ctx_p->user.key = kmalloc(max_key_buf_size, GFP_KERNEL); if (!ctx_p->user.key) - return -ENOMEM; + goto free_shash; dev_dbg(dev, "Allocated key buffer in context. key=@%p\n", ctx_p->user.key); @@ -190,21 +198,19 @@ static int cc_cipher_init(struct crypto_tfm *tfm) if (dma_mapping_error(dev, ctx_p->user.key_dma_addr)) { dev_err(dev, "Mapping Key %u B at va=%pK for DMA failed\n", max_key_buf_size, ctx_p->user.key); - return -ENOMEM; + goto free_key; } dev_dbg(dev, "Mapped key %u B at va=%pK to dma=%pad\n", max_key_buf_size, ctx_p->user.key, &ctx_p->user.key_dma_addr); - if (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) { - /* Alloc hash tfm for essiv */ - ctx_p->shash_tfm = crypto_alloc_shash("sha256-generic", 0, 0); - if (IS_ERR(ctx_p->shash_tfm)) { - dev_err(dev, "Error allocating hash tfm for ESSIV.\n"); - return PTR_ERR(ctx_p->shash_tfm); - } - } + return 0; - return rc; +free_key: + kfree(ctx_p->user.key); +free_shash: + crypto_free_shash(ctx_p->shash_tfm); + + return -ENOMEM; } static void cc_cipher_exit(struct crypto_tfm *tfm) diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c index 01dd418bdadc..fe2eadc0ce83 100644 --- a/drivers/crypto/chelsio/chcr_algo.c +++ b/drivers/crypto/chelsio/chcr_algo.c @@ -2818,7 +2818,7 @@ static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl, unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC; unsigned int c_id = a_ctx(tfm)->tx_chan_id; unsigned int ccm_xtra; - unsigned char tag_offset = 0, auth_offset = 0; + unsigned int tag_offset = 0, auth_offset = 0; unsigned int assoclen; if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) diff --git a/drivers/crypto/hisilicon/sec/sec_algs.c b/drivers/crypto/hisilicon/sec/sec_algs.c index c27e7160d2df..4ad4ffd90cee 100644 --- a/drivers/crypto/hisilicon/sec/sec_algs.c +++ b/drivers/crypto/hisilicon/sec/sec_algs.c @@ -175,7 +175,8 @@ static int sec_alloc_and_fill_hw_sgl(struct sec_hw_sgl **sec_sgl, dma_addr_t *psec_sgl, struct scatterlist *sgl, int count, - struct sec_dev_info *info) + struct sec_dev_info *info, + gfp_t gfp) { struct sec_hw_sgl *sgl_current = NULL; struct sec_hw_sgl *sgl_next; @@ -190,7 +191,7 @@ static int sec_alloc_and_fill_hw_sgl(struct sec_hw_sgl **sec_sgl, sge_index = i % SEC_MAX_SGE_NUM; if (sge_index == 0) { sgl_next = dma_pool_zalloc(info->hw_sgl_pool, - GFP_KERNEL, &sgl_next_dma); + gfp, &sgl_next_dma); if (!sgl_next) { ret = -ENOMEM; goto err_free_hw_sgls; @@ -545,14 +546,14 @@ void sec_alg_callback(struct sec_bd_info *resp, void *shadow) } static int 
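The cc_cipher_init() rework above reorders acquisition so the error path can unwind in reverse via labelled cleanup targets, and an early failure only releases what was actually set up. A runnable sketch of that goto-cleanup shape; the malloc()s stand in for the shash tfm, the key buffer and the DMA mapping:

#include <stdio.h>
#include <stdlib.h>

static void *try_alloc(size_t size, int fail)
{
    return fail ? NULL : malloc(size);
}

static int init_ctx(int fail_at)
{
    void *shash, *key, *mapping;

    shash = try_alloc(32, fail_at == 1);    /* step 1: hash transform */
    if (!shash)
        return -1;

    key = try_alloc(64, fail_at == 2);      /* step 2: key buffer */
    if (!key)
        goto free_shash;

    mapping = try_alloc(16, fail_at == 3);  /* step 3: DMA mapping stand-in */
    if (!mapping)
        goto free_key;

    /* success: a real driver keeps these in the context; freed here for the demo */
    free(mapping);
    free(key);
    free(shash);
    return 0;

free_key:
    free(key);
free_shash:
    free(shash);
    return -1;
}

int main(void)
{
    printf("fail at step 3 -> %d\n", init_ctx(3));
    printf("success        -> %d\n", init_ctx(0));
    return 0;
}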
sec_alg_alloc_and_calc_split_sizes(int length, size_t **split_sizes, - int *steps) + int *steps, gfp_t gfp) { size_t *sizes; int i; /* Split into suitable sized blocks */ *steps = roundup(length, SEC_REQ_LIMIT) / SEC_REQ_LIMIT; - sizes = kcalloc(*steps, sizeof(*sizes), GFP_KERNEL); + sizes = kcalloc(*steps, sizeof(*sizes), gfp); if (!sizes) return -ENOMEM; @@ -568,7 +569,7 @@ static int sec_map_and_split_sg(struct scatterlist *sgl, size_t *split_sizes, int steps, struct scatterlist ***splits, int **splits_nents, int sgl_len_in, - struct device *dev) + struct device *dev, gfp_t gfp) { int ret, count; @@ -576,12 +577,12 @@ static int sec_map_and_split_sg(struct scatterlist *sgl, size_t *split_sizes, if (!count) return -EINVAL; - *splits = kcalloc(steps, sizeof(struct scatterlist *), GFP_KERNEL); + *splits = kcalloc(steps, sizeof(struct scatterlist *), gfp); if (!*splits) { ret = -ENOMEM; goto err_unmap_sg; } - *splits_nents = kcalloc(steps, sizeof(int), GFP_KERNEL); + *splits_nents = kcalloc(steps, sizeof(int), gfp); if (!*splits_nents) { ret = -ENOMEM; goto err_free_splits; @@ -589,7 +590,7 @@ static int sec_map_and_split_sg(struct scatterlist *sgl, size_t *split_sizes, /* output the scatter list before and after this */ ret = sg_split(sgl, count, 0, steps, split_sizes, - *splits, *splits_nents, GFP_KERNEL); + *splits, *splits_nents, gfp); if (ret) { ret = -ENOMEM; goto err_free_splits_nents; @@ -630,13 +631,13 @@ static struct sec_request_el int el_size, bool different_dest, struct scatterlist *sgl_in, int n_ents_in, struct scatterlist *sgl_out, int n_ents_out, - struct sec_dev_info *info) + struct sec_dev_info *info, gfp_t gfp) { struct sec_request_el *el; struct sec_bd_info *req; int ret; - el = kzalloc(sizeof(*el), GFP_KERNEL); + el = kzalloc(sizeof(*el), gfp); if (!el) return ERR_PTR(-ENOMEM); el->el_length = el_size; @@ -668,7 +669,7 @@ static struct sec_request_el el->sgl_in = sgl_in; ret = sec_alloc_and_fill_hw_sgl(&el->in, &el->dma_in, el->sgl_in, - n_ents_in, info); + n_ents_in, info, gfp); if (ret) goto err_free_el; @@ -679,7 +680,7 @@ static struct sec_request_el el->sgl_out = sgl_out; ret = sec_alloc_and_fill_hw_sgl(&el->out, &el->dma_out, el->sgl_out, - n_ents_out, info); + n_ents_out, info, gfp); if (ret) goto err_free_hw_sgl_in; @@ -720,6 +721,7 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq, int *splits_out_nents = NULL; struct sec_request_el *el, *temp; bool split = skreq->src != skreq->dst; + gfp_t gfp = skreq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC; mutex_init(&sec_req->lock); sec_req->req_base = &skreq->base; @@ -728,13 +730,13 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq, sec_req->len_in = sg_nents(skreq->src); ret = sec_alg_alloc_and_calc_split_sizes(skreq->cryptlen, &split_sizes, - &steps); + &steps, gfp); if (ret) return ret; sec_req->num_elements = steps; ret = sec_map_and_split_sg(skreq->src, split_sizes, steps, &splits_in, &splits_in_nents, sec_req->len_in, - info->dev); + info->dev, gfp); if (ret) goto err_free_split_sizes; @@ -742,7 +744,7 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq, sec_req->len_out = sg_nents(skreq->dst); ret = sec_map_and_split_sg(skreq->dst, split_sizes, steps, &splits_out, &splits_out_nents, - sec_req->len_out, info->dev); + sec_req->len_out, info->dev, gfp); if (ret) goto err_unmap_in_sg; } @@ -775,7 +777,7 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq, splits_in[i], splits_in_nents[i], split ? 
splits_out[i] : NULL, split ? splits_out_nents[i] : 0, - info); + info, gfp); if (IS_ERR(el)) { ret = PTR_ERR(el); goto err_free_elements; diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c index ac80bc6af093..aba5db3c0588 100644 --- a/drivers/crypto/omap-sham.c +++ b/drivers/crypto/omap-sham.c @@ -165,8 +165,6 @@ struct omap_sham_hmac_ctx { }; struct omap_sham_ctx { - struct omap_sham_dev *dd; - unsigned long flags; /* fallback stuff */ @@ -918,27 +916,35 @@ static int omap_sham_update_dma_stop(struct omap_sham_dev *dd) return 0; } +struct omap_sham_dev *omap_sham_find_dev(struct omap_sham_reqctx *ctx) +{ + struct omap_sham_dev *dd; + + if (ctx->dd) + return ctx->dd; + + spin_lock_bh(&sham.lock); + dd = list_first_entry(&sham.dev_list, struct omap_sham_dev, list); + list_move_tail(&dd->list, &sham.dev_list); + ctx->dd = dd; + spin_unlock_bh(&sham.lock); + + return dd; +} + static int omap_sham_init(struct ahash_request *req) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm); struct omap_sham_reqctx *ctx = ahash_request_ctx(req); - struct omap_sham_dev *dd = NULL, *tmp; + struct omap_sham_dev *dd; int bs = 0; - spin_lock_bh(&sham.lock); - if (!tctx->dd) { - list_for_each_entry(tmp, &sham.dev_list, list) { - dd = tmp; - break; - } - tctx->dd = dd; - } else { - dd = tctx->dd; - } - spin_unlock_bh(&sham.lock); + ctx->dd = NULL; - ctx->dd = dd; + dd = omap_sham_find_dev(ctx); + if (!dd) + return -ENODEV; ctx->flags = 0; @@ -1187,8 +1193,7 @@ err1: static int omap_sham_enqueue(struct ahash_request *req, unsigned int op) { struct omap_sham_reqctx *ctx = ahash_request_ctx(req); - struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm); - struct omap_sham_dev *dd = tctx->dd; + struct omap_sham_dev *dd = ctx->dd; ctx->op = op; @@ -1198,7 +1203,7 @@ static int omap_sham_enqueue(struct ahash_request *req, unsigned int op) static int omap_sham_update(struct ahash_request *req) { struct omap_sham_reqctx *ctx = ahash_request_ctx(req); - struct omap_sham_dev *dd = ctx->dd; + struct omap_sham_dev *dd = omap_sham_find_dev(ctx); if (!req->nbytes) return 0; @@ -1302,21 +1307,8 @@ static int omap_sham_setkey(struct crypto_ahash *tfm, const u8 *key, struct omap_sham_hmac_ctx *bctx = tctx->base; int bs = crypto_shash_blocksize(bctx->shash); int ds = crypto_shash_digestsize(bctx->shash); - struct omap_sham_dev *dd = NULL, *tmp; int err, i; - spin_lock_bh(&sham.lock); - if (!tctx->dd) { - list_for_each_entry(tmp, &sham.dev_list, list) { - dd = tmp; - break; - } - tctx->dd = dd; - } else { - dd = tctx->dd; - } - spin_unlock_bh(&sham.lock); - err = crypto_shash_setkey(tctx->fallback, key, keylen); if (err) return err; @@ -1334,7 +1326,7 @@ static int omap_sham_setkey(struct crypto_ahash *tfm, const u8 *key, memset(bctx->ipad + keylen, 0, bs - keylen); - if (!test_bit(FLAGS_AUTO_XOR, &dd->flags)) { + if (!test_bit(FLAGS_AUTO_XOR, &sham.flags)) { memcpy(bctx->opad, bctx->ipad, bs); for (i = 0; i < bs; i++) { @@ -2136,6 +2128,7 @@ static int omap_sham_probe(struct platform_device *pdev) } dd->flags |= dd->pdata->flags; + sham.flags |= dd->pdata->flags; pm_runtime_use_autosuspend(dev); pm_runtime_set_autosuspend_delay(dev, DEFAULT_AUTOSUSPEND_DELAY); @@ -2163,6 +2156,9 @@ static int omap_sham_probe(struct platform_device *pdev) spin_unlock(&sham.lock); for (i = 0; i < dd->pdata->algs_info_size; i++) { + if (dd->pdata->algs_info[i].registered) + break; + for (j = 0; j < dd->pdata->algs_info[i].size; j++) { struct ahash_alg *alg; @@ -2214,9 
+2210,11 @@ static int omap_sham_remove(struct platform_device *pdev) list_del(&dd->list); spin_unlock(&sham.lock); for (i = dd->pdata->algs_info_size - 1; i >= 0; i--) - for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--) + for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--) { crypto_unregister_ahash( &dd->pdata->algs_info[i].algs_list[j]); + dd->pdata->algs_info[i].registered--; + } tasklet_kill(&dd->done_task); pm_runtime_disable(&pdev->dev); diff --git a/drivers/crypto/qat/qat_common/qat_uclo.c b/drivers/crypto/qat/qat_common/qat_uclo.c index 6bd8f6a2a24f..aeb03081415c 100644 --- a/drivers/crypto/qat/qat_common/qat_uclo.c +++ b/drivers/crypto/qat/qat_common/qat_uclo.c @@ -332,13 +332,18 @@ static int qat_uclo_create_batch_init_list(struct icp_qat_fw_loader_handle } return 0; out_err: + /* Do not free the list head unless we allocated it. */ + tail_old = tail_old->next; + if (flag) { + kfree(*init_tab_base); + *init_tab_base = NULL; + } + while (tail_old) { mem_init = tail_old->next; kfree(tail_old); tail_old = mem_init; } - if (flag) - kfree(*init_tab_base); return -ENOMEM; } diff --git a/drivers/crypto/stm32/stm32-crc32.c b/drivers/crypto/stm32/stm32-crc32.c index 9e11c3480353..e68b856d03b6 100644 --- a/drivers/crypto/stm32/stm32-crc32.c +++ b/drivers/crypto/stm32/stm32-crc32.c @@ -28,8 +28,10 @@ /* Registers values */ #define CRC_CR_RESET BIT(0) -#define CRC_CR_REVERSE (BIT(7) | BIT(6) | BIT(5)) -#define CRC_INIT_DEFAULT 0xFFFFFFFF +#define CRC_CR_REV_IN_WORD (BIT(6) | BIT(5)) +#define CRC_CR_REV_IN_BYTE BIT(5) +#define CRC_CR_REV_OUT BIT(7) +#define CRC32C_INIT_DEFAULT 0xFFFFFFFF #define CRC_AUTOSUSPEND_DELAY 50 @@ -38,8 +40,6 @@ struct stm32_crc { struct device *dev; void __iomem *regs; struct clk *clk; - u8 pending_data[sizeof(u32)]; - size_t nb_pending_bytes; }; struct stm32_crc_list { @@ -59,14 +59,13 @@ struct stm32_crc_ctx { struct stm32_crc_desc_ctx { u32 partial; /* crc32c: partial in first 4 bytes of that struct */ - struct stm32_crc *crc; }; static int stm32_crc32_cra_init(struct crypto_tfm *tfm) { struct stm32_crc_ctx *mctx = crypto_tfm_ctx(tfm); - mctx->key = CRC_INIT_DEFAULT; + mctx->key = 0; mctx->poly = CRC32_POLY_LE; return 0; } @@ -75,7 +74,7 @@ static int stm32_crc32c_cra_init(struct crypto_tfm *tfm) { struct stm32_crc_ctx *mctx = crypto_tfm_ctx(tfm); - mctx->key = CRC_INIT_DEFAULT; + mctx->key = CRC32C_INIT_DEFAULT; mctx->poly = CRC32C_POLY_LE; return 0; } @@ -94,32 +93,42 @@ static int stm32_crc_setkey(struct crypto_shash *tfm, const u8 *key, return 0; } +static struct stm32_crc *stm32_crc_get_next_crc(void) +{ + struct stm32_crc *crc; + + spin_lock_bh(&crc_list.lock); + crc = list_first_entry(&crc_list.dev_list, struct stm32_crc, list); + if (crc) + list_move_tail(&crc->list, &crc_list.dev_list); + spin_unlock_bh(&crc_list.lock); + + return crc; +} + static int stm32_crc_init(struct shash_desc *desc) { struct stm32_crc_desc_ctx *ctx = shash_desc_ctx(desc); struct stm32_crc_ctx *mctx = crypto_shash_ctx(desc->tfm); struct stm32_crc *crc; - spin_lock_bh(&crc_list.lock); - list_for_each_entry(crc, &crc_list.dev_list, list) { - ctx->crc = crc; - break; - } - spin_unlock_bh(&crc_list.lock); + crc = stm32_crc_get_next_crc(); + if (!crc) + return -ENODEV; - pm_runtime_get_sync(ctx->crc->dev); + pm_runtime_get_sync(crc->dev); /* Reset, set key, poly and configure in bit reverse mode */ - writel_relaxed(bitrev32(mctx->key), ctx->crc->regs + CRC_INIT); - writel_relaxed(bitrev32(mctx->poly), ctx->crc->regs + CRC_POL); - writel_relaxed(CRC_CR_RESET | 
CRC_CR_REVERSE, ctx->crc->regs + CRC_CR); + writel_relaxed(bitrev32(mctx->key), crc->regs + CRC_INIT); + writel_relaxed(bitrev32(mctx->poly), crc->regs + CRC_POL); + writel_relaxed(CRC_CR_RESET | CRC_CR_REV_IN_WORD | CRC_CR_REV_OUT, + crc->regs + CRC_CR); /* Store partial result */ - ctx->partial = readl_relaxed(ctx->crc->regs + CRC_DR); - ctx->crc->nb_pending_bytes = 0; + ctx->partial = readl_relaxed(crc->regs + CRC_DR); - pm_runtime_mark_last_busy(ctx->crc->dev); - pm_runtime_put_autosuspend(ctx->crc->dev); + pm_runtime_mark_last_busy(crc->dev); + pm_runtime_put_autosuspend(crc->dev); return 0; } @@ -128,31 +137,49 @@ static int stm32_crc_update(struct shash_desc *desc, const u8 *d8, unsigned int length) { struct stm32_crc_desc_ctx *ctx = shash_desc_ctx(desc); - struct stm32_crc *crc = ctx->crc; - u32 *d32; - unsigned int i; + struct stm32_crc_ctx *mctx = crypto_shash_ctx(desc->tfm); + struct stm32_crc *crc; + + crc = stm32_crc_get_next_crc(); + if (!crc) + return -ENODEV; pm_runtime_get_sync(crc->dev); - if (unlikely(crc->nb_pending_bytes)) { - while (crc->nb_pending_bytes != sizeof(u32) && length) { - /* Fill in pending data */ - crc->pending_data[crc->nb_pending_bytes++] = *(d8++); + /* + * Restore previously calculated CRC for this context as init value + * Restore polynomial configuration + * Configure in register for word input data, + * Configure out register in reversed bit mode data. + */ + writel_relaxed(bitrev32(ctx->partial), crc->regs + CRC_INIT); + writel_relaxed(bitrev32(mctx->poly), crc->regs + CRC_POL); + writel_relaxed(CRC_CR_RESET | CRC_CR_REV_IN_WORD | CRC_CR_REV_OUT, + crc->regs + CRC_CR); + + if (d8 != PTR_ALIGN(d8, sizeof(u32))) { + /* Configure for byte data */ + writel_relaxed(CRC_CR_REV_IN_BYTE | CRC_CR_REV_OUT, + crc->regs + CRC_CR); + while (d8 != PTR_ALIGN(d8, sizeof(u32)) && length) { + writeb_relaxed(*d8++, crc->regs + CRC_DR); length--; } - - if (crc->nb_pending_bytes == sizeof(u32)) { - /* Process completed pending data */ - writel_relaxed(*(u32 *)crc->pending_data, - crc->regs + CRC_DR); - crc->nb_pending_bytes = 0; - } + /* Configure for word data */ + writel_relaxed(CRC_CR_REV_IN_WORD | CRC_CR_REV_OUT, + crc->regs + CRC_CR); } - d32 = (u32 *)d8; - for (i = 0; i < length >> 2; i++) - /* Process 32 bits data */ - writel_relaxed(*(d32++), crc->regs + CRC_DR); + for (; length >= sizeof(u32); d8 += sizeof(u32), length -= sizeof(u32)) + writel_relaxed(*((u32 *)d8), crc->regs + CRC_DR); + + if (length) { + /* Configure for byte data */ + writel_relaxed(CRC_CR_REV_IN_BYTE | CRC_CR_REV_OUT, + crc->regs + CRC_CR); + while (length--) + writeb_relaxed(*d8++, crc->regs + CRC_DR); + } /* Store partial result */ ctx->partial = readl_relaxed(crc->regs + CRC_DR); @@ -160,22 +187,6 @@ static int stm32_crc_update(struct shash_desc *desc, const u8 *d8, pm_runtime_mark_last_busy(crc->dev); pm_runtime_put_autosuspend(crc->dev); - /* Check for pending data (non 32 bits) */ - length &= 3; - if (likely(!length)) - return 0; - - if ((crc->nb_pending_bytes + length) >= sizeof(u32)) { - /* Shall not happen */ - dev_err(crc->dev, "Pending data overflow\n"); - return -EINVAL; - } - - d8 = (const u8 *)d32; - for (i = 0; i < length; i++) - /* Store pending data */ - crc->pending_data[crc->nb_pending_bytes++] = *(d8++); - return 0; } @@ -204,6 +215,8 @@ static int stm32_crc_digest(struct shash_desc *desc, const u8 *data, return stm32_crc_init(desc) ?: stm32_crc_finup(desc, data, length, out); } +static unsigned int refcnt; +static DEFINE_MUTEX(refcnt_lock); static struct shash_alg 
algs[] = { /* CRC-32 */ { @@ -294,12 +307,18 @@ static int stm32_crc_probe(struct platform_device *pdev) list_add(&crc->list, &crc_list.dev_list); spin_unlock(&crc_list.lock); - ret = crypto_register_shashes(algs, ARRAY_SIZE(algs)); - if (ret) { - dev_err(dev, "Failed to register\n"); - clk_disable_unprepare(crc->clk); - return ret; + mutex_lock(&refcnt_lock); + if (!refcnt) { + ret = crypto_register_shashes(algs, ARRAY_SIZE(algs)); + if (ret) { + mutex_unlock(&refcnt_lock); + dev_err(dev, "Failed to register\n"); + clk_disable_unprepare(crc->clk); + return ret; + } } + refcnt++; + mutex_unlock(&refcnt_lock); dev_info(dev, "Initialized\n"); @@ -320,7 +339,10 @@ static int stm32_crc_remove(struct platform_device *pdev) list_del(&crc->list); spin_unlock(&crc_list.lock); - crypto_unregister_shashes(algs, ARRAY_SIZE(algs)); + mutex_lock(&refcnt_lock); + if (!--refcnt) + crypto_unregister_shashes(algs, ARRAY_SIZE(algs)); + mutex_unlock(&refcnt_lock); pm_runtime_disable(crc->dev); pm_runtime_put_noidle(crc->dev); diff --git a/drivers/dax/super.c b/drivers/dax/super.c index 26a654dbc69a..b93685288187 100644 --- a/drivers/dax/super.c +++ b/drivers/dax/super.c @@ -318,11 +318,15 @@ EXPORT_SYMBOL_GPL(dax_direct_access); bool dax_supported(struct dax_device *dax_dev, struct block_device *bdev, int blocksize, sector_t start, sector_t len) { + if (!dax_dev) + return false; + if (!dax_alive(dax_dev)) return false; return dax_dev->ops->dax_supported(dax_dev, bdev, blocksize, start, len); } +EXPORT_SYMBOL_GPL(dax_supported); size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr, size_t bytes, struct iov_iter *i) diff --git a/drivers/devfreq/rk3399_dmc.c b/drivers/devfreq/rk3399_dmc.c index 2e65d7279d79..027769e39f9b 100644 --- a/drivers/devfreq/rk3399_dmc.c +++ b/drivers/devfreq/rk3399_dmc.c @@ -95,18 +95,20 @@ static int rk3399_dmcfreq_target(struct device *dev, unsigned long *freq, mutex_lock(&dmcfreq->lock); - if (target_rate >= dmcfreq->odt_dis_freq) - odt_enable = true; + if (dmcfreq->regmap_pmu) { + if (target_rate >= dmcfreq->odt_dis_freq) + odt_enable = true; - /* - * This makes a SMC call to the TF-A to set the DDR PD (power-down) - * timings and to enable or disable the ODT (on-die termination) - * resistors. - */ - arm_smccc_smc(ROCKCHIP_SIP_DRAM_FREQ, dmcfreq->odt_pd_arg0, - dmcfreq->odt_pd_arg1, - ROCKCHIP_SIP_CONFIG_DRAM_SET_ODT_PD, - odt_enable, 0, 0, 0, &res); + /* + * This makes a SMC call to the TF-A to set the DDR PD + * (power-down) timings and to enable or disable the + * ODT (on-die termination) resistors. + */ + arm_smccc_smc(ROCKCHIP_SIP_DRAM_FREQ, dmcfreq->odt_pd_arg0, + dmcfreq->odt_pd_arg1, + ROCKCHIP_SIP_CONFIG_DRAM_SET_ODT_PD, + odt_enable, 0, 0, 0, &res); + } /* * If frequency scaling from low to high, adjust voltage first. 
@@ -364,16 +366,21 @@ static int rk3399_dmcfreq_probe(struct platform_device *pdev) if (res.a0) { dev_err(dev, "Failed to set dram param: %ld\n", res.a0); - return -EINVAL; + ret = -EINVAL; + goto err_edev; } } } node = of_parse_phandle(np, "rockchip,pmu", 0); - if (node) { - data->regmap_pmu = syscon_node_to_regmap(node); - if (IS_ERR(data->regmap_pmu)) - return PTR_ERR(data->regmap_pmu); + if (!node) + goto no_pmu; + + data->regmap_pmu = syscon_node_to_regmap(node); + of_node_put(node); + if (IS_ERR(data->regmap_pmu)) { + ret = PTR_ERR(data->regmap_pmu); + goto err_edev; } regmap_read(data->regmap_pmu, RK3399_PMUGRF_OS_REG2, &val); @@ -391,9 +398,11 @@ static int rk3399_dmcfreq_probe(struct platform_device *pdev) data->odt_dis_freq = data->timing.lpddr4_odt_dis_freq; break; default: - return -EINVAL; + ret = -EINVAL; + goto err_edev; }; +no_pmu: arm_smccc_smc(ROCKCHIP_SIP_DRAM_FREQ, 0, 0, ROCKCHIP_SIP_CONFIG_DRAM_INIT, 0, 0, 0, 0, &res); @@ -425,7 +434,8 @@ static int rk3399_dmcfreq_probe(struct platform_device *pdev) */ if (dev_pm_opp_of_add_table(dev)) { dev_err(dev, "Invalid operating-points in device tree.\n"); - return -EINVAL; + ret = -EINVAL; + goto err_edev; } of_property_read_u32(np, "upthreshold", @@ -465,6 +475,9 @@ static int rk3399_dmcfreq_probe(struct platform_device *pdev) err_free_opp: dev_pm_opp_of_remove_table(&pdev->dev); +err_edev: + devfreq_event_disable_edev(data->edev); + return ret; } diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c index e854dd845682..7c23fc36cfe6 100644 --- a/drivers/dma-buf/dma-buf.c +++ b/drivers/dma-buf/dma-buf.c @@ -46,46 +46,20 @@ static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen) size_t ret = 0; dmabuf = dentry->d_fsdata; - mutex_lock(&dmabuf->lock); + spin_lock(&dmabuf->name_lock); if (dmabuf->name) ret = strlcpy(name, dmabuf->name, DMA_BUF_NAME_LEN); - mutex_unlock(&dmabuf->lock); + spin_unlock(&dmabuf->name_lock); return dynamic_dname(dentry, buffer, buflen, "/%s:%s", dentry->d_name.name, ret > 0 ? 
name : ""); } -static const struct dentry_operations dma_buf_dentry_ops = { - .d_dname = dmabuffs_dname, -}; - -static struct vfsmount *dma_buf_mnt; - -static int dma_buf_fs_init_context(struct fs_context *fc) -{ - struct pseudo_fs_context *ctx; - - ctx = init_pseudo(fc, DMA_BUF_MAGIC); - if (!ctx) - return -ENOMEM; - ctx->dops = &dma_buf_dentry_ops; - return 0; -} - -static struct file_system_type dma_buf_fs_type = { - .name = "dmabuf", - .init_fs_context = dma_buf_fs_init_context, - .kill_sb = kill_anon_super, -}; - -static int dma_buf_release(struct inode *inode, struct file *file) +static void dma_buf_release(struct dentry *dentry) { struct dma_buf *dmabuf; - if (!is_dma_buf_file(file)) - return -EINVAL; - - dmabuf = file->private_data; + dmabuf = dentry->d_fsdata; BUG_ON(dmabuf->vmapping_counter); @@ -111,9 +85,32 @@ static int dma_buf_release(struct inode *inode, struct file *file) module_put(dmabuf->owner); kfree(dmabuf->name); kfree(dmabuf); +} + +static const struct dentry_operations dma_buf_dentry_ops = { + .d_dname = dmabuffs_dname, + .d_release = dma_buf_release, +}; + +static struct vfsmount *dma_buf_mnt; + +static int dma_buf_fs_init_context(struct fs_context *fc) +{ + struct pseudo_fs_context *ctx; + + ctx = init_pseudo(fc, DMA_BUF_MAGIC); + if (!ctx) + return -ENOMEM; + ctx->dops = &dma_buf_dentry_ops; return 0; } +static struct file_system_type dma_buf_fs_type = { + .name = "dmabuf", + .init_fs_context = dma_buf_fs_init_context, + .kill_sb = kill_anon_super, +}; + static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma) { struct dma_buf *dmabuf; @@ -342,8 +339,10 @@ static long dma_buf_set_name(struct dma_buf *dmabuf, const char __user *buf) kfree(name); goto out_unlock; } + spin_lock(&dmabuf->name_lock); kfree(dmabuf->name); dmabuf->name = name; + spin_unlock(&dmabuf->name_lock); out_unlock: mutex_unlock(&dmabuf->lock); @@ -436,14 +435,13 @@ static void dma_buf_show_fdinfo(struct seq_file *m, struct file *file) /* Don't count the temporary reference taken inside procfs seq_show */ seq_printf(m, "count:\t%ld\n", file_count(dmabuf->file) - 1); seq_printf(m, "exp_name:\t%s\n", dmabuf->exp_name); - mutex_lock(&dmabuf->lock); + spin_lock(&dmabuf->name_lock); if (dmabuf->name) seq_printf(m, "name:\t%s\n", dmabuf->name); - mutex_unlock(&dmabuf->lock); + spin_unlock(&dmabuf->name_lock); } static const struct file_operations dma_buf_fops = { - .release = dma_buf_release, .mmap = dma_buf_mmap_internal, .llseek = dma_buf_llseek, .poll = dma_buf_poll, @@ -572,6 +570,7 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info) dmabuf->size = exp_info->size; dmabuf->exp_name = exp_info->exp_name; dmabuf->owner = exp_info->owner; + spin_lock_init(&dmabuf->name_lock); init_waitqueue_head(&dmabuf->poll); dmabuf->cb_excl.poll = dmabuf->cb_shared.poll = &dmabuf->poll; dmabuf->cb_excl.active = dmabuf->cb_shared.active = 0; diff --git a/drivers/dma/acpi-dma.c b/drivers/dma/acpi-dma.c index 8a05db3343d3..dcbcb712de6e 100644 --- a/drivers/dma/acpi-dma.c +++ b/drivers/dma/acpi-dma.c @@ -135,11 +135,13 @@ static void acpi_dma_parse_csrt(struct acpi_device *adev, struct acpi_dma *adma) if (ret < 0) { dev_warn(&adev->dev, "error in parsing resource group\n"); - return; + break; } grp = (struct acpi_csrt_group *)((void *)grp + grp->length); } + + acpi_put_table((struct acpi_table_header *)csrt); } /** diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index 672c73b4a2d4..ff366c2f58c1 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c 
@@ -1667,6 +1667,8 @@ static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec, return NULL; dmac_pdev = of_find_device_by_node(dma_spec->np); + if (!dmac_pdev) + return NULL; dma_cap_zero(mask); dma_cap_set(DMA_SLAVE, mask); diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c index bf95f1d551c5..0ecb724b394f 100644 --- a/drivers/dma/dma-jz4780.c +++ b/drivers/dma/dma-jz4780.c @@ -885,24 +885,11 @@ static int jz4780_dma_probe(struct platform_device *pdev) return -EINVAL; } - ret = platform_get_irq(pdev, 0); - if (ret < 0) - return ret; - - jzdma->irq = ret; - - ret = request_irq(jzdma->irq, jz4780_dma_irq_handler, 0, dev_name(dev), - jzdma); - if (ret) { - dev_err(dev, "failed to request IRQ %u!\n", jzdma->irq); - return ret; - } - jzdma->clk = devm_clk_get(dev, NULL); if (IS_ERR(jzdma->clk)) { dev_err(dev, "failed to get clock\n"); ret = PTR_ERR(jzdma->clk); - goto err_free_irq; + return ret; } clk_prepare_enable(jzdma->clk); @@ -955,10 +942,23 @@ static int jz4780_dma_probe(struct platform_device *pdev) jzchan->vchan.desc_free = jz4780_dma_desc_free; } + ret = platform_get_irq(pdev, 0); + if (ret < 0) + goto err_disable_clk; + + jzdma->irq = ret; + + ret = request_irq(jzdma->irq, jz4780_dma_irq_handler, 0, dev_name(dev), + jzdma); + if (ret) { + dev_err(dev, "failed to request IRQ %u!\n", jzdma->irq); + goto err_disable_clk; + } + ret = dmaenginem_async_device_register(dd); if (ret) { dev_err(dev, "failed to register device\n"); - goto err_disable_clk; + goto err_free_irq; } /* Register with OF DMA helpers. */ @@ -966,17 +966,17 @@ static int jz4780_dma_probe(struct platform_device *pdev) jzdma); if (ret) { dev_err(dev, "failed to register OF DMA controller\n"); - goto err_disable_clk; + goto err_free_irq; } dev_info(dev, "JZ4780 DMA controller initialised\n"); return 0; -err_disable_clk: - clk_disable_unprepare(jzdma->clk); - err_free_irq: free_irq(jzdma->irq, jzdma); + +err_disable_clk: + clk_disable_unprepare(jzdma->clk); return ret; } diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index 0425984db118..62d9825a49e9 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c @@ -1168,6 +1168,8 @@ static int dmatest_run_set(const char *val, const struct kernel_param *kp) } else if (dmatest_run) { if (!is_threaded_test_pending(info)) { pr_info("No channels configured, continue with any\n"); + if (!is_threaded_test_run(info)) + stop_threaded_test(info); add_threaded_test(info); } start_threaded_tests(info); diff --git a/drivers/dma/dw-edma/dw-edma-core.c b/drivers/dma/dw-edma/dw-edma-core.c index ff392c01bad1..7f9a86c3c58f 100644 --- a/drivers/dma/dw-edma/dw-edma-core.c +++ b/drivers/dma/dw-edma/dw-edma-core.c @@ -391,7 +391,7 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer) if (xfer->cyclic) { burst->dar = xfer->xfer.cyclic.paddr; } else { - burst->dar = sg_dma_address(sg); + burst->dar = dst_addr; /* Unlike the typical assumption by other * drivers/IPs the peripheral memory isn't * a FIFO memory, in this case, it's a @@ -399,14 +399,13 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer) * and destination addresses are increased * by the same portion (data length) */ - src_addr += sg_dma_len(sg); } } else { burst->dar = dst_addr; if (xfer->cyclic) { burst->sar = xfer->xfer.cyclic.paddr; } else { - burst->sar = sg_dma_address(sg); + burst->sar = src_addr; /* Unlike the typical assumption by other * drivers/IPs the peripheral memory isn't * a FIFO memory, in this case, it's a @@ -414,12 +413,14 @@ dw_edma_device_transfer(struct 
dw_edma_transfer *xfer) * and destination addresses are increased * by the same portion (data length) */ - dst_addr += sg_dma_len(sg); } } - if (!xfer->cyclic) + if (!xfer->cyclic) { + src_addr += sg_dma_len(sg); + dst_addr += sg_dma_len(sg); sg = sg_next(sg); + } } return vchan_tx_prep(&chan->vc, &desc->vd, xfer->flags); diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c index 21cb2a58dbd2..a1b56f52db2f 100644 --- a/drivers/dma/dw/core.c +++ b/drivers/dma/dw/core.c @@ -118,16 +118,11 @@ static void dwc_initialize(struct dw_dma_chan *dwc) { struct dw_dma *dw = to_dw_dma(dwc->chan.device); - if (test_bit(DW_DMA_IS_INITIALIZED, &dwc->flags)) - return; - dw->initialize_chan(dwc); /* Enable interrupts */ channel_set_bit(dw, MASK.XFER, dwc->mask); channel_set_bit(dw, MASK.ERROR, dwc->mask); - - set_bit(DW_DMA_IS_INITIALIZED, &dwc->flags); } /*----------------------------------------------------------------------*/ @@ -954,8 +949,6 @@ static void dwc_issue_pending(struct dma_chan *chan) void do_dw_dma_off(struct dw_dma *dw) { - unsigned int i; - dma_writel(dw, CFG, 0); channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask); @@ -966,9 +959,6 @@ void do_dw_dma_off(struct dw_dma *dw) while (dma_readl(dw, CFG) & DW_CFG_DMA_EN) cpu_relax(); - - for (i = 0; i < dw->dma.chancnt; i++) - clear_bit(DW_DMA_IS_INITIALIZED, &dw->chan[i].flags); } void do_dw_dma_on(struct dw_dma *dw) @@ -1032,8 +1022,6 @@ static void dwc_free_chan_resources(struct dma_chan *chan) /* Clear custom channel configuration */ memset(&dwc->dws, 0, sizeof(struct dw_dma_slave)); - clear_bit(DW_DMA_IS_INITIALIZED, &dwc->flags); - /* Disable interrupts */ channel_clear_bit(dw, MASK.XFER, dwc->mask); channel_clear_bit(dw, MASK.BLOCK, dwc->mask); diff --git a/drivers/dma/fsl-edma-common.c b/drivers/dma/fsl-edma-common.c index 2135ebeb5bd1..4a13e158065a 100644 --- a/drivers/dma/fsl-edma-common.c +++ b/drivers/dma/fsl-edma-common.c @@ -361,26 +361,28 @@ static void fsl_edma_set_tcd_regs(struct fsl_edma_chan *fsl_chan, /* * TCD parameters are stored in struct fsl_edma_hw_tcd in little * endian format. However, we need to load the TCD registers in - * big- or little-endian obeying the eDMA engine model endian. 
+ * big- or little-endian obeying the eDMA engine model endian, + * and this is performed from specific edma_write functions */ edma_writew(edma, 0, ®s->tcd[ch].csr); - edma_writel(edma, le32_to_cpu(tcd->saddr), ®s->tcd[ch].saddr); - edma_writel(edma, le32_to_cpu(tcd->daddr), ®s->tcd[ch].daddr); - edma_writew(edma, le16_to_cpu(tcd->attr), ®s->tcd[ch].attr); - edma_writew(edma, le16_to_cpu(tcd->soff), ®s->tcd[ch].soff); + edma_writel(edma, (s32)tcd->saddr, ®s->tcd[ch].saddr); + edma_writel(edma, (s32)tcd->daddr, ®s->tcd[ch].daddr); - edma_writel(edma, le32_to_cpu(tcd->nbytes), ®s->tcd[ch].nbytes); - edma_writel(edma, le32_to_cpu(tcd->slast), ®s->tcd[ch].slast); + edma_writew(edma, (s16)tcd->attr, ®s->tcd[ch].attr); + edma_writew(edma, tcd->soff, ®s->tcd[ch].soff); - edma_writew(edma, le16_to_cpu(tcd->citer), ®s->tcd[ch].citer); - edma_writew(edma, le16_to_cpu(tcd->biter), ®s->tcd[ch].biter); - edma_writew(edma, le16_to_cpu(tcd->doff), ®s->tcd[ch].doff); + edma_writel(edma, (s32)tcd->nbytes, ®s->tcd[ch].nbytes); + edma_writel(edma, (s32)tcd->slast, ®s->tcd[ch].slast); - edma_writel(edma, le32_to_cpu(tcd->dlast_sga), + edma_writew(edma, (s16)tcd->citer, ®s->tcd[ch].citer); + edma_writew(edma, (s16)tcd->biter, ®s->tcd[ch].biter); + edma_writew(edma, (s16)tcd->doff, ®s->tcd[ch].doff); + + edma_writel(edma, (s32)tcd->dlast_sga, ®s->tcd[ch].dlast_sga); - edma_writew(edma, le16_to_cpu(tcd->csr), ®s->tcd[ch].csr); + edma_writew(edma, (s16)tcd->csr, ®s->tcd[ch].csr); } static inline diff --git a/drivers/dma/fsl-edma-common.h b/drivers/dma/fsl-edma-common.h index 7c72b9cd8bf7..b254dc656980 100644 --- a/drivers/dma/fsl-edma-common.h +++ b/drivers/dma/fsl-edma-common.h @@ -33,7 +33,7 @@ #define EDMA_TCD_ATTR_DSIZE_16BIT BIT(0) #define EDMA_TCD_ATTR_DSIZE_32BIT BIT(1) #define EDMA_TCD_ATTR_DSIZE_64BIT (BIT(0) | BIT(1)) -#define EDMA_TCD_ATTR_DSIZE_32BYTE (BIT(3) | BIT(0)) +#define EDMA_TCD_ATTR_DSIZE_32BYTE (BIT(2) | BIT(0)) #define EDMA_TCD_ATTR_SSIZE_8BIT 0 #define EDMA_TCD_ATTR_SSIZE_16BIT (EDMA_TCD_ATTR_DSIZE_16BIT << 8) #define EDMA_TCD_ATTR_SSIZE_32BIT (EDMA_TCD_ATTR_DSIZE_32BIT << 8) diff --git a/drivers/dma/fsl-edma.c b/drivers/dma/fsl-edma.c index a01c9d693e3e..5d2f22744923 100644 --- a/drivers/dma/fsl-edma.c +++ b/drivers/dma/fsl-edma.c @@ -45,6 +45,13 @@ static irqreturn_t fsl_edma_tx_handler(int irq, void *dev_id) fsl_chan = &fsl_edma->chans[ch]; spin_lock(&fsl_chan->vchan.lock); + + if (!fsl_chan->edesc) { + /* terminate_all called before */ + spin_unlock(&fsl_chan->vchan.lock); + continue; + } + if (!fsl_chan->edesc->iscyclic) { fsl_edma_get_realcnt(fsl_chan); list_del(&fsl_chan->edesc->vdesc.node); diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h index 56f18ae99233..308bed0a560a 100644 --- a/drivers/dma/fsldma.h +++ b/drivers/dma/fsldma.h @@ -205,10 +205,10 @@ struct fsldma_chan { #else static u64 fsl_ioread64(const u64 __iomem *addr) { - u32 fsl_addr = lower_32_bits(addr); - u64 fsl_addr_hi = (u64)in_le32((u32 *)(fsl_addr + 1)) << 32; + u32 val_lo = in_le32((u32 __iomem *)addr); + u32 val_hi = in_le32((u32 __iomem *)addr + 1); - return fsl_addr_hi | in_le32((u32 *)fsl_addr); + return ((u64)val_hi << 32) + val_lo; } static void fsl_iowrite64(u64 val, u64 __iomem *addr) @@ -219,10 +219,10 @@ static void fsl_iowrite64(u64 val, u64 __iomem *addr) static u64 fsl_ioread64be(const u64 __iomem *addr) { - u32 fsl_addr = lower_32_bits(addr); - u64 fsl_addr_hi = (u64)in_be32((u32 *)fsl_addr) << 32; + u32 val_hi = in_be32((u32 __iomem *)addr); + u32 val_lo = in_be32((u32 __iomem *)addr + 1); 
- return fsl_addr_hi | in_be32((u32 *)(fsl_addr + 1)); + return ((u64)val_hi << 32) + val_lo; } static void fsl_iowrite64be(u64 val, u64 __iomem *addr) diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c index 18c011e57592..8e2a4d1f0be5 100644 --- a/drivers/dma/ioat/dma.c +++ b/drivers/dma/ioat/dma.c @@ -26,6 +26,18 @@ #include "../dmaengine.h" +int completion_timeout = 200; +module_param(completion_timeout, int, 0644); +MODULE_PARM_DESC(completion_timeout, + "set ioat completion timeout [msec] (default 200 [msec])"); +int idle_timeout = 2000; +module_param(idle_timeout, int, 0644); +MODULE_PARM_DESC(idle_timeout, + "set ioat idel timeout [msec] (default 2000 [msec])"); + +#define IDLE_TIMEOUT msecs_to_jiffies(idle_timeout) +#define COMPLETION_TIMEOUT msecs_to_jiffies(completion_timeout) + static char *chanerr_str[] = { "DMA Transfer Source Address Error", "DMA Transfer Destination Address Error", diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h index b8e8e0b9693c..4ac9134962f3 100644 --- a/drivers/dma/ioat/dma.h +++ b/drivers/dma/ioat/dma.h @@ -99,8 +99,6 @@ struct ioatdma_chan { #define IOAT_RUN 5 #define IOAT_CHAN_ACTIVE 6 struct timer_list timer; - #define COMPLETION_TIMEOUT msecs_to_jiffies(100) - #define IDLE_TIMEOUT msecs_to_jiffies(2000) #define RESET_DELAY msecs_to_jiffies(100) struct ioatdma_device *ioat_dma; dma_addr_t completion_dma; diff --git a/drivers/dma/mcf-edma.c b/drivers/dma/mcf-edma.c index e15bd15a9ef6..e12b754e6398 100644 --- a/drivers/dma/mcf-edma.c +++ b/drivers/dma/mcf-edma.c @@ -35,6 +35,13 @@ static irqreturn_t mcf_edma_tx_handler(int irq, void *dev_id) mcf_chan = &mcf_edma->chans[ch]; spin_lock(&mcf_chan->vchan.lock); + + if (!mcf_chan->edesc) { + /* terminate_all called before */ + spin_unlock(&mcf_chan->vchan.lock); + continue; + } + if (!mcf_chan->edesc->iscyclic) { list_del(&mcf_chan->edesc->vdesc.node); vchan_cookie_complete(&mcf_chan->edesc->vdesc); diff --git a/drivers/dma/of-dma.c b/drivers/dma/of-dma.c index c2d779daa4b5..4bbf4172b9bf 100644 --- a/drivers/dma/of-dma.c +++ b/drivers/dma/of-dma.c @@ -69,12 +69,12 @@ static struct dma_chan *of_dma_router_xlate(struct of_phandle_args *dma_spec, return NULL; chan = ofdma_target->of_dma_xlate(&dma_spec_target, ofdma_target); - if (chan) { - chan->router = ofdma->dma_router; - chan->route_data = route_data; - } else { + if (IS_ERR_OR_NULL(chan)) { ofdma->dma_router->route_free(ofdma->dma_router->dev, route_data); + } else { + chan->router = ofdma->dma_router; + chan->route_data = route_data; } /* diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index 6cce9ef61b29..cd81d10974a2 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -2788,6 +2788,7 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst, while (burst != (1 << desc->rqcfg.brst_size)) desc->rqcfg.brst_size++; + desc->rqcfg.brst_len = get_burst_len(desc, len); /* * If burst size is smaller than bus width then make sure we only * transfer one at a time to avoid a burst stradling an MFIFO entry. 
@@ -2795,7 +2796,6 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst, if (desc->rqcfg.brst_size * 8 < pl330->pcfg.data_bus_width) desc->rqcfg.brst_len = 1; - desc->rqcfg.brst_len = get_burst_len(desc, len); desc->bytes_requested = len; desc->txd.flags = flags; diff --git a/drivers/dma/sh/usb-dmac.c b/drivers/dma/sh/usb-dmac.c index b218a013c260..8f7ceb698226 100644 --- a/drivers/dma/sh/usb-dmac.c +++ b/drivers/dma/sh/usb-dmac.c @@ -586,6 +586,8 @@ static void usb_dmac_isr_transfer_end(struct usb_dmac_chan *chan) desc->residue = usb_dmac_get_current_residue(chan, desc, desc->sg_index - 1); desc->done_cookie = desc->vd.tx.cookie; + desc->vd.tx_result.result = DMA_TRANS_NOERROR; + desc->vd.tx_result.residue = desc->residue; vchan_cookie_complete(&desc->vd); /* Restart the next transfer if this driver has a next desc */ diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c index 914901a680c8..9068591bd684 100644 --- a/drivers/dma/tegra210-adma.c +++ b/drivers/dma/tegra210-adma.c @@ -658,6 +658,7 @@ static int tegra_adma_alloc_chan_resources(struct dma_chan *dc) ret = pm_runtime_get_sync(tdc2dev(tdc)); if (ret < 0) { + pm_runtime_put_noidle(tdc2dev(tdc)); free_irq(tdc->irq, tdc); return ret; } @@ -869,8 +870,10 @@ static int tegra_adma_probe(struct platform_device *pdev) pm_runtime_enable(&pdev->dev); ret = pm_runtime_get_sync(&pdev->dev); - if (ret < 0) + if (ret < 0) { + pm_runtime_put_noidle(&pdev->dev); goto rpm_disable; + } ret = tegra_adma_init(tdma); if (ret) diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c index cc5e56d752c8..125a44d5a69e 100644 --- a/drivers/edac/amd64_edac.c +++ b/drivers/edac/amd64_edac.c @@ -265,6 +265,8 @@ static int get_scrub_rate(struct mem_ctl_info *mci) if (pvt->model == 0x60) amd64_read_pci_cfg(pvt->F2, F15H_M60H_SCRCTRL, &scrubval); + else + amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval); break; case 0x17: @@ -2317,6 +2319,15 @@ static struct amd64_family_type family_types[] = { .dbam_to_cs = f17_addr_mask_to_cs_size, } }, + [F17_M60H_CPUS] = { + .ctl_name = "F17h_M60h", + .f0_id = PCI_DEVICE_ID_AMD_17H_M60H_DF_F0, + .f6_id = PCI_DEVICE_ID_AMD_17H_M60H_DF_F6, + .ops = { + .early_channel_count = f17_early_channel_count, + .dbam_to_cs = f17_addr_mask_to_cs_size, + } + }, [F17_M70H_CPUS] = { .ctl_name = "F17h_M70h", .f0_id = PCI_DEVICE_ID_AMD_17H_M70H_DF_F0, @@ -3366,6 +3377,10 @@ static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt) fam_type = &family_types[F17_M30H_CPUS]; pvt->ops = &family_types[F17_M30H_CPUS].ops; break; + } else if (pvt->model >= 0x60 && pvt->model <= 0x6f) { + fam_type = &family_types[F17_M60H_CPUS]; + pvt->ops = &family_types[F17_M60H_CPUS].ops; + break; } else if (pvt->model >= 0x70 && pvt->model <= 0x7f) { fam_type = &family_types[F17_M70H_CPUS]; pvt->ops = &family_types[F17_M70H_CPUS].ops; diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h index 8c3cda81e619..d280b91f97cb 100644 --- a/drivers/edac/amd64_edac.h +++ b/drivers/edac/amd64_edac.h @@ -120,6 +120,8 @@ #define PCI_DEVICE_ID_AMD_17H_M10H_DF_F6 0x15ee #define PCI_DEVICE_ID_AMD_17H_M30H_DF_F0 0x1490 #define PCI_DEVICE_ID_AMD_17H_M30H_DF_F6 0x1496 +#define PCI_DEVICE_ID_AMD_17H_M60H_DF_F0 0x1448 +#define PCI_DEVICE_ID_AMD_17H_M60H_DF_F6 0x144e #define PCI_DEVICE_ID_AMD_17H_M70H_DF_F0 0x1440 #define PCI_DEVICE_ID_AMD_17H_M70H_DF_F6 0x1446 @@ -291,6 +293,7 @@ enum amd_families { F17_CPUS, F17_M10H_CPUS, F17_M30H_CPUS, + F17_M60H_CPUS, F17_M70H_CPUS, NUM_FAMILIES, }; diff --git 
a/drivers/edac/edac_device_sysfs.c b/drivers/edac/edac_device_sysfs.c index 0e7ea3591b78..5e7593753799 100644 --- a/drivers/edac/edac_device_sysfs.c +++ b/drivers/edac/edac_device_sysfs.c @@ -275,6 +275,7 @@ int edac_device_register_sysfs_main_kobj(struct edac_device_ctl_info *edac_dev) /* Error exit stack */ err_kobj_reg: + kobject_put(&edac_dev->kobj); module_put(edac_dev->owner); err_out: diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c index 72c9eb9fdffb..53042af7262e 100644 --- a/drivers/edac/edac_pci_sysfs.c +++ b/drivers/edac/edac_pci_sysfs.c @@ -386,7 +386,7 @@ static int edac_pci_main_kobj_setup(void) /* Error unwind statck */ kobject_init_and_add_fail: - kfree(edac_pci_top_main_kobj); + kobject_put(edac_pci_top_main_kobj); kzalloc_fail: module_put(THIS_MODULE); diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c index a71cca6eeb33..6be7e65f7389 100644 --- a/drivers/edac/i7core_edac.c +++ b/drivers/edac/i7core_edac.c @@ -1711,9 +1711,9 @@ static void i7core_mce_output_error(struct mem_ctl_info *mci, if (uncorrected_error) { core_err_cnt = 1; if (ripv) - tp_event = HW_EVENT_ERR_FATAL; - else tp_event = HW_EVENT_ERR_UNCORRECTED; + else + tp_event = HW_EVENT_ERR_FATAL; } else { tp_event = HW_EVENT_ERR_CORRECTED; } diff --git a/drivers/edac/ie31200_edac.c b/drivers/edac/ie31200_edac.c index d26300f9cb07..9be43b4f9c50 100644 --- a/drivers/edac/ie31200_edac.c +++ b/drivers/edac/ie31200_edac.c @@ -170,6 +170,8 @@ (n << (28 + (2 * skl) - PAGE_SHIFT)) static int nr_channels; +static struct pci_dev *mci_pdev; +static int ie31200_registered = 1; struct ie31200_priv { void __iomem *window; @@ -541,12 +543,16 @@ fail_free: static int ie31200_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { - edac_dbg(0, "MC:\n"); + int rc; + edac_dbg(0, "MC:\n"); if (pci_enable_device(pdev) < 0) return -EIO; + rc = ie31200_probe1(pdev, ent->driver_data); + if (rc == 0 && !mci_pdev) + mci_pdev = pci_dev_get(pdev); - return ie31200_probe1(pdev, ent->driver_data); + return rc; } static void ie31200_remove_one(struct pci_dev *pdev) @@ -555,6 +561,8 @@ static void ie31200_remove_one(struct pci_dev *pdev) struct ie31200_priv *priv; edac_dbg(0, "\n"); + pci_dev_put(mci_pdev); + mci_pdev = NULL; mci = edac_mc_del_mc(&pdev->dev); if (!mci) return; @@ -596,17 +604,53 @@ static struct pci_driver ie31200_driver = { static int __init ie31200_init(void) { + int pci_rc, i; + edac_dbg(3, "MC:\n"); /* Ensure that the OPSTATE is set correctly for POLL or NMI */ opstate_init(); - return pci_register_driver(&ie31200_driver); + pci_rc = pci_register_driver(&ie31200_driver); + if (pci_rc < 0) + goto fail0; + + if (!mci_pdev) { + ie31200_registered = 0; + for (i = 0; ie31200_pci_tbl[i].vendor != 0; i++) { + mci_pdev = pci_get_device(ie31200_pci_tbl[i].vendor, + ie31200_pci_tbl[i].device, + NULL); + if (mci_pdev) + break; + } + if (!mci_pdev) { + edac_dbg(0, "ie31200 pci_get_device fail\n"); + pci_rc = -ENODEV; + goto fail1; + } + pci_rc = ie31200_init_one(mci_pdev, &ie31200_pci_tbl[i]); + if (pci_rc < 0) { + edac_dbg(0, "ie31200 init fail\n"); + pci_rc = -ENODEV; + goto fail1; + } + } + return 0; + +fail1: + pci_unregister_driver(&ie31200_driver); +fail0: + pci_dev_put(mci_pdev); + + return pci_rc; } static void __exit ie31200_exit(void) { edac_dbg(3, "MC:\n"); pci_unregister_driver(&ie31200_driver); + if (!ie31200_registered) + ie31200_remove_one(mci_pdev); } module_init(ie31200_init); diff --git a/drivers/edac/pnd2_edac.c b/drivers/edac/pnd2_edac.c index 
b1193be1ef1d..dac45e2071b3 100644 --- a/drivers/edac/pnd2_edac.c +++ b/drivers/edac/pnd2_edac.c @@ -1155,7 +1155,7 @@ static void pnd2_mce_output_error(struct mem_ctl_info *mci, const struct mce *m, u32 optypenum = GET_BITFIELD(m->status, 4, 6); int rc; - tp_event = uc_err ? (ripv ? HW_EVENT_ERR_FATAL : HW_EVENT_ERR_UNCORRECTED) : + tp_event = uc_err ? (ripv ? HW_EVENT_ERR_UNCORRECTED : HW_EVENT_ERR_FATAL) : HW_EVENT_ERR_CORRECTED; /* diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c index f743502ca9b7..b557a53c75c4 100644 --- a/drivers/edac/sb_edac.c +++ b/drivers/edac/sb_edac.c @@ -254,18 +254,20 @@ static const u32 rir_offset[MAX_RIR_RANGES][MAX_RIR_WAY] = { * FIXME: Implement the error count reads directly */ -static const u32 correrrcnt[] = { - 0x104, 0x108, 0x10c, 0x110, -}; - #define RANK_ODD_OV(reg) GET_BITFIELD(reg, 31, 31) #define RANK_ODD_ERR_CNT(reg) GET_BITFIELD(reg, 16, 30) #define RANK_EVEN_OV(reg) GET_BITFIELD(reg, 15, 15) #define RANK_EVEN_ERR_CNT(reg) GET_BITFIELD(reg, 0, 14) +#if 0 /* Currently unused*/ +static const u32 correrrcnt[] = { + 0x104, 0x108, 0x10c, 0x110, +}; + static const u32 correrrthrsld[] = { 0x11c, 0x120, 0x124, 0x128, }; +#endif #define RANK_ODD_ERR_THRSLD(reg) GET_BITFIELD(reg, 16, 30) #define RANK_EVEN_ERR_THRSLD(reg) GET_BITFIELD(reg, 0, 14) @@ -1340,7 +1342,7 @@ static void knl_show_mc_route(u32 reg, char *s) */ static int knl_get_dimm_capacity(struct sbridge_pvt *pvt, u64 *mc_sizes) { - u64 sad_base, sad_size, sad_limit = 0; + u64 sad_base, sad_limit = 0; u64 tad_base, tad_size, tad_limit, tad_deadspace, tad_livespace; int sad_rule = 0; int tad_rule = 0; @@ -1427,7 +1429,6 @@ static int knl_get_dimm_capacity(struct sbridge_pvt *pvt, u64 *mc_sizes) edram_only = KNL_EDRAM_ONLY(dram_rule); sad_limit = pvt->info.sad_limit(dram_rule)+1; - sad_size = sad_limit - sad_base; pci_read_config_dword(pvt->pci_sad0, pvt->info.interleave_list[sad_rule], &interleave_reg); @@ -2952,7 +2953,7 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci, struct mem_ctl_info *new_mci; struct sbridge_pvt *pvt = mci->pvt_info; enum hw_event_mc_err_type tp_event; - char *type, *optype, msg[256]; + char *optype, msg[256]; bool ripv = GET_BITFIELD(m->mcgstatus, 0, 0); bool overflow = GET_BITFIELD(m->status, 62, 62); bool uncorrected_error = GET_BITFIELD(m->status, 61, 61); @@ -2981,14 +2982,11 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci, if (uncorrected_error) { core_err_cnt = 1; if (ripv) { - type = "FATAL"; - tp_event = HW_EVENT_ERR_FATAL; - } else { - type = "NON_FATAL"; tp_event = HW_EVENT_ERR_UNCORRECTED; + } else { + tp_event = HW_EVENT_ERR_FATAL; } } else { - type = "CORRECTED"; tp_event = HW_EVENT_ERR_CORRECTED; } @@ -3200,7 +3198,6 @@ static struct notifier_block sbridge_mce_dec = { static void sbridge_unregister_mci(struct sbridge_dev *sbridge_dev) { struct mem_ctl_info *mci = sbridge_dev->mci; - struct sbridge_pvt *pvt; if (unlikely(!mci || !mci->pvt_info)) { edac_dbg(0, "MC: dev = %p\n", &sbridge_dev->pdev[0]->dev); @@ -3209,8 +3206,6 @@ static void sbridge_unregister_mci(struct sbridge_dev *sbridge_dev) return; } - pvt = mci->pvt_info; - edac_dbg(0, "MC: mci = %p, dev = %p\n", mci, &sbridge_dev->pdev[0]->dev); diff --git a/drivers/edac/skx_common.c b/drivers/edac/skx_common.c index 2177ad765bd1..99dea4f66b5e 100644 --- a/drivers/edac/skx_common.c +++ b/drivers/edac/skx_common.c @@ -475,7 +475,7 @@ static void skx_mce_output_error(struct mem_ctl_info *mci, struct decoded_addr *res) { enum hw_event_mc_err_type tp_event; - 
char *type, *optype; + char *optype; bool ripv = GET_BITFIELD(m->mcgstatus, 0, 0); bool overflow = GET_BITFIELD(m->status, 62, 62); bool uncorrected_error = GET_BITFIELD(m->status, 61, 61); @@ -490,14 +490,11 @@ static void skx_mce_output_error(struct mem_ctl_info *mci, if (uncorrected_error) { core_err_cnt = 1; if (ripv) { - type = "FATAL"; - tp_event = HW_EVENT_ERR_FATAL; - } else { - type = "NON_FATAL"; tp_event = HW_EVENT_ERR_UNCORRECTED; + } else { + tp_event = HW_EVENT_ERR_FATAL; } } else { - type = "CORRECTED"; tp_event = HW_EVENT_ERR_CORRECTED; } diff --git a/drivers/extcon/extcon-adc-jack.c b/drivers/extcon/extcon-adc-jack.c index ad02dc6747a4..0317b614b680 100644 --- a/drivers/extcon/extcon-adc-jack.c +++ b/drivers/extcon/extcon-adc-jack.c @@ -124,7 +124,7 @@ static int adc_jack_probe(struct platform_device *pdev) for (i = 0; data->adc_conditions[i].id != EXTCON_NONE; i++); data->num_conditions = i; - data->chan = iio_channel_get(&pdev->dev, pdata->consumer_channel); + data->chan = devm_iio_channel_get(&pdev->dev, pdata->consumer_channel); if (IS_ERR(data->chan)) return PTR_ERR(data->chan); @@ -164,7 +164,6 @@ static int adc_jack_remove(struct platform_device *pdev) free_irq(data->irq, data); cancel_work_sync(&data->handler.work); - iio_channel_release(data->chan); return 0; } diff --git a/drivers/firmware/arm_scmi/scmi_pm_domain.c b/drivers/firmware/arm_scmi/scmi_pm_domain.c index 87f737e01473..041f8152272b 100644 --- a/drivers/firmware/arm_scmi/scmi_pm_domain.c +++ b/drivers/firmware/arm_scmi/scmi_pm_domain.c @@ -85,7 +85,10 @@ static int scmi_pm_domain_probe(struct scmi_device *sdev) for (i = 0; i < num_domains; i++, scmi_pd++) { u32 state; - domains[i] = &scmi_pd->genpd; + if (handle->power_ops->state_get(handle, i, &state)) { + dev_warn(dev, "failed to get state for domain %d\n", i); + continue; + } scmi_pd->domain = i; scmi_pd->handle = handle; @@ -94,13 +97,10 @@ static int scmi_pm_domain_probe(struct scmi_device *sdev) scmi_pd->genpd.power_off = scmi_pd_power_off; scmi_pd->genpd.power_on = scmi_pd_power_on; - if (handle->power_ops->state_get(handle, i, &state)) { - dev_warn(dev, "failed to get state for domain %d\n", i); - continue; - } - pm_genpd_init(&scmi_pd->genpd, NULL, state == SCMI_POWER_STATE_GENERIC_OFF); + + domains[i] = &scmi_pd->genpd; } scmi_pd_data->domains = domains; diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig index b248870a9806..6a6b412206ec 100644 --- a/drivers/firmware/efi/Kconfig +++ b/drivers/firmware/efi/Kconfig @@ -219,3 +219,14 @@ config EFI_EARLYCON depends on SERIAL_EARLYCON && !ARM && !IA64 select FONT_SUPPORT select ARCH_USE_MEMREMAP_PROT + +config EFI_CUSTOM_SSDT_OVERLAYS + bool "Load custom ACPI SSDT overlay from an EFI variable" + depends on EFI_VARS && ACPI + default ACPI_TABLE_UPGRADE + help + Allow loading of an ACPI SSDT overlay from an EFI variable specified + by a kernel command line option. + + See Documentation/admin-guide/acpi/ssdt-overlays.rst for more + information. 
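The Kconfig help text above refers to the efivar_ssdt machinery in drivers/firmware/efi/efi.c, whose preprocessor guard is switched to the new CONFIG_EFI_CUSTOM_SSDT_OVERLAYS symbol in the next hunk. As a rough sketch of the command-line side only (the option spelling efivar_ssdt=<name> and the handler body are assumptions inferred from the efivar_ssdt_setup()/EFIVAR_SSDT_NAME_MAX declarations visible below, not part of this diff):

	#include <linux/init.h>
	#include <linux/printk.h>
	#include <linux/string.h>

	#define EFIVAR_SSDT_NAME_MAX	16

	/* Name of the EFI variable holding the compiled SSDT AML, as passed
	 * on the kernel command line (assumed spelling: efivar_ssdt=<name>). */
	static char efivar_ssdt[EFIVAR_SSDT_NAME_MAX] __initdata;

	static int __init efivar_ssdt_setup(char *str)
	{
		/* Keep the requested name only if it fits the fixed-size buffer. */
		if (strlen(str) < sizeof(efivar_ssdt))
			memcpy(efivar_ssdt, str, strlen(str));
		else
			pr_warn("efivar_ssdt: name too long: %s\n", str);
		return 0;
	}
	__setup("efivar_ssdt=", efivar_ssdt_setup);

With that in place, a user would store the compiled AML in an EFI variable of the chosen name and boot with efivar_ssdt=<name>; Documentation/admin-guide/acpi/ssdt-overlays.rst, cited in the help text, describes the full procedure.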
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c index e3861d267d9a..b299e22b7532 100644 --- a/drivers/firmware/efi/efi.c +++ b/drivers/firmware/efi/efi.c @@ -217,7 +217,7 @@ static void generic_ops_unregister(void) efivars_unregister(&generic_efivars); } -#if IS_ENABLED(CONFIG_ACPI) +#ifdef CONFIG_EFI_CUSTOM_SSDT_OVERLAYS #define EFIVAR_SSDT_NAME_MAX 16 static char efivar_ssdt[EFIVAR_SSDT_NAME_MAX] __initdata; static int __init efivar_ssdt_setup(char *str) @@ -345,6 +345,7 @@ static int __init efisubsys_init(void) efi_kobj = kobject_create_and_add("efi", firmware_kobj); if (!efi_kobj) { pr_err("efi: Firmware registration failed.\n"); + destroy_workqueue(efi_rts_wq); return -ENOMEM; } @@ -381,6 +382,7 @@ err_unregister: generic_ops_unregister(); err_put: kobject_put(efi_kobj); + destroy_workqueue(efi_rts_wq); return error; } diff --git a/drivers/firmware/efi/esrt.c b/drivers/firmware/efi/esrt.c index d6dd5f503fa2..e8f71a50ba89 100644 --- a/drivers/firmware/efi/esrt.c +++ b/drivers/firmware/efi/esrt.c @@ -181,7 +181,7 @@ static int esre_create_sysfs_entry(void *esre, int entry_num) rc = kobject_init_and_add(&entry->kobj, &esre1_ktype, NULL, "entry%d", entry_num); if (rc) { - kfree(entry); + kobject_put(&entry->kobj); return rc; } } diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile index ee0661ddb25b..8c5b5529dbc0 100644 --- a/drivers/firmware/efi/libstub/Makefile +++ b/drivers/firmware/efi/libstub/Makefile @@ -28,6 +28,7 @@ KBUILD_CFLAGS := $(cflags-y) -DDISABLE_BRANCH_PROFILING \ -D__NO_FORTIFY \ $(call cc-option,-ffreestanding) \ $(call cc-option,-fno-stack-protector) \ + $(call cc-option,-fno-addrsig) \ -D__DISABLE_EXPORTS GCOV_PROFILE := n diff --git a/drivers/firmware/imx/imx-scu.c b/drivers/firmware/imx/imx-scu.c index fd634005a98e..669abc065a2b 100644 --- a/drivers/firmware/imx/imx-scu.c +++ b/drivers/firmware/imx/imx-scu.c @@ -411,6 +411,7 @@ static int imx_scu_probe(struct platform_device *pdev) if (ret != -EPROBE_DEFER) dev_err(dev, "Failed to request mbox chan %s ret %d\n", chan_name, ret); + kfree(chan_name); return ret; } diff --git a/drivers/firmware/psci/psci_checker.c b/drivers/firmware/psci/psci_checker.c index 6a445397771c..03eb798ad3ed 100644 --- a/drivers/firmware/psci/psci_checker.c +++ b/drivers/firmware/psci/psci_checker.c @@ -157,8 +157,10 @@ static int alloc_init_cpu_groups(cpumask_var_t **pcpu_groups) cpu_groups = kcalloc(nb_available_cpus, sizeof(cpu_groups), GFP_KERNEL); - if (!cpu_groups) + if (!cpu_groups) { + free_cpumask_var(tmp); return -ENOMEM; + } cpumask_copy(tmp, cpu_online_mask); @@ -167,6 +169,7 @@ static int alloc_init_cpu_groups(cpumask_var_t **pcpu_groups) topology_core_cpumask(cpumask_any(tmp)); if (!alloc_cpumask_var(&cpu_groups[num_groups], GFP_KERNEL)) { + free_cpumask_var(tmp); free_cpu_groups(num_groups, &cpu_groups); return -ENOMEM; } diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c index 4802ab170fe5..b9fdc20b4eb9 100644 --- a/drivers/firmware/qcom_scm.c +++ b/drivers/firmware/qcom_scm.c @@ -9,7 +9,6 @@ #include #include #include -#include #include #include #include @@ -441,8 +440,7 @@ int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz, struct qcom_scm_mem_map_info *mem_to_map; phys_addr_t mem_to_map_phys; phys_addr_t dest_phys; - phys_addr_t ptr_phys; - dma_addr_t ptr_dma; + dma_addr_t ptr_phys; size_t mem_to_map_sz; size_t dest_sz; size_t src_sz; @@ -459,10 +457,9 @@ int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz, ptr_sz = ALIGN(src_sz, 
SZ_64) + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(dest_sz, SZ_64); - ptr = dma_alloc_coherent(__scm->dev, ptr_sz, &ptr_dma, GFP_KERNEL); + ptr = dma_alloc_coherent(__scm->dev, ptr_sz, &ptr_phys, GFP_KERNEL); if (!ptr) return -ENOMEM; - ptr_phys = dma_to_phys(__scm->dev, ptr_dma); /* Fill source vmid detail */ src = ptr; @@ -490,7 +487,7 @@ int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz, ret = __qcom_scm_assign_mem(__scm->dev, mem_to_map_phys, mem_to_map_sz, ptr_phys, src_sz, dest_phys, dest_sz); - dma_free_coherent(__scm->dev, ptr_sz, ptr, ptr_dma); + dma_free_coherent(__scm->dev, ptr_sz, ptr, ptr_phys); if (ret) { dev_err(__scm->dev, "Assign memory protection call failed %d\n", ret); diff --git a/drivers/firmware/qemu_fw_cfg.c b/drivers/firmware/qemu_fw_cfg.c index 039e0f91dba8..6945c3c96637 100644 --- a/drivers/firmware/qemu_fw_cfg.c +++ b/drivers/firmware/qemu_fw_cfg.c @@ -605,8 +605,10 @@ static int fw_cfg_register_file(const struct fw_cfg_file *f) /* register entry under "/sys/firmware/qemu_fw_cfg/by_key/" */ err = kobject_init_and_add(&entry->kobj, &fw_cfg_sysfs_entry_ktype, fw_cfg_sel_ko, "%d", entry->select); - if (err) - goto err_register; + if (err) { + kobject_put(&entry->kobj); + return err; + } /* add raw binary content access */ err = sysfs_create_bin_file(&entry->kobj, &fw_cfg_sysfs_attr_raw); @@ -622,7 +624,6 @@ static int fw_cfg_register_file(const struct fw_cfg_file *f) err_add_raw: kobject_del(&entry->kobj); -err_register: kfree(entry); return err; } diff --git a/drivers/fpga/dfl-afu-dma-region.c b/drivers/fpga/dfl-afu-dma-region.c index 62f924489db5..5942343a5d6e 100644 --- a/drivers/fpga/dfl-afu-dma-region.c +++ b/drivers/fpga/dfl-afu-dma-region.c @@ -61,10 +61,10 @@ static int afu_dma_pin_pages(struct dfl_feature_platform_data *pdata, region->pages); if (pinned < 0) { ret = pinned; - goto put_pages; + goto free_pages; } else if (pinned != npages) { ret = -EFAULT; - goto free_pages; + goto put_pages; } dev_dbg(dev, "%d pages pinned\n", pinned); diff --git a/drivers/fpga/dfl-afu-main.c b/drivers/fpga/dfl-afu-main.c index e4a34dc7947f..041d23469238 100644 --- a/drivers/fpga/dfl-afu-main.c +++ b/drivers/fpga/dfl-afu-main.c @@ -83,7 +83,8 @@ int __afu_port_disable(struct platform_device *pdev) * on this port and minimum soft reset pulse width has elapsed. * Driver polls port_soft_reset_ack to determine if reset done by HW. */ - if (readq_poll_timeout(base + PORT_HDR_CTRL, v, v & PORT_CTRL_SFTRST, + if (readq_poll_timeout(base + PORT_HDR_CTRL, v, + v & PORT_CTRL_SFTRST_ACK, RST_POLL_INVL, RST_POLL_TIMEOUT)) { dev_err(&pdev->dev, "timeout, fail to reset device\n"); return -ETIMEDOUT; diff --git a/drivers/fpga/dfl-pci.c b/drivers/fpga/dfl-pci.c index 538755062ab7..a78c409bf2c4 100644 --- a/drivers/fpga/dfl-pci.c +++ b/drivers/fpga/dfl-pci.c @@ -227,7 +227,6 @@ static int cci_pci_sriov_configure(struct pci_dev *pcidev, int num_vfs) { struct cci_drvdata *drvdata = pci_get_drvdata(pcidev); struct dfl_fpga_cdev *cdev = drvdata->cdev; - int ret = 0; if (!num_vfs) { /* @@ -239,6 +238,8 @@ static int cci_pci_sriov_configure(struct pci_dev *pcidev, int num_vfs) dfl_fpga_cdev_config_ports_pf(cdev); } else { + int ret; + /* * before enable SRIOV, put released ports into VF access mode * first of all. 
diff --git a/drivers/gnss/sirf.c b/drivers/gnss/sirf.c index effed3a8d398..2ecb1d3e8eeb 100644 --- a/drivers/gnss/sirf.c +++ b/drivers/gnss/sirf.c @@ -439,14 +439,18 @@ static int sirf_probe(struct serdev_device *serdev) data->on_off = devm_gpiod_get_optional(dev, "sirf,onoff", GPIOD_OUT_LOW); - if (IS_ERR(data->on_off)) + if (IS_ERR(data->on_off)) { + ret = PTR_ERR(data->on_off); goto err_put_device; + } if (data->on_off) { data->wakeup = devm_gpiod_get_optional(dev, "sirf,wakeup", GPIOD_IN); - if (IS_ERR(data->wakeup)) + if (IS_ERR(data->wakeup)) { + ret = PTR_ERR(data->wakeup); goto err_put_device; + } ret = regulator_enable(data->vcc); if (ret) diff --git a/drivers/gpio/gpio-arizona.c b/drivers/gpio/gpio-arizona.c index 5640efe5e750..5bda38e0780f 100644 --- a/drivers/gpio/gpio-arizona.c +++ b/drivers/gpio/gpio-arizona.c @@ -64,6 +64,7 @@ static int arizona_gpio_get(struct gpio_chip *chip, unsigned offset) ret = pm_runtime_get_sync(chip->parent); if (ret < 0) { dev_err(chip->parent, "Failed to resume: %d\n", ret); + pm_runtime_put_autosuspend(chip->parent); return ret; } @@ -72,12 +73,15 @@ static int arizona_gpio_get(struct gpio_chip *chip, unsigned offset) if (ret < 0) { dev_err(chip->parent, "Failed to drop cache: %d\n", ret); + pm_runtime_put_autosuspend(chip->parent); return ret; } ret = regmap_read(arizona->regmap, reg, &val); - if (ret < 0) + if (ret < 0) { + pm_runtime_put_autosuspend(chip->parent); return ret; + } pm_runtime_mark_last_busy(chip->parent); pm_runtime_put_autosuspend(chip->parent); @@ -106,6 +110,7 @@ static int arizona_gpio_direction_out(struct gpio_chip *chip, ret = pm_runtime_get_sync(chip->parent); if (ret < 0) { dev_err(chip->parent, "Failed to resume: %d\n", ret); + pm_runtime_put(chip->parent); return ret; } } diff --git a/drivers/gpio/gpio-dwapb.c b/drivers/gpio/gpio-dwapb.c index 92e127e74813..ed6061b5cca1 100644 --- a/drivers/gpio/gpio-dwapb.c +++ b/drivers/gpio/gpio-dwapb.c @@ -49,7 +49,9 @@ #define GPIO_EXT_PORTC 0x58 #define GPIO_EXT_PORTD 0x5c +#define DWAPB_DRIVER_NAME "gpio-dwapb" #define DWAPB_MAX_PORTS 4 + #define GPIO_EXT_PORT_STRIDE 0x04 /* register stride 32 bits */ #define GPIO_SWPORT_DR_STRIDE 0x0c /* register stride 3*32 bits */ #define GPIO_SWPORT_DDR_STRIDE 0x0c /* register stride 3*32 bits */ @@ -398,7 +400,7 @@ static void dwapb_configure_irqs(struct dwapb_gpio *gpio, return; err = irq_alloc_domain_generic_chips(gpio->domain, ngpio, 2, - "gpio-dwapb", handle_level_irq, + DWAPB_DRIVER_NAME, handle_level_irq, IRQ_NOREQUEST, 0, IRQ_GC_INIT_NESTED_LOCK); if (err) { @@ -455,7 +457,7 @@ static void dwapb_configure_irqs(struct dwapb_gpio *gpio, */ err = devm_request_irq(gpio->dev, pp->irq[0], dwapb_irq_handler_mfd, - IRQF_SHARED, "gpio-dwapb-mfd", gpio); + IRQF_SHARED, DWAPB_DRIVER_NAME, gpio); if (err) { dev_err(gpio->dev, "error requesting IRQ\n"); irq_domain_remove(gpio->domain); @@ -533,26 +535,33 @@ static int dwapb_gpio_add_port(struct dwapb_gpio *gpio, dwapb_configure_irqs(gpio, port, pp); err = gpiochip_add_data(&port->gc, port); - if (err) + if (err) { dev_err(gpio->dev, "failed to register gpiochip for port%d\n", port->idx); - else - port->is_registered = true; + return err; + } /* Add GPIO-signaled ACPI event support */ - if (pp->has_irq) - acpi_gpiochip_request_interrupts(&port->gc); + acpi_gpiochip_request_interrupts(&port->gc); - return err; + port->is_registered = true; + + return 0; } static void dwapb_gpio_unregister(struct dwapb_gpio *gpio) { unsigned int m; - for (m = 0; m < gpio->nr_ports; ++m) - if 
(gpio->ports[m].is_registered) - gpiochip_remove(&gpio->ports[m].gc); + for (m = 0; m < gpio->nr_ports; ++m) { + struct dwapb_gpio_port *port = &gpio->ports[m]; + + if (!port->is_registered) + continue; + + acpi_gpiochip_free_interrupts(&port->gc); + gpiochip_remove(&port->gc); + } } static struct dwapb_platform_data * @@ -836,7 +845,7 @@ static SIMPLE_DEV_PM_OPS(dwapb_gpio_pm_ops, dwapb_gpio_suspend, static struct platform_driver dwapb_gpio_driver = { .driver = { - .name = "gpio-dwapb", + .name = DWAPB_DRIVER_NAME, .pm = &dwapb_gpio_pm_ops, .of_match_table = of_match_ptr(dwapb_of_match), .acpi_match_table = ACPI_PTR(dwapb_acpi_match), @@ -850,3 +859,4 @@ module_platform_driver(dwapb_gpio_driver); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Jamie Iles"); MODULE_DESCRIPTION("Synopsys DesignWare APB GPIO driver"); +MODULE_ALIAS("platform:" DWAPB_DRIVER_NAME); diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c index 12f12da3e4f3..effcafbc09bf 100644 --- a/drivers/gpio/gpio-pca953x.c +++ b/drivers/gpio/gpio-pca953x.c @@ -108,6 +108,84 @@ static const struct i2c_device_id pca953x_id[] = { }; MODULE_DEVICE_TABLE(i2c, pca953x_id); +#ifdef CONFIG_GPIO_PCA953X_IRQ + +#include +#include +#include + +static const struct dmi_system_id pca953x_dmi_acpi_irq_info[] = { + { + /* + * On Intel Galileo Gen 2 board the IRQ pin of one of + * the I²C GPIO expanders, which has GpioInt() resource, + * is provided as an absolute number instead of being + * relative. Since first controller (gpio-sch.c) and + * second (gpio-dwapb.c) are at the fixed bases, we may + * safely refer to the number in the global space to get + * an IRQ out of it. + */ + .matches = { + DMI_EXACT_MATCH(DMI_BOARD_NAME, "GalileoGen2"), + }, + }, + {} +}; + +#ifdef CONFIG_ACPI +static int pca953x_acpi_get_pin(struct acpi_resource *ares, void *data) +{ + struct acpi_resource_gpio *agpio; + int *pin = data; + + if (acpi_gpio_get_irq_resource(ares, &agpio)) + *pin = agpio->pin_table[0]; + return 1; +} + +static int pca953x_acpi_find_pin(struct device *dev) +{ + struct acpi_device *adev = ACPI_COMPANION(dev); + int pin = -ENOENT, ret; + LIST_HEAD(r); + + ret = acpi_dev_get_resources(adev, &r, pca953x_acpi_get_pin, &pin); + acpi_dev_free_resource_list(&r); + if (ret < 0) + return ret; + + return pin; +} +#else +static inline int pca953x_acpi_find_pin(struct device *dev) { return -ENXIO; } +#endif + +static int pca953x_acpi_get_irq(struct device *dev) +{ + int pin, ret; + + pin = pca953x_acpi_find_pin(dev); + if (pin < 0) + return pin; + + dev_info(dev, "Applying ACPI interrupt quirk (GPIO %d)\n", pin); + + if (!gpio_is_valid(pin)) + return -EINVAL; + + ret = gpio_request(pin, "pca953x interrupt"); + if (ret) + return ret; + + ret = gpio_to_irq(pin); + + /* When pin is used as an IRQ, no need to keep it requested */ + gpio_free(pin); + + return ret; +} +#endif + static const struct acpi_device_id pca953x_acpi_ids[] = { { "INT3491", 16 | PCA953X_TYPE | PCA_LATCH_INT, }, { } @@ -307,8 +385,23 @@ static const struct regmap_config pca953x_i2c_regmap = { .volatile_reg = pca953x_volatile_register, .cache_type = REGCACHE_RBTREE, - /* REVISIT: should be 0x7f but some 24 bit chips use REG_ADDR_AI */ - .max_register = 0xff, + .max_register = 0x7f, +}; + +static const struct regmap_config pca953x_ai_i2c_regmap = { + .reg_bits = 8, + .val_bits = 8, + + .read_flag_mask = REG_ADDR_AI, + .write_flag_mask = REG_ADDR_AI, + + .readable_reg = pca953x_readable_register, + .writeable_reg = pca953x_writeable_register, + .volatile_reg = 
pca953x_volatile_register, + + .disable_locking = true, + .cache_type = REGCACHE_RBTREE, + .max_register = 0x7f, }; static u8 pca953x_recalc_addr(struct pca953x_chip *chip, int reg, int off, @@ -319,18 +412,6 @@ static u8 pca953x_recalc_addr(struct pca953x_chip *chip, int reg, int off, int pinctrl = (reg & PCAL_PINCTRL_MASK) << 1; u8 regaddr = pinctrl | addr | (off / BANK_SZ); - /* Single byte read doesn't need AI bit set. */ - if (!addrinc) - return regaddr; - - /* Chips with 24 and more GPIOs always support Auto Increment */ - if (write && NBANK(chip) > 2) - regaddr |= REG_ADDR_AI; - - /* PCA9575 needs address-increment on multi-byte writes */ - if (PCA_CHIP_TYPE(chip->driver_data) == PCA957X_TYPE) - regaddr |= REG_ADDR_AI; - return regaddr; } @@ -771,6 +852,12 @@ static int pca953x_irq_setup(struct pca953x_chip *chip, u8 reg_direction[MAX_BANK]; int ret, i; + if (dmi_first_match(pca953x_dmi_acpi_irq_info)) { + ret = pca953x_acpi_get_irq(&client->dev); + if (ret > 0) + client->irq = ret; + } + if (!client->irq) return 0; @@ -898,6 +985,7 @@ static int pca953x_probe(struct i2c_client *client, int ret; u32 invert = 0; struct regulator *reg; + const struct regmap_config *regmap_config; chip = devm_kzalloc(&client->dev, sizeof(struct pca953x_chip), GFP_KERNEL); @@ -965,7 +1053,17 @@ static int pca953x_probe(struct i2c_client *client, i2c_set_clientdata(client, chip); - chip->regmap = devm_regmap_init_i2c(client, &pca953x_i2c_regmap); + pca953x_setup_gpio(chip, chip->driver_data & PCA_GPIO_MASK); + + if (NBANK(chip) > 2 || PCA_CHIP_TYPE(chip->driver_data) == PCA957X_TYPE) { + dev_info(&client->dev, "using AI\n"); + regmap_config = &pca953x_ai_i2c_regmap; + } else { + dev_info(&client->dev, "using no AI\n"); + regmap_config = &pca953x_i2c_regmap; + } + + chip->regmap = devm_regmap_init_i2c(client, regmap_config); if (IS_ERR(chip->regmap)) { ret = PTR_ERR(chip->regmap); goto err_exit; @@ -1000,7 +1098,6 @@ static int pca953x_probe(struct i2c_client *client, /* initialize cached registers from their original values. * we can't share this chip with another i2c master. 
*/ - pca953x_setup_gpio(chip, chip->driver_data & PCA_GPIO_MASK); if (PCA_CHIP_TYPE(chip->driver_data) == PCA953X_TYPE) { chip->regs = &pca953x_regs; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c index daf687428cdb..663314f807fa 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c @@ -150,6 +150,7 @@ int amdgpu_atomfirmware_get_vram_width(struct amdgpu_device *adev) (mode_info->atom_context->bios + data_offset); switch (crev) { case 11: + case 12: mem_channel_number = igp_info->v11.umachannelnumber; /* channel width is 64 */ return mem_channel_number * 64; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c index ece55c8fa673..cda0a76a733d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c @@ -719,8 +719,10 @@ amdgpu_connector_lvds_detect(struct drm_connector *connector, bool force) if (!drm_kms_helper_is_poll_worker()) { r = pm_runtime_get_sync(connector->dev->dev); - if (r < 0) + if (r < 0) { + pm_runtime_put_autosuspend(connector->dev->dev); return connector_status_disconnected; + } } if (encoder) { @@ -857,8 +859,10 @@ amdgpu_connector_vga_detect(struct drm_connector *connector, bool force) if (!drm_kms_helper_is_poll_worker()) { r = pm_runtime_get_sync(connector->dev->dev); - if (r < 0) + if (r < 0) { + pm_runtime_put_autosuspend(connector->dev->dev); return connector_status_disconnected; + } } encoder = amdgpu_connector_best_single_encoder(connector); @@ -980,8 +984,10 @@ amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force) if (!drm_kms_helper_is_poll_worker()) { r = pm_runtime_get_sync(connector->dev->dev); - if (r < 0) + if (r < 0) { + pm_runtime_put_autosuspend(connector->dev->dev); return connector_status_disconnected; + } } if (!force && amdgpu_connector_check_hpd_status_unchanged(connector)) { @@ -1330,8 +1336,10 @@ amdgpu_connector_dp_detect(struct drm_connector *connector, bool force) if (!drm_kms_helper_is_poll_worker()) { r = pm_runtime_get_sync(connector->dev->dev); - if (r < 0) + if (r < 0) { + pm_runtime_put_autosuspend(connector->dev->dev); return connector_status_disconnected; + } } if (!force && amdgpu_connector_check_hpd_status_unchanged(connector)) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c index 1e25ca34d876..700e26b69abc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c @@ -990,27 +990,37 @@ static void amdgpu_ib_preempt_job_recovery(struct drm_gpu_scheduler *sched) static void amdgpu_ib_preempt_mark_partial_job(struct amdgpu_ring *ring) { struct amdgpu_job *job; - struct drm_sched_job *s_job; + struct drm_sched_job *s_job, *tmp; uint32_t preempt_seq; struct dma_fence *fence, **ptr; struct amdgpu_fence_driver *drv = &ring->fence_drv; struct drm_gpu_scheduler *sched = &ring->sched; + bool preempted = true; if (ring->funcs->type != AMDGPU_RING_TYPE_GFX) return; preempt_seq = le32_to_cpu(*(drv->cpu_addr + 2)); - if (preempt_seq <= atomic_read(&drv->last_seq)) - return; + if (preempt_seq <= atomic_read(&drv->last_seq)) { + preempted = false; + goto no_preempt; + } preempt_seq &= drv->num_fences_mask; ptr = &drv->fences[preempt_seq]; fence = rcu_dereference_protected(*ptr, 1); +no_preempt: spin_lock(&sched->job_list_lock); - list_for_each_entry(s_job, &sched->ring_mirror_list, node) { + 
list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) { + if (dma_fence_is_signaled(&s_job->s_fence->finished)) { + /* remove job from ring_mirror_list */ + list_del_init(&s_job->node); + sched->ops->free_job(s_job); + continue; + } job = to_amdgpu_job(s_job); - if (job->fence == fence) + if (preempted && job->fence == fence) /* mark the job as preempted */ job->preemption_status |= AMDGPU_IB_PREEMPTED; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c index 82efc1e22e61..e0aed42d9cbd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c @@ -282,7 +282,7 @@ int amdgpu_display_crtc_set_config(struct drm_mode_set *set, ret = pm_runtime_get_sync(dev->dev); if (ret < 0) - return ret; + goto out; ret = drm_crtc_helper_set_config(set, ctx); @@ -297,7 +297,7 @@ int amdgpu_display_crtc_set_config(struct drm_mode_set *set, take the current one */ if (active && !adev->have_disp_power_ref) { adev->have_disp_power_ref = true; - return ret; + goto out; } /* if we have no active crtcs, then drop the power ref we got before */ @@ -306,6 +306,7 @@ int amdgpu_display_crtc_set_config(struct drm_mode_set *set, adev->have_disp_power_ref = false; } +out: /* drop the power reference we got coming in here */ pm_runtime_put_autosuspend(dev->dev); return ret; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 05d114a72ca1..fa2c0f29ad4d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -1286,11 +1286,12 @@ long amdgpu_drm_ioctl(struct file *filp, dev = file_priv->minor->dev; ret = pm_runtime_get_sync(dev->dev); if (ret < 0) - return ret; + goto out; ret = drm_ioctl(filp, cmd, arg); pm_runtime_mark_last_busy(dev->dev); +out: pm_runtime_put_autosuspend(dev->dev); return ret; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c index 23085b352cf2..c212d5fc665c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c @@ -404,7 +404,9 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring, ring->fence_drv.gpu_addr = adev->uvd.inst[ring->me].gpu_addr + index; } amdgpu_fence_write(ring, atomic_read(&ring->fence_drv.last_seq)); - amdgpu_irq_get(adev, irq_src, irq_type); + + if (irq_src) + amdgpu_irq_get(adev, irq_src, irq_type); ring->fence_drv.irq_src = irq_src; ring->fence_drv.irq_type = irq_type; @@ -539,8 +541,9 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev) /* no need to trigger GPU reset as we are unloading */ amdgpu_fence_driver_force_completion(ring); } - amdgpu_irq_put(adev, ring->fence_drv.irq_src, - ring->fence_drv.irq_type); + if (ring->fence_drv.irq_src) + amdgpu_irq_put(adev, ring->fence_drv.irq_src, + ring->fence_drv.irq_type); drm_sched_fini(&ring->sched); del_timer_sync(&ring->fence_drv.fallback_timer); for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j) @@ -576,8 +579,9 @@ void amdgpu_fence_driver_suspend(struct amdgpu_device *adev) } /* disable the interrupt */ - amdgpu_irq_put(adev, ring->fence_drv.irq_src, - ring->fence_drv.irq_type); + if (ring->fence_drv.irq_src) + amdgpu_irq_put(adev, ring->fence_drv.irq_src, + ring->fence_drv.irq_type); } } @@ -603,8 +607,9 @@ void amdgpu_fence_driver_resume(struct amdgpu_device *adev) continue; /* enable the interrupt */ - amdgpu_irq_get(adev, ring->fence_drv.irq_src, - ring->fence_drv.irq_type); + if (ring->fence_drv.irq_src) 
+ amdgpu_irq_get(adev, ring->fence_drv.irq_src, + ring->fence_drv.irq_type); } } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index 8ceb44925947..5fa5158d18ee 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -161,16 +161,17 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj, struct amdgpu_bo_list_entry vm_pd; struct list_head list, duplicates; + struct dma_fence *fence = NULL; struct ttm_validate_buffer tv; struct ww_acquire_ctx ticket; struct amdgpu_bo_va *bo_va; - int r; + long r; INIT_LIST_HEAD(&list); INIT_LIST_HEAD(&duplicates); tv.bo = &bo->tbo; - tv.num_shared = 1; + tv.num_shared = 2; list_add(&tv.head, &list); amdgpu_vm_get_pd_bo(vm, &list, &vm_pd); @@ -178,28 +179,34 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj, r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates, false); if (r) { dev_err(adev->dev, "leaking bo va because " - "we fail to reserve bo (%d)\n", r); + "we fail to reserve bo (%ld)\n", r); return; } bo_va = amdgpu_vm_bo_find(vm, bo); - if (bo_va && --bo_va->ref_count == 0) { - amdgpu_vm_bo_rmv(adev, bo_va); + if (!bo_va || --bo_va->ref_count) + goto out_unlock; - if (amdgpu_vm_ready(vm)) { - struct dma_fence *fence = NULL; + amdgpu_vm_bo_rmv(adev, bo_va); + if (!amdgpu_vm_ready(vm)) + goto out_unlock; - r = amdgpu_vm_clear_freed(adev, vm, &fence); - if (unlikely(r)) { - dev_err(adev->dev, "failed to clear page " - "tables on GEM object close (%d)\n", r); - } - - if (fence) { - amdgpu_bo_fence(bo, fence, true); - dma_fence_put(fence); - } - } + fence = dma_resv_get_excl(bo->tbo.base.resv); + if (fence) { + amdgpu_bo_fence(bo, fence, true); + fence = NULL; } + + r = amdgpu_vm_clear_freed(adev, vm, &fence); + if (r || !fence) + goto out_unlock; + + amdgpu_bo_fence(bo, fence, true); + dma_fence_put(fence); + +out_unlock: + if (unlikely(r < 0)) + dev_err(adev->dev, "failed to clear page " + "tables on GEM object close (%ld)\n", r); ttm_eu_backoff_reservation(&ticket, &list); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index 96b2a31ccfed..f06a5142d66e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c @@ -36,7 +36,8 @@ static void amdgpu_job_timedout(struct drm_sched_job *s_job) memset(&ti, 0, sizeof(struct amdgpu_task_info)); - if (amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) { + if (amdgpu_gpu_recovery && + amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) { DRM_ERROR("ring %s timeout, but soft recovered\n", s_job->sched->name); return; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index a73206784cba..59fd9ebf3a58 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -638,8 +638,12 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file * in the bitfields */ if (se_num == AMDGPU_INFO_MMR_SE_INDEX_MASK) se_num = 0xffffffff; + else if (se_num >= AMDGPU_GFX_MAX_SE) + return -EINVAL; if (sh_num == AMDGPU_INFO_MMR_SH_INDEX_MASK) sh_num = 0xffffffff; + else if (sh_num >= AMDGPU_GFX_MAX_SH_PER_SE) + return -EINVAL; if (info->read_mmr_reg.count > 128) return -EINVAL; @@ -667,9 +671,10 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file return n ? 
-EFAULT : 0; } case AMDGPU_INFO_DEV_INFO: { - struct drm_amdgpu_info_device dev_info = {}; + struct drm_amdgpu_info_device dev_info; uint64_t vm_size; + memset(&dev_info, 0, sizeof(dev_info)); dev_info.device_id = dev->pdev->device; dev_info.chip_rev = adev->rev_id; dev_info.external_rev = adev->external_rev_id; @@ -975,7 +980,7 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) r = pm_runtime_get_sync(dev->dev); if (r < 0) - return r; + goto pm_put; fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL); if (unlikely(!fpriv)) { @@ -1026,6 +1031,7 @@ error_pasid: out_suspend: pm_runtime_mark_last_busy(dev->dev); +pm_put: pm_runtime_put_autosuspend(dev->dev); return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c index 51263b8d94b1..3f744e72912f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c @@ -370,6 +370,15 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev, if (current_level == level) return count; + if (adev->asic_type == CHIP_RAVEN) { + if (adev->rev_id < 8) { + if (current_level != AMD_DPM_FORCED_LEVEL_MANUAL && level == AMD_DPM_FORCED_LEVEL_MANUAL) + amdgpu_gfx_off_ctrl(adev, false); + else if (current_level == AMD_DPM_FORCED_LEVEL_MANUAL && level != AMD_DPM_FORCED_LEVEL_MANUAL) + amdgpu_gfx_off_ctrl(adev, true); + } + } + /* profile_exit setting is valid only when current mode is in profile mode */ if (!(current_level & (AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD | AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK | @@ -416,8 +425,11 @@ static ssize_t amdgpu_get_pp_num_states(struct device *dev, ret = smu_get_power_num_states(&adev->smu, &data); if (ret) return ret; - } else if (adev->powerplay.pp_funcs->get_pp_num_states) + } else if (adev->powerplay.pp_funcs->get_pp_num_states) { amdgpu_dpm_get_pp_num_states(adev, &data); + } else { + memset(&data, 0, sizeof(data)); + } buf_len = snprintf(buf, PAGE_SIZE, "states: %d\n", data.nums); for (i = 0; i < data.nums; i++) @@ -2089,7 +2101,7 @@ static ssize_t amdgpu_hwmon_show_sclk(struct device *dev, if (r) return r; - return snprintf(buf, PAGE_SIZE, "%d\n", sclk * 10 * 1000); + return snprintf(buf, PAGE_SIZE, "%u\n", sclk * 10 * 1000); } static ssize_t amdgpu_hwmon_show_sclk_label(struct device *dev, @@ -2119,7 +2131,7 @@ static ssize_t amdgpu_hwmon_show_mclk(struct device *dev, if (r) return r; - return snprintf(buf, PAGE_SIZE, "%d\n", mclk * 10 * 1000); + return snprintf(buf, PAGE_SIZE, "%u\n", mclk * 10 * 1000); } static ssize_t amdgpu_hwmon_show_mclk_label(struct device *dev, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index c7514f743409..6335bd4ae374 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -2867,10 +2867,17 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, uns WARN_ONCE((vm->use_cpu_for_update && !amdgpu_gmc_vram_full_visible(&adev->gmc)), "CPU update of VM recommended only for large BAR system\n"); - if (vm->use_cpu_for_update) + if (vm->use_cpu_for_update) { + /* Sync with last SDMA update/clear before switching to CPU */ + r = amdgpu_bo_sync_wait(vm->root.base.bo, + AMDGPU_FENCE_OWNER_UNDEFINED, true); + if (r) + goto free_idr; + vm->update_funcs = &amdgpu_vm_cpu_funcs; - else + } else { vm->update_funcs = &amdgpu_vm_sdma_funcs; + } dma_fence_put(vm->last_update); vm->last_update = NULL; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 
b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index 6f118292e40f..19876c90be0e 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -4094,10 +4094,8 @@ static void gfx_v10_0_update_medium_grain_clock_gating(struct amdgpu_device *ade def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE); data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK | RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK | - RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK); - - /* only for Vega10 & Raven1 */ - data |= RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK; + RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK | + RLC_CGTT_MGCG_OVERRIDE__ENABLE_CGTS_LEGACY_MASK); if (def != data) WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data); @@ -4683,12 +4681,17 @@ static int gfx_v10_0_ring_preempt_ib(struct amdgpu_ring *ring) struct amdgpu_device *adev = ring->adev; struct amdgpu_kiq *kiq = &adev->gfx.kiq; struct amdgpu_ring *kiq_ring = &kiq->ring; + unsigned long flags; if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues) return -EINVAL; - if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) + spin_lock_irqsave(&kiq->ring_lock, flags); + + if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) { + spin_unlock_irqrestore(&kiq->ring_lock, flags); return -ENOMEM; + } /* assert preemption condition */ amdgpu_ring_set_preempt_cond_exec(ring, false); @@ -4699,6 +4702,8 @@ static int gfx_v10_0_ring_preempt_ib(struct amdgpu_ring *ring) ++ring->trail_seq); amdgpu_ring_commit(kiq_ring); + spin_unlock_irqrestore(&kiq->ring_lock, flags); + /* poll the trailing fence */ for (i = 0; i < adev->usec_timeout; i++) { if (ring->trail_seq == diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c index bd715012185c..465351184bc3 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c @@ -286,30 +286,20 @@ static uint64_t sdma_v5_0_ring_get_rptr(struct amdgpu_ring *ring) static uint64_t sdma_v5_0_ring_get_wptr(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; - u64 *wptr = NULL; - uint64_t local_wptr = 0; + u64 wptr; if (ring->use_doorbell) { /* XXX check if swapping is necessary on BE */ - wptr = ((u64 *)&adev->wb.wb[ring->wptr_offs]); - DRM_DEBUG("wptr/doorbell before shift == 0x%016llx\n", *wptr); - *wptr = (*wptr) >> 2; - DRM_DEBUG("wptr/doorbell after shift == 0x%016llx\n", *wptr); + wptr = READ_ONCE(*((u64 *)&adev->wb.wb[ring->wptr_offs])); + DRM_DEBUG("wptr/doorbell before shift == 0x%016llx\n", wptr); } else { - u32 lowbit, highbit; - - wptr = &local_wptr; - lowbit = RREG32(sdma_v5_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR)) >> 2; - highbit = RREG32(sdma_v5_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR_HI)) >> 2; - - DRM_DEBUG("wptr [%i]high== 0x%08x low==0x%08x\n", - ring->me, highbit, lowbit); - *wptr = highbit; - *wptr = (*wptr) << 32; - *wptr |= lowbit; + wptr = RREG32(sdma_v5_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR_HI)); + wptr = wptr << 32; + wptr |= RREG32(sdma_v5_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR)); + DRM_DEBUG("wptr before shift [%i] wptr == 0x%016llx\n", ring->me, wptr); } - return *wptr; + return wptr >> 2; } /** @@ -1273,8 +1263,12 @@ static int sdma_v5_0_sw_fini(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; int i; - for (i = 0; i < adev->sdma.num_instances; i++) + for (i = 0; i < adev->sdma.num_instances; i++) { + if (adev->sdma.instance[i].fw != NULL) + 
release_firmware(adev->sdma.instance[i].fw); + amdgpu_ring_fini(&adev->sdma.instance[i].ring); + } return 0; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index 40e3fc0c6942..aa0a617b8d44 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -312,6 +312,7 @@ struct kfd_process *kfd_create_process(struct file *filep) (int)process->lead_thread->pid); if (ret) { pr_warn("Creating procfs pid directory failed"); + kobject_put(process->kobj); goto out; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index 7551761f2aa9..a49e2ab071d6 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c @@ -612,8 +612,10 @@ static int kfd_build_sysfs_node_entry(struct kfd_topology_device *dev, ret = kobject_init_and_add(dev->kobj_node, &node_type, sys_props.kobj_nodes, "%d", id); - if (ret < 0) + if (ret < 0) { + kobject_put(dev->kobj_node); return ret; + } dev->kobj_mem = kobject_create_and_add("mem_banks", dev->kobj_node); if (!dev->kobj_mem) @@ -660,8 +662,10 @@ static int kfd_build_sysfs_node_entry(struct kfd_topology_device *dev, return -ENOMEM; ret = kobject_init_and_add(mem->kobj, &mem_type, dev->kobj_mem, "%d", i); - if (ret < 0) + if (ret < 0) { + kobject_put(mem->kobj); return ret; + } mem->attr.name = "properties"; mem->attr.mode = KFD_SYSFS_FILE_MODE; @@ -679,8 +683,10 @@ static int kfd_build_sysfs_node_entry(struct kfd_topology_device *dev, return -ENOMEM; ret = kobject_init_and_add(cache->kobj, &cache_type, dev->kobj_cache, "%d", i); - if (ret < 0) + if (ret < 0) { + kobject_put(cache->kobj); return ret; + } cache->attr.name = "properties"; cache->attr.mode = KFD_SYSFS_FILE_MODE; @@ -698,8 +704,10 @@ static int kfd_build_sysfs_node_entry(struct kfd_topology_device *dev, return -ENOMEM; ret = kobject_init_and_add(iolink->kobj, &iolink_type, dev->kobj_iolink, "%d", i); - if (ret < 0) + if (ret < 0) { + kobject_put(iolink->kobj); return ret; + } iolink->attr.name = "properties"; iolink->attr.mode = KFD_SYSFS_FILE_MODE; @@ -779,8 +787,10 @@ static int kfd_topology_update_sysfs(void) ret = kobject_init_and_add(sys_props.kobj_topology, &sysprops_type, &kfd_device->kobj, "topology"); - if (ret < 0) + if (ret < 0) { + kobject_put(sys_props.kobj_topology); return ret; + } sys_props.kobj_nodes = kobject_create_and_add("nodes", sys_props.kobj_topology); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 9fd12e108a70..60e50181f6d3 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -928,8 +928,13 @@ static int dm_late_init(void *handle) struct dmcu_iram_parameters params; unsigned int linear_lut[16]; int i; - struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu; - bool ret = false; + struct dmcu *dmcu = NULL; + bool ret; + + if (!adev->dm.fw_dmcu) + return detect_mst_link_for_all_connectors(adev->ddev); + + dmcu = adev->dm.dc->res_pool->dmcu; for (i = 0; i < 16; i++) linear_lut[i] = 0xFFFF * i / 15; @@ -945,13 +950,10 @@ static int dm_late_init(void *handle) */ params.min_abm_backlight = 0x28F; - /* todo will enable for navi10 */ - if (adev->asic_type <= CHIP_RAVEN) { - ret = dmcu_load_iram(dmcu, params); + ret = dmcu_load_iram(dmcu, params); - if (!ret) - return -EINVAL; - } + if (!ret) + return -EINVAL; return detect_mst_link_for_all_connectors(adev->ddev); } @@ -1432,6 
+1434,7 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector) drm_connector_update_edid_property(connector, aconnector->edid); + drm_add_edid_modes(connector, aconnector->edid); if (aconnector->dc_link->aux_mode) drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux, @@ -2040,12 +2043,18 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev) &dm_atomic_state_funcs); r = amdgpu_display_modeset_create_props(adev); - if (r) + if (r) { + dc_release_state(state->context); + kfree(state); return r; + } r = amdgpu_dm_audio_init(adev); - if (r) + if (r) { + dc_release_state(state->context); + kfree(state); return r; + } return 0; } @@ -2061,6 +2070,8 @@ static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm) #if defined(CONFIG_ACPI) struct amdgpu_dm_backlight_caps caps; + memset(&caps, 0, sizeof(caps)); + if (dm->backlight_caps.caps_valid) return; @@ -7226,6 +7237,30 @@ cleanup: *out_type = update_type; return ret; } +#if defined(CONFIG_DRM_AMD_DC_DCN) +static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc) +{ + struct drm_connector *connector; + struct drm_connector_state *conn_state; + struct amdgpu_dm_connector *aconnector = NULL; + int i; + for_each_new_connector_in_state(state, connector, conn_state, i) { + if (conn_state->crtc != crtc) + continue; + + aconnector = to_amdgpu_dm_connector(connector); + if (!aconnector->port || !aconnector->mst_port) + aconnector = NULL; + else + break; + } + + if (!aconnector) + return 0; + + return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr); +} +#endif /** * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM. @@ -7279,6 +7314,40 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, if (ret) goto fail; + /* Check connector changes */ + for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { + struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state); + struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state); + + /* Skip connectors that are disabled or part of modeset already. */ + if (!old_con_state->crtc && !new_con_state->crtc) + continue; + + if (!new_con_state->crtc) + continue; + + new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc); + if (IS_ERR(new_crtc_state)) { + ret = PTR_ERR(new_crtc_state); + goto fail; + } + + if (dm_old_con_state->abm_level != + dm_new_con_state->abm_level) + new_crtc_state->connectors_changed = true; + } + +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (adev->asic_type >= CHIP_NAVI10) { + for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { + if (drm_atomic_crtc_needs_modeset(new_crtc_state)) { + ret = add_affected_mst_dsc_crtcs(state, crtc); + if (ret) + goto fail; + } + } + } +#endif for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { if (!drm_atomic_crtc_needs_modeset(new_crtc_state) && !new_crtc_state->color_mgmt_changed && @@ -7462,20 +7531,38 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, * the same resource. If we have a new DC context as part of * the DM atomic state from validation we need to free it and * retain the existing one instead. + * + * Furthermore, since the DM atomic state only contains the DC + * context and can safely be annulled, we can free the state + * and clear the associated private object now to free + * some memory and avoid a possible use-after-free later. 
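+ *
+ * The removal below uses the usual swap-with-last trick: with
+ * j = state->num_private_objs - 1, the freed slot is refilled via
+ *
+ *	state->private_objs[i] = state->private_objs[j];
+ *	state->num_private_objs = j;
+ *
+ * so the array stays dense without shifting the remaining entries.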
*/ - struct dm_atomic_state *new_dm_state, *old_dm_state; - new_dm_state = dm_atomic_get_new_state(state); - old_dm_state = dm_atomic_get_old_state(state); + for (i = 0; i < state->num_private_objs; i++) { + struct drm_private_obj *obj = state->private_objs[i].ptr; - if (new_dm_state && old_dm_state) { - if (new_dm_state->context) - dc_release_state(new_dm_state->context); + if (obj->funcs == adev->dm.atomic_obj.funcs) { + int j = state->num_private_objs-1; - new_dm_state->context = old_dm_state->context; + dm_atomic_destroy_state(obj, + state->private_objs[i].state); - if (old_dm_state->context) - dc_retain_state(old_dm_state->context); + /* If i is not at the end of the array then the + * last element needs to be moved to where i was + * before the array can safely be truncated. + */ + if (i != j) + state->private_objs[i] = + state->private_objs[j]; + + state->private_objs[j].ptr = NULL; + state->private_objs[j].state = NULL; + state->private_objs[j].old_state = NULL; + state->private_objs[j].new_state = NULL; + + state->num_private_objs = j; + break; + } } } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index 28a6c7b2ef4b..2f858507ca70 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -101,7 +101,7 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux, result = dc_link_aux_transfer_raw(TO_DM_AUX(aux)->ddc_service, &payload, &operation_result); - if (payload.write) + if (payload.write && result >= 0) result = msg->size; if (result < 0) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c index 785322cd4c6c..7241d4c20778 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c @@ -530,6 +530,8 @@ bool dm_pp_get_static_clocks( &pp_clk_info); else if (adev->smu.funcs) ret = smu_get_current_clocks(&adev->smu, &pp_clk_info); + else + return false; if (ret) return false; diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c index 221e0f56389f..823843cd2613 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c @@ -2543,7 +2543,6 @@ static enum bp_result construct_integrated_info( /* Sort voltage table from low to high*/ if (result == BP_RESULT_OK) { - struct clock_voltage_caps temp = {0, 0}; uint32_t i; uint32_t j; @@ -2553,10 +2552,8 @@ static enum bp_result construct_integrated_info( info->disp_clk_voltage[j].max_supported_clk < info->disp_clk_voltage[j-1].max_supported_clk) { /* swap j and j - 1*/ - temp = info->disp_clk_voltage[j-1]; - info->disp_clk_voltage[j-1] = - info->disp_clk_voltage[j]; - info->disp_clk_voltage[j] = temp; + swap(info->disp_clk_voltage[j - 1], + info->disp_clk_voltage[j]); } } } diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c index dff65c0fe82f..7873abea4112 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c @@ -1613,8 +1613,6 @@ static enum bp_result construct_integrated_info( struct atom_common_table_header *header; struct atom_data_revision revision; - - struct clock_voltage_caps temp = {0, 0}; uint32_t i; uint32_t j; @@ -1644,10 +1642,8 @@ static enum bp_result 
construct_integrated_info( info->disp_clk_voltage[j-1].max_supported_clk ) { /* swap j and j - 1*/ - temp = info->disp_clk_voltage[j-1]; - info->disp_clk_voltage[j-1] = - info->disp_clk_voltage[j]; - info->disp_clk_voltage[j] = temp; + swap(info->disp_clk_voltage[j - 1], + info->disp_clk_voltage[j]); } } } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c index 47f529ce280a..2718396083ee 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c @@ -85,12 +85,77 @@ static int rv1_determine_dppclk_threshold(struct clk_mgr_internal *clk_mgr, stru return disp_clk_threshold; } -static void ramp_up_dispclk_with_dpp(struct clk_mgr_internal *clk_mgr, struct dc *dc, struct dc_clocks *new_clocks) +static void ramp_up_dispclk_with_dpp( + struct clk_mgr_internal *clk_mgr, + struct dc *dc, + struct dc_clocks *new_clocks, + bool safe_to_lower) { int i; int dispclk_to_dpp_threshold = rv1_determine_dppclk_threshold(clk_mgr, new_clocks); bool request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz; + /* this function is to change dispclk, dppclk and dprefclk according to + * bandwidth requirement. Its call stack is rv1_update_clocks --> + * update_clocks --> dcn10_prepare_bandwidth / dcn10_optimize_bandwidth + * --> prepare_bandwidth / optimize_bandwidth. before change dcn hw, + * prepare_bandwidth will be called first to allow enough clock, + * watermark for change, after end of dcn hw change, optimize_bandwidth + * is executed to lower clock to save power for new dcn hw settings. + * + * below is sequence of commit_planes_for_stream: + * + * step 1: prepare_bandwidth - raise clock to have enough bandwidth + * step 2: lock_doublebuffer_enable + * step 3: pipe_control_lock(true) - make dchubp register change will + * not take effect right way + * step 4: apply_ctx_for_surface - program dchubp + * step 5: pipe_control_lock(false) - dchubp register change take effect + * step 6: optimize_bandwidth --> dc_post_update_surfaces_to_stream + * for full_date, optimize clock to save power + * + * at end of step 1, dcn clocks (dprefclk, dispclk, dppclk) may be + * changed for new dchubp configuration. but real dcn hub dchubps are + * still running with old configuration until end of step 5. this need + * clocks settings at step 1 should not less than that before step 1. + * this is checked by two conditions: 1. if (should_set_clock(safe_to_lower + * , new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz) || + * new_clocks->dispclk_khz == clk_mgr_base->clks.dispclk_khz) + * 2. request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz + * + * the second condition is based on new dchubp configuration. dppclk + * for new dchubp may be different from dppclk before step 1. + * for example, before step 1, dchubps are as below: + * pipe 0: recout=(0,40,1920,980) viewport=(0,0,1920,979) + * pipe 1: recout=(0,0,1920,1080) viewport=(0,0,1920,1080) + * for dppclk for pipe0 need dppclk = dispclk + * + * new dchubp pipe split configuration: + * pipe 0: recout=(0,0,960,1080) viewport=(0,0,960,1080) + * pipe 1: recout=(960,0,960,1080) viewport=(960,0,960,1080) + * dppclk only needs dppclk = dispclk /2. + * + * dispclk, dppclk are not lock by otg master lock. they take effect + * after step 1. during this transition, dispclk are the same, but + * dppclk is changed to half of previous clock for old dchubp + * configuration between step 1 and step 6. 
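(i.e. the old dchubp pipes still need dppclk = dispclk while dppclk has
+ * already been lowered to dispclk / 2 for the new split configuration).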
This may cause p-state + * warning intermittently. + * + * for new_clocks->dispclk_khz == clk_mgr_base->clks.dispclk_khz, we + * need make sure dppclk are not changed to less between step 1 and 6. + * for new_clocks->dispclk_khz > clk_mgr_base->clks.dispclk_khz, + * new display clock is raised, but we do not know ratio of + * new_clocks->dispclk_khz and clk_mgr_base->clks.dispclk_khz, + * new_clocks->dispclk_khz /2 does not guarantee equal or higher than + * old dppclk. we could ignore power saving different between + * dppclk = displck and dppclk = dispclk / 2 between step 1 and step 6. + * as long as safe_to_lower = false, set dpclk = dispclk to simplify + * condition check. + * todo: review this change for other asic. + **/ + if (!safe_to_lower) + request_dpp_div = false; + /* set disp clk to dpp clk threshold */ clk_mgr->funcs->set_dispclk(clk_mgr, dispclk_to_dpp_threshold); @@ -206,7 +271,7 @@ static void rv1_update_clocks(struct clk_mgr *clk_mgr_base, /* program dispclk on = as a w/a for sleep resume clock ramping issues */ if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz) || new_clocks->dispclk_khz == clk_mgr_base->clks.dispclk_khz) { - ramp_up_dispclk_with_dpp(clk_mgr, dc, new_clocks); + ramp_up_dispclk_with_dpp(clk_mgr, dc, new_clocks, safe_to_lower); clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz; send_request_to_lower = true; } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 2028dc017f7a..68d56a91d44b 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -907,15 +907,11 @@ static void program_timing_sync( /* set first pipe with plane as master */ for (j = 0; j < group_size; j++) { - struct pipe_ctx *temp; - if (pipe_set[j]->plane_state) { if (j == 0) break; - temp = pipe_set[0]; - pipe_set[0] = pipe_set[j]; - pipe_set[j] = temp; + swap(pipe_set[0], pipe_set[j]); break; } } @@ -2230,6 +2226,14 @@ void dc_commit_updates_for_stream(struct dc *dc, copy_stream_update_to_stream(dc, context, stream, stream_update); + if (update_type > UPDATE_TYPE_FAST) { + if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) { + DC_ERROR("Mode validation failed for stream update!\n"); + dc_release_state(context); + return; + } + } + commit_planes_for_stream( dc, srf_updates, diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c index 1599bb971111..e860ae05feda 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c @@ -1151,6 +1151,7 @@ static enum dc_status dcn10_validate_global(struct dc *dc, struct dc_state *cont bool video_large = false; bool desktop_large = false; bool dcc_disabled = false; + bool mpo_enabled = false; for (i = 0; i < context->stream_count; i++) { if (context->stream_status[i].plane_count == 0) @@ -1159,6 +1160,9 @@ static enum dc_status dcn10_validate_global(struct dc *dc, struct dc_state *cont if (context->stream_status[i].plane_count > 2) return DC_FAIL_UNSUPPORTED_1; + if (context->stream_status[i].plane_count > 1) + mpo_enabled = true; + for (j = 0; j < context->stream_status[i].plane_count; j++) { struct dc_plane_state *plane = context->stream_status[i].plane_states[j]; @@ -1182,6 +1186,10 @@ static enum dc_status dcn10_validate_global(struct dc *dc, struct dc_state *cont } } + /* Disable MPO in multi-display configurations. 
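(i.e. reject any state where more than one stream is enabled and at least one of them uses more than one plane).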
*/ + if (context->stream_count > 1 && mpo_enabled) + return DC_FAIL_UNSUPPORTED_1; + /* * Workaround: On DCN10 there is UMC issue that causes underflow when * playing 4k video on 4k desktop with video downscaled and single channel diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c index 9aa258f3550b..ddf66046616d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c @@ -121,35 +121,35 @@ void enc1_update_generic_info_packet( switch (packet_index) { case 0: REG_UPDATE(AFMT_VBI_PACKET_CONTROL1, - AFMT_GENERIC0_FRAME_UPDATE, 1); + AFMT_GENERIC0_IMMEDIATE_UPDATE, 1); break; case 1: REG_UPDATE(AFMT_VBI_PACKET_CONTROL1, - AFMT_GENERIC1_FRAME_UPDATE, 1); + AFMT_GENERIC1_IMMEDIATE_UPDATE, 1); break; case 2: REG_UPDATE(AFMT_VBI_PACKET_CONTROL1, - AFMT_GENERIC2_FRAME_UPDATE, 1); + AFMT_GENERIC2_IMMEDIATE_UPDATE, 1); break; case 3: REG_UPDATE(AFMT_VBI_PACKET_CONTROL1, - AFMT_GENERIC3_FRAME_UPDATE, 1); + AFMT_GENERIC3_IMMEDIATE_UPDATE, 1); break; case 4: REG_UPDATE(AFMT_VBI_PACKET_CONTROL1, - AFMT_GENERIC4_FRAME_UPDATE, 1); + AFMT_GENERIC4_IMMEDIATE_UPDATE, 1); break; case 5: REG_UPDATE(AFMT_VBI_PACKET_CONTROL1, - AFMT_GENERIC5_FRAME_UPDATE, 1); + AFMT_GENERIC5_IMMEDIATE_UPDATE, 1); break; case 6: REG_UPDATE(AFMT_VBI_PACKET_CONTROL1, - AFMT_GENERIC6_FRAME_UPDATE, 1); + AFMT_GENERIC6_IMMEDIATE_UPDATE, 1); break; case 7: REG_UPDATE(AFMT_VBI_PACKET_CONTROL1, - AFMT_GENERIC7_FRAME_UPDATE, 1); + AFMT_GENERIC7_IMMEDIATE_UPDATE, 1); break; default: break; diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h index a512cbea00d1..b9656614950e 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h @@ -275,7 +275,14 @@ struct dcn10_stream_enc_registers { SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC2_FRAME_UPDATE, mask_sh),\ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC3_FRAME_UPDATE, mask_sh),\ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC4_FRAME_UPDATE, mask_sh),\ + SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC0_IMMEDIATE_UPDATE, mask_sh),\ + SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC1_IMMEDIATE_UPDATE, mask_sh),\ + SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC2_IMMEDIATE_UPDATE, mask_sh),\ + SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC3_IMMEDIATE_UPDATE, mask_sh),\ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC4_IMMEDIATE_UPDATE, mask_sh),\ + SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC5_IMMEDIATE_UPDATE, mask_sh),\ + SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC6_IMMEDIATE_UPDATE, mask_sh),\ + SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC7_IMMEDIATE_UPDATE, mask_sh),\ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC5_FRAME_UPDATE, mask_sh),\ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC6_FRAME_UPDATE, mask_sh),\ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC7_FRAME_UPDATE, mask_sh),\ @@ -339,7 +346,14 @@ struct dcn10_stream_enc_registers { type AFMT_GENERIC2_FRAME_UPDATE;\ type AFMT_GENERIC3_FRAME_UPDATE;\ type AFMT_GENERIC4_FRAME_UPDATE;\ + type AFMT_GENERIC0_IMMEDIATE_UPDATE;\ + type AFMT_GENERIC1_IMMEDIATE_UPDATE;\ + type AFMT_GENERIC2_IMMEDIATE_UPDATE;\ + type AFMT_GENERIC3_IMMEDIATE_UPDATE;\ type AFMT_GENERIC4_IMMEDIATE_UPDATE;\ + type AFMT_GENERIC5_IMMEDIATE_UPDATE;\ + type AFMT_GENERIC6_IMMEDIATE_UPDATE;\ + type 
AFMT_GENERIC7_IMMEDIATE_UPDATE;\ type AFMT_GENERIC5_FRAME_UPDATE;\ type AFMT_GENERIC6_FRAME_UPDATE;\ type AFMT_GENERIC7_FRAME_UPDATE;\ diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c index c13dce760098..05b98eadc289 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c @@ -2845,7 +2845,7 @@ static bool dcn20_validate_bandwidth_internal(struct dc *dc, struct dc_state *co int vlevel = 0; int pipe_split_from[MAX_PIPES]; int pipe_cnt = 0; - display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_KERNEL); + display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_ATOMIC); DC_LOGGER_INIT(dc->ctx->logger); BW_VAL_TRACE_COUNT(); diff --git a/drivers/gpu/drm/amd/display/include/fixed31_32.h b/drivers/gpu/drm/amd/display/include/fixed31_32.h index 89ef9f6860e5..16df2a485dd0 100644 --- a/drivers/gpu/drm/amd/display/include/fixed31_32.h +++ b/drivers/gpu/drm/amd/display/include/fixed31_32.h @@ -431,6 +431,9 @@ struct fixed31_32 dc_fixpt_log(struct fixed31_32 arg); */ static inline struct fixed31_32 dc_fixpt_pow(struct fixed31_32 arg1, struct fixed31_32 arg2) { + if (arg1.value == 0) + return arg2.value == 0 ? dc_fixpt_one : dc_fixpt_zero; + return dc_fixpt_exp( dc_fixpt_mul( dc_fixpt_log(arg1), diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c index 2d8f14b69117..51d07a4561ce 100644 --- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c +++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c @@ -799,7 +799,7 @@ static bool build_regamma(struct pwl_float_data_ex *rgb_regamma, pow_buffer_ptr = -1; // reset back to no optimize ret = true; release: - kfree(coeff); + kvfree(coeff); return ret; } @@ -1862,7 +1862,7 @@ bool calculate_user_regamma_ramp(struct dc_transfer_func *output_tf, kfree(rgb_regamma); rgb_regamma_alloc_fail: - kvfree(rgb_user); + kfree(rgb_user); rgb_user_alloc_fail: return ret; } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c index fed3fc4bb57a..6322e57893db 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c @@ -209,8 +209,7 @@ static int smu10_set_min_deep_sleep_dcefclk(struct pp_hwmgr *hwmgr, uint32_t clo { struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend); - if (smu10_data->need_min_deep_sleep_dcefclk && - smu10_data->deep_sleep_dcefclk != clock) { + if (clock && smu10_data->deep_sleep_dcefclk != clock) { smu10_data->deep_sleep_dcefclk = clock; smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetMinDeepSleepDcefclk, @@ -223,8 +222,7 @@ static int smu10_set_hard_min_dcefclk_by_freq(struct pp_hwmgr *hwmgr, uint32_t c { struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend); - if (smu10_data->dcf_actual_hard_min_freq && - smu10_data->dcf_actual_hard_min_freq != clock) { + if (clock && smu10_data->dcf_actual_hard_min_freq != clock) { smu10_data->dcf_actual_hard_min_freq = clock; smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetHardMinDcefclkByFreq, @@ -237,8 +235,7 @@ static int smu10_set_hard_min_fclk_by_freq(struct pp_hwmgr *hwmgr, uint32_t cloc { struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend); - if (smu10_data->f_actual_hard_min_freq && 
- smu10_data->f_actual_hard_min_freq != clock) { + if (clock && smu10_data->f_actual_hard_min_freq != clock) { smu10_data->f_actual_hard_min_freq = clock; smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetHardMinFclkByFreq, diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index e6da53e9c3f4..3a2a1dc9a786 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c @@ -3575,7 +3575,8 @@ static int smu7_read_sensor(struct pp_hwmgr *hwmgr, int idx, case AMDGPU_PP_SENSOR_GPU_POWER: return smu7_get_gpu_power(hwmgr, (uint32_t *)value); case AMDGPU_PP_SENSOR_VDDGFX: - if ((data->vr_config & 0xff) == 0x2) + if ((data->vr_config & VRCONF_VDDGFX_MASK) == + (VR_SVI2_PLANE_2 << VRCONF_VDDGFX_SHIFT)) val_vid = PHM_READ_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_SVI2_STATUS, PLANE2_VID); else diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c index ba8763daa380..e8d01abf27fa 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c @@ -364,17 +364,29 @@ int vega10_thermal_get_temperature(struct pp_hwmgr *hwmgr) static int vega10_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, struct PP_TemperatureRange *range) { + struct phm_ppt_v2_information *pp_table_info = + (struct phm_ppt_v2_information *)(hwmgr->pptable); + struct phm_tdp_table *tdp_table = pp_table_info->tdp_table; struct amdgpu_device *adev = hwmgr->adev; - int low = VEGA10_THERMAL_MINIMUM_ALERT_TEMP * - PP_TEMPERATURE_UNITS_PER_CENTIGRADES; - int high = VEGA10_THERMAL_MAXIMUM_ALERT_TEMP * - PP_TEMPERATURE_UNITS_PER_CENTIGRADES; + int low = VEGA10_THERMAL_MINIMUM_ALERT_TEMP; + int high = VEGA10_THERMAL_MAXIMUM_ALERT_TEMP; uint32_t val; - if (low < range->min) - low = range->min; - if (high > range->max) - high = range->max; + /* compare them in unit celsius degree */ + if (low < range->min / PP_TEMPERATURE_UNITS_PER_CENTIGRADES) + low = range->min / PP_TEMPERATURE_UNITS_PER_CENTIGRADES; + + /* + * As a common sense, usSoftwareShutdownTemp should be bigger + * than ThotspotLimit. For any invalid usSoftwareShutdownTemp, + * we will just use the max possible setting VEGA10_THERMAL_MAXIMUM_ALERT_TEMP + * to avoid false alarms. 
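+ *
+ * Note on units: low, high and usSoftwareShutdownTemp are handled in
+ * plain degrees Celsius here, while range->min and range->hotspot_crit_max
+ * carry PP_TEMPERATURE_UNITS_PER_CENTIGRADES (typically 1000) units per
+ * degree, hence the divisions in the comparisons.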
+ */ + if ((tdp_table->usSoftwareShutdownTemp > + range->hotspot_crit_max / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)) { + if (high > tdp_table->usSoftwareShutdownTemp) + high = tdp_table->usSoftwareShutdownTemp; + } if (low > high) return -EINVAL; @@ -383,8 +395,8 @@ static int vega10_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, MAX_IH_CREDIT, 5); val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1); - val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)); - val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)); + val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, high); + val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, low); val &= (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK) & (~THM_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK) & (~THM_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c index 904eb2c9155b..40e7c72eeae0 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c @@ -170,17 +170,18 @@ int vega12_thermal_get_temperature(struct pp_hwmgr *hwmgr) static int vega12_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, struct PP_TemperatureRange *range) { + struct phm_ppt_v3_information *pptable_information = + (struct phm_ppt_v3_information *)hwmgr->pptable; struct amdgpu_device *adev = hwmgr->adev; - int low = VEGA12_THERMAL_MINIMUM_ALERT_TEMP * - PP_TEMPERATURE_UNITS_PER_CENTIGRADES; - int high = VEGA12_THERMAL_MAXIMUM_ALERT_TEMP * - PP_TEMPERATURE_UNITS_PER_CENTIGRADES; + int low = VEGA12_THERMAL_MINIMUM_ALERT_TEMP; + int high = VEGA12_THERMAL_MAXIMUM_ALERT_TEMP; uint32_t val; - if (low < range->min) - low = range->min; - if (high > range->max) - high = range->max; + /* compare them in unit celsius degree */ + if (low < range->min / PP_TEMPERATURE_UNITS_PER_CENTIGRADES) + low = range->min / PP_TEMPERATURE_UNITS_PER_CENTIGRADES; + if (high > pptable_information->us_software_shutdown_temp) + high = pptable_information->us_software_shutdown_temp; if (low > high) return -EINVAL; @@ -189,8 +190,8 @@ static int vega12_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, MAX_IH_CREDIT, 5); val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1); - val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)); - val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)); + val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, high); + val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, low); val = val & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK); WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL, val); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c index f5915308e643..947e4fa3c5e6 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c @@ -981,27 +981,15 @@ static int vega20_disable_all_smu_features(struct pp_hwmgr *hwmgr) { struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); - uint64_t features_enabled; - int i; - bool enabled; - int ret = 0; + int i, ret = 0; PP_ASSERT_WITH_CODE((ret = 
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableAllSmuFeatures)) == 0, "[DisableAllSMUFeatures] Failed to disable all smu features!", return ret); - ret = vega20_get_enabled_smc_features(hwmgr, &features_enabled); - PP_ASSERT_WITH_CODE(!ret, - "[DisableAllSMUFeatures] Failed to get enabled smc features!", - return ret); - - for (i = 0; i < GNLD_FEATURES_MAX; i++) { - enabled = (features_enabled & data->smu_features[i].smu_feature_bitmap) ? - true : false; - data->smu_features[i].enabled = enabled; - data->smu_features[i].supported = enabled; - } + for (i = 0; i < GNLD_FEATURES_MAX; i++) + data->smu_features[i].enabled = 0; return 0; } @@ -1652,12 +1640,6 @@ static void vega20_init_powergate_state(struct pp_hwmgr *hwmgr) data->uvd_power_gated = true; data->vce_power_gated = true; - - if (data->smu_features[GNLD_DPM_UVD].enabled) - data->uvd_power_gated = false; - - if (data->smu_features[GNLD_DPM_VCE].enabled) - data->vce_power_gated = false; } static int vega20_enable_dpm_tasks(struct pp_hwmgr *hwmgr) @@ -3211,10 +3193,11 @@ static int vega20_get_ppfeature_status(struct pp_hwmgr *hwmgr, char *buf) static int vega20_set_ppfeature_status(struct pp_hwmgr *hwmgr, uint64_t new_ppfeature_masks) { - uint64_t features_enabled; - uint64_t features_to_enable; - uint64_t features_to_disable; - int ret = 0; + struct vega20_hwmgr *data = + (struct vega20_hwmgr *)(hwmgr->backend); + uint64_t features_enabled, features_to_enable, features_to_disable; + int i, ret = 0; + bool enabled; if (new_ppfeature_masks >= (1ULL << GNLD_FEATURES_MAX)) return -EINVAL; @@ -3243,6 +3226,17 @@ static int vega20_set_ppfeature_status(struct pp_hwmgr *hwmgr, uint64_t new_ppfe return ret; } + /* Update the cached feature enablement state */ + ret = vega20_get_enabled_smc_features(hwmgr, &features_enabled); + if (ret) + return ret; + + for (i = 0; i < GNLD_FEATURES_MAX; i++) { + enabled = (features_enabled & data->smu_features[i].smu_feature_bitmap) ? 
+ true : false; + data->smu_features[i].enabled = enabled; + } + return 0; } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_thermal.c index ede54e87e287..ce56b93871e8 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_thermal.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_thermal.c @@ -240,17 +240,18 @@ int vega20_thermal_get_temperature(struct pp_hwmgr *hwmgr) static int vega20_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, struct PP_TemperatureRange *range) { + struct phm_ppt_v3_information *pptable_information = + (struct phm_ppt_v3_information *)hwmgr->pptable; struct amdgpu_device *adev = hwmgr->adev; - int low = VEGA20_THERMAL_MINIMUM_ALERT_TEMP * - PP_TEMPERATURE_UNITS_PER_CENTIGRADES; - int high = VEGA20_THERMAL_MAXIMUM_ALERT_TEMP * - PP_TEMPERATURE_UNITS_PER_CENTIGRADES; + int low = VEGA20_THERMAL_MINIMUM_ALERT_TEMP; + int high = VEGA20_THERMAL_MAXIMUM_ALERT_TEMP; uint32_t val; - if (low < range->min) - low = range->min; - if (high > range->max) - high = range->max; + /* compare them in unit celsius degree */ + if (low < range->min / PP_TEMPERATURE_UNITS_PER_CENTIGRADES) + low = range->min / PP_TEMPERATURE_UNITS_PER_CENTIGRADES; + if (high > pptable_information->us_software_shutdown_temp) + high = pptable_information->us_software_shutdown_temp; if (low > high) return -EINVAL; @@ -259,8 +260,8 @@ static int vega20_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, MAX_IH_CREDIT, 5); val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1); - val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)); - val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)); + val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, high); + val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, low); val = val & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK); WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL, val); diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c index 0922d9cd858a..c4d8c52c6b9c 100644 --- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c +++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c @@ -171,7 +171,8 @@ static int smu_v11_0_init_microcode(struct smu_context *smu) chip_name = "navi12"; break; default: - BUG(); + dev_err(adev->dev, "Unsupported ASIC type %d\n", adev->asic_type); + return -EINVAL; } snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_smc.bin", chip_name); diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c index 15590fd86ef4..09a3d8ae4449 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c @@ -239,7 +239,7 @@ static void ci_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr) switch (dev_id) { case 0x67BA: - case 0x66B1: + case 0x67B1: smu_data->power_tune_defaults = &defaults_hawaii_pro; break; case 0x67B8: @@ -2725,7 +2725,10 @@ static int ci_initialize_mc_reg_table(struct pp_hwmgr *hwmgr) static bool ci_is_dpm_running(struct pp_hwmgr *hwmgr) { - return ci_is_smc_ram_running(hwmgr); + return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device, + CGS_IND_REG__SMC, FEATURE_STATUS, + VOLTAGE_CONTROLLER_ON)) + ? 
true : false; } static int ci_smu_init(struct pp_hwmgr *hwmgr) diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c index ae18fbcb26fb..86bdb0194493 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c @@ -642,9 +642,6 @@ static int vegam_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr, /* sclk is bigger than max sclk in the dependence table */ *voltage |= (dep_table->entries[i - 1].vddc * VOLTAGE_SCALE) << VDDC_SHIFT; - vddci = phm_find_closest_vddci(&(data->vddci_voltage_table), - (dep_table->entries[i - 1].vddc - - (uint16_t)VDDC_VDDCI_DELTA)); if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control) *voltage |= (data->vbios_boot_state.vddci_bootup_value * @@ -652,8 +649,13 @@ static int vegam_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr, else if (dep_table->entries[i - 1].vddci) *voltage |= (dep_table->entries[i - 1].vddci * VOLTAGE_SCALE) << VDDC_SHIFT; - else + else { + vddci = phm_find_closest_vddci(&(data->vddci_voltage_table), + (dep_table->entries[i - 1].vddc - + (uint16_t)VDDC_VDDCI_DELTA)); + *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT; + } if (SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control) *mvdd = data->vbios_boot_state.mvdd_bootup_value * VOLTAGE_SCALE; diff --git a/drivers/gpu/drm/arm/malidp_planes.c b/drivers/gpu/drm/arm/malidp_planes.c index 3c70a53813bf..0b2bb485d9be 100644 --- a/drivers/gpu/drm/arm/malidp_planes.c +++ b/drivers/gpu/drm/arm/malidp_planes.c @@ -928,7 +928,7 @@ int malidp_de_planes_init(struct drm_device *drm) const struct malidp_hw_regmap *map = &malidp->dev->hw->map; struct malidp_plane *plane = NULL; enum drm_plane_type plane_type; - unsigned long crtcs = 1 << drm->mode_config.num_crtc; + unsigned long crtcs = BIT(drm->mode_config.num_crtc); unsigned long flags = DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 | DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270 | DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y; unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) | diff --git a/drivers/gpu/drm/bochs/bochs_kms.c b/drivers/gpu/drm/bochs/bochs_kms.c index 02a9c1ed165b..fa50ab2523d4 100644 --- a/drivers/gpu/drm/bochs/bochs_kms.c +++ b/drivers/gpu/drm/bochs/bochs_kms.c @@ -194,6 +194,7 @@ int bochs_kms_init(struct bochs_device *bochs) bochs->dev->mode_config.preferred_depth = 24; bochs->dev->mode_config.prefer_shadow = 0; bochs->dev->mode_config.prefer_shadow_fbdev = 1; + bochs->dev->mode_config.fbdev_use_iomem = true; bochs->dev->mode_config.quirk_addfb_prefer_host_byte_order = true; bochs->dev->mode_config.funcs = &bochs_mode_funcs; diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c b/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c index a428185be2c1..d05b3033b510 100644 --- a/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c +++ b/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c @@ -19,13 +19,15 @@ static void adv7511_calc_cts_n(unsigned int f_tmds, unsigned int fs, { switch (fs) { case 32000: - *n = 4096; + case 48000: + case 96000: + case 192000: + *n = fs * 128 / 1000; break; case 44100: - *n = 6272; - break; - case 48000: - *n = 6144; + case 88200: + case 176400: + *n = fs * 128 / 900; break; } diff --git a/drivers/gpu/drm/bridge/sil-sii8620.c b/drivers/gpu/drm/bridge/sil-sii8620.c index bd3165ee5354..04431dbac4a4 100644 --- a/drivers/gpu/drm/bridge/sil-sii8620.c +++ b/drivers/gpu/drm/bridge/sil-sii8620.c @@ -177,7 +177,7 @@ static void sii8620_read_buf(struct sii8620 *ctx, u16 addr, u8 *buf, int len) static u8 
sii8620_readb(struct sii8620 *ctx, u16 addr) { - u8 ret; + u8 ret = 0; sii8620_read_buf(ctx, addr, &ret, 1); return ret; diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c index 0a580957c8cf..f1de4bb6558c 100644 --- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c +++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c @@ -647,6 +647,12 @@ static ssize_t ti_sn_aux_transfer(struct drm_dp_aux *aux, buf[i]); } + /* Clear old status bits before start so we don't get confused */ + regmap_write(pdata->regmap, SN_AUX_CMD_STATUS_REG, + AUX_IRQ_STATUS_NAT_I2C_FAIL | + AUX_IRQ_STATUS_AUX_RPLY_TOUT | + AUX_IRQ_STATUS_AUX_SHORT); + regmap_write(pdata->regmap, SN_AUX_CMD_REG, request_val | AUX_CMD_SEND); ret = regmap_read_poll_timeout(pdata->regmap, SN_AUX_CMD_REG, val, diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c index 4c766624b20d..2337b3827e6a 100644 --- a/drivers/gpu/drm/drm_connector.c +++ b/drivers/gpu/drm/drm_connector.c @@ -27,6 +27,7 @@ #include #include #include +#include #include @@ -511,6 +512,10 @@ int drm_connector_register(struct drm_connector *connector) drm_mode_object_register(connector->dev, &connector->base); connector->registration_state = DRM_CONNECTOR_REGISTERED; + + /* Let userspace know we have a new connector */ + drm_sysfs_hotplug_event(connector->dev); + goto unlock; err_debugfs: diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c index eab0f2687cd6..00debd02c322 100644 --- a/drivers/gpu/drm/drm_debugfs.c +++ b/drivers/gpu/drm/drm_debugfs.c @@ -337,13 +337,13 @@ static ssize_t connector_write(struct file *file, const char __user *ubuf, buf[len] = '\0'; - if (!strcmp(buf, "on")) + if (sysfs_streq(buf, "on")) connector->force = DRM_FORCE_ON; - else if (!strcmp(buf, "digital")) + else if (sysfs_streq(buf, "digital")) connector->force = DRM_FORCE_ON_DIGITAL; - else if (!strcmp(buf, "off")) + else if (sysfs_streq(buf, "off")) connector->force = DRM_FORCE_OFF; - else if (!strcmp(buf, "unspecified")) + else if (sysfs_streq(buf, "unspecified")) connector->force = DRM_FORCE_UNSPECIFIED; else return -EINVAL; diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index 4b7aaad07423..2de1eebe591f 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c @@ -27,6 +27,7 @@ #include #include #include +#include #include #include @@ -3368,11 +3369,11 @@ bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, { int ret; - port = drm_dp_mst_topology_get_port_validated(mgr, port); - if (!port) + if (slots < 0) return false; - if (slots < 0) + port = drm_dp_mst_topology_get_port_validated(mgr, port); + if (!port) return false; if (port->vcpi.vcpi > 0) { @@ -3388,6 +3389,7 @@ bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, if (ret) { DRM_DEBUG_KMS("failed to init vcpi slots=%d max=63 ret=%d\n", DIV_ROUND_UP(pbn, mgr->pbn_div), ret); + drm_dp_mst_topology_put_port(port); goto out; } DRM_DEBUG_KMS("initing vcpi for pbn=%d slots=%d\n", @@ -3498,6 +3500,17 @@ fail: return ret; } +static int do_get_act_status(struct drm_dp_aux *aux) +{ + int ret; + u8 status; + + ret = drm_dp_dpcd_readb(aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status); + if (ret < 0) + return ret; + + return status; +} /** * drm_dp_check_act_status() - Check ACT handled status. 
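The connector_write() hunk above switches from strcmp() to sysfs_streq(), which treats an optional trailing newline as part of the terminator, so "echo on > .../force" works without needing echo -n. A minimal sketch of that parsing style, assuming an illustrative enum rather than the driver's real drm_connector force values:

#include <linux/errno.h>
#include <linux/string.h>

enum force_state { FORCE_UNSPECIFIED, FORCE_OFF, FORCE_ON, FORCE_ON_DIGITAL };

/* Map a user-written, possibly newline-terminated buffer to a state. */
static int parse_force(const char *buf, enum force_state *out)
{
        if (sysfs_streq(buf, "on"))
                *out = FORCE_ON;
        else if (sysfs_streq(buf, "digital"))
                *out = FORCE_ON_DIGITAL;
        else if (sysfs_streq(buf, "off"))
                *out = FORCE_OFF;
        else if (sysfs_streq(buf, "unspecified"))
                *out = FORCE_UNSPECIFIED;
        else
                return -EINVAL;

        return 0;
}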
@@ -3507,33 +3520,29 @@ fail: */ int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr) { - u8 status; - int ret; - int count = 0; + /* + * There doesn't seem to be any recommended retry count or timeout in + * the MST specification. Since some hubs have been observed to take + * over 1 second to update their payload allocations under certain + * conditions, we use a rather large timeout value. + */ + const int timeout_ms = 3000; + int ret, status; - do { - ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status); - - if (ret < 0) { - DRM_DEBUG_KMS("failed to read payload table status %d\n", ret); - goto fail; - } - - if (status & DP_PAYLOAD_ACT_HANDLED) - break; - count++; - udelay(100); - - } while (count < 30); - - if (!(status & DP_PAYLOAD_ACT_HANDLED)) { - DRM_DEBUG_KMS("failed to get ACT bit %d after %d retries\n", status, count); - ret = -EINVAL; - goto fail; + ret = readx_poll_timeout(do_get_act_status, mgr->aux, status, + status & DP_PAYLOAD_ACT_HANDLED || status < 0, + 200, timeout_ms * USEC_PER_MSEC); + if (ret < 0 && status >= 0) { + DRM_DEBUG_KMS("Failed to get ACT after %dms, last status: %02x\n", + timeout_ms, status); + return -EINVAL; + } else if (status < 0) { + DRM_DEBUG_KMS("Failed to read payload table status: %d\n", + status); + return status; } + return 0; -fail: - return ret; } EXPORT_SYMBOL(drm_dp_check_act_status); diff --git a/drivers/gpu/drm/drm_encoder_slave.c b/drivers/gpu/drm/drm_encoder_slave.c index cf804389f5ec..d50a7884e69e 100644 --- a/drivers/gpu/drm/drm_encoder_slave.c +++ b/drivers/gpu/drm/drm_encoder_slave.c @@ -84,7 +84,7 @@ int drm_i2c_encoder_init(struct drm_device *dev, err = encoder_drv->encoder_init(client, dev, encoder); if (err) - goto fail_unregister; + goto fail_module_put; if (info->platform_data) encoder->slave_funcs->set_config(&encoder->base, @@ -92,9 +92,10 @@ int drm_i2c_encoder_init(struct drm_device *dev, return 0; +fail_module_put: + module_put(module); fail_unregister: i2c_unregister_device(client); - module_put(module); fail: return err; } diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 8d193a58363d..6b8502bcf0fd 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -390,7 +390,11 @@ static void drm_fb_helper_dirty_blit_real(struct drm_fb_helper *fb_helper, unsigned int y; for (y = clip->y1; y < clip->y2; y++) { - memcpy(dst, src, len); + if (!fb_helper->dev->mode_config.fbdev_use_iomem) + memcpy(dst, src, len); + else + memcpy_toio((void __iomem *)dst, src, len); + src += fb->pitches[0]; dst += fb->pitches[0]; } diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index 6854f5867d51..1fdc85a71cec 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -710,6 +710,8 @@ int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles, if (!objs) return -ENOMEM; + *objs_out = objs; + handles = kvmalloc_array(count, sizeof(u32), GFP_KERNEL); if (!handles) { ret = -ENOMEM; @@ -723,8 +725,6 @@ int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles, } ret = objects_lookup(filp, handles, count, objs); - *objs_out = objs; - out: kvfree(handles); return ret; @@ -872,9 +872,6 @@ err: * @file_priv: drm file-private structure * * Open an object using the global name, returning a handle and the size. - * - * This handle (of course) holds a reference to the object, so the object - * will not go away until the handle is deleted. 
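The drm_dp_check_act_status() rewrite above replaces a hand-rolled retry loop with readx_poll_timeout(), which repeatedly calls an accessor with a single argument and re-evaluates a condition until it holds or the timeout expires. A standalone sketch of the same shape, assuming v5.x-era header paths:

#include <linux/errno.h>
#include <linux/iopoll.h>
#include <drm/drm_dp_helper.h>

/* Same accessor shape as do_get_act_status(): return the status byte, or a
 * negative errno if the AUX read itself failed. */
static int read_act_status(struct drm_dp_aux *aux)
{
        u8 status;
        ssize_t ret;

        ret = drm_dp_dpcd_readb(aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
        if (ret < 0)
                return ret;

        return status;
}

/* Poll until ACT is handled, the read fails, or roughly 3 seconds pass. */
static int wait_for_act_handled(struct drm_dp_aux *aux)
{
        int status, ret;

        ret = readx_poll_timeout(read_act_status, aux, status,
                                 (status & DP_PAYLOAD_ACT_HANDLED) || status < 0,
                                 200, 3000000);
        if (status < 0)
                return status;

        return ret;     /* 0 on success, -ETIMEDOUT on timeout */
}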
*/ int drm_gem_open_ioctl(struct drm_device *dev, void *data, @@ -899,14 +896,15 @@ drm_gem_open_ioctl(struct drm_device *dev, void *data, /* drm_gem_handle_create_tail unlocks dev->object_name_lock. */ ret = drm_gem_handle_create_tail(file_priv, obj, &handle); - drm_gem_object_put_unlocked(obj); if (ret) - return ret; + goto err; args->handle = handle; args->size = obj->size; - return 0; +err: + drm_gem_object_put_unlocked(obj); + return ret; } /** diff --git a/drivers/gpu/drm/drm_mipi_dbi.c b/drivers/gpu/drm/drm_mipi_dbi.c index a05e64e3d80b..4042f5b39765 100644 --- a/drivers/gpu/drm/drm_mipi_dbi.c +++ b/drivers/gpu/drm/drm_mipi_dbi.c @@ -937,7 +937,7 @@ static int mipi_dbi_spi1_transfer(struct mipi_dbi *dbi, int dc, } } - tr.len = chunk; + tr.len = chunk * 2; len -= chunk; ret = spi_sync(spi, &m); diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c index bd2498bbd74a..b99f96dcc6f1 100644 --- a/drivers/gpu/drm/drm_mipi_dsi.c +++ b/drivers/gpu/drm/drm_mipi_dsi.c @@ -1029,11 +1029,11 @@ EXPORT_SYMBOL(mipi_dsi_dcs_set_pixel_format); */ int mipi_dsi_dcs_set_tear_scanline(struct mipi_dsi_device *dsi, u16 scanline) { - u8 payload[3] = { MIPI_DCS_SET_TEAR_SCANLINE, scanline >> 8, - scanline & 0xff }; + u8 payload[2] = { scanline >> 8, scanline & 0xff }; ssize_t err; - err = mipi_dsi_generic_write(dsi, payload, sizeof(payload)); + err = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_TEAR_SCANLINE, payload, + sizeof(payload)); if (err < 0) return err; diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c index ffd95bfeaa94..58f5dc2f6dd5 100644 --- a/drivers/gpu/drm/drm_panel_orientation_quirks.c +++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c @@ -30,12 +30,6 @@ struct drm_dmi_panel_orientation_data { int orientation; }; -static const struct drm_dmi_panel_orientation_data acer_s1003 = { - .width = 800, - .height = 1280, - .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP, -}; - static const struct drm_dmi_panel_orientation_data asus_t100ha = { .width = 800, .height = 1280, @@ -114,13 +108,25 @@ static const struct dmi_system_id orientation_data[] = { DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Acer"), DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "One S1003"), }, - .driver_data = (void *)&acer_s1003, + .driver_data = (void *)&lcd800x1280_rightside_up, }, { /* Asus T100HA */ .matches = { DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T100HAN"), }, .driver_data = (void *)&asus_t100ha, + }, { /* Asus T101HA */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T101HA"), + }, + .driver_data = (void *)&lcd800x1280_rightside_up, + }, { /* Asus T103HAF */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T103HAF"), + }, + .driver_data = (void *)&lcd800x1280_rightside_up, }, { /* GPD MicroPC (generic strings, also match on bios date) */ .matches = { DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Default string"), diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c index dd2bc85f43cc..4fd2f6cd03c1 100644 --- a/drivers/gpu/drm/drm_sysfs.c +++ b/drivers/gpu/drm/drm_sysfs.c @@ -293,9 +293,6 @@ int drm_sysfs_connector_add(struct drm_connector *connector) return PTR_ERR(connector->kdev); } - /* Let userspace know we have a new connector */ - drm_sysfs_hotplug_event(dev); - if (connector->ddc) return sysfs_create_link(&connector->kdev->kobj, &connector->ddc->dev.kobj, "ddc"); diff --git 
a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c index 7e4e2959bf4f..0c9c40720ca9 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c @@ -12,6 +12,7 @@ #include "common.xml.h" #include "state.xml.h" +#include "state_blt.xml.h" #include "state_hi.xml.h" #include "state_3d.xml.h" #include "cmdstream.xml.h" @@ -233,6 +234,8 @@ void etnaviv_buffer_end(struct etnaviv_gpu *gpu) struct etnaviv_cmdbuf *buffer = &gpu->buffer; unsigned int waitlink_offset = buffer->user_size - 16; u32 link_target, flush = 0; + bool has_blt = !!(gpu->identity.minor_features5 & + chipMinorFeatures5_BLT_ENGINE); lockdep_assert_held(&gpu->lock); @@ -248,16 +251,38 @@ void etnaviv_buffer_end(struct etnaviv_gpu *gpu) if (flush) { unsigned int dwords = 7; + if (has_blt) + dwords += 10; + link_target = etnaviv_buffer_reserve(gpu, buffer, dwords); CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE); CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE); + if (has_blt) { + CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x1); + CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_BLT); + CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_BLT); + CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x0); + } CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE, flush); - if (gpu->exec_state == ETNA_PIPE_3D) - CMD_LOAD_STATE(buffer, VIVS_TS_FLUSH_CACHE, - VIVS_TS_FLUSH_CACHE_FLUSH); + if (gpu->exec_state == ETNA_PIPE_3D) { + if (has_blt) { + CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x1); + CMD_LOAD_STATE(buffer, VIVS_BLT_SET_COMMAND, 0x1); + CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x0); + } else { + CMD_LOAD_STATE(buffer, VIVS_TS_FLUSH_CACHE, + VIVS_TS_FLUSH_CACHE_FLUSH); + } + } CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE); CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE); + if (has_blt) { + CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x1); + CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_BLT); + CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_BLT); + CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x0); + } CMD_END(buffer); etnaviv_buffer_replace_wait(buffer, waitlink_offset, @@ -323,6 +348,8 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state, bool switch_mmu_context = gpu->mmu_context != mmu_context; unsigned int new_flush_seq = READ_ONCE(gpu->mmu_context->flush_seq); bool need_flush = switch_mmu_context || gpu->flush_seq != new_flush_seq; + bool has_blt = !!(gpu->identity.minor_features5 & + chipMinorFeatures5_BLT_ENGINE); lockdep_assert_held(&gpu->lock); @@ -433,6 +460,15 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state, * 2 semaphore stall + 1 event + 1 wait + 1 link. */ return_dwords = 7; + + /* + * When the BLT engine is present we need 6 more dwords in the return + * target: 3 enable/flush/disable + 4 enable/semaphore stall/disable, + * but we don't need the normal TS flush state. 
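The dword accounting in these hunks follows the usual command-stream discipline: count how many dwords the flush will occupy for the hardware at hand, reserve exactly that much, then emit. Isolating just the counting step for the buffer_end() path above (the emit side stays with the driver's CMD_* macros):

#include <linux/types.h>

/* Dwords needed for the end-of-buffer flush, per the hunk above: 7 for the
 * base PE semaphore/stall, cache flush and END, plus 10 more when the BLT
 * engine has to be enabled, stalled on and disabled around it. */
static unsigned int end_flush_dwords(bool has_blt)
{
        unsigned int dwords = 7;

        if (has_blt)
                dwords += 10;

        return dwords;
}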
+ */ + if (has_blt) + return_dwords += 6; + return_target = etnaviv_buffer_reserve(gpu, buffer, return_dwords); CMD_LINK(cmdbuf, return_dwords, return_target); @@ -447,11 +483,25 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state, CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE, VIVS_GL_FLUSH_CACHE_DEPTH | VIVS_GL_FLUSH_CACHE_COLOR); - CMD_LOAD_STATE(buffer, VIVS_TS_FLUSH_CACHE, - VIVS_TS_FLUSH_CACHE_FLUSH); + if (has_blt) { + CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x1); + CMD_LOAD_STATE(buffer, VIVS_BLT_SET_COMMAND, 0x1); + CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x0); + } else { + CMD_LOAD_STATE(buffer, VIVS_TS_FLUSH_CACHE, + VIVS_TS_FLUSH_CACHE_FLUSH); + } } CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE); CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE); + + if (has_blt) { + CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x1); + CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_BLT); + CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_BLT); + CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x0); + } + CMD_LOAD_STATE(buffer, VIVS_GL_EVENT, VIVS_GL_EVENT_EVENT_ID(event) | VIVS_GL_EVENT_FROM_PE); CMD_WAIT(buffer); diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c index d47d1a8e0219..85de8551ce86 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c @@ -713,7 +713,7 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu) ret = pm_runtime_get_sync(gpu->dev); if (ret < 0) { dev_err(gpu->dev, "Failed to enable GPU power domain\n"); - return ret; + goto pm_put; } etnaviv_hw_identify(gpu); @@ -802,6 +802,7 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu) fail: pm_runtime_mark_last_busy(gpu->dev); +pm_put: pm_runtime_put_autosuspend(gpu->dev); return ret; @@ -842,7 +843,7 @@ int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m) ret = pm_runtime_get_sync(gpu->dev); if (ret < 0) - return ret; + goto pm_put; dma_lo = gpu_read(gpu, VIVS_FE_DMA_LOW); dma_hi = gpu_read(gpu, VIVS_FE_DMA_HIGH); @@ -965,6 +966,7 @@ int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m) ret = 0; pm_runtime_mark_last_busy(gpu->dev); +pm_put: pm_runtime_put_autosuspend(gpu->dev); return ret; @@ -978,7 +980,7 @@ void etnaviv_gpu_recover_hang(struct etnaviv_gpu *gpu) dev_err(gpu->dev, "recover hung GPU!\n"); if (pm_runtime_get_sync(gpu->dev) < 0) - return; + goto pm_put; mutex_lock(&gpu->lock); @@ -997,6 +999,7 @@ void etnaviv_gpu_recover_hang(struct etnaviv_gpu *gpu) mutex_unlock(&gpu->lock); pm_runtime_mark_last_busy(gpu->dev); +pm_put: pm_runtime_put_autosuspend(gpu->dev); } @@ -1269,8 +1272,10 @@ struct dma_fence *etnaviv_gpu_submit(struct etnaviv_gem_submit *submit) if (!submit->runtime_resumed) { ret = pm_runtime_get_sync(gpu->dev); - if (ret < 0) + if (ret < 0) { + pm_runtime_put_noidle(gpu->dev); return NULL; + } submit->runtime_resumed = true; } @@ -1287,6 +1292,7 @@ struct dma_fence *etnaviv_gpu_submit(struct etnaviv_gem_submit *submit) ret = event_alloc(gpu, nr_events, event); if (ret) { DRM_ERROR("no free events\n"); + pm_runtime_put_noidle(gpu->dev); return NULL; } @@ -1457,7 +1463,7 @@ static int etnaviv_gpu_clk_enable(struct etnaviv_gpu *gpu) if (gpu->clk_bus) { ret = clk_prepare_enable(gpu->clk_bus); if (ret) - return ret; + goto disable_clk_reg; } if (gpu->clk_core) { @@ -1480,6 +1486,9 @@ disable_clk_core: disable_clk_bus: if (gpu->clk_bus) clk_disable_unprepare(gpu->clk_bus); +disable_clk_reg: + if (gpu->clk_reg) + clk_disable_unprepare(gpu->clk_reg); return ret; } diff --git 
a/drivers/gpu/drm/etnaviv/state_blt.xml.h b/drivers/gpu/drm/etnaviv/state_blt.xml.h index daae55995def..0e8bcf9dcc93 100644 --- a/drivers/gpu/drm/etnaviv/state_blt.xml.h +++ b/drivers/gpu/drm/etnaviv/state_blt.xml.h @@ -46,6 +46,8 @@ DEALINGS IN THE SOFTWARE. /* This is a cut-down version of the state_blt.xml.h file */ +#define VIVS_BLT_SET_COMMAND 0x000140ac + #define VIVS_BLT_ENABLE 0x000140b8 #define VIVS_BLT_ENABLE_ENABLE 0x00000001 diff --git a/drivers/gpu/drm/exynos/exynos_drm_dma.c b/drivers/gpu/drm/exynos/exynos_drm_dma.c index 619f81435c1b..58b89ec11b0e 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_dma.c +++ b/drivers/gpu/drm/exynos/exynos_drm_dma.c @@ -61,7 +61,7 @@ static int drm_iommu_attach_device(struct drm_device *drm_dev, struct device *subdrv_dev, void **dma_priv) { struct exynos_drm_private *priv = drm_dev->dev_private; - int ret; + int ret = 0; if (get_dma_ops(priv->dma_dev) != get_dma_ops(subdrv_dev)) { DRM_DEV_ERROR(subdrv_dev, "Device %s lacks support for IOMMU\n", @@ -92,7 +92,7 @@ static int drm_iommu_attach_device(struct drm_device *drm_dev, if (ret) clear_dma_max_seg_size(subdrv_dev); - return 0; + return ret; } /* diff --git a/drivers/gpu/drm/exynos/exynos_drm_mic.c b/drivers/gpu/drm/exynos/exynos_drm_mic.c index b78e8c5ba553..2aff986add89 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_mic.c +++ b/drivers/gpu/drm/exynos/exynos_drm_mic.c @@ -268,8 +268,10 @@ static void mic_pre_enable(struct drm_bridge *bridge) goto unlock; ret = pm_runtime_get_sync(mic->dev); - if (ret < 0) + if (ret < 0) { + pm_runtime_put_noidle(mic->dev); goto unlock; + } mic_set_path(mic, 1); diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index 4ab6531a4a74..2efc317c90df 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -1292,8 +1292,7 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp, bool is_tc_port = intel_phy_is_tc(i915, phy); i915_reg_t ch_ctl, ch_data[5]; u32 aux_clock_divider; - enum intel_display_power_domain aux_domain = - intel_aux_power_domain(intel_dig_port); + enum intel_display_power_domain aux_domain; intel_wakeref_t aux_wakeref; intel_wakeref_t pps_wakeref; int i, ret, recv_bytes; @@ -1308,6 +1307,8 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp, if (is_tc_port) intel_tc_port_lock(intel_dig_port); + aux_domain = intel_aux_power_domain(intel_dig_port); + aux_wakeref = intel_display_power_get(i915, aux_domain); pps_wakeref = pps_lock(intel_dp); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c index 4c4954e8ce0a..3f875aebbd23 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c @@ -36,7 +36,6 @@ static int shmem_get_pages(struct drm_i915_gem_object *obj) unsigned long last_pfn = 0; /* suppress gcc warning */ unsigned int max_segment = i915_sg_segment_size(); unsigned int sg_page_sizes; - struct pagevec pvec; gfp_t noreclaim; int ret; @@ -188,13 +187,17 @@ err_sg: sg_mark_end(sg); err_pages: mapping_clear_unevictable(mapping); - pagevec_init(&pvec); - for_each_sgt_page(page, sgt_iter, st) { - if (!pagevec_add(&pvec, page)) + if (sg != st->sgl) { + struct pagevec pvec; + + pagevec_init(&pvec); + for_each_sgt_page(page, sgt_iter, st) { + if (!pagevec_add(&pvec, page)) + check_release_pagevec(&pvec); + } + if (pagevec_count(&pvec)) check_release_pagevec(&pvec); } - if (pagevec_count(&pvec)) - check_release_pagevec(&pvec); sg_free_table(st); kfree(st); diff --git 
a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index 66f6d1a897f2..c169f0f70f3a 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -3751,6 +3751,7 @@ intel_execlists_create_virtual(struct i915_gem_context *ctx, intel_engine_init_active(&ve->base, ENGINE_VIRTUAL); intel_engine_init_execlists(&ve->base); + ve->base.breadcrumbs.irq_armed = true; /* fake HW, used for irq_work */ ve->base.cops = &virtual_context_ops; ve->base.request_alloc = execlists_request_alloc; diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c index fc29a3705354..56cd14cacf5e 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c @@ -963,18 +963,6 @@ static int cmd_handler_lri(struct parser_exec_state *s) int i, ret = 0; int cmd_len = cmd_length(s); struct intel_gvt *gvt = s->vgpu->gvt; - u32 valid_len = CMD_LEN(1); - - /* - * Official intel docs are somewhat sloppy , check the definition of - * MI_LOAD_REGISTER_IMM. - */ - #define MAX_VALID_LEN 127 - if ((cmd_len < valid_len) || (cmd_len > MAX_VALID_LEN)) { - gvt_err("len is not valid: len=%u valid_len=%u\n", - cmd_len, valid_len); - return -EFAULT; - } for (i = 1; i < cmd_len; i += 2) { if (IS_BROADWELL(gvt->dev_priv) && s->ring_id != RCS0) { diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index 25f78196b964..689b07bc91c4 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -3103,8 +3103,8 @@ static int init_skl_mmio_info(struct intel_gvt *gvt) MMIO_DFH(GEN9_WM_CHICKEN3, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); - MMIO_D(GAMT_CHKN_BIT_REG, D_KBL); - MMIO_D(GEN9_CTX_PREEMPT_REG, D_KBL | D_SKL); + MMIO_D(GAMT_CHKN_BIT_REG, D_KBL | D_CFL); + MMIO_D(GEN9_CTX_PREEMPT_REG, D_SKL_PLUS); return 0; } diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c index f24096e27bef..a9a69760c18d 100644 --- a/drivers/gpu/drm/i915/i915_cmd_parser.c +++ b/drivers/gpu/drm/i915/i915_cmd_parser.c @@ -572,6 +572,9 @@ struct drm_i915_reg_descriptor { #define REG32(_reg, ...) \ { .addr = (_reg), __VA_ARGS__ } +#define REG32_IDX(_reg, idx) \ + { .addr = _reg(idx) } + /* * Convenience macro for adding 64-bit registers. * @@ -669,6 +672,7 @@ static const struct drm_i915_reg_descriptor gen9_blt_regs[] = { REG64_IDX(RING_TIMESTAMP, BSD_RING_BASE), REG32(BCS_SWCTRL), REG64_IDX(RING_TIMESTAMP, BLT_RING_BASE), + REG32_IDX(RING_CTX_TIMESTAMP, BLT_RING_BASE), REG64_IDX(BCS_GPR, 0), REG64_IDX(BCS_GPR, 1), REG64_IDX(BCS_GPR, 2), @@ -1203,6 +1207,12 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj, return dst; } +static inline bool cmd_desc_is(const struct drm_i915_cmd_descriptor * const desc, + const u32 cmd) +{ + return desc->cmd.value == (cmd & desc->cmd.mask); +} + static bool check_cmd(const struct intel_engine_cs *engine, const struct drm_i915_cmd_descriptor *desc, const u32 *cmd, u32 length) @@ -1241,19 +1251,19 @@ static bool check_cmd(const struct intel_engine_cs *engine, * allowed mask/value pair given in the whitelist entry. 
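cmd_desc_is() above matches a command header against a descriptor by masking it first rather than comparing the raw word to a constant, which matters for opcodes like MI_LOAD_REGISTER_IMM(n) that encode a length in their low bits. The idea in isolation, with a simplified descriptor struct standing in for the driver's real one:

#include <linux/types.h>

struct cmd_pattern {
        u32 mask;       /* bits that identify the command */
        u32 value;      /* what those bits must equal */
};

static bool cmd_matches(const struct cmd_pattern *p, u32 cmd_header)
{
        /* Only the opcode bits selected by the mask take part in the match,
         * so variable-length encodings of the same command still hit. */
        return p->value == (cmd_header & p->mask);
}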
*/ if (reg->mask) { - if (desc->cmd.value == MI_LOAD_REGISTER_MEM) { + if (cmd_desc_is(desc, MI_LOAD_REGISTER_MEM)) { DRM_DEBUG_DRIVER("CMD: Rejected LRM to masked register 0x%08X\n", reg_addr); return false; } - if (desc->cmd.value == MI_LOAD_REGISTER_REG) { + if (cmd_desc_is(desc, MI_LOAD_REGISTER_REG)) { DRM_DEBUG_DRIVER("CMD: Rejected LRR to masked register 0x%08X\n", reg_addr); return false; } - if (desc->cmd.value == MI_LOAD_REGISTER_IMM(1) && + if (cmd_desc_is(desc, MI_LOAD_REGISTER_IMM(1)) && (offset + 2 > length || (cmd[offset + 1] & reg->mask) != reg->value)) { DRM_DEBUG_DRIVER("CMD: Rejected LRI to masked register 0x%08X\n", @@ -1484,7 +1494,7 @@ int intel_engine_cmd_parser(struct i915_gem_context *ctx, goto err; } - if (desc->cmd.value == MI_BATCH_BUFFER_START) { + if (cmd_desc_is(desc, MI_BATCH_BUFFER_START)) { ret = check_bbstart(ctx, cmd, offset, length, batch_len, batch_start, shadow_batch_start); diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 37e3dd3c1a9d..4193a9970251 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -3500,6 +3500,7 @@ static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv) val = I915_READ(GEN11_DE_HPD_IMR); val &= ~hotplug_irqs; + val |= ~enabled_irqs & hotplug_irqs; I915_WRITE(GEN11_DE_HPD_IMR, val); POSTING_READ(GEN11_DE_HPD_IMR); diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c index 6a88db291252..b3fd6ff665da 100644 --- a/drivers/gpu/drm/i915/i915_sw_fence.c +++ b/drivers/gpu/drm/i915/i915_sw_fence.c @@ -158,9 +158,13 @@ static void __i915_sw_fence_wake_up_all(struct i915_sw_fence *fence, do { list_for_each_entry_safe(pos, next, &x->head, entry) { - pos->func(pos, - TASK_NORMAL, fence->error, - &extra); + int wake_flags; + + wake_flags = fence->error; + if (pos->func == autoremove_wake_function) + wake_flags = 0; + + pos->func(pos, TASK_NORMAL, wake_flags, &extra); } if (list_empty(&extra)) diff --git a/drivers/gpu/drm/imx/dw_hdmi-imx.c b/drivers/gpu/drm/imx/dw_hdmi-imx.c index 6f58f322bc11..129b3189117f 100644 --- a/drivers/gpu/drm/imx/dw_hdmi-imx.c +++ b/drivers/gpu/drm/imx/dw_hdmi-imx.c @@ -395,9 +395,8 @@ static int dw_hdmi_imx_bind(struct device *dev, struct device *master, if (!pdev->dev.of_node) return -ENODEV; - hdmi = devm_kzalloc(&pdev->dev, sizeof(*hdmi), GFP_KERNEL); - if (!hdmi) - return -ENOMEM; + hdmi = dev_get_drvdata(dev); + memset(hdmi, 0, sizeof(*hdmi)); match = of_match_node(dw_hdmi_imx_dt_ids, pdev->dev.of_node); if (!match) @@ -431,8 +430,6 @@ static int dw_hdmi_imx_bind(struct device *dev, struct device *master, drm_encoder_init(drm, encoder, &dw_hdmi_imx_encoder_funcs, DRM_MODE_ENCODER_TMDS, NULL); - platform_set_drvdata(pdev, hdmi); - if (of_device_is_compatible(pdev->dev.of_node, "fsl,imx8mp-hdmi")) { ret = imx8mp_hdmimix_setup(hdmi); if (ret < 0) @@ -468,6 +465,14 @@ static const struct component_ops dw_hdmi_imx_ops = { static int dw_hdmi_imx_probe(struct platform_device *pdev) { + struct imx_hdmi *hdmi; + + hdmi = devm_kzalloc(&pdev->dev, sizeof(*hdmi), GFP_KERNEL); + if (!hdmi) + return -ENOMEM; + + platform_set_drvdata(pdev, hdmi); + return component_add(&pdev->dev, &dw_hdmi_imx_ops); } diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c index 79747d36ac12..e29a5e882005 100644 --- a/drivers/gpu/drm/imx/imx-drm-core.c +++ b/drivers/gpu/drm/imx/imx-drm-core.c @@ -323,9 +323,10 @@ static void imx_drm_unbind(struct device *dev) drm_kms_helper_poll_fini(drm); + 
component_unbind_all(drm->dev, drm); + drm_mode_config_cleanup(drm); - component_unbind_all(drm->dev, drm); dev_set_drvdata(dev, NULL); drm_dev_put(drm); diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c index f5a74a6bbe2b..3357479c5b5a 100644 --- a/drivers/gpu/drm/imx/imx-ldb.c +++ b/drivers/gpu/drm/imx/imx-ldb.c @@ -241,6 +241,13 @@ static void imx_ldb_encoder_disable(struct drm_encoder *encoder) struct ldb *ldb = &imx_ldb->base; int mux, ret; + if (imx_ldb_ch == &imx_ldb->channel[0] || ldb->dual) + ldb->ldb_ctrl &= ~LDB_CH0_MODE_EN_MASK; + if (imx_ldb_ch == &imx_ldb->channel[1] || ldb->dual) + ldb->ldb_ctrl &= ~LDB_CH1_MODE_EN_MASK; + + regmap_write(ldb->regmap, IOMUXC_GPR2, ldb->ldb_ctrl); + if (ldb->dual) { clk_disable_unprepare(imx_ldb->clk[0]); clk_disable_unprepare(imx_ldb->clk[1]); @@ -560,16 +567,15 @@ static const struct component_ops imx_ldb_ops = { static int imx_ldb_probe(struct platform_device *pdev) { - struct device *dev = &pdev->dev; struct imx_ldb *imx_ldb; - imx_ldb = devm_kzalloc(dev, sizeof(*imx_ldb), GFP_KERNEL); + imx_ldb = devm_kzalloc(&pdev->dev, sizeof(*imx_ldb), GFP_KERNEL); if (!imx_ldb) return -ENOMEM; - dev_set_drvdata(dev, imx_ldb); + platform_set_drvdata(pdev, imx_ldb); - return component_add(dev, &imx_ldb_ops); + return component_add(&pdev->dev, &imx_ldb_ops); } static int imx_ldb_remove(struct platform_device *pdev) diff --git a/drivers/gpu/drm/imx/imx-tve.c b/drivers/gpu/drm/imx/imx-tve.c index 5bbfaa2cd0f4..f91c3eb7697b 100644 --- a/drivers/gpu/drm/imx/imx-tve.c +++ b/drivers/gpu/drm/imx/imx-tve.c @@ -494,6 +494,13 @@ static int imx_tve_register(struct drm_device *drm, struct imx_tve *tve) return 0; } +static void imx_tve_disable_regulator(void *data) +{ + struct imx_tve *tve = data; + + regulator_disable(tve->dac_reg); +} + static bool imx_tve_readable_reg(struct device *dev, unsigned int reg) { return (reg % 4 == 0) && (reg <= 0xdc); @@ -546,9 +553,8 @@ static int imx_tve_bind(struct device *dev, struct device *master, void *data) int irq; int ret; - tve = devm_kzalloc(dev, sizeof(*tve), GFP_KERNEL); - if (!tve) - return -ENOMEM; + tve = dev_get_drvdata(dev); + memset(tve, 0, sizeof(*tve)); tve->dev = dev; spin_lock_init(&tve->lock); @@ -618,6 +624,9 @@ static int imx_tve_bind(struct device *dev, struct device *master, void *data) ret = regulator_enable(tve->dac_reg); if (ret) return ret; + ret = devm_add_action_or_reset(dev, imx_tve_disable_regulator, tve); + if (ret) + return ret; } tve->clk = devm_clk_get(dev, "tve"); @@ -659,27 +668,23 @@ static int imx_tve_bind(struct device *dev, struct device *master, void *data) if (ret) return ret; - dev_set_drvdata(dev, tve); - return 0; } -static void imx_tve_unbind(struct device *dev, struct device *master, - void *data) -{ - struct imx_tve *tve = dev_get_drvdata(dev); - - if (!IS_ERR(tve->dac_reg)) - regulator_disable(tve->dac_reg); -} - static const struct component_ops imx_tve_ops = { .bind = imx_tve_bind, - .unbind = imx_tve_unbind, }; static int imx_tve_probe(struct platform_device *pdev) { + struct imx_tve *tve; + + tve = devm_kzalloc(&pdev->dev, sizeof(*tve), GFP_KERNEL); + if (!tve) + return -ENOMEM; + + platform_set_drvdata(pdev, tve); + return component_add(&pdev->dev, &imx_tve_ops); } diff --git a/drivers/gpu/drm/imx/ipuv3/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3/ipuv3-crtc.c index 4587d5e60a72..5592fddb2529 100644 --- a/drivers/gpu/drm/imx/ipuv3/ipuv3-crtc.c +++ b/drivers/gpu/drm/imx/ipuv3/ipuv3-crtc.c @@ -441,9 +441,8 @@ static int ipu_drm_bind(struct device *dev, 
struct device *master, void *data) struct ipu_crtc *ipu_crtc; int ret; - ipu_crtc = devm_kzalloc(dev, sizeof(*ipu_crtc), GFP_KERNEL); - if (!ipu_crtc) - return -ENOMEM; + ipu_crtc = dev_get_drvdata(dev); + memset(ipu_crtc, 0, sizeof(*ipu_crtc)); ipu_crtc->dev = dev; @@ -455,8 +454,6 @@ static int ipu_drm_bind(struct device *dev, struct device *master, void *data) drm->mode_config.helper_private = &ipuv3_drm_mode_config_helpers; drm->mode_config.allow_fb_modifiers = true; - dev_set_drvdata(dev, ipu_crtc); - return 0; } @@ -479,6 +476,7 @@ static const struct component_ops ipu_crtc_ops = { static int ipu_drm_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; + struct ipu_crtc *ipu_crtc; int ret; if (!dev->platform_data) @@ -488,6 +486,12 @@ static int ipu_drm_probe(struct platform_device *pdev) if (ret) return ret; + ipu_crtc = devm_kzalloc(dev, sizeof(*ipu_crtc), GFP_KERNEL); + if (!ipu_crtc) + return -ENOMEM; + + dev_set_drvdata(dev, ipu_crtc); + return component_add(dev, &ipu_crtc_ops); } diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c index e7ce17503ae1..be55548f352a 100644 --- a/drivers/gpu/drm/imx/parallel-display.c +++ b/drivers/gpu/drm/imx/parallel-display.c @@ -204,9 +204,8 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data) u32 bus_format = 0; const char *fmt; - imxpd = devm_kzalloc(dev, sizeof(*imxpd), GFP_KERNEL); - if (!imxpd) - return -ENOMEM; + imxpd = dev_get_drvdata(dev); + memset(imxpd, 0, sizeof(*imxpd)); edidp = of_get_property(np, "edid", &imxpd->edid_len); if (edidp) @@ -236,8 +235,6 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data) if (ret) return ret; - dev_set_drvdata(dev, imxpd); - return 0; } @@ -259,6 +256,14 @@ static const struct component_ops imx_pd_ops = { static int imx_pd_probe(struct platform_device *pdev) { + struct imx_parallel_display *imxpd; + + imxpd = devm_kzalloc(&pdev->dev, sizeof(*imxpd), GFP_KERNEL); + if (!imxpd) + return -ENOMEM; + + platform_set_drvdata(pdev, imxpd); + return component_add(&pdev->dev, &imx_pd_ops); } diff --git a/drivers/gpu/drm/ingenic/ingenic-drm.c b/drivers/gpu/drm/ingenic/ingenic-drm.c index 376fca6ca9f4..7e6179fe63f8 100644 --- a/drivers/gpu/drm/ingenic/ingenic-drm.c +++ b/drivers/gpu/drm/ingenic/ingenic-drm.c @@ -375,9 +375,9 @@ static void ingenic_drm_plane_atomic_update(struct drm_plane *plane, if (state && state->fb) { addr = drm_fb_cma_get_gem_addr(state->fb, state, 0); - width = state->crtc->state->adjusted_mode.hdisplay; - height = state->crtc->state->adjusted_mode.vdisplay; - cpp = state->fb->format->cpp[plane->index]; + width = state->src_w >> 16; + height = state->src_h >> 16; + cpp = state->fb->format->cpp[0]; priv->dma_hwdesc->addr = addr; priv->dma_hwdesc->cmd = width * height * cpp / 4; diff --git a/drivers/gpu/drm/mcde/mcde_drv.c b/drivers/gpu/drm/mcde/mcde_drv.c index 5649887d2b90..16e5fb9ec784 100644 --- a/drivers/gpu/drm/mcde/mcde_drv.c +++ b/drivers/gpu/drm/mcde/mcde_drv.c @@ -215,7 +215,6 @@ static int mcde_modeset_init(struct drm_device *drm) drm_mode_config_reset(drm); drm_kms_helper_poll_init(drm); - drm_fbdev_generic_setup(drm, 32); return 0; @@ -282,6 +281,8 @@ static int mcde_drm_bind(struct device *dev) if (ret < 0) goto unbind; + drm_fbdev_generic_setup(drm, 32); + return 0; unbind: diff --git a/drivers/gpu/drm/mcde/mcde_dsi.c b/drivers/gpu/drm/mcde/mcde_dsi.c index 35bb825d1918..8c8c92fc82e9 100644 --- a/drivers/gpu/drm/mcde/mcde_dsi.c +++ 
b/drivers/gpu/drm/mcde/mcde_dsi.c @@ -940,10 +940,9 @@ static int mcde_dsi_bind(struct device *dev, struct device *master, panel = NULL; bridge = of_drm_find_bridge(child); - if (IS_ERR(bridge)) { - dev_err(dev, "failed to find bridge (%ld)\n", - PTR_ERR(bridge)); - return PTR_ERR(bridge); + if (!bridge) { + dev_err(dev, "failed to find bridge\n"); + return -EINVAL; } } } diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c index be6d95c5ff25..48de07e9059e 100644 --- a/drivers/gpu/drm/mediatek/mtk_dpi.c +++ b/drivers/gpu/drm/mediatek/mtk_dpi.c @@ -10,7 +10,9 @@ #include #include #include +#include #include +#include #include #include @@ -73,6 +75,9 @@ struct mtk_dpi { enum mtk_dpi_out_yc_map yc_map; enum mtk_dpi_out_bit_num bit_num; enum mtk_dpi_out_channel_swap channel_swap; + struct pinctrl *pinctrl; + struct pinctrl_state *pins_gpio; + struct pinctrl_state *pins_dpi; int refcount; }; @@ -378,6 +383,9 @@ static void mtk_dpi_power_off(struct mtk_dpi *dpi) if (--dpi->refcount != 0) return; + if (dpi->pinctrl && dpi->pins_gpio) + pinctrl_select_state(dpi->pinctrl, dpi->pins_gpio); + mtk_dpi_disable(dpi); clk_disable_unprepare(dpi->pixel_clk); clk_disable_unprepare(dpi->engine_clk); @@ -402,6 +410,9 @@ static int mtk_dpi_power_on(struct mtk_dpi *dpi) goto err_pixel; } + if (dpi->pinctrl && dpi->pins_dpi) + pinctrl_select_state(dpi->pinctrl, dpi->pins_dpi); + mtk_dpi_enable(dpi); return 0; @@ -689,6 +700,26 @@ static int mtk_dpi_probe(struct platform_device *pdev) dpi->dev = dev; dpi->conf = (struct mtk_dpi_conf *)of_device_get_match_data(dev); + dpi->pinctrl = devm_pinctrl_get(&pdev->dev); + if (IS_ERR(dpi->pinctrl)) { + dpi->pinctrl = NULL; + dev_dbg(&pdev->dev, "Cannot find pinctrl!\n"); + } + if (dpi->pinctrl) { + dpi->pins_gpio = pinctrl_lookup_state(dpi->pinctrl, "sleep"); + if (IS_ERR(dpi->pins_gpio)) { + dpi->pins_gpio = NULL; + dev_dbg(&pdev->dev, "Cannot find pinctrl idle!\n"); + } + if (dpi->pins_gpio) + pinctrl_select_state(dpi->pinctrl, dpi->pins_gpio); + + dpi->pins_dpi = pinctrl_lookup_state(dpi->pinctrl, "default"); + if (IS_ERR(dpi->pins_dpi)) { + dpi->pins_dpi = NULL; + dev_dbg(&pdev->dev, "Cannot find pinctrl active!\n"); + } + } mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); dpi->regs = devm_ioremap_resource(dev, mem); if (IS_ERR(dpi->regs)) { diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c index 352b81a7a670..f98bb2e26372 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c @@ -594,8 +594,13 @@ err_pm: pm_runtime_disable(dev); err_node: of_node_put(private->mutex_node); - for (i = 0; i < DDP_COMPONENT_ID_MAX; i++) + for (i = 0; i < DDP_COMPONENT_ID_MAX; i++) { of_node_put(private->comp_node[i]); + if (private->ddp_comp[i]) { + put_device(private->ddp_comp[i]->larb_dev); + private->ddp_comp[i] = NULL; + } + } return ret; } diff --git a/drivers/gpu/drm/mediatek/mtk_drm_plane.c b/drivers/gpu/drm/mediatek/mtk_drm_plane.c index 584a9ecadce6..b7592b16ea94 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_plane.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_plane.c @@ -101,6 +101,16 @@ static int mtk_plane_atomic_check(struct drm_plane *plane, true, true); } +static void mtk_plane_atomic_disable(struct drm_plane *plane, + struct drm_plane_state *old_state) +{ + struct mtk_plane_state *state = to_mtk_plane_state(plane->state); + + state->pending.enable = false; + wmb(); /* Make sure the above parameter is set before update */ + state->pending.dirty = true; +} + 
static void mtk_plane_atomic_update(struct drm_plane *plane, struct drm_plane_state *old_state) { @@ -115,6 +125,11 @@ static void mtk_plane_atomic_update(struct drm_plane *plane, if (!crtc || WARN_ON(!fb)) return; + if (!plane->state->visible) { + mtk_plane_atomic_disable(plane, old_state); + return; + } + gem = fb->obj[0]; mtk_gem = to_mtk_gem_obj(gem); addr = mtk_gem->dma_addr; @@ -136,16 +151,6 @@ static void mtk_plane_atomic_update(struct drm_plane *plane, state->pending.dirty = true; } -static void mtk_plane_atomic_disable(struct drm_plane *plane, - struct drm_plane_state *old_state) -{ - struct mtk_plane_state *state = to_mtk_plane_state(plane->state); - - state->pending.enable = false; - wmb(); /* Make sure the above parameter is set before update */ - state->pending.dirty = true; -} - static const struct drm_plane_helper_funcs mtk_plane_helper_funcs = { .prepare_fb = drm_gem_fb_prepare_fb, .atomic_check = mtk_plane_atomic_check, diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c index ce91b61364eb..6b22fd63c3f5 100644 --- a/drivers/gpu/drm/mediatek/mtk_hdmi.c +++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c @@ -1482,25 +1482,30 @@ static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi, dev_err(dev, "Failed to get system configuration registers: %d\n", ret); - return ret; + goto put_device; } hdmi->sys_regmap = regmap; mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); hdmi->regs = devm_ioremap_resource(dev, mem); - if (IS_ERR(hdmi->regs)) - return PTR_ERR(hdmi->regs); + if (IS_ERR(hdmi->regs)) { + ret = PTR_ERR(hdmi->regs); + goto put_device; + } remote = of_graph_get_remote_node(np, 1, 0); - if (!remote) - return -EINVAL; + if (!remote) { + ret = -EINVAL; + goto put_device; + } if (!of_device_is_compatible(remote, "hdmi-connector")) { hdmi->next_bridge = of_drm_find_bridge(remote); if (!hdmi->next_bridge) { dev_err(dev, "Waiting for external bridge\n"); of_node_put(remote); - return -EPROBE_DEFER; + ret = -EPROBE_DEFER; + goto put_device; } } @@ -1509,7 +1514,8 @@ static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi, dev_err(dev, "Failed to find ddc-i2c-bus node in %pOF\n", remote); of_node_put(remote); - return -EINVAL; + ret = -EINVAL; + goto put_device; } of_node_put(remote); @@ -1517,10 +1523,14 @@ static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi, of_node_put(i2c_np); if (!hdmi->ddc_adpt) { dev_err(dev, "Failed to get ddc i2c adapter by node\n"); - return -EINVAL; + ret = -EINVAL; + goto put_device; } return 0; +put_device: + put_device(hdmi->cec_dev); + return ret; } /* diff --git a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c index 1f83bc18d500..80f3b1da9fc2 100644 --- a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c @@ -164,6 +164,11 @@ static int a2xx_hw_init(struct msm_gpu *gpu) if (ret) return ret; + gpu_write(gpu, REG_AXXX_CP_RB_CNTL, + MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE); + + gpu_write(gpu, REG_AXXX_CP_RB_BASE, lower_32_bits(gpu->rb[0]->iova)); + /* NOTE: PM4/micro-engine firmware registers look to be the same * for a2xx and a3xx.. we could possibly push that part down to * adreno_gpu base class. 
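The mtk_hdmi_dt_parse_pdata() changes above turn direct returns into goto put_device so the reference held on the CEC device is dropped on every failure path. A compact sketch of that unwind pattern, with stand-in setup steps (map_registers() and find_bridge() are illustrative, not the driver's functions):

#include <linux/device.h>
#include <linux/errno.h>

static int map_registers(struct device *dev)    { return 0; }
static int find_bridge(struct device *dev)      { return 0; }

static int parse_pdata(struct device *dev, struct device *cec_dev)
{
        int ret;

        get_device(cec_dev);            /* reference that must be balanced */

        ret = map_registers(dev);
        if (ret)
                goto put_device;

        ret = find_bridge(dev);
        if (ret)
                goto put_device;

        return 0;                       /* success keeps the reference */

put_device:
        put_device(cec_dev);            /* drop it on any failure */
        return ret;
}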
Or push both PM4 and PFP but diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c index 5f7e98028eaf..eeba2deeca1e 100644 --- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c @@ -215,6 +215,16 @@ static int a3xx_hw_init(struct msm_gpu *gpu) if (ret) return ret; + /* + * Use the default ringbuffer size and block size but disable the RPTR + * shadow + */ + gpu_write(gpu, REG_AXXX_CP_RB_CNTL, + MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE); + + /* Set the ringbuffer address */ + gpu_write(gpu, REG_AXXX_CP_RB_BASE, lower_32_bits(gpu->rb[0]->iova)); + /* setup access protection: */ gpu_write(gpu, REG_A3XX_CP_PROTECT_CTRL, 0x00000007); diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c index ab2b752566d8..05cfa81d4c54 100644 --- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c @@ -265,6 +265,16 @@ static int a4xx_hw_init(struct msm_gpu *gpu) if (ret) return ret; + /* + * Use the default ringbuffer size and block size but disable the RPTR + * shadow + */ + gpu_write(gpu, REG_A4XX_CP_RB_CNTL, + MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE); + + /* Set the ringbuffer address */ + gpu_write(gpu, REG_A4XX_CP_RB_BASE, lower_32_bits(gpu->rb[0]->iova)); + /* Load PM4: */ ptr = (uint32_t *)(adreno_gpu->fw[ADRENO_FW_PM4]->data); len = adreno_gpu->fw[ADRENO_FW_PM4]->size / 4; diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c index 99cd6e62a971..24b55103bfe0 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c @@ -677,14 +677,21 @@ static int a5xx_hw_init(struct msm_gpu *gpu) if (ret) return ret; - a5xx_preempt_hw_init(gpu); - a5xx_gpmu_ucode_init(gpu); ret = a5xx_ucode_init(gpu); if (ret) return ret; + /* Set the ringbuffer address */ + gpu_write64(gpu, REG_A5XX_CP_RB_BASE, REG_A5XX_CP_RB_BASE_HI, + gpu->rb[0]->iova); + + gpu_write(gpu, REG_A5XX_CP_RB_CNTL, + MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE); + + a5xx_preempt_hw_init(gpu); + /* Disable the interrupts through the initial bringup stage */ gpu_write(gpu, REG_A5XX_RBBM_INT_0_MASK, A5XX_INT_MASK); @@ -1359,6 +1366,10 @@ static unsigned long a5xx_gpu_busy(struct msm_gpu *gpu) { u64 busy_cycles, busy_time; + /* Only read the gpu busy if the hardware is already active */ + if (pm_runtime_get_if_in_use(&gpu->pdev->dev) == 0) + return 0; + busy_cycles = gpu_read64(gpu, REG_A5XX_RBBM_PERFCTR_RBBM_0_LO, REG_A5XX_RBBM_PERFCTR_RBBM_0_HI); @@ -1367,6 +1378,8 @@ static unsigned long a5xx_gpu_busy(struct msm_gpu *gpu) gpu->devfreq.busy_cycles = busy_cycles; + pm_runtime_put(&gpu->pdev->dev); + if (WARN_ON(busy_time > ~0LU)) return ~0LU; @@ -1445,7 +1458,8 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev) check_speed_bin(&pdev->dev); - ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 4); + /* Restricting nr_rings to 1 to temporarily disable preemption */ + ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1); if (ret) { a5xx_destroy(&(a5xx_gpu->base.base)); return ERR_PTR(ret); diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c index 85f14feafdec..40431a09dc97 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c @@ -107,6 +107,13 @@ static void __a6xx_gmu_set_freq(struct a6xx_gmu *gmu, int index) struct msm_gpu *gpu = &adreno_gpu->base; int ret; + /* + * This can get called from devfreq while the hardware is idle. 
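a5xx_gpu_busy() above (and the a6xx GMU path that continues below) wraps the counter read in pm_runtime_get_if_in_use(), which only takes a reference when the device is already runtime-active, so a devfreq poll never powers the GPU back up just to sample a counter. The same guard in a generic form, with readl() standing in for the driver's 64-bit counter read:

#include <linux/io.h>
#include <linux/pm_runtime.h>

/* Read a busy counter without waking an idle device; report 0 instead. */
static u32 read_busy_counter(struct device *dev, void __iomem *counter)
{
        u32 val;

        if (pm_runtime_get_if_in_use(dev) == 0)
                return 0;               /* not active: don't resume it */

        val = readl(counter);

        pm_runtime_put(dev);
        return val;
}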
Don't + * bring up the power if it isn't already active + */ + if (pm_runtime_get_if_in_use(gmu->dev) == 0) + return; + gmu_write(gmu, REG_A6XX_GMU_DCVS_ACK_OPTION, 0); gmu_write(gmu, REG_A6XX_GMU_DCVS_PERF_SETTING, @@ -133,6 +140,7 @@ static void __a6xx_gmu_set_freq(struct a6xx_gmu *gmu, int index) * for now leave it at max so that the performance is nominal. */ icc_set_bw(gpu->icc_path, 0, MBps_to_icc(7216)); + pm_runtime_put(gmu->dev); } void a6xx_gmu_set_freq(struct msm_gpu *gpu, unsigned long freq) @@ -191,12 +199,22 @@ static int a6xx_gmu_start(struct a6xx_gmu *gmu) { int ret; u32 val; + u32 mask, reset_val; + + val = gmu_read(gmu, REG_A6XX_GMU_CM3_DTCM_START + 0xff8); + if (val <= 0x20010004) { + mask = 0xffffffff; + reset_val = 0xbabeface; + } else { + mask = 0x1ff; + reset_val = 0x100; + } gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 1); gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 0); ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, val, - val == 0xbabeface, 100, 10000); + (val & mask) == reset_val, 100, 10000); if (ret) DRM_DEV_ERROR(gmu->dev, "GMU firmware initialization timed out\n"); @@ -705,10 +723,19 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu) /* Turn on the resources */ pm_runtime_get_sync(gmu->dev); + /* + * "enable" the GX power domain which won't actually do anything but it + * will make sure that the refcounting is correct in case we need to + * bring down the GX after a GMU failure + */ + if (!IS_ERR_OR_NULL(gmu->gxpd)) + pm_runtime_get_sync(gmu->gxpd); + /* Use a known rate to bring up the GMU */ clk_set_rate(gmu->core_clk, 200000000); ret = clk_bulk_prepare_enable(gmu->nr_clocks, gmu->clocks); if (ret) { + pm_runtime_put(gmu->gxpd); pm_runtime_put(gmu->dev); return ret; } @@ -744,19 +771,12 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu) /* Set the GPU to the highest power frequency */ __a6xx_gmu_set_freq(gmu, gmu->nr_gpu_freqs - 1); - /* - * "enable" the GX power domain which won't actually do anything but it - * will make sure that the refcounting is correct in case we need to - * bring down the GX after a GMU failure - */ - if (!IS_ERR_OR_NULL(gmu->gxpd)) - pm_runtime_get(gmu->gxpd); - out: /* On failure, shut down the GMU to leave it in a good state */ if (ret) { disable_irq(gmu->gmu_irq); a6xx_rpmh_stop(gmu); + pm_runtime_put(gmu->gxpd); pm_runtime_put(gmu->dev); } diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c index 686c34d706b0..ab75f0309d4b 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c @@ -512,6 +512,13 @@ static int a6xx_hw_init(struct msm_gpu *gpu) if (ret) goto out; + /* Set the ringbuffer address */ + gpu_write64(gpu, REG_A6XX_CP_RB_BASE, REG_A6XX_CP_RB_BASE_HI, + gpu->rb[0]->iova); + + gpu_write(gpu, REG_A6XX_CP_RB_CNTL, + MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE); + /* Always come up on rb 0 */ a6xx_gpu->cur_ring = gpu->rb[0]; @@ -803,6 +810,11 @@ static unsigned long a6xx_gpu_busy(struct msm_gpu *gpu) struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); u64 busy_cycles, busy_time; + + /* Only read the gpu busy if the hardware is already active */ + if (pm_runtime_get_if_in_use(a6xx_gpu->gmu.dev) == 0) + return 0; + busy_cycles = gmu_read64(&a6xx_gpu->gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_L, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_H); @@ -812,6 +824,8 @@ static unsigned long a6xx_gpu_busy(struct msm_gpu *gpu) gpu->devfreq.busy_cycles = busy_cycles; + pm_runtime_put(a6xx_gpu->gmu.dev); + if (WARN_ON(busy_time > ~0LU)) return 
~0LU; diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c index 048c8be426f3..3802ad38c519 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c @@ -350,30 +350,10 @@ int adreno_hw_init(struct msm_gpu *gpu) ring->next = ring->start; /* reset completed fence seqno: */ - ring->memptrs->fence = ring->seqno; + ring->memptrs->fence = ring->fctx->completed_fence; ring->memptrs->rptr = 0; } - /* - * Setup REG_CP_RB_CNTL. The same value is used across targets (with - * the excpetion of A430 that disables the RPTR shadow) - the cacluation - * for the ringbuffer size and block size is moved to msm_gpu.h for the - * pre-processor to deal with and the A430 variant is ORed in here - */ - adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_CNTL, - MSM_GPU_RB_CNTL_DEFAULT | - (adreno_is_a430(adreno_gpu) ? AXXX_CP_RB_CNTL_NO_UPDATE : 0)); - - /* Setup ringbuffer address - use ringbuffer[0] for GPU init */ - adreno_gpu_write64(adreno_gpu, REG_ADRENO_CP_RB_BASE, - REG_ADRENO_CP_RB_BASE_HI, gpu->rb[0]->iova); - - if (!adreno_is_a430(adreno_gpu)) { - adreno_gpu_write64(adreno_gpu, REG_ADRENO_CP_RB_RPTR_ADDR, - REG_ADRENO_CP_RB_RPTR_ADDR_HI, - rbmemptr(gpu->rb[0], rptr)); - } - return 0; } @@ -381,11 +361,8 @@ int adreno_hw_init(struct msm_gpu *gpu) static uint32_t get_rptr(struct adreno_gpu *adreno_gpu, struct msm_ringbuffer *ring) { - if (adreno_is_a430(adreno_gpu)) - return ring->memptrs->rptr = adreno_gpu_read( - adreno_gpu, REG_ADRENO_CP_RB_RPTR); - else - return ring->memptrs->rptr; + return ring->memptrs->rptr = adreno_gpu_read( + adreno_gpu, REG_ADRENO_CP_RB_RPTR); } struct msm_ringbuffer *adreno_active_ring(struct msm_gpu *gpu) diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c index ce59adff06aa..36c85c05b7cf 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c @@ -381,7 +381,7 @@ static void dpu_crtc_frame_event_cb(void *data, u32 event) spin_unlock_irqrestore(&dpu_crtc->spin_lock, flags); if (!fevent) { - DRM_ERROR("crtc%d event %d overflow\n", crtc->base.id, event); + DRM_ERROR_RATELIMITED("crtc%d event %d overflow\n", crtc->base.id, event); return; } diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c index d82ea994063f..99d449ce4a07 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c @@ -2185,7 +2185,6 @@ int dpu_encoder_setup(struct drm_device *dev, struct drm_encoder *enc, dpu_enc = to_dpu_encoder_virt(enc); - mutex_init(&dpu_enc->enc_lock); ret = dpu_encoder_setup_display(dpu_enc, dpu_kms, disp_info); if (ret) goto fail; @@ -2200,7 +2199,6 @@ int dpu_encoder_setup(struct drm_device *dev, struct drm_encoder *enc, 0); - mutex_init(&dpu_enc->rc_lock); INIT_DELAYED_WORK(&dpu_enc->delayed_off_work, dpu_encoder_off_work); dpu_enc->idle_timeout = IDLE_TIMEOUT; @@ -2232,7 +2230,7 @@ struct drm_encoder *dpu_encoder_init(struct drm_device *dev, dpu_enc = devm_kzalloc(dev->dev, sizeof(*dpu_enc), GFP_KERNEL); if (!dpu_enc) - return ERR_PTR(ENOMEM); + return ERR_PTR(-ENOMEM); rc = drm_encoder_init(dev, &dpu_enc->base, &dpu_encoder_funcs, drm_enc_mode, NULL); @@ -2245,6 +2243,8 @@ struct drm_encoder *dpu_encoder_init(struct drm_device *dev, spin_lock_init(&dpu_enc->enc_spinlock); dpu_enc->enabled = false; + mutex_init(&dpu_enc->enc_lock); + mutex_init(&dpu_enc->rc_lock); return &dpu_enc->base; } diff --git 
a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c index 58d5acbcfc5c..b984bafd27e2 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c @@ -853,9 +853,9 @@ static int dpu_plane_atomic_check(struct drm_plane *plane, crtc_state = drm_atomic_get_new_crtc_state(state->state, state->crtc); - min_scale = FRAC_16_16(1, pdpu->pipe_sblk->maxdwnscale); + min_scale = FRAC_16_16(1, pdpu->pipe_sblk->maxupscale); ret = drm_atomic_helper_check_plane_state(state, crtc_state, min_scale, - pdpu->pipe_sblk->maxupscale << 16, + pdpu->pipe_sblk->maxdwnscale << 16, true, true); if (ret) { DPU_ERROR_PLANE(pdpu, "Check plane state failed (%d)\n", ret); diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c index 91cd76a2bab1..77823ccdd0f8 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c @@ -1037,7 +1037,8 @@ static int mdp5_init(struct platform_device *pdev, struct drm_device *dev) return 0; fail: - mdp5_destroy(pdev); + if (mdp5_kms) + mdp5_destroy(pdev); return ret; } diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c index 5ccfad794c6a..561bfa48841c 100644 --- a/drivers/gpu/drm/msm/msm_atomic.c +++ b/drivers/gpu/drm/msm/msm_atomic.c @@ -27,6 +27,34 @@ int msm_atomic_prepare_fb(struct drm_plane *plane, return msm_framebuffer_prepare(new_state->fb, kms->aspace); } +/* + * Helpers to control vblanks while we flush.. basically just to ensure + * that vblank accounting is switched on, so we get valid seqn/timestamp + * on pageflip events (if requested) + */ + +static void vblank_get(struct msm_kms *kms, unsigned crtc_mask) +{ + struct drm_crtc *crtc; + + for_each_crtc_mask(kms->dev, crtc, crtc_mask) { + if (!crtc->state->active) + continue; + drm_crtc_vblank_get(crtc); + } +} + +static void vblank_put(struct msm_kms *kms, unsigned crtc_mask) +{ + struct drm_crtc *crtc; + + for_each_crtc_mask(kms->dev, crtc, crtc_mask) { + if (!crtc->state->active) + continue; + drm_crtc_vblank_put(crtc); + } +} + static void msm_atomic_async_commit(struct msm_kms *kms, int crtc_idx) { unsigned crtc_mask = BIT(crtc_idx); @@ -44,6 +72,8 @@ static void msm_atomic_async_commit(struct msm_kms *kms, int crtc_idx) kms->funcs->enable_commit(kms); + vblank_get(kms, crtc_mask); + /* * Flush hardware updates: */ @@ -58,6 +88,8 @@ static void msm_atomic_async_commit(struct msm_kms *kms, int crtc_idx) kms->funcs->wait_flush(kms, crtc_mask); trace_msm_atomic_wait_flush_finish(crtc_mask); + vblank_put(kms, crtc_mask); + mutex_lock(&kms->commit_lock); kms->funcs->complete_commit(kms, crtc_mask); mutex_unlock(&kms->commit_lock); @@ -221,6 +253,8 @@ void msm_atomic_commit_tail(struct drm_atomic_state *state) */ kms->pending_crtc_mask &= ~crtc_mask; + vblank_get(kms, crtc_mask); + /* * Flush hardware updates: */ @@ -235,6 +269,8 @@ void msm_atomic_commit_tail(struct drm_atomic_state *state) kms->funcs->wait_flush(kms, crtc_mask); trace_msm_atomic_wait_flush_finish(crtc_mask); + vblank_put(kms, crtc_mask); + mutex_lock(&kms->commit_lock); kms->funcs->complete_commit(kms, crtc_mask); mutex_unlock(&kms->commit_lock); diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index b73fbb65e14b..4558d66761b3 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -1321,6 +1321,13 @@ static int msm_pdev_remove(struct platform_device *pdev) return 0; } +static void msm_pdev_shutdown(struct platform_device 
*pdev) +{ + struct drm_device *drm = platform_get_drvdata(pdev); + + drm_atomic_helper_shutdown(drm); +} + static const struct of_device_id dt_match[] = { { .compatible = "qcom,mdp4", .data = (void *)KMS_MDP4 }, { .compatible = "qcom,mdss", .data = (void *)KMS_MDP5 }, @@ -1332,6 +1339,7 @@ MODULE_DEVICE_TABLE(of, dt_match); static struct platform_driver msm_platform_driver = { .probe = msm_pdev_probe, .remove = msm_pdev_remove, + .shutdown = msm_pdev_shutdown, .driver = { .name = "msm", .of_match_table = dt_match, diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index 5a6a79fbc9d6..d92a0ffe2a76 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -977,10 +977,8 @@ int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file, static int msm_gem_new_impl(struct drm_device *dev, uint32_t size, uint32_t flags, - struct drm_gem_object **obj, - bool struct_mutex_locked) + struct drm_gem_object **obj) { - struct msm_drm_private *priv = dev->dev_private; struct msm_gem_object *msm_obj; switch (flags & MSM_BO_CACHE_MASK) { @@ -1006,15 +1004,6 @@ static int msm_gem_new_impl(struct drm_device *dev, INIT_LIST_HEAD(&msm_obj->submit_entry); INIT_LIST_HEAD(&msm_obj->vmas); - if (struct_mutex_locked) { - WARN_ON(!mutex_is_locked(&dev->struct_mutex)); - list_add_tail(&msm_obj->mm_list, &priv->inactive_list); - } else { - mutex_lock(&dev->struct_mutex); - list_add_tail(&msm_obj->mm_list, &priv->inactive_list); - mutex_unlock(&dev->struct_mutex); - } - *obj = &msm_obj->base; return 0; @@ -1024,6 +1013,7 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev, uint32_t size, uint32_t flags, bool struct_mutex_locked) { struct msm_drm_private *priv = dev->dev_private; + struct msm_gem_object *msm_obj; struct drm_gem_object *obj = NULL; bool use_vram = false; int ret; @@ -1044,14 +1034,15 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev, if (size == 0) return ERR_PTR(-EINVAL); - ret = msm_gem_new_impl(dev, size, flags, &obj, struct_mutex_locked); + ret = msm_gem_new_impl(dev, size, flags, &obj); if (ret) goto fail; + msm_obj = to_msm_bo(obj); + if (use_vram) { struct msm_gem_vma *vma; struct page **pages; - struct msm_gem_object *msm_obj = to_msm_bo(obj); mutex_lock(&msm_obj->lock); @@ -1086,6 +1077,15 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev, mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER); } + if (struct_mutex_locked) { + WARN_ON(!mutex_is_locked(&dev->struct_mutex)); + list_add_tail(&msm_obj->mm_list, &priv->inactive_list); + } else { + mutex_lock(&dev->struct_mutex); + list_add_tail(&msm_obj->mm_list, &priv->inactive_list); + mutex_unlock(&dev->struct_mutex); + } + return obj; fail: @@ -1108,6 +1108,7 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev, struct drm_gem_object *msm_gem_import(struct drm_device *dev, struct dma_buf *dmabuf, struct sg_table *sgt) { + struct msm_drm_private *priv = dev->dev_private; struct msm_gem_object *msm_obj; struct drm_gem_object *obj; uint32_t size; @@ -1121,7 +1122,7 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev, size = PAGE_ALIGN(dmabuf->size); - ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj, false); + ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj); if (ret) goto fail; @@ -1146,6 +1147,11 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev, } mutex_unlock(&msm_obj->lock); + + mutex_lock(&dev->struct_mutex); + list_add_tail(&msm_obj->mm_list, &priv->inactive_list); + 
mutex_unlock(&dev->struct_mutex); + return obj; fail: diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c index e397c44cc011..39ecb5a18431 100644 --- a/drivers/gpu/drm/msm/msm_ringbuffer.c +++ b/drivers/gpu/drm/msm/msm_ringbuffer.c @@ -27,7 +27,8 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id, ring->id = id; ring->start = msm_gem_kernel_new(gpu->dev, MSM_GPU_RINGBUFFER_SZ, - MSM_BO_WC, gpu->aspace, &ring->bo, &ring->iova); + MSM_BO_WC | MSM_BO_GPU_READONLY, gpu->aspace, &ring->bo, + &ring->iova); if (IS_ERR(ring->start)) { ret = PTR_ERR(ring->start); diff --git a/drivers/gpu/drm/msm/msm_submitqueue.c b/drivers/gpu/drm/msm/msm_submitqueue.c index 001fbf537440..a1d94be7883a 100644 --- a/drivers/gpu/drm/msm/msm_submitqueue.c +++ b/drivers/gpu/drm/msm/msm_submitqueue.c @@ -71,8 +71,10 @@ int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx, queue->flags = flags; if (priv->gpu) { - if (prio >= priv->gpu->nr_rings) + if (prio >= priv->gpu->nr_rings) { + kfree(queue); return -EINVAL; + } queue->prio = prio; } diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c index d735ea7e2d88..419a02260bfa 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/disp.c +++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c @@ -2032,8 +2032,10 @@ nv50_disp_atomic_commit(struct drm_device *dev, int ret, i; ret = pm_runtime_get_sync(dev->dev); - if (ret < 0 && ret != -EACCES) + if (ret < 0 && ret != -EACCES) { + pm_runtime_put_autosuspend(dev->dev); return ret; + } ret = drm_atomic_helper_setup_commit(state, nonblock); if (ret) diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.c b/drivers/gpu/drm/nouveau/dispnv50/head.c index c9692df2b76c..46578108a430 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/head.c +++ b/drivers/gpu/drm/nouveau/dispnv50/head.c @@ -83,18 +83,20 @@ nv50_head_atomic_check_dither(struct nv50_head_atom *armh, { u32 mode = 0x00; - if (asyc->dither.mode == DITHERING_MODE_AUTO) { - if (asyh->base.depth > asyh->or.bpc * 3) - mode = DITHERING_MODE_DYNAMIC2X2; - } else { - mode = asyc->dither.mode; - } + if (asyc->dither.mode) { + if (asyc->dither.mode == DITHERING_MODE_AUTO) { + if (asyh->base.depth > asyh->or.bpc * 3) + mode = DITHERING_MODE_DYNAMIC2X2; + } else { + mode = asyc->dither.mode; + } - if (asyc->dither.depth == DITHERING_DEPTH_AUTO) { - if (asyh->or.bpc >= 8) - mode |= DITHERING_DEPTH_8BPC; - } else { - mode |= asyc->dither.depth; + if (asyc->dither.depth == DITHERING_DEPTH_AUTO) { + if (asyh->or.bpc >= 8) + mode |= DITHERING_DEPTH_8BPC; + } else { + mode |= asyc->dither.depth; + } } asyh->dither.enable = mode; diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index eb31c5b6c8e9..0994aee7671a 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c @@ -568,8 +568,10 @@ nouveau_connector_detect(struct drm_connector *connector, bool force) pm_runtime_get_noresume(dev->dev); } else { ret = pm_runtime_get_sync(dev->dev); - if (ret < 0 && ret != -EACCES) + if (ret < 0 && ret != -EACCES) { + pm_runtime_put_autosuspend(dev->dev); return conn_status; + } } nv_encoder = nouveau_connector_ddc_detect(connector); diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c index 7dfbbbc1beea..5c314f135dd1 100644 --- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c +++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c @@ -54,8 +54,10 @@ 
nouveau_debugfs_strap_peek(struct seq_file *m, void *data) int ret; ret = pm_runtime_get_sync(drm->dev->dev); - if (ret < 0 && ret != -EACCES) + if (ret < 0 && ret != -EACCES) { + pm_runtime_put_autosuspend(drm->dev->dev); return ret; + } seq_printf(m, "0x%08x\n", nvif_rd32(&drm->client.device.object, 0x101000)); diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index b1beed40e746..5347e5bdee8c 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -1052,8 +1052,10 @@ nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv) /* need to bring up power immediately if opening device */ ret = pm_runtime_get_sync(dev->dev); - if (ret < 0 && ret != -EACCES) + if (ret < 0 && ret != -EACCES) { + pm_runtime_put_autosuspend(dev->dev); return ret; + } get_task_comm(tmpname, current); snprintf(name, sizeof(name), "%s[%d]", tmpname, pid_nr(fpriv->pid)); @@ -1135,8 +1137,10 @@ nouveau_drm_ioctl(struct file *file, unsigned int cmd, unsigned long arg) long ret; ret = pm_runtime_get_sync(dev->dev); - if (ret < 0 && ret != -EACCES) + if (ret < 0 && ret != -EACCES) { + pm_runtime_put_autosuspend(dev->dev); return ret; + } switch (_IOC_NR(cmd) - DRM_COMMAND_BASE) { case DRM_NOUVEAU_NVIF: diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c index f439f0a5b43a..c09ea357e88f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c @@ -189,8 +189,10 @@ nouveau_fbcon_open(struct fb_info *info, int user) struct nouveau_fbdev *fbcon = info->par; struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev); int ret = pm_runtime_get_sync(drm->dev->dev); - if (ret < 0 && ret != -EACCES) + if (ret < 0 && ret != -EACCES) { + pm_runtime_put(drm->dev->dev); return ret; + } return 0; } @@ -315,7 +317,7 @@ nouveau_fbcon_create(struct drm_fb_helper *helper, struct nouveau_framebuffer *fb; struct nouveau_channel *chan; struct nouveau_bo *nvbo; - struct drm_mode_fb_cmd2 mode_cmd; + struct drm_mode_fb_cmd2 mode_cmd = {}; int ret; mode_cmd.width = sizes->surface_width; @@ -592,6 +594,7 @@ fini: drm_fb_helper_fini(&fbcon->helper); free: kfree(fbcon); + drm->fbcon = NULL; return ret; } diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index 1324c19f4e5c..fbfe25422774 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c @@ -45,8 +45,10 @@ nouveau_gem_object_del(struct drm_gem_object *gem) int ret; ret = pm_runtime_get_sync(dev); - if (WARN_ON(ret < 0 && ret != -EACCES)) + if (WARN_ON(ret < 0 && ret != -EACCES)) { + pm_runtime_put_autosuspend(dev); return; + } if (gem->import_attach) drm_prime_gem_destroy(gem, nvbo->bo.sg); diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c index feaac908efed..34403b810dba 100644 --- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c +++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c @@ -96,12 +96,9 @@ nouveau_sgdma_create_ttm(struct ttm_buffer_object *bo, uint32_t page_flags) else nvbe->ttm.ttm.func = &nv50_sgdma_backend; - if (ttm_dma_tt_init(&nvbe->ttm, bo, page_flags)) - /* - * A failing ttm_dma_tt_init() will call ttm_tt_destroy() - * and thus our nouveau_sgdma_destroy() hook, so we don't need - * to free nvbe here. 
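
The nouveau hunks above and the radeon hunks further down all repeat one fix: pm_runtime_get_sync() increments the device's usage count even when it returns an error, so returning early without a matching put leaks a reference and can keep the device from runtime-suspending again. A minimal sketch of the corrected shape, with illustrative names and kernel context assumed (not code from any of these drivers):

	#include <linux/pm_runtime.h>

	static int example_hw_access(struct device *dev)
	{
		int ret = pm_runtime_get_sync(dev);

		if (ret < 0 && ret != -EACCES) {
			/* usage count was bumped even though resume failed */
			pm_runtime_put_autosuspend(dev);
			return ret;
		}

		/* ... touch the hardware ... */

		pm_runtime_mark_last_busy(dev);
		pm_runtime_put_autosuspend(dev);
		return 0;
	}

Later kernels add pm_runtime_resume_and_get(), which drops the reference on failure by itself; these patches keep the explicit put on the error path instead.
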
- */ + if (ttm_dma_tt_init(&nvbe->ttm, bo, page_flags)) { + kfree(nvbe); return NULL; + } return &nvbe->ttm.ttm; } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigm200.c index 9b16a08eb4d9..bf6d41fb0c9f 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigm200.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigm200.c @@ -27,10 +27,10 @@ void gm200_hdmi_scdc(struct nvkm_ior *ior, int head, u8 scdc) { struct nvkm_device *device = ior->disp->engine.subdev.device; - const u32 hoff = head * 0x800; + const u32 soff = nv50_ior_base(ior); const u32 ctrl = scdc & 0x3; - nvkm_mask(device, 0x61c5bc + hoff, 0x00000003, ctrl); + nvkm_mask(device, 0x61c5bc + soff, 0x00000003, ctrl); ior->tmds.high_speed = !!(scdc & 0x2); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c index c8ab1b5741a3..db7769cb33eb 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c @@ -118,10 +118,10 @@ g94_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry, if (retries) udelay(400); - /* transaction request, wait up to 1ms for it to complete */ + /* transaction request, wait up to 2ms for it to complete */ nvkm_wr32(device, 0x00e4e4 + base, 0x00010000 | ctrl); - timeout = 1000; + timeout = 2000; do { ctrl = nvkm_rd32(device, 0x00e4e4 + base); udelay(1); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c index 7ef60895f43a..edb6148cbca0 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c @@ -118,10 +118,10 @@ gm200_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry, if (retries) udelay(400); - /* transaction request, wait up to 1ms for it to complete */ + /* transaction request, wait up to 2ms for it to complete */ nvkm_wr32(device, 0x00d954 + base, 0x00010000 | ctrl); - timeout = 1000; + timeout = 2000; do { ctrl = nvkm_rd32(device, 0x00d954 + base); udelay(1); diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c index 3c5ddbf30e97..f5e18802e7bc 100644 --- a/drivers/gpu/drm/omapdrm/omap_crtc.c +++ b/drivers/gpu/drm/omapdrm/omap_crtc.c @@ -451,11 +451,12 @@ static void omap_crtc_atomic_enable(struct drm_crtc *crtc, if (omap_state->manually_updated) return; - spin_lock_irq(&crtc->dev->event_lock); drm_crtc_vblank_on(crtc); + ret = drm_crtc_vblank_get(crtc); WARN_ON(ret != 0); + spin_lock_irq(&crtc->dev->event_lock); omap_crtc_arm_event(crtc); spin_unlock_irq(&crtc->dev->event_lock); } diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c index a834a39e335c..c34a36d3dd0f 100644 --- a/drivers/gpu/drm/panel/panel-simple.c +++ b/drivers/gpu/drm/panel/panel-simple.c @@ -1971,7 +1971,7 @@ static const struct drm_display_mode lg_lb070wv8_mode = { static const struct panel_desc lg_lb070wv8 = { .modes = &lg_lb070wv8_mode, .num_modes = 1, - .bpc = 16, + .bpc = 8, .size = { .width = 151, .height = 91, diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.c b/drivers/gpu/drm/panfrost/panfrost_gem.c index 77c3a3855c68..c05e013bb8e3 100644 --- a/drivers/gpu/drm/panfrost/panfrost_gem.c +++ b/drivers/gpu/drm/panfrost/panfrost_gem.c @@ -46,7 +46,7 @@ static void panfrost_gem_free_object(struct drm_gem_object *obj) sg_free_table(&bo->sgts[i]); } } - kfree(bo->sgts); + kvfree(bo->sgts); } drm_gem_shmem_free_object(obj); diff --git 
a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c index 5d75f8cf6477..3dc9b30a64b0 100644 --- a/drivers/gpu/drm/panfrost/panfrost_mmu.c +++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c @@ -486,7 +486,7 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as, pages = kvmalloc_array(bo->base.base.size >> PAGE_SHIFT, sizeof(struct page *), GFP_KERNEL | __GFP_ZERO); if (!pages) { - kfree(bo->sgts); + kvfree(bo->sgts); bo->sgts = NULL; mutex_unlock(&bo->base.pages_lock); ret = -ENOMEM; diff --git a/drivers/gpu/drm/qxl/qxl_kms.c b/drivers/gpu/drm/qxl/qxl_kms.c index bfc1631093e9..9bdbe0db8795 100644 --- a/drivers/gpu/drm/qxl/qxl_kms.c +++ b/drivers/gpu/drm/qxl/qxl_kms.c @@ -218,7 +218,7 @@ int qxl_device_init(struct qxl_device *qdev, &(qdev->ram_header->cursor_ring_hdr), sizeof(struct qxl_command), QXL_CURSOR_RING_SIZE, - qdev->io_base + QXL_IO_NOTIFY_CMD, + qdev->io_base + QXL_IO_NOTIFY_CURSOR, false, &qdev->cursor_event); diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c index c6fd123f60b5..1e62e7bbf1b1 100644 --- a/drivers/gpu/drm/radeon/ci_dpm.c +++ b/drivers/gpu/drm/radeon/ci_dpm.c @@ -4366,7 +4366,7 @@ static int ci_set_mc_special_registers(struct radeon_device *rdev, table->mc_reg_table_entry[k].mc_data[j] |= 0x100; } j++; - if (j > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE) + if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE) return -EINVAL; if (!pi->mem_gddr5) { @@ -5578,6 +5578,7 @@ static int ci_parse_power_table(struct radeon_device *rdev) if (!rdev->pm.dpm.ps) return -ENOMEM; power_state_offset = (u8 *)state_array->states; + rdev->pm.dpm.num_ps = 0; for (i = 0; i < state_array->ucNumEntries; i++) { u8 *idx; power_state = (union pplib_power_state *)power_state_offset; @@ -5587,10 +5588,8 @@ static int ci_parse_power_table(struct radeon_device *rdev) if (!rdev->pm.power_state[i].clock_info) return -EINVAL; ps = kzalloc(sizeof(struct ci_ps), GFP_KERNEL); - if (ps == NULL) { - kfree(rdev->pm.dpm.ps); + if (ps == NULL) return -ENOMEM; - } rdev->pm.dpm.ps[i].ps_priv = ps; ci_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i], non_clock_info, @@ -5612,8 +5611,8 @@ static int ci_parse_power_table(struct radeon_device *rdev) k++; } power_state_offset += 2 + power_state->v2.ucNumDPMLevels; + rdev->pm.dpm.num_ps = i + 1; } - rdev->pm.dpm.num_ps = state_array->ucNumEntries; /* fill in the vce power states */ for (i = 0; i < RADEON_MAX_VCE_LEVELS; i++) { diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c index d9e62ca65ab8..bd2e577c701f 100644 --- a/drivers/gpu/drm/radeon/ni_dpm.c +++ b/drivers/gpu/drm/radeon/ni_dpm.c @@ -2128,7 +2128,7 @@ static int ni_init_smc_spll_table(struct radeon_device *rdev) if (clk_s & ~(SMC_NISLANDS_SPLL_DIV_TABLE_CLKS_MASK >> SMC_NISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT)) ret = -EINVAL; - if (clk_s & ~(SMC_NISLANDS_SPLL_DIV_TABLE_CLKS_MASK >> SMC_NISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT)) + if (fb_div & ~(SMC_NISLANDS_SPLL_DIV_TABLE_FBDIV_MASK >> SMC_NISLANDS_SPLL_DIV_TABLE_FBDIV_SHIFT)) ret = -EINVAL; if (clk_v & ~(SMC_NISLANDS_SPLL_DIV_TABLE_CLKV_MASK >> SMC_NISLANDS_SPLL_DIV_TABLE_CLKV_SHIFT)) diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index b684cd719612..bc63f4cecf5d 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c @@ -883,8 +883,10 @@ radeon_lvds_detect(struct drm_connector *connector, bool force) if (!drm_kms_helper_is_poll_worker()) { r = 
pm_runtime_get_sync(connector->dev->dev); - if (r < 0) + if (r < 0) { + pm_runtime_put_autosuspend(connector->dev->dev); return connector_status_disconnected; + } } if (encoder) { @@ -1029,8 +1031,10 @@ radeon_vga_detect(struct drm_connector *connector, bool force) if (!drm_kms_helper_is_poll_worker()) { r = pm_runtime_get_sync(connector->dev->dev); - if (r < 0) + if (r < 0) { + pm_runtime_put_autosuspend(connector->dev->dev); return connector_status_disconnected; + } } encoder = radeon_best_single_encoder(connector); @@ -1167,8 +1171,10 @@ radeon_tv_detect(struct drm_connector *connector, bool force) if (!drm_kms_helper_is_poll_worker()) { r = pm_runtime_get_sync(connector->dev->dev); - if (r < 0) + if (r < 0) { + pm_runtime_put_autosuspend(connector->dev->dev); return connector_status_disconnected; + } } encoder = radeon_best_single_encoder(connector); @@ -1251,8 +1257,10 @@ radeon_dvi_detect(struct drm_connector *connector, bool force) if (!drm_kms_helper_is_poll_worker()) { r = pm_runtime_get_sync(connector->dev->dev); - if (r < 0) + if (r < 0) { + pm_runtime_put_autosuspend(connector->dev->dev); return connector_status_disconnected; + } } if (radeon_connector->detected_hpd_without_ddc) { @@ -1666,8 +1674,10 @@ radeon_dp_detect(struct drm_connector *connector, bool force) if (!drm_kms_helper_is_poll_worker()) { r = pm_runtime_get_sync(connector->dev->dev); - if (r < 0) + if (r < 0) { + pm_runtime_put_autosuspend(connector->dev->dev); return connector_status_disconnected; + } } if (!force && radeon_check_hpd_status_unchanged(connector)) { diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 0826efd9b5f5..f9f74150d0d7 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -631,8 +631,10 @@ radeon_crtc_set_config(struct drm_mode_set *set, dev = set->crtc->dev; ret = pm_runtime_get_sync(dev->dev); - if (ret < 0) + if (ret < 0) { + pm_runtime_put_autosuspend(dev->dev); return ret; + } ret = drm_crtc_helper_set_config(set, ctx); diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index 6128792ab883..c2573096d43c 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -174,12 +174,7 @@ int radeon_no_wb; int radeon_modeset = -1; int radeon_dynclks = -1; int radeon_r4xx_atom = 0; -#ifdef __powerpc__ -/* Default to PCI on PowerPC (fdo #95017) */ int radeon_agpmode = -1; -#else -int radeon_agpmode = 0; -#endif int radeon_vram_limit = 0; int radeon_gart_size = -1; /* auto */ int radeon_benchmarking = 0; @@ -555,8 +550,10 @@ long radeon_drm_ioctl(struct file *filp, long ret; dev = file_priv->minor->dev; ret = pm_runtime_get_sync(dev->dev); - if (ret < 0) + if (ret < 0) { + pm_runtime_put_autosuspend(dev->dev); return ret; + } ret = drm_ioctl(filp, cmd, arg); diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index 2bb0187c5bc7..709c4ef5e7d5 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c @@ -638,8 +638,10 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) file_priv->driver_priv = NULL; r = pm_runtime_get_sync(dev->dev); - if (r < 0) + if (r < 0) { + pm_runtime_put_autosuspend(dev->dev); return r; + } /* new gpu have virtual address space support */ if (rdev->family >= CHIP_CAYMAN) { diff --git a/drivers/gpu/drm/rcar-du/Kconfig b/drivers/gpu/drm/rcar-du/Kconfig index 1529849e217e..7cdba77b1420 100644 --- 
a/drivers/gpu/drm/rcar-du/Kconfig +++ b/drivers/gpu/drm/rcar-du/Kconfig @@ -23,6 +23,7 @@ config DRM_RCAR_DW_HDMI config DRM_RCAR_LVDS tristate "R-Car DU LVDS Encoder Support" depends on DRM && DRM_BRIDGE && OF + select DRM_KMS_HELPER select DRM_PANEL select OF_FLATTREE select OF_OVERLAY diff --git a/drivers/gpu/drm/rcar-du/rcar_du_plane.c b/drivers/gpu/drm/rcar-du/rcar_du_plane.c index c6430027169f..a0021fc25b27 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_plane.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_plane.c @@ -785,13 +785,15 @@ int rcar_du_planes_init(struct rcar_du_group *rgrp) drm_plane_create_alpha_property(&plane->plane); - if (type == DRM_PLANE_TYPE_PRIMARY) - continue; - - drm_object_attach_property(&plane->plane.base, - rcdu->props.colorkey, - RCAR_DU_COLORKEY_NONE); - drm_plane_create_zpos_property(&plane->plane, 1, 1, 7); + if (type == DRM_PLANE_TYPE_PRIMARY) { + drm_plane_create_zpos_immutable_property(&plane->plane, + 0); + } else { + drm_object_attach_property(&plane->plane.base, + rcdu->props.colorkey, + RCAR_DU_COLORKEY_NONE); + drm_plane_create_zpos_property(&plane->plane, 1, 1, 7); + } } return 0; diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c index 5e4faf258c31..f1a81c9b184d 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c @@ -392,12 +392,14 @@ int rcar_du_vsp_init(struct rcar_du_vsp *vsp, struct device_node *np, drm_plane_helper_add(&plane->plane, &rcar_du_vsp_plane_helper_funcs); - if (type == DRM_PLANE_TYPE_PRIMARY) - continue; - - drm_plane_create_alpha_property(&plane->plane); - drm_plane_create_zpos_property(&plane->plane, 1, 1, - vsp->num_planes - 1); + if (type == DRM_PLANE_TYPE_PRIMARY) { + drm_plane_create_zpos_immutable_property(&plane->plane, + 0); + } else { + drm_plane_create_alpha_property(&plane->plane); + drm_plane_create_zpos_property(&plane->plane, 1, 1, + vsp->num_planes - 1); + } } return 0; diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c index dfb29e6eeff1..30c5ddd6d081 100644 --- a/drivers/gpu/drm/scheduler/sched_main.c +++ b/drivers/gpu/drm/scheduler/sched_main.c @@ -496,8 +496,10 @@ void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched) fence = sched->ops->run_job(s_job); if (IS_ERR_OR_NULL(fence)) { + if (IS_ERR(fence)) + dma_fence_set_error(&s_fence->finished, PTR_ERR(fence)); + s_job->s_fence->parent = NULL; - dma_fence_set_error(&s_fence->finished, PTR_ERR(fence)); } else { s_job->s_fence->parent = fence; } @@ -748,8 +750,9 @@ static int drm_sched_main(void *param) r); dma_fence_put(fence); } else { + if (IS_ERR(fence)) + dma_fence_set_error(&s_fence->finished, PTR_ERR(fence)); - dma_fence_set_error(&s_fence->finished, PTR_ERR(fence)); drm_sched_process_job(NULL, &sched_job->cb); } diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c index 3ab4fbf8eb0d..51571f7246ab 100644 --- a/drivers/gpu/drm/stm/ltdc.c +++ b/drivers/gpu/drm/stm/ltdc.c @@ -424,9 +424,12 @@ static void ltdc_crtc_atomic_enable(struct drm_crtc *crtc, struct drm_crtc_state *old_state) { struct ltdc_device *ldev = crtc_to_ltdc(crtc); + struct drm_device *ddev = crtc->dev; DRM_DEBUG_DRIVER("\n"); + pm_runtime_get_sync(ddev->dev); + /* Sets the background color value */ reg_write(ldev->regs, LTDC_BCCR, BCCR_BCBLACK); diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.c b/drivers/gpu/drm/sun4i/sun4i_backend.c index 4e29f4fe4a05..99f081ccc15d 100644 --- a/drivers/gpu/drm/sun4i/sun4i_backend.c +++ 
b/drivers/gpu/drm/sun4i/sun4i_backend.c @@ -589,8 +589,7 @@ static int sun4i_backend_atomic_check(struct sunxi_engine *engine, /* We can't have an alpha plane at the lowest position */ if (!backend->quirks->supports_lowest_plane_alpha && - (plane_states[0]->fb->format->has_alpha || - (plane_states[0]->alpha != DRM_BLEND_ALPHA_OPAQUE))) + (plane_states[0]->alpha != DRM_BLEND_ALPHA_OPAQUE)) return -EINVAL; for (i = 1; i < num_planes; i++) { @@ -986,7 +985,6 @@ static const struct sun4i_backend_quirks sun6i_backend_quirks = { static const struct sun4i_backend_quirks sun7i_backend_quirks = { .needs_output_muxing = true, - .supports_lowest_plane_alpha = true, }; static const struct sun4i_backend_quirks sun8i_a33_backend_quirks = { diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi.h b/drivers/gpu/drm/sun4i/sun4i_hdmi.h index 7ad3f06c127e..00ca35f07ba5 100644 --- a/drivers/gpu/drm/sun4i/sun4i_hdmi.h +++ b/drivers/gpu/drm/sun4i/sun4i_hdmi.h @@ -148,7 +148,7 @@ #define SUN4I_HDMI_DDC_CMD_IMPLICIT_WRITE 3 #define SUN4I_HDMI_DDC_CLK_REG 0x528 -#define SUN4I_HDMI_DDC_CLK_M(m) (((m) & 0x7) << 3) +#define SUN4I_HDMI_DDC_CLK_M(m) (((m) & 0xf) << 3) #define SUN4I_HDMI_DDC_CLK_N(n) ((n) & 0x7) #define SUN4I_HDMI_DDC_LINE_CTRL_REG 0x540 diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_ddc_clk.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_ddc_clk.c index 2ff780114106..12430b9d4e93 100644 --- a/drivers/gpu/drm/sun4i/sun4i_hdmi_ddc_clk.c +++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_ddc_clk.c @@ -33,7 +33,7 @@ static unsigned long sun4i_ddc_calc_divider(unsigned long rate, unsigned long best_rate = 0; u8 best_m = 0, best_n = 0, _m, _n; - for (_m = 0; _m < 8; _m++) { + for (_m = 0; _m < 16; _m++) { for (_n = 0; _n < 8; _n++) { unsigned long tmp_rate; diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c index 9c3bdfd20337..4acdfa608775 100644 --- a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c +++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c @@ -262,9 +262,8 @@ sun4i_hdmi_connector_detect(struct drm_connector *connector, bool force) struct sun4i_hdmi *hdmi = drm_connector_to_sun4i_hdmi(connector); unsigned long reg; - if (readl_poll_timeout(hdmi->base + SUN4I_HDMI_HPD_REG, reg, - reg & SUN4I_HDMI_HPD_HIGH, - 0, 500000)) { + reg = readl(hdmi->base + SUN4I_HDMI_HPD_REG); + if (!(reg & SUN4I_HDMI_HPD_HIGH)) { cec_phys_addr_invalidate(hdmi->cec_adap); return connector_status_disconnected; } diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c index 27c80c9e2b83..ae7ae432aa4a 100644 --- a/drivers/gpu/drm/sun4i/sun4i_tcon.c +++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c @@ -1409,14 +1409,18 @@ static int sun8i_r40_tcon_tv_set_mux(struct sun4i_tcon *tcon, if (IS_ENABLED(CONFIG_DRM_SUN8I_TCON_TOP) && encoder->encoder_type == DRM_MODE_ENCODER_TMDS) { ret = sun8i_tcon_top_set_hdmi_src(&pdev->dev, id); - if (ret) + if (ret) { + put_device(&pdev->dev); return ret; + } } if (IS_ENABLED(CONFIG_DRM_SUN8I_TCON_TOP)) { ret = sun8i_tcon_top_de_config(&pdev->dev, tcon->id, id); - if (ret) + if (ret) { + put_device(&pdev->dev); return ret; + } } return 0; @@ -1504,6 +1508,8 @@ const struct of_device_id sun4i_tcon_of_table[] = { { .compatible = "allwinner,sun6i-a31-tcon", .data = &sun6i_a31_quirks }, { .compatible = "allwinner,sun6i-a31s-tcon", .data = &sun6i_a31s_quirks }, { .compatible = "allwinner,sun7i-a20-tcon", .data = &sun7i_a20_quirks }, + { .compatible = "allwinner,sun7i-a20-tcon0", .data = &sun7i_a20_quirks }, + { .compatible = "allwinner,sun7i-a20-tcon1", .data = &sun7i_a20_quirks }, { 
.compatible = "allwinner,sun8i-a23-tcon", .data = &sun8i_a33_quirks }, { .compatible = "allwinner,sun8i-a33-tcon", .data = &sun8i_a33_quirks }, { .compatible = "allwinner,sun8i-a83t-tcon-lcd", .data = &sun8i_a83t_lcd_quirks }, diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c index 4f944ace665d..f2b288037b90 100644 --- a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c +++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c @@ -867,7 +867,7 @@ static int sun6i_dsi_dcs_write_long(struct sun6i_dsi *dsi, regmap_write(dsi->regs, SUN6I_DSI_CMD_TX_REG(0), sun6i_dsi_dcs_build_pkt_hdr(dsi, msg)); - bounce = kzalloc(msg->tx_len + sizeof(crc), GFP_KERNEL); + bounce = kzalloc(ALIGN(msg->tx_len + sizeof(crc), 4), GFP_KERNEL); if (!bounce) return -ENOMEM; @@ -878,7 +878,7 @@ static int sun6i_dsi_dcs_write_long(struct sun6i_dsi *dsi, memcpy((u8 *)bounce + msg->tx_len, &crc, sizeof(crc)); len += sizeof(crc); - regmap_bulk_write(dsi->regs, SUN6I_DSI_CMD_TX_REG(1), bounce, len); + regmap_bulk_write(dsi->regs, SUN6I_DSI_CMD_TX_REG(1), bounce, DIV_ROUND_UP(len, 4)); regmap_write(dsi->regs, SUN6I_DSI_CMD_CTL_REG, len + 4 - 1); kfree(bounce); diff --git a/drivers/gpu/drm/tegra/hub.c b/drivers/gpu/drm/tegra/hub.c index 839b49c40e51..767fb440a79d 100644 --- a/drivers/gpu/drm/tegra/hub.c +++ b/drivers/gpu/drm/tegra/hub.c @@ -141,7 +141,9 @@ int tegra_display_hub_prepare(struct tegra_display_hub *hub) for (i = 0; i < hub->soc->num_wgrps; i++) { struct tegra_windowgroup *wgrp = &hub->wgrps[i]; - tegra_windowgroup_enable(wgrp); + /* Skip orphaned window group whose parent DC is disabled */ + if (wgrp->parent) + tegra_windowgroup_enable(wgrp); } return 0; @@ -158,7 +160,9 @@ void tegra_display_hub_cleanup(struct tegra_display_hub *hub) for (i = 0; i < hub->soc->num_wgrps; i++) { struct tegra_windowgroup *wgrp = &hub->wgrps[i]; - tegra_windowgroup_disable(wgrp); + /* Skip orphaned window group whose parent DC is disabled */ + if (wgrp->parent) + tegra_windowgroup_disable(wgrp); } } diff --git a/drivers/gpu/drm/tilcdc/tilcdc_panel.c b/drivers/gpu/drm/tilcdc/tilcdc_panel.c index 5584e656b857..8c4fd1aa4c2d 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_panel.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_panel.c @@ -143,12 +143,16 @@ static int panel_connector_get_modes(struct drm_connector *connector) int i; for (i = 0; i < timings->num_timings; i++) { - struct drm_display_mode *mode = drm_mode_create(dev); + struct drm_display_mode *mode; struct videomode vm; if (videomode_from_timings(timings, &vm, i)) break; + mode = drm_mode_create(dev); + if (!mode) + break; + drm_display_mode_from_videomode(&vm, mode); mode->type = DRM_MODE_TYPE_DRIVER; diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index abf165b2f64f..3ce8ad7603c7 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -941,8 +941,10 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo, if (!fence) return 0; - if (no_wait_gpu) + if (no_wait_gpu) { + dma_fence_put(fence); return -EBUSY; + } dma_resv_add_shared_fence(bo->base.resv, fence); diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c index 46dc3de7e81b..f2bad14ac04a 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c @@ -358,8 +358,10 @@ static int ttm_bo_vm_access_kmap(struct ttm_buffer_object *bo, static int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr, void *buf, int len, int write) { - unsigned long offset = (addr) - vma->vm_start; struct ttm_buffer_object 
*bo = vma->vm_private_data; + unsigned long offset = (addr) - vma->vm_start + + ((vma->vm_pgoff - drm_vma_node_start(&bo->base.vma_node)) + << PAGE_SHIFT); int ret; if (len < 1 || (offset + len) >> PAGE_SHIFT > bo->num_pages) diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c index e0e9b4f69db6..c770ec7e9e8b 100644 --- a/drivers/gpu/drm/ttm/ttm_tt.c +++ b/drivers/gpu/drm/ttm/ttm_tt.c @@ -241,7 +241,6 @@ int ttm_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo, ttm_tt_init_fields(ttm, bo, page_flags); if (ttm_tt_alloc_page_directory(ttm)) { - ttm_tt_destroy(ttm); pr_err("Failed allocating page table\n"); return -ENOMEM; } @@ -265,7 +264,6 @@ int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo, INIT_LIST_HEAD(&ttm_dma->pages_list); if (ttm_dma_tt_alloc_page_directory(ttm_dma)) { - ttm_tt_destroy(ttm); pr_err("Failed allocating page table\n"); return -ENOMEM; } @@ -287,7 +285,6 @@ int ttm_sg_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo, else ret = ttm_dma_tt_alloc_page_directory(ttm_dma); if (ret) { - ttm_tt_destroy(ttm); pr_err("Failed allocating page table\n"); return -ENOMEM; } diff --git a/drivers/gpu/drm/tve200/tve200_display.c b/drivers/gpu/drm/tve200/tve200_display.c index d733bbc4ac0e..17ff24d999d1 100644 --- a/drivers/gpu/drm/tve200/tve200_display.c +++ b/drivers/gpu/drm/tve200/tve200_display.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include @@ -130,9 +131,25 @@ static void tve200_display_enable(struct drm_simple_display_pipe *pipe, struct drm_connector *connector = priv->connector; u32 format = fb->format->format; u32 ctrl1 = 0; + int retries; clk_prepare_enable(priv->clk); + /* Reset the TVE200 and wait for it to come back online */ + writel(TVE200_CTRL_4_RESET, priv->regs + TVE200_CTRL_4); + for (retries = 0; retries < 5; retries++) { + usleep_range(30000, 50000); + if (readl(priv->regs + TVE200_CTRL_4) & TVE200_CTRL_4_RESET) + continue; + else + break; + } + if (retries == 5 && + readl(priv->regs + TVE200_CTRL_4) & TVE200_CTRL_4_RESET) { + dev_err(drm->dev, "can't get hardware out of reset\n"); + return; + } + /* Function 1 */ ctrl1 |= TVE200_CTRL_CSMODE; /* Interlace mode for CCIR656: parameterize? 
*/ @@ -230,8 +247,9 @@ static void tve200_display_disable(struct drm_simple_display_pipe *pipe) drm_crtc_vblank_off(crtc); - /* Disable and Power Down */ + /* Disable put into reset and Power Down */ writel(0, priv->regs + TVE200_CTRL); + writel(TVE200_CTRL_4_RESET, priv->regs + TVE200_CTRL_4); clk_disable_unprepare(priv->clk); } @@ -279,6 +297,8 @@ static int tve200_display_enable_vblank(struct drm_simple_display_pipe *pipe) struct drm_device *drm = crtc->dev; struct tve200_drm_dev_private *priv = drm->dev_private; + /* Clear any IRQs and enable */ + writel(0xFF, priv->regs + TVE200_INT_CLR); writel(TVE200_INT_V_STATUS, priv->regs + TVE200_INT_EN); return 0; } diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c index 909eba43664a..204d1df5a21d 100644 --- a/drivers/gpu/drm/vgem/vgem_drv.c +++ b/drivers/gpu/drm/vgem/vgem_drv.c @@ -229,32 +229,6 @@ static int vgem_gem_dumb_create(struct drm_file *file, struct drm_device *dev, return 0; } -static int vgem_gem_dumb_map(struct drm_file *file, struct drm_device *dev, - uint32_t handle, uint64_t *offset) -{ - struct drm_gem_object *obj; - int ret; - - obj = drm_gem_object_lookup(file, handle); - if (!obj) - return -ENOENT; - - if (!obj->filp) { - ret = -EINVAL; - goto unref; - } - - ret = drm_gem_create_mmap_offset(obj); - if (ret) - goto unref; - - *offset = drm_vma_node_offset_addr(&obj->vma_node); -unref: - drm_gem_object_put_unlocked(obj); - - return ret; -} - static struct drm_ioctl_desc vgem_ioctls[] = { DRM_IOCTL_DEF_DRV(VGEM_FENCE_ATTACH, vgem_fence_attach_ioctl, DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(VGEM_FENCE_SIGNAL, vgem_fence_signal_ioctl, DRM_RENDER_ALLOW), @@ -448,7 +422,6 @@ static struct drm_driver vgem_driver = { .fops = &vgem_driver_fops, .dumb_create = vgem_gem_dumb_create, - .dumb_map_offset = vgem_gem_dumb_map, .prime_handle_to_fd = drm_gem_prime_handle_to_fd, .prime_fd_to_handle = drm_gem_prime_fd_to_handle, diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index f47d5710cc95..33b151988747 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -2666,7 +2666,7 @@ int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv, ++i; } - if (i != unit) { + if (&con->head == &dev_priv->dev->mode_config.connector_list) { DRM_ERROR("Could not find initial display unit.\n"); ret = -EINVAL; goto out_unlock; @@ -2690,13 +2690,13 @@ int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv, break; } - if (mode->type & DRM_MODE_TYPE_PREFERRED) - *p_mode = mode; - else { + if (&mode->head == &con->modes) { WARN_ONCE(true, "Could not find initial preferred mode.\n"); *p_mode = list_first_entry(&con->modes, struct drm_display_mode, head); + } else { + *p_mode = mode; } out_unlock: diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c index 5702219ec38f..7b54c1f56208 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c @@ -81,7 +81,7 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv) struct vmw_legacy_display_unit *entry; struct drm_framebuffer *fb = NULL; struct drm_crtc *crtc = NULL; - int i = 0; + int i; /* If there is no display topology the host just assumes * that the guest will set the same layout as the host. 
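
The vmw_kms_fbdev_init_data change above replaces a fragile counter comparison with the usual way to detect that a list_for_each_entry() walk finished without hitting a break: afterwards the cursor's embedded list_head points back at the list head rather than at a real entry. Roughly, with hypothetical names (a sketch, not the vmwgfx code):

	#include <linux/list.h>

	struct item {
		struct list_head node;
		int id;
	};

	static struct item *find_item(struct list_head *items, int id)
	{
		struct item *it;

		list_for_each_entry(it, items, node) {
			if (it->id == id)
				break;
		}

		/* no break taken: the cursor now aliases the list head */
		if (&it->node == items)
			return NULL;

		return it;
	}
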
@@ -92,12 +92,11 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv) crtc = &entry->base.crtc; w = max(w, crtc->x + crtc->mode.hdisplay); h = max(h, crtc->y + crtc->mode.vdisplay); - i++; } if (crtc == NULL) return 0; - fb = entry->base.crtc.primary->state->fb; + fb = crtc->primary->state->fb; return vmw_kms_write_svga(dev_priv, w, h, fb->pitches[0], fb->format->cpp[0] * 8, diff --git a/drivers/gpu/drm/xen/xen_drm_front.c b/drivers/gpu/drm/xen/xen_drm_front.c index 4be49c1aef51..09894a1d343f 100644 --- a/drivers/gpu/drm/xen/xen_drm_front.c +++ b/drivers/gpu/drm/xen/xen_drm_front.c @@ -400,7 +400,7 @@ static int xen_drm_drv_dumb_create(struct drm_file *filp, args->size = args->pitch * args->height; obj = xen_drm_front_gem_create(dev, args->size); - if (IS_ERR_OR_NULL(obj)) { + if (IS_ERR(obj)) { ret = PTR_ERR(obj); goto fail; } diff --git a/drivers/gpu/drm/xen/xen_drm_front_gem.c b/drivers/gpu/drm/xen/xen_drm_front_gem.c index f0b85e094111..4ec8a49241e1 100644 --- a/drivers/gpu/drm/xen/xen_drm_front_gem.c +++ b/drivers/gpu/drm/xen/xen_drm_front_gem.c @@ -83,7 +83,7 @@ static struct xen_gem_object *gem_create(struct drm_device *dev, size_t size) size = round_up(size, PAGE_SIZE); xen_obj = gem_create_obj(dev, size); - if (IS_ERR_OR_NULL(xen_obj)) + if (IS_ERR(xen_obj)) return xen_obj; if (drm_info->front_info->cfg.be_alloc) { @@ -117,7 +117,7 @@ static struct xen_gem_object *gem_create(struct drm_device *dev, size_t size) */ xen_obj->num_pages = DIV_ROUND_UP(size, PAGE_SIZE); xen_obj->pages = drm_gem_get_pages(&xen_obj->base); - if (IS_ERR_OR_NULL(xen_obj->pages)) { + if (IS_ERR(xen_obj->pages)) { ret = PTR_ERR(xen_obj->pages); xen_obj->pages = NULL; goto fail; @@ -136,7 +136,7 @@ struct drm_gem_object *xen_drm_front_gem_create(struct drm_device *dev, struct xen_gem_object *xen_obj; xen_obj = gem_create(dev, size); - if (IS_ERR_OR_NULL(xen_obj)) + if (IS_ERR(xen_obj)) return ERR_CAST(xen_obj); return &xen_obj->base; @@ -194,7 +194,7 @@ xen_drm_front_gem_import_sg_table(struct drm_device *dev, size = attach->dmabuf->size; xen_obj = gem_create_obj(dev, size); - if (IS_ERR_OR_NULL(xen_obj)) + if (IS_ERR(xen_obj)) return ERR_CAST(xen_obj); ret = gem_alloc_pages_array(xen_obj, size); diff --git a/drivers/gpu/drm/xen/xen_drm_front_kms.c b/drivers/gpu/drm/xen/xen_drm_front_kms.c index 21ad1c359b61..e4dedbb184ab 100644 --- a/drivers/gpu/drm/xen/xen_drm_front_kms.c +++ b/drivers/gpu/drm/xen/xen_drm_front_kms.c @@ -60,7 +60,7 @@ fb_create(struct drm_device *dev, struct drm_file *filp, int ret; fb = drm_gem_fb_create_with_funcs(dev, filp, mode_cmd, &fb_funcs); - if (IS_ERR_OR_NULL(fb)) + if (IS_ERR(fb)) return fb; gem_obj = drm_gem_object_lookup(filp, mode_cmd->handles[0]); diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c index 742aa9ff21b8..fcda8621ae6f 100644 --- a/drivers/gpu/host1x/bus.c +++ b/drivers/gpu/host1x/bus.c @@ -686,8 +686,17 @@ EXPORT_SYMBOL(host1x_driver_register_full); */ void host1x_driver_unregister(struct host1x_driver *driver) { + struct host1x *host1x; + driver_unregister(&driver->driver); + mutex_lock(&devices_lock); + + list_for_each_entry(host1x, &devices, list) + host1x_detach_driver(host1x, driver); + + mutex_unlock(&devices_lock); + mutex_lock(&drivers_lock); list_del_init(&driver->list); mutex_unlock(&drivers_lock); diff --git a/drivers/gpu/host1x/debug.c b/drivers/gpu/host1x/debug.c index c0392672a842..1b4997bda1c7 100644 --- a/drivers/gpu/host1x/debug.c +++ b/drivers/gpu/host1x/debug.c @@ -16,6 +16,8 @@ #include "debug.h" #include "channel.h" 
+static DEFINE_MUTEX(debug_lock); + unsigned int host1x_debug_trace_cmdbuf; static pid_t host1x_debug_force_timeout_pid; @@ -52,12 +54,14 @@ static int show_channel(struct host1x_channel *ch, void *data, bool show_fifo) struct output *o = data; mutex_lock(&ch->cdma.lock); + mutex_lock(&debug_lock); if (show_fifo) host1x_hw_show_channel_fifo(m, ch, o); host1x_hw_show_channel_cdma(m, ch, o); + mutex_unlock(&debug_lock); mutex_unlock(&ch->cdma.lock); return 0; diff --git a/drivers/gpu/imx/ipu-v3/ipu-common.c b/drivers/gpu/imx/ipu-v3/ipu-common.c index ee2a025e54cf..b3dae9ec1a38 100644 --- a/drivers/gpu/imx/ipu-v3/ipu-common.c +++ b/drivers/gpu/imx/ipu-v3/ipu-common.c @@ -124,6 +124,8 @@ enum ipu_color_space ipu_pixelformat_to_colorspace(u32 pixelformat) case V4L2_PIX_FMT_RGBX32: case V4L2_PIX_FMT_ARGB32: case V4L2_PIX_FMT_XRGB32: + case V4L2_PIX_FMT_RGB32: + case V4L2_PIX_FMT_BGR32: return IPUV3_COLORSPACE_RGB; default: return IPUV3_COLORSPACE_UNKNOWN; diff --git a/drivers/gpu/imx/ipu-v3/ipu-image-convert.c b/drivers/gpu/imx/ipu-v3/ipu-image-convert.c index eeca50d9a1ee..aa1d4b6d278f 100644 --- a/drivers/gpu/imx/ipu-v3/ipu-image-convert.c +++ b/drivers/gpu/imx/ipu-v3/ipu-image-convert.c @@ -137,6 +137,17 @@ struct ipu_image_convert_ctx; struct ipu_image_convert_chan; struct ipu_image_convert_priv; +enum eof_irq_mask { + EOF_IRQ_IN = BIT(0), + EOF_IRQ_ROT_IN = BIT(1), + EOF_IRQ_OUT = BIT(2), + EOF_IRQ_ROT_OUT = BIT(3), +}; + +#define EOF_IRQ_COMPLETE (EOF_IRQ_IN | EOF_IRQ_OUT) +#define EOF_IRQ_ROT_COMPLETE (EOF_IRQ_IN | EOF_IRQ_OUT | \ + EOF_IRQ_ROT_IN | EOF_IRQ_ROT_OUT) + struct ipu_image_convert_ctx { struct ipu_image_convert_chan *chan; @@ -173,6 +184,9 @@ struct ipu_image_convert_ctx { /* where to place converted tile in dest image */ unsigned int out_tile_map[MAX_TILES]; + /* mask of completed EOF irqs at every tile conversion */ + enum eof_irq_mask eof_mask; + struct list_head list; }; @@ -189,6 +203,8 @@ struct ipu_image_convert_chan { struct ipuv3_channel *rotation_out_chan; /* the IPU end-of-frame irqs */ + int in_eof_irq; + int rot_in_eof_irq; int out_eof_irq; int rot_out_eof_irq; @@ -1380,6 +1396,9 @@ static int convert_start(struct ipu_image_convert_run *run, unsigned int tile) dev_dbg(priv->ipu->dev, "%s: task %u: starting ctx %p run %p tile %u -> %u\n", __func__, chan->ic_task, ctx, run, tile, dst_tile); + /* clear EOF irq mask */ + ctx->eof_mask = 0; + if (ipu_rot_mode_is_irt(ctx->rot_mode)) { /* swap width/height for resizer */ dest_width = d_image->tile[dst_tile].height; @@ -1615,7 +1634,7 @@ static bool ic_settings_changed(struct ipu_image_convert_ctx *ctx) } /* hold irqlock when calling */ -static irqreturn_t do_irq(struct ipu_image_convert_run *run) +static irqreturn_t do_tile_complete(struct ipu_image_convert_run *run) { struct ipu_image_convert_ctx *ctx = run->ctx; struct ipu_image_convert_chan *chan = ctx->chan; @@ -1700,6 +1719,7 @@ static irqreturn_t do_irq(struct ipu_image_convert_run *run) ctx->cur_buf_num ^= 1; } + ctx->eof_mask = 0; /* clear EOF irq mask for next tile */ ctx->next_tile++; return IRQ_HANDLED; done: @@ -1709,45 +1729,15 @@ done: return IRQ_WAKE_THREAD; } -static irqreturn_t norotate_irq(int irq, void *data) -{ - struct ipu_image_convert_chan *chan = data; - struct ipu_image_convert_ctx *ctx; - struct ipu_image_convert_run *run; - unsigned long flags; - irqreturn_t ret; - - spin_lock_irqsave(&chan->irqlock, flags); - - /* get current run and its context */ - run = chan->current_run; - if (!run) { - ret = IRQ_NONE; - goto out; - } - - ctx = run->ctx; - 
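
The ipu-image-convert rework around this point collapses the separate rotate/no-rotate interrupt handlers into one handler that records each end-of-frame interrupt in ctx->eof_mask and only moves on to the next tile once every expected channel has signalled. Reduced to a sketch (hypothetical names, kernel context assumed), the completion test is just a bitmask comparison:

	enum eof_bit {
		EOF_IN      = 1 << 0,
		EOF_OUT     = 1 << 1,
		EOF_ROT_IN  = 1 << 2,
		EOF_ROT_OUT = 1 << 3,
	};

	/* record one EOF event; true when the tile has seen all it waits for */
	static bool tile_done(unsigned int *eof_mask, enum eof_bit event,
			      bool rotating)
	{
		const unsigned int want = rotating ?
			(EOF_IN | EOF_OUT | EOF_ROT_IN | EOF_ROT_OUT) :
			(EOF_IN | EOF_OUT);

		*eof_mask |= event;
		return (*eof_mask & want) == want;
	}

The mask is cleared when a tile conversion starts and again after each completed tile, which is what the ctx->eof_mask = 0 assignments in the hunks below implement.
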
- if (ipu_rot_mode_is_irt(ctx->rot_mode)) { - /* this is a rotation operation, just ignore */ - spin_unlock_irqrestore(&chan->irqlock, flags); - return IRQ_HANDLED; - } - - ret = do_irq(run); -out: - spin_unlock_irqrestore(&chan->irqlock, flags); - return ret; -} - -static irqreturn_t rotate_irq(int irq, void *data) +static irqreturn_t eof_irq(int irq, void *data) { struct ipu_image_convert_chan *chan = data; struct ipu_image_convert_priv *priv = chan->priv; struct ipu_image_convert_ctx *ctx; struct ipu_image_convert_run *run; + irqreturn_t ret = IRQ_HANDLED; + bool tile_complete = false; unsigned long flags; - irqreturn_t ret; spin_lock_irqsave(&chan->irqlock, flags); @@ -1760,14 +1750,33 @@ static irqreturn_t rotate_irq(int irq, void *data) ctx = run->ctx; - if (!ipu_rot_mode_is_irt(ctx->rot_mode)) { - /* this was NOT a rotation operation, shouldn't happen */ - dev_err(priv->ipu->dev, "Unexpected rotation interrupt\n"); - spin_unlock_irqrestore(&chan->irqlock, flags); - return IRQ_HANDLED; + if (irq == chan->in_eof_irq) { + ctx->eof_mask |= EOF_IRQ_IN; + } else if (irq == chan->out_eof_irq) { + ctx->eof_mask |= EOF_IRQ_OUT; + } else if (irq == chan->rot_in_eof_irq || + irq == chan->rot_out_eof_irq) { + if (!ipu_rot_mode_is_irt(ctx->rot_mode)) { + /* this was NOT a rotation op, shouldn't happen */ + dev_err(priv->ipu->dev, + "Unexpected rotation interrupt\n"); + goto out; + } + ctx->eof_mask |= (irq == chan->rot_in_eof_irq) ? + EOF_IRQ_ROT_IN : EOF_IRQ_ROT_OUT; + } else { + dev_err(priv->ipu->dev, "Received unknown irq %d\n", irq); + ret = IRQ_NONE; + goto out; } - ret = do_irq(run); + if (ipu_rot_mode_is_irt(ctx->rot_mode)) + tile_complete = (ctx->eof_mask == EOF_IRQ_ROT_COMPLETE); + else + tile_complete = (ctx->eof_mask == EOF_IRQ_COMPLETE); + + if (tile_complete) + ret = do_tile_complete(run); out: spin_unlock_irqrestore(&chan->irqlock, flags); return ret; @@ -1801,6 +1810,10 @@ static void force_abort(struct ipu_image_convert_ctx *ctx) static void release_ipu_resources(struct ipu_image_convert_chan *chan) { + if (chan->in_eof_irq >= 0) + free_irq(chan->in_eof_irq, chan); + if (chan->rot_in_eof_irq >= 0) + free_irq(chan->rot_in_eof_irq, chan); if (chan->out_eof_irq >= 0) free_irq(chan->out_eof_irq, chan); if (chan->rot_out_eof_irq >= 0) @@ -1819,7 +1832,27 @@ static void release_ipu_resources(struct ipu_image_convert_chan *chan) chan->in_chan = chan->out_chan = chan->rotation_in_chan = chan->rotation_out_chan = NULL; - chan->out_eof_irq = chan->rot_out_eof_irq = -1; + chan->in_eof_irq = -1; + chan->rot_in_eof_irq = -1; + chan->out_eof_irq = -1; + chan->rot_out_eof_irq = -1; +} + +static int get_eof_irq(struct ipu_image_convert_chan *chan, + struct ipuv3_channel *channel) +{ + struct ipu_image_convert_priv *priv = chan->priv; + int ret, irq; + + irq = ipu_idmac_channel_irq(priv->ipu, channel, IPU_IRQ_EOF); + + ret = request_threaded_irq(irq, eof_irq, do_bh, 0, "ipu-ic", chan); + if (ret < 0) { + dev_err(priv->ipu->dev, "could not acquire irq %d\n", irq); + return ret; + } + + return irq; } static int get_ipu_resources(struct ipu_image_convert_chan *chan) @@ -1855,31 +1888,33 @@ static int get_ipu_resources(struct ipu_image_convert_chan *chan) } /* acquire the EOF interrupts */ - chan->out_eof_irq = ipu_idmac_channel_irq(priv->ipu, - chan->out_chan, - IPU_IRQ_EOF); - - ret = request_threaded_irq(chan->out_eof_irq, norotate_irq, do_bh, - 0, "ipu-ic", chan); + ret = get_eof_irq(chan, chan->in_chan); + if (ret < 0) { + chan->in_eof_irq = -1; + goto err; + } + chan->in_eof_irq = ret; + + ret 
= get_eof_irq(chan, chan->rotation_in_chan); + if (ret < 0) { + chan->rot_in_eof_irq = -1; + goto err; + } + chan->rot_in_eof_irq = ret; + + ret = get_eof_irq(chan, chan->out_chan); if (ret < 0) { - dev_err(priv->ipu->dev, "could not acquire irq %d\n", - chan->out_eof_irq); chan->out_eof_irq = -1; goto err; } + chan->out_eof_irq = ret; - chan->rot_out_eof_irq = ipu_idmac_channel_irq(priv->ipu, - chan->rotation_out_chan, - IPU_IRQ_EOF); - - ret = request_threaded_irq(chan->rot_out_eof_irq, rotate_irq, do_bh, - 0, "ipu-ic", chan); + ret = get_eof_irq(chan, chan->rotation_out_chan); if (ret < 0) { - dev_err(priv->ipu->dev, "could not acquire irq %d\n", - chan->rot_out_eof_irq); chan->rot_out_eof_irq = -1; goto err; } + chan->rot_out_eof_irq = ret; return 0; err: @@ -2458,6 +2493,8 @@ int ipu_image_convert_init(struct ipu_soc *ipu, struct device *dev) chan->ic_task = i; chan->priv = priv; chan->dma_ch = &image_convert_dma_chan[i]; + chan->in_eof_irq = -1; + chan->rot_in_eof_irq = -1; chan->out_eof_irq = -1; chan->rot_out_eof_irq = -1; diff --git a/drivers/hid/hid-alps.c b/drivers/hid/hid-alps.c index b2ad319a74b9..d33f5abc8f64 100644 --- a/drivers/hid/hid-alps.c +++ b/drivers/hid/hid-alps.c @@ -25,6 +25,7 @@ #define U1_MOUSE_REPORT_ID 0x01 /* Mouse data ReportID */ #define U1_ABSOLUTE_REPORT_ID 0x03 /* Absolute data ReportID */ +#define U1_ABSOLUTE_REPORT_ID_SECD 0x02 /* FW-PTP Absolute data ReportID */ #define U1_FEATURE_REPORT_ID 0x05 /* Feature ReportID */ #define U1_SP_ABSOLUTE_REPORT_ID 0x06 /* Feature ReportID */ @@ -368,6 +369,7 @@ static int u1_raw_event(struct alps_dev *hdata, u8 *data, int size) case U1_FEATURE_REPORT_ID: break; case U1_ABSOLUTE_REPORT_ID: + case U1_ABSOLUTE_REPORT_ID_SECD: for (i = 0; i < hdata->max_fingers; i++) { u8 *contact = &data[i * 5]; diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c index d732d1d10caf..6909c045fece 100644 --- a/drivers/hid/hid-apple.c +++ b/drivers/hid/hid-apple.c @@ -54,6 +54,7 @@ MODULE_PARM_DESC(swap_opt_cmd, "Swap the Option (\"Alt\") and Command (\"Flag\") struct apple_sc { unsigned long quirks; unsigned int fn_on; + unsigned int fn_found; DECLARE_BITMAP(pressed_numlock, KEY_CNT); }; @@ -339,12 +340,15 @@ static int apple_input_mapping(struct hid_device *hdev, struct hid_input *hi, struct hid_field *field, struct hid_usage *usage, unsigned long **bit, int *max) { + struct apple_sc *asc = hid_get_drvdata(hdev); + if (usage->hid == (HID_UP_CUSTOM | 0x0003) || usage->hid == (HID_UP_MSVENDOR | 0x0003) || usage->hid == (HID_UP_HPVENDOR2 | 0x0003)) { /* The fn key on Apple USB keyboards */ set_bit(EV_REP, hi->input->evbit); hid_map_usage_clear(hi, usage, bit, max, EV_KEY, KEY_FN); + asc->fn_found = true; apple_setup_input(hi->input); return 1; } @@ -371,6 +375,19 @@ static int apple_input_mapped(struct hid_device *hdev, struct hid_input *hi, return 0; } +static int apple_input_configured(struct hid_device *hdev, + struct hid_input *hidinput) +{ + struct apple_sc *asc = hid_get_drvdata(hdev); + + if ((asc->quirks & APPLE_HAS_FN) && !asc->fn_found) { + hid_info(hdev, "Fn key not found (Apple Wireless Keyboard clone?), disabling Fn key handling\n"); + asc->quirks = 0; + } + + return 0; +} + static int apple_probe(struct hid_device *hdev, const struct hid_device_id *id) { @@ -585,6 +602,7 @@ static struct hid_driver apple_driver = { .event = apple_event, .input_mapping = apple_input_mapping, .input_mapped = apple_input_mapped, + .input_configured = apple_input_configured, }; module_hid_driver(apple_driver); diff --git 
a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index 359616e3efbb..d2ecc9c45255 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -1597,6 +1597,17 @@ static void hid_output_field(const struct hid_device *hid, } } +/* + * Compute the size of a report. + */ +static size_t hid_compute_report_size(struct hid_report *report) +{ + if (report->size) + return ((report->size - 1) >> 3) + 1; + + return 0; +} + /* * Create a report. 'data' has to be allocated using * hid_alloc_report_buf() so that it has proper size. @@ -1609,7 +1620,7 @@ void hid_output_report(struct hid_report *report, __u8 *data) if (report->id > 0) *data++ = report->id; - memset(data, 0, ((report->size - 1) >> 3) + 1); + memset(data, 0, hid_compute_report_size(report)); for (n = 0; n < report->maxfield; n++) hid_output_field(report->device, report->field[n], data); } @@ -1739,7 +1750,7 @@ int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size, csize--; } - rsize = ((report->size - 1) >> 3) + 1; + rsize = hid_compute_report_size(report); if (report_enum->numbered && rsize >= HID_MAX_BUFFER_SIZE) rsize = HID_MAX_BUFFER_SIZE - 1; diff --git a/drivers/hid/hid-elan.c b/drivers/hid/hid-elan.c index 45c4f888b7c4..dae193749d44 100644 --- a/drivers/hid/hid-elan.c +++ b/drivers/hid/hid-elan.c @@ -188,6 +188,7 @@ static int elan_input_configured(struct hid_device *hdev, struct hid_input *hi) ret = input_mt_init_slots(input, ELAN_MAX_FINGERS, INPUT_MT_POINTER); if (ret) { hid_err(hdev, "Failed to init elan MT slots: %d\n", ret); + input_free_device(input); return ret; } @@ -198,6 +199,7 @@ static int elan_input_configured(struct hid_device *hdev, struct hid_input *hi) if (ret) { hid_err(hdev, "Failed to register elan input device: %d\n", ret); + input_mt_destroy_slots(input); input_free_device(input); return ret; } diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 13b7222ef2c9..e03a4d794240 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -620,6 +620,7 @@ #define USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081 0xa081 #define USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A0C2 0xa0c2 #define USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD_A096 0xa096 +#define USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD_A293 0xa293 #define USB_VENDOR_ID_IMATION 0x0718 #define USB_DEVICE_ID_DISC_STAKKA 0xd000 @@ -729,6 +730,9 @@ #define USB_DEVICE_ID_LENOVO_X1_TAB 0x60a3 #define USB_DEVICE_ID_LENOVO_X1_TAB3 0x60b5 #define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_608D 0x608d +#define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_6019 0x6019 +#define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_602E 0x602e +#define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_6093 0x6093 #define USB_VENDOR_ID_LG 0x1fd2 #define USB_DEVICE_ID_LG_MULTITOUCH 0x0064 @@ -770,6 +774,7 @@ #define USB_DEVICE_ID_LOGITECH_G27_WHEEL 0xc29b #define USB_DEVICE_ID_LOGITECH_WII_WHEEL 0xc29c #define USB_DEVICE_ID_LOGITECH_ELITE_KBD 0xc30a +#define USB_DEVICE_ID_LOGITECH_GROUP_AUDIO 0x0882 #define USB_DEVICE_ID_S510_RECEIVER 0xc50c #define USB_DEVICE_ID_S510_RECEIVER_2 0xc517 #define USB_DEVICE_ID_LOGITECH_CORDLESS_DESKTOP_LX500 0xc512 @@ -841,6 +846,7 @@ #define USB_DEVICE_ID_MS_POWER_COVER 0x07da #define USB_DEVICE_ID_MS_XBOX_ONE_S_CONTROLLER 0x02fd #define USB_DEVICE_ID_MS_PIXART_MOUSE 0x00cb +#define USB_DEVICE_ID_8BITDO_SN30_PRO_PLUS 0x02e0 #define USB_VENDOR_ID_MOJO 0x8282 #define USB_DEVICE_ID_RETRO_ADAPTER 0x3201 @@ -995,6 +1001,8 @@ #define USB_DEVICE_ID_ROCCAT_RYOS_MK_PRO 0x3232 #define USB_DEVICE_ID_ROCCAT_SAVU 0x2d5a +#define USB_VENDOR_ID_SAI 0x17dd + #define USB_VENDOR_ID_SAITEK 
0x06a3 #define USB_DEVICE_ID_SAITEK_RUMBLEPAD 0xff17 #define USB_DEVICE_ID_SAITEK_PS1000 0x0621 @@ -1004,6 +1012,8 @@ #define USB_DEVICE_ID_SAITEK_RAT9 0x0cfa #define USB_DEVICE_ID_SAITEK_MMO7 0x0cd0 #define USB_DEVICE_ID_SAITEK_X52 0x075c +#define USB_DEVICE_ID_SAITEK_X52_2 0x0255 +#define USB_DEVICE_ID_SAITEK_X52_PRO 0x0762 #define USB_VENDOR_ID_SAMSUNG 0x0419 #define USB_DEVICE_ID_SAMSUNG_IR_REMOTE 0x0001 @@ -1147,6 +1157,9 @@ #define USB_DEVICE_ID_TPV_OPTICAL_TOUCHSCREEN_8882 0x8882 #define USB_DEVICE_ID_TPV_OPTICAL_TOUCHSCREEN_8883 0x8883 +#define USB_VENDOR_ID_TRUST 0x145f +#define USB_DEVICE_ID_TRUST_PANORA_TABLET 0x0212 + #define USB_VENDOR_ID_TURBOX 0x062a #define USB_DEVICE_ID_TURBOX_KEYBOARD 0x0201 #define USB_DEVICE_ID_ASUS_MD_5110 0x5110 diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c index dea9cc65bf80..e3d475f4baf6 100644 --- a/drivers/hid/hid-input.c +++ b/drivers/hid/hid-input.c @@ -350,13 +350,13 @@ static int hidinput_query_battery_capacity(struct hid_device *dev) u8 *buf; int ret; - buf = kmalloc(2, GFP_KERNEL); + buf = kmalloc(4, GFP_KERNEL); if (!buf) return -ENOMEM; - ret = hid_hw_raw_request(dev, dev->battery_report_id, buf, 2, + ret = hid_hw_raw_request(dev, dev->battery_report_id, buf, 4, dev->battery_report_type, HID_REQ_GET_REPORT); - if (ret != 2) { + if (ret < 2) { kfree(buf); return -ENODATA; } @@ -1132,6 +1132,10 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel } mapped: + /* Mapping failed, bail out */ + if (!bit) + return; + if (device->driver->input_mapped && device->driver->input_mapped(device, hidinput, field, usage, &bit, &max) < 0) { diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c index cd9193078525..60cf80606282 100644 --- a/drivers/hid/hid-logitech-hidpp.c +++ b/drivers/hid/hid-logitech-hidpp.c @@ -2964,7 +2964,7 @@ static int hi_res_scroll_enable(struct hidpp_device *hidpp) multiplier = 1; hidpp->vertical_wheel_counter.wheel_multiplier = multiplier; - hid_info(hidpp->hid_dev, "multiplier = %d\n", multiplier); + hid_dbg(hidpp->hid_dev, "wheel multiplier = %d\n", multiplier); return 0; } diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c index 34138667f8af..abd86903875f 100644 --- a/drivers/hid/hid-magicmouse.c +++ b/drivers/hid/hid-magicmouse.c @@ -535,6 +535,12 @@ static int magicmouse_setup_input(struct input_dev *input, struct hid_device *hd __set_bit(MSC_RAW, input->mscbit); } + /* + * hid-input may mark device as using autorepeat, but neither + * the trackpad, nor the mouse actually want it. 
+ */ + __clear_bit(EV_REP, input->evbit); + return 0; } diff --git a/drivers/hid/hid-microsoft.c b/drivers/hid/hid-microsoft.c index 2d8b589201a4..8cb1ca1936e4 100644 --- a/drivers/hid/hid-microsoft.c +++ b/drivers/hid/hid-microsoft.c @@ -451,6 +451,8 @@ static const struct hid_device_id ms_devices[] = { .driver_data = MS_SURFACE_DIAL }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_XBOX_ONE_S_CONTROLLER), .driver_data = MS_QUIRK_FF }, + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_8BITDO_SN30_PRO_PLUS), + .driver_data = MS_QUIRK_FF }, { } }; MODULE_DEVICE_TABLE(hid, ms_devices); diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c index 39e4da7468e1..128d8f4319b9 100644 --- a/drivers/hid/hid-multitouch.c +++ b/drivers/hid/hid-multitouch.c @@ -864,6 +864,8 @@ static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi, code = BTN_0 + ((usage->hid - 1) & HID_USAGE); hid_map_usage(hi, usage, bit, max, EV_KEY, code); + if (!*bit) + return -1; input_set_capability(hi->input, EV_KEY, code); return 1; diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c index 90ec2390ef68..0440e2f6e8a3 100644 --- a/drivers/hid/hid-quirks.c +++ b/drivers/hid/hid-quirks.c @@ -88,6 +88,7 @@ static const struct hid_device_id hid_quirks[] = { { HID_USB_DEVICE(USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_FIGHTING), HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT }, { HID_USB_DEVICE(USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_FLYING), HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT }, { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD_A096), HID_QUIRK_NO_INIT_REPORTS }, + { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD_A293), HID_QUIRK_ALWAYS_POLL }, { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0A4A), HID_QUIRK_ALWAYS_POLL }, { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A), HID_QUIRK_ALWAYS_POLL }, { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE), HID_QUIRK_ALWAYS_POLL }, @@ -104,6 +105,9 @@ static const struct hid_device_id hid_quirks[] = { { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M406XE), HID_QUIRK_MULTI_INPUT }, { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_PIXART_USB_OPTICAL_MOUSE_ID2), HID_QUIRK_ALWAYS_POLL }, { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_608D), HID_QUIRK_ALWAYS_POLL }, + { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_6019), HID_QUIRK_ALWAYS_POLL }, + { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_602E), HID_QUIRK_ALWAYS_POLL }, + { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_6093), HID_QUIRK_ALWAYS_POLL }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_C007), HID_QUIRK_ALWAYS_POLL }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_C077), HID_QUIRK_ALWAYS_POLL }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_KEYBOARD_G710_PLUS), HID_QUIRK_NOGET }, @@ -146,6 +150,8 @@ static const struct hid_device_id hid_quirks[] = { { HID_USB_DEVICE(USB_VENDOR_ID_RETROUSB, USB_DEVICE_ID_RETROUSB_SNES_RETROPORT), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE }, { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RUMBLEPAD), HID_QUIRK_BADPAD }, { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_X52), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE }, + { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, 
USB_DEVICE_ID_SAITEK_X52_2), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE }, + { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_X52_PRO), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE }, { HID_USB_DEVICE(USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD2), HID_QUIRK_NO_INIT_REPORTS }, { HID_USB_DEVICE(USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD), HID_QUIRK_NO_INIT_REPORTS }, { HID_USB_DEVICE(USB_VENDOR_ID_SENNHEISER, USB_DEVICE_ID_SENNHEISER_BTD500USB), HID_QUIRK_NOGET }, @@ -168,6 +174,7 @@ static const struct hid_device_id hid_quirks[] = { { HID_USB_DEVICE(USB_VENDOR_ID_TOUCHPACK, USB_DEVICE_ID_TOUCHPACK_RTS), HID_QUIRK_MULTI_INPUT }, { HID_USB_DEVICE(USB_VENDOR_ID_TPV, USB_DEVICE_ID_TPV_OPTICAL_TOUCHSCREEN_8882), HID_QUIRK_NOGET }, { HID_USB_DEVICE(USB_VENDOR_ID_TPV, USB_DEVICE_ID_TPV_OPTICAL_TOUCHSCREEN_8883), HID_QUIRK_NOGET }, + { HID_USB_DEVICE(USB_VENDOR_ID_TRUST, USB_DEVICE_ID_TRUST_PANORA_TABLET), HID_QUIRK_MULTI_INPUT | HID_QUIRK_HIDINPUT_FORCE }, { HID_USB_DEVICE(USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_KEYBOARD), HID_QUIRK_NOGET }, { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_KNA5), HID_QUIRK_MULTI_INPUT }, { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_TWA60), HID_QUIRK_MULTI_INPUT }, @@ -177,6 +184,7 @@ static const struct hid_device_id hid_quirks[] = { { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD2, USB_DEVICE_ID_SMARTJOY_DUAL_PLUS), HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT }, { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_QUAD_USB_JOYPAD), HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT }, { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_XIN_MO_DUAL_ARCADE), HID_QUIRK_MULTI_INPUT }, + { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_GROUP_AUDIO), HID_QUIRK_NOGET }, { 0 } }; @@ -399,9 +407,6 @@ static const struct hid_device_id hid_have_special_driver[] = { { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081) }, { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A0C2) }, #endif -#if IS_ENABLED(CONFIG_HID_ITE) - { HID_USB_DEVICE(USB_VENDOR_ID_ITE, USB_DEVICE_ID_ITE8595) }, -#endif #if IS_ENABLED(CONFIG_HID_ICADE) { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ION, USB_DEVICE_ID_ICADE) }, #endif @@ -841,6 +846,7 @@ static const struct hid_device_id hid_ignore_list[] = { { HID_USB_DEVICE(USB_VENDOR_ID_PETZL, USB_DEVICE_ID_PETZL_HEADLAMP) }, { HID_USB_DEVICE(USB_VENDOR_ID_PHILIPS, USB_DEVICE_ID_PHILIPS_IEEE802154_DONGLE) }, { HID_USB_DEVICE(USB_VENDOR_ID_POWERCOM, USB_DEVICE_ID_POWERCOM_UPS) }, + { HID_USB_DEVICE(USB_VENDOR_ID_SAI, USB_DEVICE_ID_CYPRESS_HIDCOM) }, #if IS_ENABLED(CONFIG_MOUSE_SYNAPTICS_USB) { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_TP) }, { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_INT_TP) }, diff --git a/drivers/hid/hid-steam.c b/drivers/hid/hid-steam.c index 6286204d4c56..a3b151b29bd7 100644 --- a/drivers/hid/hid-steam.c +++ b/drivers/hid/hid-steam.c @@ -526,7 +526,8 @@ static int steam_register(struct steam_device *steam) steam_battery_register(steam); mutex_lock(&steam_devices_lock); - list_add(&steam->list, &steam_devices); + if (list_empty(&steam->list)) + list_add(&steam->list, &steam_devices); mutex_unlock(&steam_devices_lock); } @@ -552,7 +553,7 @@ static void steam_unregister(struct steam_device *steam) hid_info(steam->hdev, "Steam Controller '%s' disconnected", steam->serial_no); mutex_lock(&steam_devices_lock); - list_del(&steam->list); + list_del_init(&steam->list); 
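The hid-steam change here switches list_del() to list_del_init() and (just below) initializes the node in probe with INIT_LIST_HEAD(), so register/unregister can run repeatedly and list_empty() stays meaningful on an unlinked node. A minimal, self-contained userspace sketch of that idiom follows; it is a hand-rolled stand-in for <linux/list.h>, not the kernel implementation, and the "register"/"unregister" steps are only illustrative:

    #include <assert.h>
    #include <stdio.h>

    struct list_head { struct list_head *next, *prev; };

    static void INIT_LIST_HEAD(struct list_head *h) { h->next = h; h->prev = h; }

    static int list_empty(const struct list_head *h) { return h->next == h; }

    static void list_add(struct list_head *n, struct list_head *h)
    {
        n->next = h->next;
        n->prev = h;
        h->next->prev = n;
        h->next = n;
    }

    /* Unlink and re-initialize, so list_empty(n) is true afterwards. */
    static void list_del_init(struct list_head *n)
    {
        n->prev->next = n->next;
        n->next->prev = n->prev;
        INIT_LIST_HEAD(n);
    }

    int main(void)
    {
        struct list_head devices, node;

        INIT_LIST_HEAD(&devices);
        INIT_LIST_HEAD(&node);          /* as the probe() hunk below does */

        /* "register": only link the node if it is not already linked */
        if (list_empty(&node))
            list_add(&node, &devices);

        /* "unregister": node is left in a known, re-registerable state */
        list_del_init(&node);
        assert(list_empty(&node));

        /* a second register after unregister is now safe */
        if (list_empty(&node))
            list_add(&node, &devices);

        printf("node linked again: %s\n", list_empty(&devices) ? "no" : "yes");
        return 0;
    }

With a plain list_del() the unlinked node would keep stale next/prev pointers, and the list_empty() guard in the register path could not tell linked from unlinked.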
mutex_unlock(&steam_devices_lock); steam->serial_no[0] = 0; } @@ -738,6 +739,7 @@ static int steam_probe(struct hid_device *hdev, mutex_init(&steam->mutex); steam->quirks = id->driver_data; INIT_WORK(&steam->work_connect, steam_work_connect_cb); + INIT_LIST_HEAD(&steam->list); steam->client_hdev = steam_create_client_hid(hdev); if (IS_ERR(steam->client_hdev)) { diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c index b525b2715e07..592176aff027 100644 --- a/drivers/hid/i2c-hid/i2c-hid-core.c +++ b/drivers/hid/i2c-hid/i2c-hid-core.c @@ -422,6 +422,19 @@ static int i2c_hid_set_power(struct i2c_client *client, int power_state) dev_err(&client->dev, "failed to change power setting.\n"); set_pwr_exit: + + /* + * The HID over I2C specification states that if a DEVICE needs time + * after the PWR_ON request, it should utilise CLOCK stretching. + * However, it has been observered that the Windows driver provides a + * 1ms sleep between the PWR_ON and RESET requests. + * According to Goodix Windows even waits 60 ms after (other?) + * PWR_ON requests. Testing has confirmed that several devices + * will not work properly without a delay after a PWR_ON request. + */ + if (!ret && power_state == I2C_HID_PWR_ON) + msleep(60); + return ret; } @@ -443,15 +456,6 @@ static int i2c_hid_hwreset(struct i2c_client *client) if (ret) goto out_unlock; - /* - * The HID over I2C specification states that if a DEVICE needs time - * after the PWR_ON request, it should utilise CLOCK stretching. - * However, it has been observered that the Windows driver provides a - * 1ms sleep between the PWR_ON and RESET requests and that some devices - * rely on this. - */ - usleep_range(1000, 5000); - i2c_hid_dbg(ihid, "resetting...\n"); ret = i2c_hid_command(client, &hid_reset_cmd, NULL, 0); diff --git a/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c b/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c index ec142bc8c1da..35f3bfc3e6f5 100644 --- a/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c +++ b/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c @@ -373,6 +373,14 @@ static const struct dmi_system_id i2c_hid_dmi_desc_override_table[] = { }, .driver_data = (void *)&sipodev_desc }, + { + .ident = "Mediacom FlexBook edge 13", + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "MEDIACOM"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "FlexBook_edge13-M-FBE13"), + }, + .driver_data = (void *)&sipodev_desc + }, { .ident = "Odys Winbook 13", .matches = { diff --git a/drivers/hid/intel-ish-hid/ishtp-fw-loader.c b/drivers/hid/intel-ish-hid/ishtp-fw-loader.c index aa2dbed30fc3..6cf59fd26ad7 100644 --- a/drivers/hid/intel-ish-hid/ishtp-fw-loader.c +++ b/drivers/hid/intel-ish-hid/ishtp-fw-loader.c @@ -480,6 +480,7 @@ static int ish_query_loader_prop(struct ishtp_cl_data *client_data, sizeof(ldr_xfer_query_resp)); if (rv < 0) { client_data->flag_retry = true; + *fw_info = (struct shim_fw_info){}; return rv; } @@ -489,6 +490,7 @@ static int ish_query_loader_prop(struct ishtp_cl_data *client_data, "data size %d is not equal to size of loader_xfer_query_response %zu\n", rv, sizeof(struct loader_xfer_query_response)); client_data->flag_retry = true; + *fw_info = (struct shim_fw_info){}; return -EMSGSIZE; } diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c index 35b1fa6d962e..4711fb191a07 100644 --- a/drivers/hid/usbhid/hiddev.c +++ b/drivers/hid/usbhid/hiddev.c @@ -519,12 +519,16 @@ static noinline int hiddev_ioctl_usage(struct hiddev *hiddev, unsigned int cmd, switch (cmd) { case HIDIOCGUSAGE: + if (uref->usage_index >= 
field->report_count) + goto inval; uref->value = field->value[uref->usage_index]; if (copy_to_user(user_arg, uref, sizeof(*uref))) goto fault; goto goodreturn; case HIDIOCSUSAGE: + if (uref->usage_index >= field->report_count) + goto inval; field->value[uref->usage_index] = uref->value; goto goodreturn; diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c index c8296d5e74c3..452307c79e4b 100644 --- a/drivers/hv/channel_mgmt.c +++ b/drivers/hv/channel_mgmt.c @@ -769,7 +769,7 @@ static void vmbus_wait_for_unload(void) void *page_addr; struct hv_message *msg; struct vmbus_channel_message_header *hdr; - u32 message_type; + u32 message_type, i; /* * CHANNELMSG_UNLOAD_RESPONSE is always delivered to the CPU which was @@ -779,8 +779,11 @@ static void vmbus_wait_for_unload(void) * functional and vmbus_unload_response() will complete * vmbus_connection.unload_event. If not, the last thing we can do is * read message pages for all CPUs directly. + * + * Wait no more than 10 seconds so that the panic path can't get + * hung forever in case the response message isn't seen. */ - while (1) { + for (i = 0; i < 1000; i++) { if (completion_done(&vmbus_connection.unload_event)) break; @@ -1354,6 +1357,8 @@ channel_message_table[CHANNELMSG_COUNT] = { { CHANNELMSG_19, 0, NULL }, { CHANNELMSG_20, 0, NULL }, { CHANNELMSG_TL_CONNECT_REQUEST, 0, NULL }, + { CHANNELMSG_22, 0, NULL }, + { CHANNELMSG_TL_CONNECT_RESULT, 0, NULL }, }; /* @@ -1365,25 +1370,16 @@ void vmbus_onmessage(void *context) { struct hv_message *msg = context; struct vmbus_channel_message_header *hdr; - int size; hdr = (struct vmbus_channel_message_header *)msg->u.payload; - size = msg->header.payload_size; trace_vmbus_on_message(hdr); - if (hdr->msgtype >= CHANNELMSG_COUNT) { - pr_err("Received invalid channel message type %d size %d\n", - hdr->msgtype, size); - print_hex_dump_bytes("", DUMP_PREFIX_NONE, - (unsigned char *)msg->u.payload, size); - return; - } - - if (channel_message_table[hdr->msgtype].message_handler) - channel_message_table[hdr->msgtype].message_handler(hdr); - else - pr_err("Unhandled channel message type %d\n", hdr->msgtype); + /* + * vmbus_on_msg_dpc() makes sure the hdr->msgtype here can not go + * out of bound and the message_handler pointer can not be NULL. + */ + channel_message_table[hdr->msgtype].message_handler(hdr); } /* diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c index 6e4c015783ff..c90d79096e8c 100644 --- a/drivers/hv/connection.c +++ b/drivers/hv/connection.c @@ -67,7 +67,6 @@ static __u32 vmbus_get_next_version(__u32 current_version) int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo, u32 version) { int ret = 0; - unsigned int cur_cpu; struct vmbus_channel_initiate_contact *msg; unsigned long flags; @@ -100,24 +99,7 @@ int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo, u32 version) msg->monitor_page1 = virt_to_phys(vmbus_connection.monitor_pages[0]); msg->monitor_page2 = virt_to_phys(vmbus_connection.monitor_pages[1]); - /* - * We want all channel messages to be delivered on CPU 0. - * This has been the behavior pre-win8. This is not - * perf issue and having all channel messages delivered on CPU 0 - * would be ok. - * For post win8 hosts, we support receiving channel messagges on - * all the CPUs. This is needed for kexec to work correctly where - * the CPU attempting to connect may not be CPU 0. 
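The hiddev change above rejects a user-supplied usage_index that falls outside the report's field->report_count before it is ever used to index field->value[]. The same validate-before-index pattern, reduced to a standalone sketch (the array size and accessor names here are made up for illustration, not HID constants):

    #include <errno.h>
    #include <stdio.h>

    #define REPORT_COUNT 4          /* illustrative bound */

    static int field_value[REPORT_COUNT];

    /* Reject out-of-range indices before touching the array. */
    static int get_usage(unsigned int usage_index, int *out)
    {
        if (usage_index >= REPORT_COUNT)
            return -EINVAL;
        *out = field_value[usage_index];
        return 0;
    }

    static int set_usage(unsigned int usage_index, int value)
    {
        if (usage_index >= REPORT_COUNT)
            return -EINVAL;
        field_value[usage_index] = value;
        return 0;
    }

    int main(void)
    {
        int v, ret;

        printf("set in range: %d\n", set_usage(2, 42));      /* 0 */
        ret = get_usage(2, &v);
        printf("get in range: %d (v=%d)\n", ret, v);         /* 0, v=42 */
        printf("out of range: %d\n", get_usage(100, &v));    /* -EINVAL */
        return 0;
    }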
- */ - if (version >= VERSION_WIN8_1) { - cur_cpu = get_cpu(); - msg->target_vcpu = hv_cpu_number_to_vp_number(cur_cpu); - vmbus_connection.connect_cpu = cur_cpu; - put_cpu(); - } else { - msg->target_vcpu = 0; - vmbus_connection.connect_cpu = 0; - } + msg->target_vcpu = hv_cpu_number_to_vp_number(VMBUS_CONNECT_CPU); /* * Add to list before we send the request since we may diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c index fcc52797c169..d6320022af15 100644 --- a/drivers/hv/hv.c +++ b/drivers/hv/hv.c @@ -249,6 +249,13 @@ int hv_synic_cleanup(unsigned int cpu) bool channel_found = false; unsigned long flags; + /* + * Hyper-V does not provide a way to change the connect CPU once + * it is set; we must prevent the connect CPU from going offline. + */ + if (cpu == VMBUS_CONNECT_CPU) + return -EBUSY; + /* * Search for channels which are bound to the CPU we're about to * cleanup. In case we find one and vmbus is still connected we need to diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h index af9379a3bf89..cabcb66e7c5e 100644 --- a/drivers/hv/hyperv_vmbus.h +++ b/drivers/hv/hyperv_vmbus.h @@ -212,12 +212,13 @@ enum vmbus_connect_state { #define MAX_SIZE_CHANNEL_MESSAGE HV_MESSAGE_PAYLOAD_BYTE_COUNT -struct vmbus_connection { - /* - * CPU on which the initial host contact was made. - */ - int connect_cpu; +/* + * The CPU that Hyper-V will interrupt for VMBUS messages, such as + * CHANNELMSG_OFFERCHANNEL and CHANNELMSG_RESCIND_CHANNELOFFER. + */ +#define VMBUS_CONNECT_CPU 0 +struct vmbus_connection { u32 msg_conn_id; atomic_t offer_in_progress; diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c index 9cdd434bb340..2d2568dac2a6 100644 --- a/drivers/hv/vmbus_drv.c +++ b/drivers/hv/vmbus_drv.c @@ -1073,6 +1073,10 @@ void vmbus_on_msg_dpc(unsigned long data) } entry = &channel_message_table[hdr->msgtype]; + + if (!entry->message_handler) + goto msg_handled; + if (entry->handler_type == VMHT_BLOCKING) { ctx = kmalloc(sizeof(*ctx), GFP_ATOMIC); if (ctx == NULL) @@ -1092,14 +1096,28 @@ void vmbus_on_msg_dpc(unsigned long data) /* * If we are handling the rescind message; * schedule the work on the global work queue. + * + * The OFFER message and the RESCIND message should + * not be handled by the same serialized work queue, + * because the OFFER handler may call vmbus_open(), + * which tries to open the channel by sending an + * OPEN_CHANNEL message to the host and waits for + * the host's response; however, if the host has + * rescinded the channel before it receives the + * OPEN_CHANNEL message, the host just silently + * ignores the OPEN_CHANNEL message; as a result, + * the guest's OFFER handler hangs for ever, if we + * handle the RESCIND message in the same serialized + * work queue: the RESCIND handler can not start to + * run before the OFFER handler finishes. 
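The vmbus changes above concentrate validation at a single entry point: the dispatch path checks that the message type indexes the handler table and that the handler pointer is non-NULL, so the later call site can invoke the handler unconditionally. A small standalone sketch of that single-point-of-validation dispatch (hypothetical message types and handlers, not the VMBus protocol):

    #include <stdio.h>

    enum msg_type { MSG_HELLO, MSG_DATA, MSG_UNUSED, MSG_COUNT };

    struct msg { enum msg_type type; int payload; };

    static void handle_hello(const struct msg *m) { printf("hello %d\n", m->payload); }
    static void handle_data(const struct msg *m)  { printf("data %d\n", m->payload); }

    /* One handler slot per message type; unused slots stay NULL. */
    static void (*const handlers[MSG_COUNT])(const struct msg *) = {
        [MSG_HELLO] = handle_hello,
        [MSG_DATA]  = handle_data,
    };

    /* All validation happens here, before anything dereferences the table. */
    static void dispatch(const struct msg *m)
    {
        if (m->type >= MSG_COUNT || !handlers[m->type]) {
            fprintf(stderr, "dropping unknown message type %d\n", (int)m->type);
            return;
        }
        handlers[m->type](m);   /* safe to call without further checks */
    }

    int main(void)
    {
        struct msg msgs[] = {
            { MSG_HELLO, 1 },
            { MSG_DATA, 2 },
            { MSG_UNUSED, 3 },            /* slot exists, handler is NULL */
            { (enum msg_type)99, 4 },     /* out of range */
        };

        for (unsigned int i = 0; i < sizeof(msgs) / sizeof(msgs[0]); i++)
            dispatch(&msgs[i]);
        return 0;
    }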
*/ - schedule_work_on(vmbus_connection.connect_cpu, + schedule_work_on(VMBUS_CONNECT_CPU, &ctx->work); break; case CHANNELMSG_OFFERCHANNEL: atomic_inc(&vmbus_connection.offer_in_progress); - queue_work_on(vmbus_connection.connect_cpu, + queue_work_on(VMBUS_CONNECT_CPU, vmbus_connection.work_queue, &ctx->work); break; @@ -1146,7 +1164,7 @@ static void vmbus_force_channel_rescinded(struct vmbus_channel *channel) INIT_WORK(&ctx->work, vmbus_onmessage_work); - queue_work_on(vmbus_connection.connect_cpu, + queue_work_on(VMBUS_CONNECT_CPU, vmbus_connection.work_queue, &ctx->work); } @@ -2213,7 +2231,10 @@ static int vmbus_bus_suspend(struct device *dev) if (atomic_read(&vmbus_connection.nr_chan_close_on_suspend) > 0) wait_for_completion(&vmbus_connection.ready_for_suspend_event); - WARN_ON(atomic_read(&vmbus_connection.nr_chan_fixup_on_resume) != 0); + if (atomic_read(&vmbus_connection.nr_chan_fixup_on_resume) != 0) { + pr_err("Can not suspend due to a previous failed resuming\n"); + return -EBUSY; + } mutex_lock(&vmbus_connection.channel_mutex); @@ -2287,7 +2308,9 @@ static int vmbus_bus_resume(struct device *dev) vmbus_request_offers(); - wait_for_completion(&vmbus_connection.ready_for_resume_event); + if (wait_for_completion_timeout( + &vmbus_connection.ready_for_resume_event, 10 * HZ) == 0) + pr_err("Some vmbus device is missing after suspending?\n"); /* Reset the event for the next suspend. */ reinit_completion(&vmbus_connection.ready_for_suspend_event); diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c index 4cf25458f0b9..740ac0a1b726 100644 --- a/drivers/hwmon/acpi_power_meter.c +++ b/drivers/hwmon/acpi_power_meter.c @@ -883,7 +883,7 @@ static int acpi_power_meter_add(struct acpi_device *device) res = setup_attrs(resource); if (res) - goto exit_free; + goto exit_free_capability; resource->hwmon_dev = hwmon_device_register(&device->dev); if (IS_ERR(resource->hwmon_dev)) { @@ -896,6 +896,8 @@ static int acpi_power_meter_add(struct acpi_device *device) exit_remove: remove_attrs(resource); +exit_free_capability: + free_capabilities(resource); exit_free: kfree(resource); exit: diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c index 183ff3d25129..006bc07bcd30 100644 --- a/drivers/hwmon/applesmc.c +++ b/drivers/hwmon/applesmc.c @@ -748,15 +748,18 @@ static ssize_t applesmc_light_show(struct device *dev, } ret = applesmc_read_key(LIGHT_SENSOR_LEFT_KEY, buffer, data_length); + if (ret) + goto out; /* newer macbooks report a single 10-bit bigendian value */ if (data_length == 10) { left = be16_to_cpu(*(__be16 *)(buffer + 6)) >> 2; goto out; } left = buffer[2]; + + ret = applesmc_read_key(LIGHT_SENSOR_RIGHT_KEY, buffer, data_length); if (ret) goto out; - ret = applesmc_read_key(LIGHT_SENSOR_RIGHT_KEY, buffer, data_length); right = buffer[2]; out: @@ -805,12 +808,11 @@ static ssize_t applesmc_show_fan_speed(struct device *dev, to_index(attr)); ret = applesmc_read_key(newkey, buffer, 2); - speed = ((buffer[0] << 8 | buffer[1]) >> 2); - if (ret) return ret; - else - return snprintf(sysfsbuf, PAGE_SIZE, "%u\n", speed); + + speed = ((buffer[0] << 8 | buffer[1]) >> 2); + return snprintf(sysfsbuf, PAGE_SIZE, "%u\n", speed); } static ssize_t applesmc_store_fan_speed(struct device *dev, @@ -846,12 +848,11 @@ static ssize_t applesmc_show_fan_manual(struct device *dev, u8 buffer[2]; ret = applesmc_read_key(FANS_MANUAL, buffer, 2); - manual = ((buffer[0] << 8 | buffer[1]) >> to_index(attr)) & 0x01; - if (ret) return ret; - else - return snprintf(sysfsbuf, PAGE_SIZE, 
"%d\n", manual); + + manual = ((buffer[0] << 8 | buffer[1]) >> to_index(attr)) & 0x01; + return snprintf(sysfsbuf, PAGE_SIZE, "%d\n", manual); } static ssize_t applesmc_store_fan_manual(struct device *dev, @@ -867,10 +868,11 @@ static ssize_t applesmc_store_fan_manual(struct device *dev, return -EINVAL; ret = applesmc_read_key(FANS_MANUAL, buffer, 2); - val = (buffer[0] << 8 | buffer[1]); if (ret) goto out; + val = (buffer[0] << 8 | buffer[1]); + if (input) val = val | (0x01 << to_index(attr)); else @@ -946,13 +948,12 @@ static ssize_t applesmc_key_count_show(struct device *dev, u32 count; ret = applesmc_read_key(KEY_COUNT_KEY, buffer, 4); - count = ((u32)buffer[0]<<24) + ((u32)buffer[1]<<16) + - ((u32)buffer[2]<<8) + buffer[3]; - if (ret) return ret; - else - return snprintf(sysfsbuf, PAGE_SIZE, "%d\n", count); + + count = ((u32)buffer[0]<<24) + ((u32)buffer[1]<<16) + + ((u32)buffer[2]<<8) + buffer[3]; + return snprintf(sysfsbuf, PAGE_SIZE, "%d\n", count); } static ssize_t applesmc_key_at_index_read_show(struct device *dev, diff --git a/drivers/hwmon/aspeed-pwm-tacho.c b/drivers/hwmon/aspeed-pwm-tacho.c index 40c489be62ea..40f3139f1e02 100644 --- a/drivers/hwmon/aspeed-pwm-tacho.c +++ b/drivers/hwmon/aspeed-pwm-tacho.c @@ -851,6 +851,8 @@ static int aspeed_create_fan(struct device *dev, ret = of_property_read_u32(child, "reg", &pwm_port); if (ret) return ret; + if (pwm_port >= ARRAY_SIZE(pwm_port_params)) + return -EINVAL; aspeed_create_pwm_port(priv, (u8)pwm_port); ret = of_property_count_u8_elems(child, "cooling-levels"); diff --git a/drivers/hwmon/emc2103.c b/drivers/hwmon/emc2103.c index 491a570e8e50..924c02c1631d 100644 --- a/drivers/hwmon/emc2103.c +++ b/drivers/hwmon/emc2103.c @@ -443,7 +443,7 @@ static ssize_t pwm1_enable_store(struct device *dev, } result = read_u8_from_i2c(client, REG_FAN_CONF1, &conf_reg); - if (result) { + if (result < 0) { count = result; goto err; } diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c index 5c1dddde193c..f96fd8efb45a 100644 --- a/drivers/hwmon/k10temp.c +++ b/drivers/hwmon/k10temp.c @@ -349,6 +349,7 @@ static const struct pci_device_id k10temp_id_table[] = { { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) }, { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F3) }, { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F3) }, + { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M60H_DF_F3) }, { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F3) }, { PCI_VDEVICE(HYGON, PCI_DEVICE_ID_AMD_17H_DF_F3) }, {} diff --git a/drivers/hwmon/max6697.c b/drivers/hwmon/max6697.c index 743752a2467a..64122eb38060 100644 --- a/drivers/hwmon/max6697.c +++ b/drivers/hwmon/max6697.c @@ -38,8 +38,9 @@ static const u8 MAX6697_REG_CRIT[] = { * Map device tree / platform data register bit map to chip bit map. * Applies to alert register and over-temperature register. 
*/ -#define MAX6697_MAP_BITS(reg) ((((reg) & 0x7e) >> 1) | \ +#define MAX6697_ALERT_MAP_BITS(reg) ((((reg) & 0x7e) >> 1) | \ (((reg) & 0x01) << 6) | ((reg) & 0x80)) +#define MAX6697_OVERT_MAP_BITS(reg) (((reg) >> 1) | (((reg) & 0x01) << 7)) #define MAX6697_REG_STAT(n) (0x44 + (n)) @@ -562,12 +563,12 @@ static int max6697_init_chip(struct max6697_data *data, return ret; ret = i2c_smbus_write_byte_data(client, MAX6697_REG_ALERT_MASK, - MAX6697_MAP_BITS(pdata->alert_mask)); + MAX6697_ALERT_MAP_BITS(pdata->alert_mask)); if (ret < 0) return ret; ret = i2c_smbus_write_byte_data(client, MAX6697_REG_OVERT_MASK, - MAX6697_MAP_BITS(pdata->over_temperature_mask)); + MAX6697_OVERT_MAP_BITS(pdata->over_temperature_mask)); if (ret < 0) return ret; diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c index 7efa6bfef060..ba9b96973e80 100644 --- a/drivers/hwmon/nct6775.c +++ b/drivers/hwmon/nct6775.c @@ -786,13 +786,13 @@ static const char *const nct6798_temp_label[] = { "Agent1 Dimm1", "BYTE_TEMP0", "BYTE_TEMP1", - "", - "", + "PECI Agent 0 Calibration", /* undocumented */ + "PECI Agent 1 Calibration", /* undocumented */ "", "Virtual_TEMP" }; -#define NCT6798_TEMP_MASK 0x8fff0ffe +#define NCT6798_TEMP_MASK 0xbfff0ffe #define NCT6798_VIRT_TEMP_MASK 0x80000c00 /* NCT6102D/NCT6106D specific data */ diff --git a/drivers/hwmon/nct7904.c b/drivers/hwmon/nct7904.c index dfb122b5e1b7..b812b199e5e5 100644 --- a/drivers/hwmon/nct7904.c +++ b/drivers/hwmon/nct7904.c @@ -197,7 +197,7 @@ static int nct7904_read_fan(struct device *dev, u32 attr, int channel, if (ret < 0) return ret; cnt = ((ret & 0xff00) >> 3) | (ret & 0x1f); - if (cnt == 0x1fff) + if (cnt == 0 || cnt == 0x1fff) rpm = 0; else rpm = 1350000 / cnt; @@ -209,7 +209,7 @@ static int nct7904_read_fan(struct device *dev, u32 attr, int channel, if (ret < 0) return ret; cnt = ((ret & 0xff00) >> 3) | (ret & 0x1f); - if (cnt == 0x1fff) + if (cnt == 0 || cnt == 0x1fff) rpm = 0; else rpm = 1350000 / cnt; diff --git a/drivers/hwmon/pmbus/adm1275.c b/drivers/hwmon/pmbus/adm1275.c index 5caa37fbfc18..66b12e5ccbc6 100644 --- a/drivers/hwmon/pmbus/adm1275.c +++ b/drivers/hwmon/pmbus/adm1275.c @@ -454,6 +454,7 @@ MODULE_DEVICE_TABLE(i2c, adm1275_id); static int adm1275_probe(struct i2c_client *client, const struct i2c_device_id *id) { + s32 (*config_read_fn)(const struct i2c_client *client, u8 reg); u8 block_buffer[I2C_SMBUS_BLOCK_MAX + 1]; int config, device_config; int ret; @@ -499,11 +500,16 @@ static int adm1275_probe(struct i2c_client *client, "Device mismatch: Configured %s, detected %s\n", id->name, mid->name); - config = i2c_smbus_read_byte_data(client, ADM1275_PMON_CONFIG); + if (mid->driver_data == adm1272 || mid->driver_data == adm1278 || + mid->driver_data == adm1293 || mid->driver_data == adm1294) + config_read_fn = i2c_smbus_read_word_data; + else + config_read_fn = i2c_smbus_read_byte_data; + config = config_read_fn(client, ADM1275_PMON_CONFIG); if (config < 0) return config; - device_config = i2c_smbus_read_byte_data(client, ADM1275_DEVICE_CONFIG); + device_config = config_read_fn(client, ADM1275_DEVICE_CONFIG); if (device_config < 0) return device_config; diff --git a/drivers/hwmon/scmi-hwmon.c b/drivers/hwmon/scmi-hwmon.c index 8a7732c0bef3..7cd13a217c61 100644 --- a/drivers/hwmon/scmi-hwmon.c +++ b/drivers/hwmon/scmi-hwmon.c @@ -147,7 +147,7 @@ static enum hwmon_sensor_types scmi_types[] = { [ENERGY] = hwmon_energy, }; -static u32 hwmon_attributes[] = { +static u32 hwmon_attributes[hwmon_max] = { [hwmon_chip] = HWMON_C_REGISTER_TZ, 
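The max6697 hunk above splits the single MAX6697_MAP_BITS() macro into separate alert and over-temperature remappings. A tiny standalone program that applies both macros (copied verbatim from the patch) to an arbitrary example mask makes the differing bit layouts visible; the mask value is not taken from any real board file:

    #include <stdio.h>

    /* Copied from the patch: remap platform-data bit layout to chip layout. */
    #define MAX6697_ALERT_MAP_BITS(reg) ((((reg) & 0x7e) >> 1) | \
                                         (((reg) & 0x01) << 6) | ((reg) & 0x80))
    #define MAX6697_OVERT_MAP_BITS(reg) (((reg) >> 1) | (((reg) & 0x01) << 7))

    int main(void)
    {
        unsigned int mask = 0x05;    /* arbitrary example: bits 0 and 2 set */

        printf("alert mask 0x%02x -> 0x%02x\n", mask, MAX6697_ALERT_MAP_BITS(mask));
        printf("overt mask 0x%02x -> 0x%02x\n", mask, MAX6697_OVERT_MAP_BITS(mask));
        return 0;
    }

For 0x05 the alert mapping yields 0x42 while the over-temperature mapping yields 0x82, which is why the two registers can no longer share one macro.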
[hwmon_temp] = HWMON_T_INPUT | HWMON_T_LABEL, [hwmon_in] = HWMON_I_INPUT | HWMON_I_LABEL, diff --git a/drivers/hwtracing/coresight/coresight-tmc-etf.c b/drivers/hwtracing/coresight/coresight-tmc-etf.c index d0cc3985b72a..6375504ba8b0 100644 --- a/drivers/hwtracing/coresight/coresight-tmc-etf.c +++ b/drivers/hwtracing/coresight/coresight-tmc-etf.c @@ -596,13 +596,6 @@ int tmc_read_prepare_etb(struct tmc_drvdata *drvdata) goto out; } - /* There is no point in reading a TMC in HW FIFO mode */ - mode = readl_relaxed(drvdata->base + TMC_MODE); - if (mode != TMC_MODE_CIRCULAR_BUFFER) { - ret = -EINVAL; - goto out; - } - /* Don't interfere if operated from Perf */ if (drvdata->mode == CS_MODE_PERF) { ret = -EINVAL; @@ -616,8 +609,15 @@ int tmc_read_prepare_etb(struct tmc_drvdata *drvdata) } /* Disable the TMC if need be */ - if (drvdata->mode == CS_MODE_SYSFS) + if (drvdata->mode == CS_MODE_SYSFS) { + /* There is no point in reading a TMC in HW FIFO mode */ + mode = readl_relaxed(drvdata->base + TMC_MODE); + if (mode != TMC_MODE_CIRCULAR_BUFFER) { + ret = -EINVAL; + goto out; + } __tmc_etb_disable_hw(drvdata); + } drvdata->reading = true; out: @@ -639,15 +639,14 @@ int tmc_read_unprepare_etb(struct tmc_drvdata *drvdata) spin_lock_irqsave(&drvdata->spinlock, flags); - /* There is no point in reading a TMC in HW FIFO mode */ - mode = readl_relaxed(drvdata->base + TMC_MODE); - if (mode != TMC_MODE_CIRCULAR_BUFFER) { - spin_unlock_irqrestore(&drvdata->spinlock, flags); - return -EINVAL; - } - /* Re-enable the TMC if need be */ if (drvdata->mode == CS_MODE_SYSFS) { + /* There is no point in reading a TMC in HW FIFO mode */ + mode = readl_relaxed(drvdata->base + TMC_MODE); + if (mode != TMC_MODE_CIRCULAR_BUFFER) { + spin_unlock_irqrestore(&drvdata->spinlock, flags); + return -EINVAL; + } /* * The trace run will continue with the same allocated trace * buffer. As such zero-out the buffer so that we don't end diff --git a/drivers/hwtracing/intel_th/core.c b/drivers/hwtracing/intel_th/core.c index ca232ec565e8..c9ac3dc65113 100644 --- a/drivers/hwtracing/intel_th/core.c +++ b/drivers/hwtracing/intel_th/core.c @@ -1021,15 +1021,30 @@ int intel_th_set_output(struct intel_th_device *thdev, { struct intel_th_device *hub = to_intel_th_hub(thdev); struct intel_th_driver *hubdrv = to_intel_th_driver(hub->dev.driver); + int ret; /* In host mode, this is up to the external debugger, do nothing. */ if (hub->host_mode) return 0; - if (!hubdrv->set_output) - return -ENOTSUPP; + /* + * hub is instantiated together with the source device that + * calls here, so guaranteed to be present. 
+ */ + hubdrv = to_intel_th_driver(hub->dev.driver); + if (!hubdrv || !try_module_get(hubdrv->driver.owner)) + return -EINVAL; - return hubdrv->set_output(hub, master); + if (!hubdrv->set_output) { + ret = -ENOTSUPP; + goto out; + } + + ret = hubdrv->set_output(hub, master); + +out: + module_put(hubdrv->driver.owner); + return ret; } EXPORT_SYMBOL_GPL(intel_th_set_output); diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c index 86aa6a46bcba..0d26484d6795 100644 --- a/drivers/hwtracing/intel_th/pci.c +++ b/drivers/hwtracing/intel_th/pci.c @@ -229,11 +229,21 @@ static const struct pci_device_id intel_th_pci_id_table[] = { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa0a6), .driver_data = (kernel_ulong_t)&intel_th_2x, }, + { + /* Tiger Lake PCH-H */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x43a6), + .driver_data = (kernel_ulong_t)&intel_th_2x, + }, { /* Jasper Lake PCH */ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4da6), .driver_data = (kernel_ulong_t)&intel_th_2x, }, + { + /* Jasper Lake CPU */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4e29), + .driver_data = (kernel_ulong_t)&intel_th_2x, + }, { /* Elkhart Lake CPU */ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4529), @@ -244,6 +254,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4b26), .driver_data = (kernel_ulong_t)&intel_th_2x, }, + { + /* Emmitsburg PCH */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x1bcc), + .driver_data = (kernel_ulong_t)&intel_th_2x, + }, { 0 }, }; diff --git a/drivers/hwtracing/intel_th/sth.c b/drivers/hwtracing/intel_th/sth.c index 3a1f4e650378..a1529f571491 100644 --- a/drivers/hwtracing/intel_th/sth.c +++ b/drivers/hwtracing/intel_th/sth.c @@ -161,9 +161,7 @@ static int sth_stm_link(struct stm_data *stm_data, unsigned int master, { struct sth_device *sth = container_of(stm_data, struct sth_device, stm); - intel_th_set_output(to_intel_th_device(sth->dev), master); - - return 0; + return intel_th_set_output(to_intel_th_device(sth->dev), master); } static int intel_th_sw_init(struct sth_device *sth) diff --git a/drivers/i2c/algos/i2c-algo-pca.c b/drivers/i2c/algos/i2c-algo-pca.c index 5ac93f41bfec..1d3691a049b1 100644 --- a/drivers/i2c/algos/i2c-algo-pca.c +++ b/drivers/i2c/algos/i2c-algo-pca.c @@ -41,8 +41,22 @@ static void pca_reset(struct i2c_algo_pca_data *adap) pca_outw(adap, I2C_PCA_INDPTR, I2C_PCA_IPRESET); pca_outw(adap, I2C_PCA_IND, 0xA5); pca_outw(adap, I2C_PCA_IND, 0x5A); + + /* + * After a reset we need to re-apply any configuration + * (calculated in pca_init) to get the bus in a working state. 
+ */ + pca_outw(adap, I2C_PCA_INDPTR, I2C_PCA_IMODE); + pca_outw(adap, I2C_PCA_IND, adap->bus_settings.mode); + pca_outw(adap, I2C_PCA_INDPTR, I2C_PCA_ISCLL); + pca_outw(adap, I2C_PCA_IND, adap->bus_settings.tlow); + pca_outw(adap, I2C_PCA_INDPTR, I2C_PCA_ISCLH); + pca_outw(adap, I2C_PCA_IND, adap->bus_settings.thi); + + pca_set_con(adap, I2C_PCA_CON_ENSIO); } else { adap->reset_chip(adap->data); + pca_set_con(adap, I2C_PCA_CON_ENSIO | adap->bus_settings.clock_freq); } } @@ -314,7 +328,8 @@ static int pca_xfer(struct i2c_adapter *i2c_adap, DEB2("BUS ERROR - SDA Stuck low\n"); pca_reset(adap); goto out; - case 0x90: /* Bus error - SCL stuck low */ + case 0x78: /* Bus error - SCL stuck low (PCA9665) */ + case 0x90: /* Bus error - SCL stuck low (PCA9564) */ DEB2("BUS ERROR - SCL Stuck low\n"); pca_reset(adap); goto out; @@ -422,13 +437,14 @@ static int pca_init(struct i2c_adapter *adap) " Use the nominal frequency.\n", adap->name); } - pca_reset(pca_data); - clock = pca_clock(pca_data); printk(KERN_INFO "%s: Clock frequency is %dkHz\n", adap->name, freqs[clock]); - pca_set_con(pca_data, I2C_PCA_CON_ENSIO | clock); + /* Store settings as these will be needed when the PCA chip is reset */ + pca_data->bus_settings.clock_freq = clock; + + pca_reset(pca_data); } else { int clock; int mode; @@ -495,19 +511,15 @@ static int pca_init(struct i2c_adapter *adap) thi = tlow * min_thi / min_tlow; } + /* Store settings as these will be needed when the PCA chip is reset */ + pca_data->bus_settings.mode = mode; + pca_data->bus_settings.tlow = tlow; + pca_data->bus_settings.thi = thi; + pca_reset(pca_data); printk(KERN_INFO "%s: Clock frequency is %dHz\n", adap->name, clock * 100); - - pca_outw(pca_data, I2C_PCA_INDPTR, I2C_PCA_IMODE); - pca_outw(pca_data, I2C_PCA_IND, mode); - pca_outw(pca_data, I2C_PCA_INDPTR, I2C_PCA_ISCLL); - pca_outw(pca_data, I2C_PCA_IND, tlow); - pca_outw(pca_data, I2C_PCA_INDPTR, I2C_PCA_ISCLH); - pca_outw(pca_data, I2C_PCA_IND, thi); - - pca_set_con(pca_data, I2C_PCA_CON_ENSIO); } udelay(500); /* 500 us for oscillator to stabilise */ diff --git a/drivers/i2c/busses/i2c-bcm-iproc.c b/drivers/i2c/busses/i2c-bcm-iproc.c index 03475f179973..dd9661c11782 100644 --- a/drivers/i2c/busses/i2c-bcm-iproc.c +++ b/drivers/i2c/busses/i2c-bcm-iproc.c @@ -1037,7 +1037,7 @@ static int bcm_iproc_i2c_unreg_slave(struct i2c_client *slave) if (!iproc_i2c->slave) return -EINVAL; - iproc_i2c->slave = NULL; + disable_irq(iproc_i2c->irq); /* disable all slave interrupts */ tmp = iproc_i2c_rd_reg(iproc_i2c, IE_OFFSET); @@ -1050,6 +1050,17 @@ static int bcm_iproc_i2c_unreg_slave(struct i2c_client *slave) tmp &= ~BIT(S_CFG_EN_NIC_SMB_ADDR3_SHIFT); iproc_i2c_wr_reg(iproc_i2c, S_CFG_SMBUS_ADDR_OFFSET, tmp); + /* flush TX/RX FIFOs */ + tmp = (BIT(S_FIFO_RX_FLUSH_SHIFT) | BIT(S_FIFO_TX_FLUSH_SHIFT)); + iproc_i2c_wr_reg(iproc_i2c, S_FIFO_CTRL_OFFSET, tmp); + + /* clear all pending slave interrupts */ + iproc_i2c_wr_reg(iproc_i2c, IS_OFFSET, ISR_MASK_SLAVE); + + iproc_i2c->slave = NULL; + + enable_irq(iproc_i2c->irq); + return 0; } diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c index 9d71ce15db05..a51d3b795770 100644 --- a/drivers/i2c/busses/i2c-cadence.c +++ b/drivers/i2c/busses/i2c-cadence.c @@ -377,10 +377,8 @@ static void cdns_i2c_mrecv(struct cdns_i2c *id) * Check for the message size against FIFO depth and set the * 'hold bus' bit if it is greater than FIFO depth. 
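The i2c-algo-pca change above stores the clock and timing values computed at init time in a bus_settings structure so they can be written back whenever the controller is reset. A compact standalone sketch of that cache-and-replay pattern; the register writes are mocked, the field names mirror the patch, and the chip itself is hypothetical:

    #include <stdio.h>

    /* Mock register write: a real driver would poke the controller here. */
    static void reg_write(const char *reg, int val)
    {
        printf("  %-4s <- 0x%02x\n", reg, val);
    }

    /* Settings computed once at init and replayed after every reset. */
    struct bus_settings {
        int mode;
        int tlow;
        int thi;
    };

    static struct bus_settings bus_settings;

    static void chip_reset(void)
    {
        printf("chip reset, re-applying cached bus settings:\n");
        reg_write("MODE", bus_settings.mode);
        reg_write("SCLL", bus_settings.tlow);
        reg_write("SCLH", bus_settings.thi);
    }

    static void chip_init(int mode, int tlow, int thi)
    {
        /* Store first, so any later reset can restore the same state. */
        bus_settings.mode = mode;
        bus_settings.tlow = tlow;
        bus_settings.thi = thi;
        chip_reset();
    }

    int main(void)
    {
        chip_init(0x01, 0x9d, 0x86);
        /* e.g. a bus-error path can now simply call chip_reset() again */
        chip_reset();
        return 0;
    }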
*/ - if ((id->recv_count > CDNS_I2C_FIFO_DEPTH) || id->bus_hold_flag) + if (id->recv_count > CDNS_I2C_FIFO_DEPTH) ctrl_reg |= CDNS_I2C_CR_HOLD; - else - ctrl_reg = ctrl_reg & ~CDNS_I2C_CR_HOLD; cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET); @@ -437,11 +435,8 @@ static void cdns_i2c_msend(struct cdns_i2c *id) * Check for the message size against FIFO depth and set the * 'hold bus' bit if it is greater than FIFO depth. */ - if ((id->send_count > CDNS_I2C_FIFO_DEPTH) || id->bus_hold_flag) + if (id->send_count > CDNS_I2C_FIFO_DEPTH) ctrl_reg |= CDNS_I2C_CR_HOLD; - else - ctrl_reg = ctrl_reg & ~CDNS_I2C_CR_HOLD; - cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET); /* Clear the interrupts in interrupt status register. */ diff --git a/drivers/i2c/busses/i2c-eg20t.c b/drivers/i2c/busses/i2c-eg20t.c index bb810dee8fb5..73f139690e4e 100644 --- a/drivers/i2c/busses/i2c-eg20t.c +++ b/drivers/i2c/busses/i2c-eg20t.c @@ -180,6 +180,7 @@ static const struct pci_device_id pch_pcidev_id[] = { { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7831_I2C), 1, }, {0,} }; +MODULE_DEVICE_TABLE(pci, pch_pcidev_id); static irqreturn_t pch_i2c_handler(int irq, void *pData); diff --git a/drivers/i2c/busses/i2c-fsi.c b/drivers/i2c/busses/i2c-fsi.c index e0c256922d4f..977d6f524649 100644 --- a/drivers/i2c/busses/i2c-fsi.c +++ b/drivers/i2c/busses/i2c-fsi.c @@ -98,7 +98,7 @@ #define I2C_STAT_DAT_REQ BIT(25) #define I2C_STAT_CMD_COMP BIT(24) #define I2C_STAT_STOP_ERR BIT(23) -#define I2C_STAT_MAX_PORT GENMASK(19, 16) +#define I2C_STAT_MAX_PORT GENMASK(22, 16) #define I2C_STAT_ANY_INT BIT(15) #define I2C_STAT_SCL_IN BIT(11) #define I2C_STAT_SDA_IN BIT(10) diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c index 3ff6fbd79b12..9a80c3c7e8af 100644 --- a/drivers/i2c/busses/i2c-i801.c +++ b/drivers/i2c/busses/i2c-i801.c @@ -1688,6 +1688,16 @@ static inline int i801_acpi_probe(struct i801_priv *priv) { return 0; } static inline void i801_acpi_remove(struct i801_priv *priv) { } #endif +static unsigned char i801_setup_hstcfg(struct i801_priv *priv) +{ + unsigned char hstcfg = priv->original_hstcfg; + + hstcfg &= ~SMBHSTCFG_I2C_EN; /* SMBus timing */ + hstcfg |= SMBHSTCFG_HST_EN; + pci_write_config_byte(priv->pci_dev, SMBHSTCFG, hstcfg); + return hstcfg; +} + static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id) { unsigned char temp; @@ -1804,14 +1814,10 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id) return err; } - pci_read_config_byte(priv->pci_dev, SMBHSTCFG, &temp); - priv->original_hstcfg = temp; - temp &= ~SMBHSTCFG_I2C_EN; /* SMBus timing */ - if (!(temp & SMBHSTCFG_HST_EN)) { + pci_read_config_byte(priv->pci_dev, SMBHSTCFG, &priv->original_hstcfg); + temp = i801_setup_hstcfg(priv); + if (!(priv->original_hstcfg & SMBHSTCFG_HST_EN)) dev_info(&dev->dev, "Enabling SMBus device\n"); - temp |= SMBHSTCFG_HST_EN; - } - pci_write_config_byte(priv->pci_dev, SMBHSTCFG, temp); if (temp & SMBHSTCFG_SMB_SMI_EN) { dev_dbg(&dev->dev, "SMBus using interrupt SMI#\n"); @@ -1937,6 +1943,7 @@ static int i801_resume(struct device *dev) { struct i801_priv *priv = dev_get_drvdata(dev); + i801_setup_hstcfg(priv); i801_enable_host_notify(&priv->adapter); return 0; diff --git a/drivers/i2c/busses/i2c-icy.c b/drivers/i2c/busses/i2c-icy.c index 8382eb64b424..d6c17506dba4 100644 --- a/drivers/i2c/busses/i2c-icy.c +++ b/drivers/i2c/busses/i2c-icy.c @@ -43,6 +43,7 @@ #include #include +#include #include #include diff --git a/drivers/i2c/busses/i2c-mlxcpld.c b/drivers/i2c/busses/i2c-mlxcpld.c 
index 2fd717d8dd30..71d7bae2cbca 100644 --- a/drivers/i2c/busses/i2c-mlxcpld.c +++ b/drivers/i2c/busses/i2c-mlxcpld.c @@ -337,9 +337,9 @@ static int mlxcpld_i2c_wait_for_tc(struct mlxcpld_i2c_priv *priv) if (priv->smbus_block && (val & MLXCPLD_I2C_SMBUS_BLK_BIT)) { mlxcpld_i2c_read_comm(priv, MLXCPLD_LPCI2C_NUM_DAT_REG, &datalen, 1); - if (unlikely(datalen > (I2C_SMBUS_BLOCK_MAX + 1))) { + if (unlikely(datalen > I2C_SMBUS_BLOCK_MAX)) { dev_err(priv->dev, "Incorrect smbus block read message len\n"); - return -E2BIG; + return -EPROTO; } } else { datalen = priv->xfer.data_len; diff --git a/drivers/i2c/busses/i2c-mxs.c b/drivers/i2c/busses/i2c-mxs.c index 89224913f578..081a1169ecea 100644 --- a/drivers/i2c/busses/i2c-mxs.c +++ b/drivers/i2c/busses/i2c-mxs.c @@ -25,6 +25,7 @@ #include #include #include +#include #define DRIVER_NAME "mxs-i2c" @@ -200,7 +201,8 @@ static int mxs_i2c_dma_setup_xfer(struct i2c_adapter *adap, dma_map_sg(i2c->dev, &i2c->sg_io[0], 1, DMA_TO_DEVICE); desc = dmaengine_prep_slave_sg(i2c->dmach, &i2c->sg_io[0], 1, DMA_MEM_TO_DEV, - DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + DMA_PREP_INTERRUPT | + MXS_DMA_CTRL_WAIT4END); if (!desc) { dev_err(i2c->dev, "Failed to get DMA data write descriptor.\n"); @@ -228,7 +230,8 @@ static int mxs_i2c_dma_setup_xfer(struct i2c_adapter *adap, dma_map_sg(i2c->dev, &i2c->sg_io[1], 1, DMA_FROM_DEVICE); desc = dmaengine_prep_slave_sg(i2c->dmach, &i2c->sg_io[1], 1, DMA_DEV_TO_MEM, - DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + DMA_PREP_INTERRUPT | + MXS_DMA_CTRL_WAIT4END); if (!desc) { dev_err(i2c->dev, "Failed to get DMA data write descriptor.\n"); @@ -260,7 +263,8 @@ static int mxs_i2c_dma_setup_xfer(struct i2c_adapter *adap, dma_map_sg(i2c->dev, i2c->sg_io, 2, DMA_TO_DEVICE); desc = dmaengine_prep_slave_sg(i2c->dmach, i2c->sg_io, 2, DMA_MEM_TO_DEV, - DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + DMA_PREP_INTERRUPT | + MXS_DMA_CTRL_WAIT4END); if (!desc) { dev_err(i2c->dev, "Failed to get DMA data write descriptor.\n"); diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c index 30ded6422e7b..69740a4ff1db 100644 --- a/drivers/i2c/busses/i2c-piix4.c +++ b/drivers/i2c/busses/i2c-piix4.c @@ -977,7 +977,8 @@ static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id) } if (dev->vendor == PCI_VENDOR_ID_AMD && - dev->device == PCI_DEVICE_ID_AMD_HUDSON2_SMBUS) { + (dev->device == PCI_DEVICE_ID_AMD_HUDSON2_SMBUS || + dev->device == PCI_DEVICE_ID_AMD_KERNCZ_SMBUS)) { retval = piix4_setup_sb800(dev, id, 1); } diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c index 2c3c3d6935c0..d0c557c8d80f 100644 --- a/drivers/i2c/busses/i2c-pxa.c +++ b/drivers/i2c/busses/i2c-pxa.c @@ -312,11 +312,10 @@ static void i2c_pxa_scream_blue_murder(struct pxa_i2c *i2c, const char *why) dev_err(dev, "IBMR: %08x IDBR: %08x ICR: %08x ISR: %08x\n", readl(_IBMR(i2c)), readl(_IDBR(i2c)), readl(_ICR(i2c)), readl(_ISR(i2c))); - dev_dbg(dev, "log: "); + dev_err(dev, "log:"); for (i = 0; i < i2c->irqlogidx; i++) - pr_debug("[%08x:%08x] ", i2c->isrlog[i], i2c->icrlog[i]); - - pr_debug("\n"); + pr_cont(" [%03x:%05x]", i2c->isrlog[i], i2c->icrlog[i]); + pr_cont("\n"); } #else /* ifdef DEBUG */ @@ -706,11 +705,9 @@ static inline void i2c_pxa_stop_message(struct pxa_i2c *i2c) { u32 icr; - /* - * Clear the STOP and ACK flags - */ + /* Clear the START, STOP, ACK, TB and MA flags */ icr = readl(_ICR(i2c)); - icr &= ~(ICR_STOP | ICR_ACKNAK); + icr &= ~(ICR_START | ICR_STOP | ICR_ACKNAK | ICR_TB | ICR_MA); writel(icr, _ICR(i2c)); } diff --git 
a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c index 17abf60c94ae..aafc76ee93e0 100644 --- a/drivers/i2c/busses/i2c-qcom-geni.c +++ b/drivers/i2c/busses/i2c-qcom-geni.c @@ -368,7 +368,6 @@ static int geni_i2c_rx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg, geni_se_select_mode(se, GENI_SE_FIFO); writel_relaxed(len, se->base + SE_I2C_RX_TRANS_LEN); - geni_se_setup_m_cmd(se, I2C_READ, m_param); if (dma_buf && geni_se_rx_dma_prep(se, dma_buf, len, &rx_dma)) { geni_se_select_mode(se, GENI_SE_FIFO); @@ -376,6 +375,8 @@ static int geni_i2c_rx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg, dma_buf = NULL; } + geni_se_setup_m_cmd(se, I2C_READ, m_param); + time_left = wait_for_completion_timeout(&gi2c->done, XFER_TIMEOUT); if (!time_left) geni_i2c_abort_xfer(gi2c); @@ -409,7 +410,6 @@ static int geni_i2c_tx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg, geni_se_select_mode(se, GENI_SE_FIFO); writel_relaxed(len, se->base + SE_I2C_TX_TRANS_LEN); - geni_se_setup_m_cmd(se, I2C_WRITE, m_param); if (dma_buf && geni_se_tx_dma_prep(se, dma_buf, len, &tx_dma)) { geni_se_select_mode(se, GENI_SE_FIFO); @@ -417,6 +417,8 @@ static int geni_i2c_tx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg, dma_buf = NULL; } + geni_se_setup_m_cmd(se, I2C_WRITE, m_param); + if (!dma_buf) /* Get FIFO IRQ */ writel_relaxed(1, se->base + SE_GENI_TX_WATERMARK_REG); diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c index 531c01100b56..9c162a01a584 100644 --- a/drivers/i2c/busses/i2c-rcar.c +++ b/drivers/i2c/busses/i2c-rcar.c @@ -580,13 +580,15 @@ static bool rcar_i2c_slave_irq(struct rcar_i2c_priv *priv) rcar_i2c_write(priv, ICSIER, SDR | SSR | SAR); } - rcar_i2c_write(priv, ICSSR, ~SAR & 0xff); + /* Clear SSR, too, because of old STOPs to other clients than us */ + rcar_i2c_write(priv, ICSSR, ~(SAR | SSR) & 0xff); } /* master sent stop */ if (ssr_filtered & SSR) { i2c_slave_event(priv->slave, I2C_SLAVE_STOP, &value); - rcar_i2c_write(priv, ICSIER, SAR | SSR); + rcar_i2c_write(priv, ICSCR, SIE | SDBS); /* clear our NACK */ + rcar_i2c_write(priv, ICSIER, SAR); rcar_i2c_write(priv, ICSSR, ~SSR & 0xff); } @@ -850,7 +852,7 @@ static int rcar_reg_slave(struct i2c_client *slave) priv->slave = slave; rcar_i2c_write(priv, ICSAR, slave->addr); rcar_i2c_write(priv, ICSSR, 0); - rcar_i2c_write(priv, ICSIER, SAR | SSR); + rcar_i2c_write(priv, ICSIER, SAR); rcar_i2c_write(priv, ICSCR, SIE | SDBS); return 0; @@ -862,11 +864,14 @@ static int rcar_unreg_slave(struct i2c_client *slave) WARN_ON(!priv->slave); - /* disable irqs and ensure none is running before clearing ptr */ + /* ensure no irq is running before clearing ptr */ + disable_irq(priv->irq); rcar_i2c_write(priv, ICSIER, 0); - rcar_i2c_write(priv, ICSCR, 0); + rcar_i2c_write(priv, ICSSR, 0); + enable_irq(priv->irq); + rcar_i2c_write(priv, ICSCR, SDBS); + rcar_i2c_write(priv, ICSAR, 0); /* Gen2: must be 0 if not using slave */ - synchronize_irq(priv->irq); priv->slave = NULL; pm_runtime_put(rcar_i2c_priv_to_dev(priv)); @@ -971,6 +976,8 @@ static int rcar_i2c_probe(struct platform_device *pdev) if (ret < 0) goto out_pm_put; + rcar_i2c_write(priv, ICSAR, 0); /* Gen2: must be 0 if not using slave */ + if (priv->devtype == I2C_RCAR_GEN3) { priv->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL); if (!IS_ERR(priv->rstc)) { diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c index dbc43cfec19d..331f7cca9bab 100644 --- a/drivers/i2c/busses/i2c-tegra.c +++ 
b/drivers/i2c/busses/i2c-tegra.c @@ -1719,14 +1719,9 @@ static int tegra_i2c_remove(struct platform_device *pdev) static int __maybe_unused tegra_i2c_suspend(struct device *dev) { struct tegra_i2c_dev *i2c_dev = dev_get_drvdata(dev); - int err; i2c_mark_adapter_suspended(&i2c_dev->adapter); - err = pm_runtime_force_suspend(dev); - if (err < 0) - return err; - return 0; } @@ -1747,10 +1742,6 @@ static int __maybe_unused tegra_i2c_resume(struct device *dev) if (err) return err; - err = pm_runtime_force_resume(dev); - if (err < 0) - return err; - i2c_mark_adapter_resumed(&i2c_dev->adapter); return 0; diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c index cc193f2ba5d3..def62d5b42ca 100644 --- a/drivers/i2c/i2c-core-base.c +++ b/drivers/i2c/i2c-core-base.c @@ -354,7 +354,7 @@ static int i2c_device_probe(struct device *dev) * or ACPI ID table is supplied for the probing device. */ if (!driver->id_table && - !i2c_acpi_match_device(dev->driver->acpi_match_table, client) && + !acpi_driver_match_device(dev, dev->driver) && !i2c_of_match_device(dev->driver->of_match_table, client)) { status = -ENODEV; goto put_sync_adapter; diff --git a/drivers/i2c/i2c-core-slave.c b/drivers/i2c/i2c-core-slave.c index 5427f047faf0..1589179d5eb9 100644 --- a/drivers/i2c/i2c-core-slave.c +++ b/drivers/i2c/i2c-core-slave.c @@ -18,10 +18,8 @@ int i2c_slave_register(struct i2c_client *client, i2c_slave_cb_t slave_cb) { int ret; - if (!client || !slave_cb) { - WARN(1, "insufficient data\n"); + if (WARN(IS_ERR_OR_NULL(client) || !slave_cb, "insufficient data\n")) return -EINVAL; - } if (!(client->flags & I2C_CLIENT_SLAVE)) dev_warn(&client->dev, "%s: client slave flag not set. You might see address collisions\n", @@ -60,6 +58,9 @@ int i2c_slave_unregister(struct i2c_client *client) { int ret; + if (IS_ERR_OR_NULL(client)) + return -EINVAL; + if (!client->adapter->algo->unreg_slave) { dev_err(&client->dev, "%s: not supported by adapter\n", __func__); return -EOPNOTSUPP; diff --git a/drivers/i2c/i2c-core-smbus.c b/drivers/i2c/i2c-core-smbus.c index 3ac426a8ab5a..c2ae8c8cd429 100644 --- a/drivers/i2c/i2c-core-smbus.c +++ b/drivers/i2c/i2c-core-smbus.c @@ -495,6 +495,13 @@ static s32 i2c_smbus_xfer_emulated(struct i2c_adapter *adapter, u16 addr, break; case I2C_SMBUS_BLOCK_DATA: case I2C_SMBUS_BLOCK_PROC_CALL: + if (msg[1].buf[0] > I2C_SMBUS_BLOCK_MAX) { + dev_err(&adapter->dev, + "Invalid block size returned: %d\n", + msg[1].buf[0]); + status = -EPROTO; + goto cleanup; + } for (i = 0; i < msg[1].buf[0] + 1; i++) data->block[i] = msg[1].buf[i]; break; diff --git a/drivers/iio/accel/bmc150-accel-core.c b/drivers/iio/accel/bmc150-accel-core.c index 121b4e89f038..bcdf25f32e22 100644 --- a/drivers/iio/accel/bmc150-accel-core.c +++ b/drivers/iio/accel/bmc150-accel-core.c @@ -189,6 +189,14 @@ struct bmc150_accel_data { struct mutex mutex; u8 fifo_mode, watermark; s16 buffer[8]; + /* + * Ensure there is sufficient space and correct alignment for + * the timestamp if enabled + */ + struct { + __le16 channels[3]; + s64 ts __aligned(8); + } scan; u8 bw_bits; u32 slope_dur; u32 slope_thres; @@ -922,15 +930,16 @@ static int __bmc150_accel_fifo_flush(struct iio_dev *indio_dev, * now. 
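The i2c-core-smbus hunk above refuses to copy a block whose length byte, as reported by the device, exceeds I2C_SMBUS_BLOCK_MAX. The general shape, treating a device-supplied length as untrusted and bounding it against the destination buffer, as a standalone sketch (the 32-byte limit mirrors SMBus block transfers; everything else is illustrative):

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    #define SMBUS_BLOCK_MAX 32    /* destination holds length byte + 32 data bytes */

    /*
     * msg[0] is the length byte as reported by the device, followed by
     * that many data bytes. Copy it into block[] only if it fits.
     */
    static int copy_block(unsigned char *block, const unsigned char *msg)
    {
        if (msg[0] > SMBUS_BLOCK_MAX) {
            fprintf(stderr, "invalid block size returned: %d\n", msg[0]);
            return -EPROTO;
        }
        memcpy(block, msg, msg[0] + 1);
        return 0;
    }

    int main(void)
    {
        unsigned char block[SMBUS_BLOCK_MAX + 1];
        unsigned char good[4] = { 3, 0xaa, 0xbb, 0xcc };
        unsigned char bad[2]  = { 200, 0x00 };    /* length byte lies */

        printf("good: %d\n", copy_block(block, good));   /* 0 */
        printf("bad:  %d\n", copy_block(block, bad));    /* -EPROTO */
        return 0;
    }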
*/ for (i = 0; i < count; i++) { - u16 sample[8]; int j, bit; j = 0; for_each_set_bit(bit, indio_dev->active_scan_mask, indio_dev->masklength) - memcpy(&sample[j++], &buffer[i * 3 + bit], 2); + memcpy(&data->scan.channels[j++], &buffer[i * 3 + bit], + sizeof(data->scan.channels[0])); - iio_push_to_buffers_with_timestamp(indio_dev, sample, tstamp); + iio_push_to_buffers_with_timestamp(indio_dev, &data->scan, + tstamp); tstamp += sample_period; } diff --git a/drivers/iio/accel/kxsd9.c b/drivers/iio/accel/kxsd9.c index 0b876b2dc5bd..76429e2a6fb8 100644 --- a/drivers/iio/accel/kxsd9.c +++ b/drivers/iio/accel/kxsd9.c @@ -209,14 +209,20 @@ static irqreturn_t kxsd9_trigger_handler(int irq, void *p) const struct iio_poll_func *pf = p; struct iio_dev *indio_dev = pf->indio_dev; struct kxsd9_state *st = iio_priv(indio_dev); + /* + * Ensure correct positioning and alignment of timestamp. + * No need to zero initialize as all elements written. + */ + struct { + __be16 chan[4]; + s64 ts __aligned(8); + } hw_values; int ret; - /* 4 * 16bit values AND timestamp */ - __be16 hw_values[8]; ret = regmap_bulk_read(st->map, KXSD9_REG_X, - &hw_values, - 8); + hw_values.chan, + sizeof(hw_values.chan)); if (ret) { dev_err(st->dev, "error reading data\n"); @@ -224,7 +230,7 @@ static irqreturn_t kxsd9_trigger_handler(int irq, void *p) } iio_push_to_buffers_with_timestamp(indio_dev, - hw_values, + &hw_values, iio_get_time_ns(indio_dev)); iio_trigger_notify_done(indio_dev->trig); diff --git a/drivers/iio/accel/mma7455_core.c b/drivers/iio/accel/mma7455_core.c index 8b5a6aff9bf4..70ec3490bdb8 100644 --- a/drivers/iio/accel/mma7455_core.c +++ b/drivers/iio/accel/mma7455_core.c @@ -52,6 +52,14 @@ struct mma7455_data { struct regmap *regmap; + /* + * Used to reorganize data. Will ensure correct alignment of + * the timestamp if present + */ + struct { + __le16 channels[3]; + s64 ts __aligned(8); + } scan; }; static int mma7455_drdy(struct mma7455_data *mma7455) @@ -82,19 +90,19 @@ static irqreturn_t mma7455_trigger_handler(int irq, void *p) struct iio_poll_func *pf = p; struct iio_dev *indio_dev = pf->indio_dev; struct mma7455_data *mma7455 = iio_priv(indio_dev); - u8 buf[16]; /* 3 x 16-bit channels + padding + ts */ int ret; ret = mma7455_drdy(mma7455); if (ret) goto done; - ret = regmap_bulk_read(mma7455->regmap, MMA7455_REG_XOUTL, buf, - sizeof(__le16) * 3); + ret = regmap_bulk_read(mma7455->regmap, MMA7455_REG_XOUTL, + mma7455->scan.channels, + sizeof(mma7455->scan.channels)); if (ret) goto done; - iio_push_to_buffers_with_timestamp(indio_dev, buf, + iio_push_to_buffers_with_timestamp(indio_dev, &mma7455->scan, iio_get_time_ns(indio_dev)); done: diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c index 00e100fc845a..85d453b3f5ec 100644 --- a/drivers/iio/accel/mma8452.c +++ b/drivers/iio/accel/mma8452.c @@ -110,6 +110,12 @@ struct mma8452_data { int sleep_val; struct regulator *vdd_reg; struct regulator *vddio_reg; + + /* Ensure correct alignment of time stamp when present */ + struct { + __be16 channels[3]; + s64 ts __aligned(8); + } buffer; }; /** @@ -1091,14 +1097,13 @@ static irqreturn_t mma8452_trigger_handler(int irq, void *p) struct iio_poll_func *pf = p; struct iio_dev *indio_dev = pf->indio_dev; struct mma8452_data *data = iio_priv(indio_dev); - u8 buffer[16]; /* 3 16-bit channels + padding + ts */ int ret; - ret = mma8452_read(data, (__be16 *)buffer); + ret = mma8452_read(data, data->buffer.channels); if (ret < 0) goto done; - iio_push_to_buffers_with_timestamp(indio_dev, buffer, + 
iio_push_to_buffers_with_timestamp(indio_dev, &data->buffer, iio_get_time_ns(indio_dev)); done: @@ -1685,10 +1690,13 @@ static int mma8452_probe(struct i2c_client *client, ret = mma8452_set_freefall_mode(data, false); if (ret < 0) - goto buffer_cleanup; + goto unregister_device; return 0; +unregister_device: + iio_device_unregister(indio_dev); + buffer_cleanup: iio_triggered_buffer_cleanup(indio_dev); diff --git a/drivers/iio/adc/ad7780.c b/drivers/iio/adc/ad7780.c index 217a5a5c3c6d..7e741294de7b 100644 --- a/drivers/iio/adc/ad7780.c +++ b/drivers/iio/adc/ad7780.c @@ -309,7 +309,7 @@ static int ad7780_probe(struct spi_device *spi) ret = ad7780_init_gpios(&spi->dev, st); if (ret) - goto error_cleanup_buffer_and_trigger; + return ret; st->reg = devm_regulator_get(&spi->dev, "avdd"); if (IS_ERR(st->reg)) diff --git a/drivers/iio/adc/ina2xx-adc.c b/drivers/iio/adc/ina2xx-adc.c index bdd7cba6f6b0..d3e9ec00ef95 100644 --- a/drivers/iio/adc/ina2xx-adc.c +++ b/drivers/iio/adc/ina2xx-adc.c @@ -146,6 +146,11 @@ struct ina2xx_chip_info { int range_vbus; /* Bus voltage maximum in V */ int pga_gain_vshunt; /* Shunt voltage PGA gain */ bool allow_async_readout; + /* data buffer needs space for channel data and timestamp */ + struct { + u16 chan[4]; + u64 ts __aligned(8); + } scan; }; static const struct ina2xx_config ina2xx_config[] = { @@ -738,8 +743,6 @@ static int ina2xx_conversion_ready(struct iio_dev *indio_dev) static int ina2xx_work_buffer(struct iio_dev *indio_dev) { struct ina2xx_chip_info *chip = iio_priv(indio_dev); - /* data buffer needs space for channel data and timestap */ - unsigned short data[4 + sizeof(s64)/sizeof(short)]; int bit, ret, i = 0; s64 time; @@ -758,10 +761,10 @@ static int ina2xx_work_buffer(struct iio_dev *indio_dev) if (ret < 0) return ret; - data[i++] = val; + chip->scan.chan[i++] = val; } - iio_push_to_buffers_with_timestamp(indio_dev, data, time); + iio_push_to_buffers_with_timestamp(indio_dev, &chip->scan, time); return 0; }; diff --git a/drivers/iio/adc/max1118.c b/drivers/iio/adc/max1118.c index 3b6f3b9a6c5b..a1b66f92e1bf 100644 --- a/drivers/iio/adc/max1118.c +++ b/drivers/iio/adc/max1118.c @@ -35,6 +35,11 @@ struct max1118 { struct spi_device *spi; struct mutex lock; struct regulator *reg; + /* Ensure natural alignment of buffer elements */ + struct { + u8 channels[2]; + s64 ts __aligned(8); + } scan; u8 data ____cacheline_aligned; }; @@ -159,7 +164,6 @@ static irqreturn_t max1118_trigger_handler(int irq, void *p) struct iio_poll_func *pf = p; struct iio_dev *indio_dev = pf->indio_dev; struct max1118 *adc = iio_priv(indio_dev); - u8 data[16] = { }; /* 2x 8-bit ADC data + padding + 8 bytes timestamp */ int scan_index; int i = 0; @@ -177,10 +181,10 @@ static irqreturn_t max1118_trigger_handler(int irq, void *p) goto out; } - data[i] = ret; + adc->scan.channels[i] = ret; i++; } - iio_push_to_buffers_with_timestamp(indio_dev, data, + iio_push_to_buffers_with_timestamp(indio_dev, &adc->scan, iio_get_time_ns(indio_dev)); out: mutex_unlock(&adc->lock); diff --git a/drivers/iio/adc/mcp3422.c b/drivers/iio/adc/mcp3422.c index ea24d7c58b12..8ae4cf135157 100644 --- a/drivers/iio/adc/mcp3422.c +++ b/drivers/iio/adc/mcp3422.c @@ -95,16 +95,12 @@ static int mcp3422_update_config(struct mcp3422 *adc, u8 newconfig) { int ret; - mutex_lock(&adc->lock); - ret = i2c_master_send(adc->i2c, &newconfig, 1); if (ret > 0) { adc->config = newconfig; ret = 0; } - mutex_unlock(&adc->lock); - return ret; } @@ -137,6 +133,8 @@ static int mcp3422_read_channel(struct mcp3422 *adc, u8 config; 
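The string of IIO driver changes above and below all apply the same fix: replace a loosely sized on-stack sample array with a driver-private struct whose channel data is followed by an s64 timestamp forced to 8-byte alignment, so iio_push_to_buffers_with_timestamp() never writes the timestamp at a misaligned or out-of-bounds offset. The layout guarantee can be illustrated outside the kernel; this is plain C with stand-in typedefs, and the three 16-bit channels are just an example width:

    #include <stdint.h>
    #include <stddef.h>
    #include <stdio.h>

    typedef int64_t s64;    /* stand-in for the kernel type */
    typedef int16_t le16;   /* stand-in for the kernel's __le16 */

    /*
     * Channel data first, then a 64-bit timestamp forced to 8-byte
     * alignment. On 32-bit ABIs a bare s64 may only be 4-byte aligned
     * inside a struct, which is exactly what the kernel's __aligned(8)
     * annotation guards against.
     */
    struct scan {
        le16 channels[3];
        s64 ts __attribute__((aligned(8)));
    };

    _Static_assert(offsetof(struct scan, ts) % 8 == 0,
                   "timestamp must be 8-byte aligned");
    _Static_assert(sizeof(struct scan) == 16,
                   "3 x 16-bit channels pad up to an 8-byte boundary");

    int main(void)
    {
        struct scan scan = { { 1, 2, 3 }, 0 };

        scan.ts = 123456789;
        printf("channels at %zu, ts at %zu, size %zu\n",
               offsetof(struct scan, channels), offsetof(struct scan, ts),
               sizeof(scan));
        return 0;
    }

The padding between the channels and the timestamp is what the old flat arrays did not reserve, which is why several of the patched handlers could run past the end of their stack buffers.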
u8 req_channel = channel->channel; + mutex_lock(&adc->lock); + if (req_channel != MCP3422_CHANNEL(adc->config)) { config = adc->config; config &= ~MCP3422_CHANNEL_MASK; @@ -144,12 +142,18 @@ static int mcp3422_read_channel(struct mcp3422 *adc, config &= ~MCP3422_PGA_MASK; config |= MCP3422_PGA_VALUE(adc->pga[req_channel]); ret = mcp3422_update_config(adc, config); - if (ret < 0) + if (ret < 0) { + mutex_unlock(&adc->lock); return ret; + } msleep(mcp3422_read_times[MCP3422_SAMPLE_RATE(adc->config)]); } - return mcp3422_read(adc, value, &config); + ret = mcp3422_read(adc, value, &config); + + mutex_unlock(&adc->lock); + + return ret; } static int mcp3422_read_raw(struct iio_dev *iio, diff --git a/drivers/iio/adc/ti-adc081c.c b/drivers/iio/adc/ti-adc081c.c index 0235863ff77b..cc8cbffe2b7b 100644 --- a/drivers/iio/adc/ti-adc081c.c +++ b/drivers/iio/adc/ti-adc081c.c @@ -33,6 +33,12 @@ struct adc081c { /* 8, 10 or 12 */ int bits; + + /* Ensure natural alignment of buffer elements */ + struct { + u16 channel; + s64 ts __aligned(8); + } scan; }; #define REG_CONV_RES 0x00 @@ -128,14 +134,13 @@ static irqreturn_t adc081c_trigger_handler(int irq, void *p) struct iio_poll_func *pf = p; struct iio_dev *indio_dev = pf->indio_dev; struct adc081c *data = iio_priv(indio_dev); - u16 buf[8]; /* 2 bytes data + 6 bytes padding + 8 bytes timestamp */ int ret; ret = i2c_smbus_read_word_swapped(data->i2c, REG_CONV_RES); if (ret < 0) goto out; - buf[0] = ret; - iio_push_to_buffers_with_timestamp(indio_dev, buf, + data->scan.channel = ret; + iio_push_to_buffers_with_timestamp(indio_dev, &data->scan, iio_get_time_ns(indio_dev)); out: iio_trigger_notify_done(indio_dev->trig); diff --git a/drivers/iio/adc/ti-adc084s021.c b/drivers/iio/adc/ti-adc084s021.c index bdedf456ee05..fc053216d282 100644 --- a/drivers/iio/adc/ti-adc084s021.c +++ b/drivers/iio/adc/ti-adc084s021.c @@ -25,6 +25,11 @@ struct adc084s021 { struct spi_transfer spi_trans; struct regulator *reg; struct mutex lock; + /* Buffer used to align data */ + struct { + __be16 channels[4]; + s64 ts __aligned(8); + } scan; /* * DMA (thus cache coherency maintenance) requires the * transfer buffers to live in their own cache line. @@ -140,14 +145,13 @@ static irqreturn_t adc084s021_buffer_trigger_handler(int irq, void *pollfunc) struct iio_poll_func *pf = pollfunc; struct iio_dev *indio_dev = pf->indio_dev; struct adc084s021 *adc = iio_priv(indio_dev); - __be16 data[8] = {0}; /* 4 * 16-bit words of data + 8 bytes timestamp */ mutex_lock(&adc->lock); - if (adc084s021_adc_conversion(adc, &data) < 0) + if (adc084s021_adc_conversion(adc, adc->scan.channels) < 0) dev_err(&adc->spi->dev, "Failed to read data\n"); - iio_push_to_buffers_with_timestamp(indio_dev, data, + iio_push_to_buffers_with_timestamp(indio_dev, &adc->scan, iio_get_time_ns(indio_dev)); mutex_unlock(&adc->lock); iio_trigger_notify_done(indio_dev->trig); diff --git a/drivers/iio/adc/ti-ads1015.c b/drivers/iio/adc/ti-ads1015.c index a550b132cfb7..871690a47661 100644 --- a/drivers/iio/adc/ti-ads1015.c +++ b/drivers/iio/adc/ti-ads1015.c @@ -309,6 +309,7 @@ static const struct iio_chan_spec ads1115_channels[] = { IIO_CHAN_SOFT_TIMESTAMP(ADS1015_TIMESTAMP), }; +#ifdef CONFIG_PM static int ads1015_set_power_state(struct ads1015_data *data, bool on) { int ret; @@ -326,6 +327,15 @@ static int ads1015_set_power_state(struct ads1015_data *data, bool on) return ret < 0 ? 
ret : 0; } +#else /* !CONFIG_PM */ + +static int ads1015_set_power_state(struct ads1015_data *data, bool on) +{ + return 0; +} + +#endif /* !CONFIG_PM */ + static int ads1015_get_adc_result(struct ads1015_data *data, int chan, int *val) { diff --git a/drivers/iio/chemical/ccs811.c b/drivers/iio/chemical/ccs811.c index 2ebdfc35bcda..7bf4e9a16a6a 100644 --- a/drivers/iio/chemical/ccs811.c +++ b/drivers/iio/chemical/ccs811.c @@ -75,6 +75,11 @@ struct ccs811_data { struct ccs811_reading buffer; struct iio_trigger *drdy_trig; bool drdy_trig_on; + /* Ensures correct alignment of timestamp if present */ + struct { + s16 channels[2]; + s64 ts __aligned(8); + } scan; }; static const struct iio_chan_spec ccs811_channels[] = { @@ -306,17 +311,17 @@ static irqreturn_t ccs811_trigger_handler(int irq, void *p) struct iio_dev *indio_dev = pf->indio_dev; struct ccs811_data *data = iio_priv(indio_dev); struct i2c_client *client = data->client; - s16 buf[8]; /* s16 eCO2 + s16 TVOC + padding + 8 byte timestamp */ int ret; - ret = i2c_smbus_read_i2c_block_data(client, CCS811_ALG_RESULT_DATA, 4, - (u8 *)&buf); + ret = i2c_smbus_read_i2c_block_data(client, CCS811_ALG_RESULT_DATA, + sizeof(data->scan.channels), + (u8 *)data->scan.channels); if (ret != 4) { dev_err(&client->dev, "cannot read sensor data\n"); goto err; } - iio_push_to_buffers_with_timestamp(indio_dev, buf, + iio_push_to_buffers_with_timestamp(indio_dev, &data->scan, iio_get_time_ns(indio_dev)); err: diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c index d2609e6feda4..b4f394f05863 100644 --- a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c +++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c @@ -57,10 +57,13 @@ static void get_default_min_max_freq(enum motionsensor_type type, { switch (type) { case MOTIONSENSE_TYPE_ACCEL: - case MOTIONSENSE_TYPE_GYRO: *min_freq = 12500; *max_freq = 100000; break; + case MOTIONSENSE_TYPE_GYRO: + *min_freq = 25000; + *max_freq = 100000; + break; case MOTIONSENSE_TYPE_MAG: *min_freq = 5000; *max_freq = 25000; diff --git a/drivers/iio/dac/ad5592r-base.c b/drivers/iio/dac/ad5592r-base.c index 2d897e64c6a9..424922cad1e3 100644 --- a/drivers/iio/dac/ad5592r-base.c +++ b/drivers/iio/dac/ad5592r-base.c @@ -416,7 +416,7 @@ static int ad5592r_read_raw(struct iio_dev *iio_dev, s64 tmp = *val * (3767897513LL / 25LL); *val = div_s64_rem(tmp, 1000000000LL, val2); - ret = IIO_VAL_INT_PLUS_MICRO; + return IIO_VAL_INT_PLUS_MICRO; } else { int mult; @@ -447,7 +447,7 @@ static int ad5592r_read_raw(struct iio_dev *iio_dev, ret = IIO_VAL_INT; break; default: - ret = -EINVAL; + return -EINVAL; } unlock: diff --git a/drivers/iio/health/afe4403.c b/drivers/iio/health/afe4403.c index dc22dc363a99..29104656a537 100644 --- a/drivers/iio/health/afe4403.c +++ b/drivers/iio/health/afe4403.c @@ -63,6 +63,7 @@ static const struct reg_field afe4403_reg_fields[] = { * @regulator: Pointer to the regulator for the IC * @trig: IIO trigger for this device * @irq: ADC_RDY line interrupt number + * @buffer: Used to construct data layout to push into IIO buffer. 
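The ti-ads1015 hunk above gives ads1015_set_power_state() a no-op fallback when CONFIG_PM is disabled, so callers need no #ifdefs of their own. The same stub-behind-a-config-switch pattern in a self-contained form; FEATURE_PM here is a made-up macro standing in for a Kconfig option:

    #include <stdio.h>

    /* Build with -DFEATURE_PM to get the real implementation. */
    #ifdef FEATURE_PM

    static int set_power_state(int on)
    {
        printf("runtime PM: powering %s\n", on ? "on" : "off");
        return 0;
    }

    #else /* !FEATURE_PM */

    /* Keep the signature identical so callers stay #ifdef-free. */
    static int set_power_state(int on)
    {
        (void)on;
        return 0;
    }

    #endif /* !FEATURE_PM */

    int main(void)
    {
        printf("set_power_state(1) -> %d\n", set_power_state(1));
        printf("set_power_state(0) -> %d\n", set_power_state(0));
        return 0;
    }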
*/ struct afe4403_data { struct device *dev; @@ -72,6 +73,8 @@ struct afe4403_data { struct regulator *regulator; struct iio_trigger *trig; int irq; + /* Ensure suitable alignment for timestamp */ + s32 buffer[8] __aligned(8); }; enum afe4403_chan_id { @@ -309,7 +312,6 @@ static irqreturn_t afe4403_trigger_handler(int irq, void *private) struct iio_dev *indio_dev = pf->indio_dev; struct afe4403_data *afe = iio_priv(indio_dev); int ret, bit, i = 0; - s32 buffer[8]; u8 tx[4] = {AFE440X_CONTROL0, 0x0, 0x0, AFE440X_CONTROL0_READ}; u8 rx[3]; @@ -326,9 +328,9 @@ static irqreturn_t afe4403_trigger_handler(int irq, void *private) if (ret) goto err; - buffer[i++] = (rx[0] << 16) | - (rx[1] << 8) | - (rx[2]); + afe->buffer[i++] = (rx[0] << 16) | + (rx[1] << 8) | + (rx[2]); } /* Disable reading from the device */ @@ -337,7 +339,8 @@ static irqreturn_t afe4403_trigger_handler(int irq, void *private) if (ret) goto err; - iio_push_to_buffers_with_timestamp(indio_dev, buffer, pf->timestamp); + iio_push_to_buffers_with_timestamp(indio_dev, afe->buffer, + pf->timestamp); err: iio_trigger_notify_done(indio_dev->trig); diff --git a/drivers/iio/health/afe4404.c b/drivers/iio/health/afe4404.c index e728bbb21ca8..cebb1fd4d0b1 100644 --- a/drivers/iio/health/afe4404.c +++ b/drivers/iio/health/afe4404.c @@ -83,6 +83,7 @@ static const struct reg_field afe4404_reg_fields[] = { * @regulator: Pointer to the regulator for the IC * @trig: IIO trigger for this device * @irq: ADC_RDY line interrupt number + * @buffer: Used to construct a scan to push to the iio buffer. */ struct afe4404_data { struct device *dev; @@ -91,6 +92,7 @@ struct afe4404_data { struct regulator *regulator; struct iio_trigger *trig; int irq; + s32 buffer[10] __aligned(8); }; enum afe4404_chan_id { @@ -328,17 +330,17 @@ static irqreturn_t afe4404_trigger_handler(int irq, void *private) struct iio_dev *indio_dev = pf->indio_dev; struct afe4404_data *afe = iio_priv(indio_dev); int ret, bit, i = 0; - s32 buffer[10]; for_each_set_bit(bit, indio_dev->active_scan_mask, indio_dev->masklength) { ret = regmap_read(afe->regmap, afe4404_channel_values[bit], - &buffer[i++]); + &afe->buffer[i++]); if (ret) goto err; } - iio_push_to_buffers_with_timestamp(indio_dev, buffer, pf->timestamp); + iio_push_to_buffers_with_timestamp(indio_dev, afe->buffer, + pf->timestamp); err: iio_trigger_notify_done(indio_dev->trig); diff --git a/drivers/iio/humidity/hdc100x.c b/drivers/iio/humidity/hdc100x.c index dcf5a5bdfaa8..7618cdf59efd 100644 --- a/drivers/iio/humidity/hdc100x.c +++ b/drivers/iio/humidity/hdc100x.c @@ -38,6 +38,11 @@ struct hdc100x_data { /* integration time of the sensor */ int adc_int_us[2]; + /* Ensure natural alignment of timestamp */ + struct { + __be16 channels[2]; + s64 ts __aligned(8); + } scan; }; /* integration time in us */ @@ -319,7 +324,6 @@ static irqreturn_t hdc100x_trigger_handler(int irq, void *p) struct i2c_client *client = data->client; int delay = data->adc_int_us[0] + data->adc_int_us[1]; int ret; - s16 buf[8]; /* 2x s16 + padding + 8 byte timestamp */ /* dual read starts at temp register */ mutex_lock(&data->lock); @@ -330,13 +334,13 @@ static irqreturn_t hdc100x_trigger_handler(int irq, void *p) } usleep_range(delay, delay + 1000); - ret = i2c_master_recv(client, (u8 *)buf, 4); + ret = i2c_master_recv(client, (u8 *)data->scan.channels, 4); if (ret < 0) { dev_err(&client->dev, "cannot read sensor data\n"); goto err; } - iio_push_to_buffers_with_timestamp(indio_dev, buf, + iio_push_to_buffers_with_timestamp(indio_dev, &data->scan, 
iio_get_time_ns(indio_dev)); err: mutex_unlock(&data->lock); diff --git a/drivers/iio/humidity/hts221.h b/drivers/iio/humidity/hts221.h index 7d6771f7cf47..b2eb5abeaccd 100644 --- a/drivers/iio/humidity/hts221.h +++ b/drivers/iio/humidity/hts221.h @@ -14,8 +14,6 @@ #include -#define HTS221_DATA_SIZE 2 - enum hts221_sensor_type { HTS221_SENSOR_H, HTS221_SENSOR_T, @@ -39,6 +37,11 @@ struct hts221_hw { bool enabled; u8 odr; + /* Ensure natural alignment of timestamp */ + struct { + __le16 channels[2]; + s64 ts __aligned(8); + } scan; }; extern const struct dev_pm_ops hts221_pm_ops; diff --git a/drivers/iio/humidity/hts221_buffer.c b/drivers/iio/humidity/hts221_buffer.c index 81d50a861c22..49dcd36d8838 100644 --- a/drivers/iio/humidity/hts221_buffer.c +++ b/drivers/iio/humidity/hts221_buffer.c @@ -162,7 +162,6 @@ static const struct iio_buffer_setup_ops hts221_buffer_ops = { static irqreturn_t hts221_buffer_handler_thread(int irq, void *p) { - u8 buffer[ALIGN(2 * HTS221_DATA_SIZE, sizeof(s64)) + sizeof(s64)]; struct iio_poll_func *pf = p; struct iio_dev *iio_dev = pf->indio_dev; struct hts221_hw *hw = iio_priv(iio_dev); @@ -172,18 +171,20 @@ static irqreturn_t hts221_buffer_handler_thread(int irq, void *p) /* humidity data */ ch = &iio_dev->channels[HTS221_SENSOR_H]; err = regmap_bulk_read(hw->regmap, ch->address, - buffer, HTS221_DATA_SIZE); + &hw->scan.channels[0], + sizeof(hw->scan.channels[0])); if (err < 0) goto out; /* temperature data */ ch = &iio_dev->channels[HTS221_SENSOR_T]; err = regmap_bulk_read(hw->regmap, ch->address, - buffer + HTS221_DATA_SIZE, HTS221_DATA_SIZE); + &hw->scan.channels[1], + sizeof(hw->scan.channels[1])); if (err < 0) goto out; - iio_push_to_buffers_with_timestamp(iio_dev, buffer, + iio_push_to_buffers_with_timestamp(iio_dev, &hw->scan, iio_get_time_ns(iio_dev)); out: diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c index 524a686077ca..485b2e6748c5 100644 --- a/drivers/iio/industrialio-core.c +++ b/drivers/iio/industrialio-core.c @@ -130,6 +130,8 @@ static const char * const iio_modifier_names[] = { [IIO_MOD_PM2P5] = "pm2p5", [IIO_MOD_PM4] = "pm4", [IIO_MOD_PM10] = "pm10", + [IIO_MOD_ETHANOL] = "ethanol", + [IIO_MOD_H2] = "h2", }; /* relies on pairs of these shared then separate */ diff --git a/drivers/iio/light/ltr501.c b/drivers/iio/light/ltr501.c index 71f99d2a22c1..ceddb6a3b61b 100644 --- a/drivers/iio/light/ltr501.c +++ b/drivers/iio/light/ltr501.c @@ -1242,13 +1242,16 @@ static irqreturn_t ltr501_trigger_handler(int irq, void *p) struct iio_poll_func *pf = p; struct iio_dev *indio_dev = pf->indio_dev; struct ltr501_data *data = iio_priv(indio_dev); - u16 buf[8]; + struct { + u16 channels[3]; + s64 ts __aligned(8); + } scan; __le16 als_buf[2]; u8 mask = 0; int j = 0; int ret, psdata; - memset(buf, 0, sizeof(buf)); + memset(&scan, 0, sizeof(scan)); /* figure out which data needs to be ready */ if (test_bit(0, indio_dev->active_scan_mask) || @@ -1267,9 +1270,9 @@ static irqreturn_t ltr501_trigger_handler(int irq, void *p) if (ret < 0) return ret; if (test_bit(0, indio_dev->active_scan_mask)) - buf[j++] = le16_to_cpu(als_buf[1]); + scan.channels[j++] = le16_to_cpu(als_buf[1]); if (test_bit(1, indio_dev->active_scan_mask)) - buf[j++] = le16_to_cpu(als_buf[0]); + scan.channels[j++] = le16_to_cpu(als_buf[0]); } if (mask & LTR501_STATUS_PS_RDY) { @@ -1277,10 +1280,10 @@ static irqreturn_t ltr501_trigger_handler(int irq, void *p) &psdata, 2); if (ret < 0) goto done; - buf[j++] = psdata & LTR501_PS_DATA_MASK; + scan.channels[j++] = 
psdata & LTR501_PS_DATA_MASK; } - iio_push_to_buffers_with_timestamp(indio_dev, buf, + iio_push_to_buffers_with_timestamp(indio_dev, &scan, iio_get_time_ns(indio_dev)); done: diff --git a/drivers/iio/light/max44000.c b/drivers/iio/light/max44000.c index d6d8007ba430..8cc619de2c3a 100644 --- a/drivers/iio/light/max44000.c +++ b/drivers/iio/light/max44000.c @@ -75,6 +75,11 @@ struct max44000_data { struct mutex lock; struct regmap *regmap; + /* Ensure naturally aligned timestamp */ + struct { + u16 channels[2]; + s64 ts __aligned(8); + } scan; }; /* Default scale is set to the minimum of 0.03125 or 1 / (1 << 5) lux */ @@ -488,7 +493,6 @@ static irqreturn_t max44000_trigger_handler(int irq, void *p) struct iio_poll_func *pf = p; struct iio_dev *indio_dev = pf->indio_dev; struct max44000_data *data = iio_priv(indio_dev); - u16 buf[8]; /* 2x u16 + padding + 8 bytes timestamp */ int index = 0; unsigned int regval; int ret; @@ -498,17 +502,17 @@ static irqreturn_t max44000_trigger_handler(int irq, void *p) ret = max44000_read_alsval(data); if (ret < 0) goto out_unlock; - buf[index++] = ret; + data->scan.channels[index++] = ret; } if (test_bit(MAX44000_SCAN_INDEX_PRX, indio_dev->active_scan_mask)) { ret = regmap_read(data->regmap, MAX44000_REG_PRX_DATA, ®val); if (ret < 0) goto out_unlock; - buf[index] = regval; + data->scan.channels[index] = regval; } mutex_unlock(&data->lock); - iio_push_to_buffers_with_timestamp(indio_dev, buf, + iio_push_to_buffers_with_timestamp(indio_dev, &data->scan, iio_get_time_ns(indio_dev)); iio_trigger_notify_done(indio_dev->trig); return IRQ_HANDLED; diff --git a/drivers/iio/magnetometer/ak8974.c b/drivers/iio/magnetometer/ak8974.c index d32996702110..87c15a63c1a4 100644 --- a/drivers/iio/magnetometer/ak8974.c +++ b/drivers/iio/magnetometer/ak8974.c @@ -185,6 +185,11 @@ struct ak8974 { bool drdy_irq; struct completion drdy_complete; bool drdy_active_low; + /* Ensure timestamp is naturally aligned */ + struct { + __le16 channels[3]; + s64 ts __aligned(8); + } scan; }; static const char ak8974_reg_avdd[] = "avdd"; @@ -581,7 +586,6 @@ static void ak8974_fill_buffer(struct iio_dev *indio_dev) { struct ak8974 *ak8974 = iio_priv(indio_dev); int ret; - __le16 hw_values[8]; /* Three axes + 64bit padding */ pm_runtime_get_sync(&ak8974->i2c->dev); mutex_lock(&ak8974->lock); @@ -591,13 +595,13 @@ static void ak8974_fill_buffer(struct iio_dev *indio_dev) dev_err(&ak8974->i2c->dev, "error triggering measure\n"); goto out_unlock; } - ret = ak8974_getresult(ak8974, hw_values); + ret = ak8974_getresult(ak8974, ak8974->scan.channels); if (ret) { dev_err(&ak8974->i2c->dev, "error getting measures\n"); goto out_unlock; } - iio_push_to_buffers_with_timestamp(indio_dev, hw_values, + iio_push_to_buffers_with_timestamp(indio_dev, &ak8974->scan, iio_get_time_ns(indio_dev)); out_unlock: @@ -764,19 +768,21 @@ static int ak8974_probe(struct i2c_client *i2c, ak8974->map = devm_regmap_init_i2c(i2c, &ak8974_regmap_config); if (IS_ERR(ak8974->map)) { dev_err(&i2c->dev, "failed to allocate register map\n"); + pm_runtime_put_noidle(&i2c->dev); + pm_runtime_disable(&i2c->dev); return PTR_ERR(ak8974->map); } ret = ak8974_set_power(ak8974, AK8974_PWR_ON); if (ret) { dev_err(&i2c->dev, "could not power on\n"); - goto power_off; + goto disable_pm; } ret = ak8974_detect(ak8974); if (ret) { dev_err(&i2c->dev, "neither AK8974 nor AMI30x found\n"); - goto power_off; + goto disable_pm; } ret = ak8974_selftest(ak8974); @@ -786,14 +792,9 @@ static int ak8974_probe(struct i2c_client *i2c, ret = 
ak8974_reset(ak8974); if (ret) { dev_err(&i2c->dev, "AK8974 reset failed\n"); - goto power_off; + goto disable_pm; } - pm_runtime_set_autosuspend_delay(&i2c->dev, - AK8974_AUTOSUSPEND_DELAY); - pm_runtime_use_autosuspend(&i2c->dev); - pm_runtime_put(&i2c->dev); - indio_dev->dev.parent = &i2c->dev; indio_dev->channels = ak8974_channels; indio_dev->num_channels = ARRAY_SIZE(ak8974_channels); @@ -846,6 +847,11 @@ no_irq: goto cleanup_buffer; } + pm_runtime_set_autosuspend_delay(&i2c->dev, + AK8974_AUTOSUSPEND_DELAY); + pm_runtime_use_autosuspend(&i2c->dev); + pm_runtime_put(&i2c->dev); + return 0; cleanup_buffer: @@ -854,7 +860,6 @@ disable_pm: pm_runtime_put_noidle(&i2c->dev); pm_runtime_disable(&i2c->dev); ak8974_set_power(ak8974, AK8974_PWR_OFF); -power_off: regulator_bulk_disable(ARRAY_SIZE(ak8974->regs), ak8974->regs); return ret; diff --git a/drivers/iio/magnetometer/ak8975.c b/drivers/iio/magnetometer/ak8975.c index 893bec5a0312..82af903a765b 100644 --- a/drivers/iio/magnetometer/ak8975.c +++ b/drivers/iio/magnetometer/ak8975.c @@ -368,6 +368,12 @@ struct ak8975_data { struct iio_mount_matrix orientation; struct regulator *vdd; struct regulator *vid; + + /* Ensure natural alignment of timestamp */ + struct { + s16 channels[3]; + s64 ts __aligned(8); + } scan; }; /* Enable attached power regulator if any. */ @@ -805,7 +811,6 @@ static void ak8975_fill_buffer(struct iio_dev *indio_dev) const struct i2c_client *client = data->client; const struct ak_def *def = data->def; int ret; - s16 buff[8]; /* 3 x 16 bits axis values + 1 aligned 64 bits timestamp */ __le16 fval[3]; mutex_lock(&data->lock); @@ -828,12 +833,13 @@ static void ak8975_fill_buffer(struct iio_dev *indio_dev) mutex_unlock(&data->lock); /* Clamp to valid range. */ - buff[0] = clamp_t(s16, le16_to_cpu(fval[0]), -def->range, def->range); - buff[1] = clamp_t(s16, le16_to_cpu(fval[1]), -def->range, def->range); - buff[2] = clamp_t(s16, le16_to_cpu(fval[2]), -def->range, def->range); + data->scan.channels[0] = clamp_t(s16, le16_to_cpu(fval[0]), -def->range, def->range); + data->scan.channels[1] = clamp_t(s16, le16_to_cpu(fval[1]), -def->range, def->range); + data->scan.channels[2] = clamp_t(s16, le16_to_cpu(fval[2]), -def->range, def->range); - iio_push_to_buffers_with_timestamp(indio_dev, buff, + iio_push_to_buffers_with_timestamp(indio_dev, &data->scan, iio_get_time_ns(indio_dev)); + return; unlock: diff --git a/drivers/iio/pressure/bmp280-core.c b/drivers/iio/pressure/bmp280-core.c index 8d0f15f27dc5..0a95afaa48fe 100644 --- a/drivers/iio/pressure/bmp280-core.c +++ b/drivers/iio/pressure/bmp280-core.c @@ -264,6 +264,8 @@ static u32 bmp280_compensate_humidity(struct bmp280_data *data, + (s32)2097152) * calib->H2 + 8192) >> 14); var -= ((((var >> 15) * (var >> 15)) >> 7) * (s32)calib->H1) >> 4; + var = clamp_val(var, 0, 419430400); + return var >> 12; }; @@ -706,7 +708,7 @@ static int bmp180_measure(struct bmp280_data *data, u8 ctrl_meas) unsigned int ctrl; if (data->use_eoc) - init_completion(&data->done); + reinit_completion(&data->done); ret = regmap_write(data->regmap, BMP280_REG_CTRL_MEAS, ctrl_meas); if (ret) @@ -962,6 +964,9 @@ static int bmp085_fetch_eoc_irq(struct device *dev, "trying to enforce it\n"); irq_trig = IRQF_TRIGGER_RISING; } + + init_completion(&data->done); + ret = devm_request_threaded_irq(dev, irq, bmp085_eoc_irq, diff --git a/drivers/iio/pressure/ms5611_core.c b/drivers/iio/pressure/ms5611_core.c index 2f598ad91621..f5db9fa086f3 100644 --- a/drivers/iio/pressure/ms5611_core.c +++ 
b/drivers/iio/pressure/ms5611_core.c @@ -212,16 +212,21 @@ static irqreturn_t ms5611_trigger_handler(int irq, void *p) struct iio_poll_func *pf = p; struct iio_dev *indio_dev = pf->indio_dev; struct ms5611_state *st = iio_priv(indio_dev); - s32 buf[4]; /* s32 (pressure) + s32 (temp) + 2 * s32 (timestamp) */ + /* Ensure buffer elements are naturally aligned */ + struct { + s32 channels[2]; + s64 ts __aligned(8); + } scan; int ret; mutex_lock(&st->lock); - ret = ms5611_read_temp_and_pressure(indio_dev, &buf[1], &buf[0]); + ret = ms5611_read_temp_and_pressure(indio_dev, &scan.channels[1], + &scan.channels[0]); mutex_unlock(&st->lock); if (ret < 0) goto err; - iio_push_to_buffers_with_timestamp(indio_dev, buf, + iio_push_to_buffers_with_timestamp(indio_dev, &scan, iio_get_time_ns(indio_dev)); err: diff --git a/drivers/iio/pressure/zpa2326.c b/drivers/iio/pressure/zpa2326.c index 9d0d07930236..7f2e5a8942a4 100644 --- a/drivers/iio/pressure/zpa2326.c +++ b/drivers/iio/pressure/zpa2326.c @@ -664,8 +664,10 @@ static int zpa2326_resume(const struct iio_dev *indio_dev) int err; err = pm_runtime_get_sync(indio_dev->dev.parent); - if (err < 0) + if (err < 0) { + pm_runtime_put(indio_dev->dev.parent); return err; + } if (err > 0) { /* diff --git a/drivers/iio/proximity/mb1232.c b/drivers/iio/proximity/mb1232.c index 166b3e6d7db8..5254b1fbccfd 100644 --- a/drivers/iio/proximity/mb1232.c +++ b/drivers/iio/proximity/mb1232.c @@ -40,6 +40,11 @@ struct mb1232_data { */ struct completion ranging; int irqnr; + /* Ensure correct alignment of data to push to IIO buffer */ + struct { + s16 distance; + s64 ts __aligned(8); + } scan; }; static irqreturn_t mb1232_handle_irq(int irq, void *dev_id) @@ -113,17 +118,13 @@ static irqreturn_t mb1232_trigger_handler(int irq, void *p) struct iio_poll_func *pf = p; struct iio_dev *indio_dev = pf->indio_dev; struct mb1232_data *data = iio_priv(indio_dev); - /* - * triggered buffer - * 16-bit channel + 48-bit padding + 64-bit timestamp - */ - s16 buffer[8] = { 0 }; - buffer[0] = mb1232_read_distance(data); - if (buffer[0] < 0) + data->scan.distance = mb1232_read_distance(data); + if (data->scan.distance < 0) goto err; - iio_push_to_buffers_with_timestamp(indio_dev, buffer, pf->timestamp); + iio_push_to_buffers_with_timestamp(indio_dev, &data->scan, + pf->timestamp); err: iio_trigger_notify_done(indio_dev->trig); diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 8f776b7de45e..e3cd9d2b0dd2 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -1631,6 +1631,8 @@ static struct rdma_id_private *cma_find_listener( { struct rdma_id_private *id_priv, *id_priv_dev; + lockdep_assert_held(&lock); + if (!bind_list) return ERR_PTR(-EINVAL); @@ -1677,6 +1679,7 @@ cma_ib_id_from_event(struct ib_cm_id *cm_id, } } + mutex_lock(&lock); /* * Net namespace might be getting deleted while route lookup, * cm_id lookup is in progress. 
Therefore, perform netdevice @@ -1718,6 +1721,7 @@ cma_ib_id_from_event(struct ib_cm_id *cm_id, id_priv = cma_find_listener(bind_list, cm_id, ib_event, req, *net_dev); err: rcu_read_unlock(); + mutex_unlock(&lock); if (IS_ERR(id_priv) && *net_dev) { dev_put(*net_dev); *net_dev = NULL; @@ -2473,6 +2477,8 @@ static void cma_listen_on_dev(struct rdma_id_private *id_priv, struct net *net = id_priv->id.route.addr.dev_addr.net; int ret; + lockdep_assert_held(&lock); + if (cma_family(id_priv) == AF_IB && !rdma_cap_ib_cm(cma_dev->device, 1)) return; @@ -3245,6 +3251,8 @@ static void cma_bind_port(struct rdma_bind_list *bind_list, u64 sid, mask; __be16 port; + lockdep_assert_held(&lock); + addr = cma_src_addr(id_priv); port = htons(bind_list->port); @@ -3273,6 +3281,8 @@ static int cma_alloc_port(enum rdma_ucm_port_space ps, struct rdma_bind_list *bind_list; int ret; + lockdep_assert_held(&lock); + bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL); if (!bind_list) return -ENOMEM; @@ -3299,6 +3309,8 @@ static int cma_port_is_unique(struct rdma_bind_list *bind_list, struct sockaddr *saddr = cma_src_addr(id_priv); __be16 dport = cma_port(daddr); + lockdep_assert_held(&lock); + hlist_for_each_entry(cur_id, &bind_list->owners, node) { struct sockaddr *cur_daddr = cma_dst_addr(cur_id); struct sockaddr *cur_saddr = cma_src_addr(cur_id); @@ -3338,6 +3350,8 @@ static int cma_alloc_any_port(enum rdma_ucm_port_space ps, unsigned int rover; struct net *net = id_priv->id.route.addr.dev_addr.net; + lockdep_assert_held(&lock); + inet_get_local_port_range(net, &low, &high); remaining = (high - low) + 1; rover = prandom_u32() % remaining + low; @@ -3385,6 +3399,8 @@ static int cma_check_port(struct rdma_bind_list *bind_list, struct rdma_id_private *cur_id; struct sockaddr *addr, *cur_addr; + lockdep_assert_held(&lock); + addr = cma_src_addr(id_priv); hlist_for_each_entry(cur_id, &bind_list->owners, node) { if (id_priv == cur_id) @@ -3415,6 +3431,8 @@ static int cma_use_port(enum rdma_ucm_port_space ps, unsigned short snum; int ret; + lockdep_assert_held(&lock); + snum = ntohs(cma_port(cma_src_addr(id_priv))); if (snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE)) return -EACCES; diff --git a/drivers/infiniband/core/cma_configfs.c b/drivers/infiniband/core/cma_configfs.c index 8b0b5ae22e4c..726e70b68249 100644 --- a/drivers/infiniband/core/cma_configfs.c +++ b/drivers/infiniband/core/cma_configfs.c @@ -322,8 +322,21 @@ fail: return ERR_PTR(err); } +static void drop_cma_dev(struct config_group *cgroup, struct config_item *item) +{ + struct config_group *group = + container_of(item, struct config_group, cg_item); + struct cma_dev_group *cma_dev_group = + container_of(group, struct cma_dev_group, device_group); + + configfs_remove_default_groups(&cma_dev_group->ports_group); + configfs_remove_default_groups(&cma_dev_group->device_group); + config_item_put(item); +} + static struct configfs_group_operations cma_subsys_group_ops = { .make_group = make_cma_dev, + .drop_item = drop_cma_dev, }; static const struct config_item_type cma_subsys_type = { diff --git a/drivers/infiniband/core/counters.c b/drivers/infiniband/core/counters.c index 46dd50ff7c85..f454d63008d6 100644 --- a/drivers/infiniband/core/counters.c +++ b/drivers/infiniband/core/counters.c @@ -195,7 +195,7 @@ static int __rdma_counter_unbind_qp(struct ib_qp *qp) return ret; } -static void counter_history_stat_update(const struct rdma_counter *counter) +static void counter_history_stat_update(struct rdma_counter *counter) { struct ib_device *dev = 
counter->device; struct rdma_port_counter *port_counter; @@ -205,6 +205,8 @@ static void counter_history_stat_update(const struct rdma_counter *counter) if (!port_counter->hstats) return; + rdma_counter_query_stats(counter); + for (i = 0; i < counter->stats->num_counters; i++) port_counter->hstats->value[i] += counter->stats->value[i]; } @@ -282,7 +284,7 @@ int rdma_counter_bind_qp_auto(struct ib_qp *qp, u8 port) struct rdma_counter *counter; int ret; - if (!qp->res.valid) + if (!qp->res.valid || rdma_is_kernel_res(&qp->res)) return 0; if (!rdma_is_port_valid(dev, port)) @@ -485,7 +487,7 @@ int rdma_counter_bind_qpn(struct ib_device *dev, u8 port, goto err; } - if (counter->res.task != qp->res.task) { + if (rdma_is_kernel_res(&counter->res) != rdma_is_kernel_res(&qp->res)) { ret = -EINVAL; goto err_task; } diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c index 10ae6c6eab0a..59dc9f3cfb37 100644 --- a/drivers/infiniband/core/device.c +++ b/drivers/infiniband/core/device.c @@ -1330,6 +1330,10 @@ out: return ret; } +static void prevent_dealloc_device(struct ib_device *ib_dev) +{ +} + /** * ib_register_device - Register an IB device with IB core * @device:Device to register @@ -1397,11 +1401,11 @@ int ib_register_device(struct ib_device *device, const char *name) * possibility for a parallel unregistration along with this * error flow. Since we have a refcount here we know any * parallel flow is stopped in disable_device and will see the - * NULL pointers, causing the responsibility to + * special dealloc_driver pointer, causing the responsibility to * ib_dealloc_device() to revert back to this thread. */ dealloc_fn = device->ops.dealloc_driver; - device->ops.dealloc_driver = NULL; + device->ops.dealloc_driver = prevent_dealloc_device; ib_device_put(device); __ib_unregister_device(device); device->ops.dealloc_driver = dealloc_fn; @@ -1449,7 +1453,8 @@ static void __ib_unregister_device(struct ib_device *ib_dev) * Drivers using the new flow may not call ib_dealloc_device except * in error unwind prior to registration success. 
*/ - if (ib_dev->ops.dealloc_driver) { + if (ib_dev->ops.dealloc_driver && + ib_dev->ops.dealloc_driver != prevent_dealloc_device) { WARN_ON(kref_read(&ib_dev->dev.kobj.kref) <= 1); ib_dealloc_device(ib_dev); } diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c index 9947d16edef2..2284930b5f91 100644 --- a/drivers/infiniband/core/mad.c +++ b/drivers/infiniband/core/mad.c @@ -639,10 +639,10 @@ static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv) xa_erase(&ib_mad_clients, mad_agent_priv->agent.hi_tid); flush_workqueue(port_priv->wq); - ib_cancel_rmpp_recvs(mad_agent_priv); deref_mad_agent(mad_agent_priv); wait_for_completion(&mad_agent_priv->comp); + ib_cancel_rmpp_recvs(mad_agent_priv); ib_mad_agent_security_cleanup(&mad_agent_priv->agent); @@ -2960,6 +2960,7 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info, DMA_FROM_DEVICE); if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device, sg_list.addr))) { + kfree(mad_priv); ret = -ENOMEM; break; } diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c index 244ebf285fc3..e4905d9fecb0 100644 --- a/drivers/infiniband/core/nldev.c +++ b/drivers/infiniband/core/nldev.c @@ -702,9 +702,6 @@ static int fill_stat_counter_qps(struct sk_buff *msg, continue; qp = container_of(res, struct ib_qp, res); - if (qp->qp_type == IB_QPT_RAW_PACKET && !capable(CAP_NET_RAW)) - continue; - if (!qp->counter || (qp->counter->id != counter->id)) continue; diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c index bddb5434fbed..d2d70c89193f 100644 --- a/drivers/infiniband/core/sa_query.c +++ b/drivers/infiniband/core/sa_query.c @@ -829,13 +829,20 @@ static int ib_nl_get_path_rec_attrs_len(ib_sa_comp_mask comp_mask) return len; } -static int ib_nl_send_msg(struct ib_sa_query *query, gfp_t gfp_mask) +static int ib_nl_make_request(struct ib_sa_query *query, gfp_t gfp_mask) { struct sk_buff *skb = NULL; struct nlmsghdr *nlh; void *data; struct ib_sa_mad *mad; int len; + unsigned long flags; + unsigned long delay; + gfp_t gfp_flag; + int ret; + + INIT_LIST_HEAD(&query->list); + query->seq = (u32)atomic_inc_return(&ib_nl_sa_request_seq); mad = query->mad_buf->mad; len = ib_nl_get_path_rec_attrs_len(mad->sa_hdr.comp_mask); @@ -860,36 +867,25 @@ static int ib_nl_send_msg(struct ib_sa_query *query, gfp_t gfp_mask) /* Repair the nlmsg header length */ nlmsg_end(skb, nlh); - return rdma_nl_multicast(&init_net, skb, RDMA_NL_GROUP_LS, gfp_mask); -} + gfp_flag = ((gfp_mask & GFP_ATOMIC) == GFP_ATOMIC) ? 
GFP_ATOMIC : + GFP_NOWAIT; -static int ib_nl_make_request(struct ib_sa_query *query, gfp_t gfp_mask) -{ - unsigned long flags; - unsigned long delay; - int ret; - - INIT_LIST_HEAD(&query->list); - query->seq = (u32)atomic_inc_return(&ib_nl_sa_request_seq); - - /* Put the request on the list first.*/ spin_lock_irqsave(&ib_nl_request_lock, flags); + ret = rdma_nl_multicast(&init_net, skb, RDMA_NL_GROUP_LS, gfp_flag); + + if (ret) + goto out; + + /* Put the request on the list.*/ delay = msecs_to_jiffies(sa_local_svc_timeout_ms); query->timeout = delay + jiffies; list_add_tail(&query->list, &ib_nl_request_list); /* Start the timeout if this is the only request */ if (ib_nl_request_list.next == &query->list) queue_delayed_work(ib_nl_wq, &ib_nl_timed_work, delay); - spin_unlock_irqrestore(&ib_nl_request_lock, flags); - ret = ib_nl_send_msg(query, gfp_mask); - if (ret) { - ret = -EIO; - /* Remove the request */ - spin_lock_irqsave(&ib_nl_request_lock, flags); - list_del(&query->list); - spin_unlock_irqrestore(&ib_nl_request_lock, flags); - } +out: + spin_unlock_irqrestore(&ib_nl_request_lock, flags); return ret; } diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c index 7a50cedcef1f..091cca9d88ed 100644 --- a/drivers/infiniband/core/sysfs.c +++ b/drivers/infiniband/core/sysfs.c @@ -1060,8 +1060,7 @@ static int add_port(struct ib_core_device *coredev, int port_num) coredev->ports_kobj, "%d", port_num); if (ret) { - kfree(p); - return ret; + goto err_put; } p->gid_attr_group = kzalloc(sizeof(*p->gid_attr_group), GFP_KERNEL); @@ -1074,8 +1073,7 @@ static int add_port(struct ib_core_device *coredev, int port_num) ret = kobject_init_and_add(&p->gid_attr_group->kobj, &gid_attr_type, &p->kobj, "gid_attrs"); if (ret) { - kfree(p->gid_attr_group); - goto err_put; + goto err_put_gid_attrs; } if (device->ops.process_mad && is_full_dev) { @@ -1406,8 +1404,10 @@ int ib_port_register_module_stat(struct ib_device *device, u8 port_num, ret = kobject_init_and_add(kobj, ktype, &port->kobj, "%s", name); - if (ret) + if (ret) { + kobject_put(kobj); return ret; + } } return 0; diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index e2ddcb0dc4ee..c398d1a64614 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c @@ -757,6 +757,7 @@ static int ib_uverbs_reg_mr(struct uverbs_attr_bundle *attrs) mr->uobject = uobj; atomic_inc(&pd->usecnt); mr->res.type = RDMA_RESTRACK_MR; + mr->iova = cmd.hca_va; rdma_restrack_uadd(&mr->res); uobj->object = mr; @@ -847,6 +848,9 @@ static int ib_uverbs_rereg_mr(struct uverbs_attr_bundle *attrs) atomic_dec(&old_pd->usecnt); } + if (cmd.flags & IB_MR_REREG_TRANS) + mr->iova = cmd.hca_va; + memset(&resp, 0, sizeof(resp)); resp.lkey = mr->lkey; resp.rkey = mr->rkey; diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c index 6c4093d0a91d..5d896f6b2b61 100644 --- a/drivers/infiniband/core/verbs.c +++ b/drivers/infiniband/core/verbs.c @@ -1648,7 +1648,7 @@ static int _ib_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr, if (!(rdma_protocol_ib(qp->device, attr->alt_ah_attr.port_num) && rdma_protocol_ib(qp->device, port))) { - ret = EINVAL; + ret = -EINVAL; goto out; } } @@ -1749,7 +1749,7 @@ int ib_get_eth_speed(struct ib_device *dev, u8 port_num, u8 *speed, u8 *width) dev_put(netdev); - if (!rc) { + if (!rc && lksettings.base.speed != (u32)SPEED_UNKNOWN) { netdev_speed = lksettings.base.speed; } else { netdev_speed = SPEED_1000; diff --git 
a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c index ebc3e3d4a6e2..3b05c0640338 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c @@ -2973,6 +2973,19 @@ static void bnxt_re_process_res_rawqp1_wc(struct ib_wc *wc, wc->wc_flags |= IB_WC_GRH; } +static bool bnxt_re_check_if_vlan_valid(struct bnxt_re_dev *rdev, + u16 vlan_id) +{ + /* + * Check if the vlan is configured in the host. If not configured, it + * can be a transparent VLAN. So don't report the vlan id. + */ + if (!__vlan_find_dev_deep_rcu(rdev->netdev, + htons(ETH_P_8021Q), vlan_id)) + return false; + return true; +} + static bool bnxt_re_is_vlan_pkt(struct bnxt_qplib_cqe *orig_cqe, u16 *vid, u8 *sl) { @@ -3041,9 +3054,11 @@ static void bnxt_re_process_res_shadow_qp_wc(struct bnxt_re_qp *qp, wc->src_qp = orig_cqe->src_qp; memcpy(wc->smac, orig_cqe->smac, ETH_ALEN); if (bnxt_re_is_vlan_pkt(orig_cqe, &vlan_id, &sl)) { - wc->vlan_id = vlan_id; - wc->sl = sl; - wc->wc_flags |= IB_WC_WITH_VLAN; + if (bnxt_re_check_if_vlan_valid(rdev, vlan_id)) { + wc->vlan_id = vlan_id; + wc->sl = sl; + wc->wc_flags |= IB_WC_WITH_VLAN; + } } wc->port_num = 1; wc->vendor_err = orig_cqe->status; diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c index 27e2df44d043..cfe5f47d9890 100644 --- a/drivers/infiniband/hw/bnxt_re/main.c +++ b/drivers/infiniband/hw/bnxt_re/main.c @@ -789,7 +789,8 @@ static int bnxt_re_handle_qp_async_event(struct creq_qp_event *qp_event, struct ib_event event; unsigned int flags; - if (qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR) { + if (qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR && + rdma_is_kernel_res(&qp->ib_qp.res)) { flags = bnxt_re_lock_cqs(qp); bnxt_qplib_add_flush_qp(&qp->qplib_qp); bnxt_re_unlock_cqs(qp, flags); diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c index 40296b97d21e..079aaaaffec7 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c @@ -152,7 +152,7 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw, attr->max_inline_data = le32_to_cpu(sb->max_inline_data); attr->l2_db_size = (sb->l2_db_space_size + 1) * (0x01 << RCFW_DBR_BASE_PAGE_SHIFT); - attr->max_sgid = le32_to_cpu(sb->max_gid); + attr->max_sgid = BNXT_QPLIB_NUM_GIDS_SUPPORTED; bnxt_qplib_query_version(rcfw, attr->fw_ver); diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.h b/drivers/infiniband/hw/bnxt_re/qplib_sp.h index 13d9432d5ce2..194f5ef45ca6 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_sp.h +++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.h @@ -47,6 +47,7 @@ struct bnxt_qplib_dev_attr { #define FW_VER_ARR_LEN 4 u8 fw_ver[FW_VER_ARR_LEN]; +#define BNXT_QPLIB_NUM_GIDS_SUPPORTED 256 u16 max_sgid; u16 max_mrw; u32 max_qp; diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c index 599340c1f0b8..541dbcf22d0e 100644 --- a/drivers/infiniband/hw/cxgb4/device.c +++ b/drivers/infiniband/hw/cxgb4/device.c @@ -953,6 +953,7 @@ void c4iw_dealloc(struct uld_ctx *ctx) static void c4iw_remove(struct uld_ctx *ctx) { pr_debug("c4iw_dev %p\n", ctx->dev); + debugfs_remove_recursive(ctx->dev->debugfs_root); c4iw_unregister_device(ctx->dev); c4iw_dealloc(ctx); } diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c index 35c284af574d..dcb58cef336d 100644 --- a/drivers/infiniband/hw/cxgb4/mem.c +++ b/drivers/infiniband/hw/cxgb4/mem.c @@ -399,7 +399,6 @@ static int
finish_mem_reg(struct c4iw_mr *mhp, u32 stag) mmid = stag >> 8; mhp->ibmr.rkey = mhp->ibmr.lkey = stag; mhp->ibmr.length = mhp->attr.len; - mhp->ibmr.iova = mhp->attr.va_fbo; mhp->ibmr.page_size = 1U << (mhp->attr.page_size + 12); pr_debug("mmid 0x%x mhp %p\n", mmid, mhp); return xa_insert_irq(&mhp->rhp->mrs, mmid, mhp, GFP_KERNEL); diff --git a/drivers/infiniband/hw/hfi1/debugfs.c b/drivers/infiniband/hw/hfi1/debugfs.c index d268bf9c42ee..c29da2f4e339 100644 --- a/drivers/infiniband/hw/hfi1/debugfs.c +++ b/drivers/infiniband/hw/hfi1/debugfs.c @@ -985,15 +985,10 @@ static ssize_t qsfp2_debugfs_read(struct file *file, char __user *buf, static int __i2c_debugfs_open(struct inode *in, struct file *fp, u32 target) { struct hfi1_pportdata *ppd; - int ret; ppd = private2ppd(fp); - ret = acquire_chip_resource(ppd->dd, i2c_target(target), 0); - if (ret) /* failed - release the module */ - module_put(THIS_MODULE); - - return ret; + return acquire_chip_resource(ppd->dd, i2c_target(target), 0); } static int i2c1_debugfs_open(struct inode *in, struct file *fp) @@ -1013,7 +1008,6 @@ static int __i2c_debugfs_release(struct inode *in, struct file *fp, u32 target) ppd = private2ppd(fp); release_chip_resource(ppd->dd, i2c_target(target)); - module_put(THIS_MODULE); return 0; } @@ -1031,18 +1025,10 @@ static int i2c2_debugfs_release(struct inode *in, struct file *fp) static int __qsfp_debugfs_open(struct inode *in, struct file *fp, u32 target) { struct hfi1_pportdata *ppd; - int ret; - - if (!try_module_get(THIS_MODULE)) - return -ENODEV; ppd = private2ppd(fp); - ret = acquire_chip_resource(ppd->dd, i2c_target(target), 0); - if (ret) /* failed - release the module */ - module_put(THIS_MODULE); - - return ret; + return acquire_chip_resource(ppd->dd, i2c_target(target), 0); } static int qsfp1_debugfs_open(struct inode *in, struct file *fp) @@ -1062,7 +1048,6 @@ static int __qsfp_debugfs_release(struct inode *in, struct file *fp, u32 target) ppd = private2ppd(fp); release_chip_resource(ppd->dd, i2c_target(target)); - module_put(THIS_MODULE); return 0; } diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c index 26b792bb1027..fbff6b2f00e7 100644 --- a/drivers/infiniband/hw/hfi1/init.c +++ b/drivers/infiniband/hw/hfi1/init.c @@ -844,6 +844,29 @@ wq_error: return -ENOMEM; } +/** + * destroy_workqueues - destroy per port workqueues + * @dd: the hfi1_ib device + */ +static void destroy_workqueues(struct hfi1_devdata *dd) +{ + int pidx; + struct hfi1_pportdata *ppd; + + for (pidx = 0; pidx < dd->num_pports; ++pidx) { + ppd = dd->pport + pidx; + + if (ppd->hfi1_wq) { + destroy_workqueue(ppd->hfi1_wq); + ppd->hfi1_wq = NULL; + } + if (ppd->link_wq) { + destroy_workqueue(ppd->link_wq); + ppd->link_wq = NULL; + } + } +} + /** * enable_general_intr() - Enable the IRQs that will be handled by the * general interrupt handler. @@ -1117,15 +1140,10 @@ static void shutdown_device(struct hfi1_devdata *dd) * We can't count on interrupts since we are stopping. */ hfi1_quiet_serdes(ppd); - - if (ppd->hfi1_wq) { - destroy_workqueue(ppd->hfi1_wq); - ppd->hfi1_wq = NULL; - } - if (ppd->link_wq) { - destroy_workqueue(ppd->link_wq); - ppd->link_wq = NULL; - } + if (ppd->hfi1_wq) + flush_workqueue(ppd->hfi1_wq); + if (ppd->link_wq) + flush_workqueue(ppd->link_wq); } sdma_exit(dd); } @@ -1814,6 +1832,7 @@ static void remove_one(struct pci_dev *pdev) * clear dma engines, etc. 
*/ shutdown_device(dd); + destroy_workqueues(dd); stop_timers(dd); diff --git a/drivers/infiniband/hw/hfi1/qp.c b/drivers/infiniband/hw/hfi1/qp.c index f8e733aa3bb8..acd4400b0092 100644 --- a/drivers/infiniband/hw/hfi1/qp.c +++ b/drivers/infiniband/hw/hfi1/qp.c @@ -381,7 +381,10 @@ bool _hfi1_schedule_send(struct rvt_qp *qp) struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); - struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device); + struct hfi1_devdata *dd = ppd->dd; + + if (dd->flags & HFI1_SHUTDOWN) + return true; return iowait_schedule(&priv->s_iowait, ppd->hfi1_wq, priv->s_sde ? diff --git a/drivers/infiniband/hw/hfi1/tid_rdma.c b/drivers/infiniband/hw/hfi1/tid_rdma.c index 8a2e0d9351e9..c018fc633cca 100644 --- a/drivers/infiniband/hw/hfi1/tid_rdma.c +++ b/drivers/infiniband/hw/hfi1/tid_rdma.c @@ -3215,6 +3215,7 @@ bool hfi1_tid_rdma_wqe_interlock(struct rvt_qp *qp, struct rvt_swqe *wqe) case IB_WR_ATOMIC_CMP_AND_SWP: case IB_WR_ATOMIC_FETCH_AND_ADD: case IB_WR_RDMA_WRITE: + case IB_WR_RDMA_WRITE_WITH_IMM: switch (prev->wr.opcode) { case IB_WR_TID_RDMA_WRITE: req = wqe_to_tid_req(prev); @@ -5406,7 +5407,10 @@ static bool _hfi1_schedule_tid_send(struct rvt_qp *qp) struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); - struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device); + struct hfi1_devdata *dd = ppd->dd; + + if ((dd->flags & HFI1_SHUTDOWN)) + return true; return iowait_tid_schedule(&priv->s_iowait, ppd->hfi1_wq, priv->s_sde ? diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index 4540b00ccee9..0502c90c83ed 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c @@ -1349,34 +1349,26 @@ static int hns_roce_query_pf_resource(struct hns_roce_dev *hr_dev) static int hns_roce_query_pf_timer_resource(struct hns_roce_dev *hr_dev) { struct hns_roce_pf_timer_res_a *req_a; - struct hns_roce_cmq_desc desc[2]; - int ret, i; + struct hns_roce_cmq_desc desc; + int ret; - for (i = 0; i < 2; i++) { - hns_roce_cmq_setup_basic_desc(&desc[i], - HNS_ROCE_OPC_QUERY_PF_TIMER_RES, - true); + hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_PF_TIMER_RES, + true); - if (i == 0) - desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT); - else - desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT); - } - - ret = hns_roce_cmq_send(hr_dev, desc, 2); + ret = hns_roce_cmq_send(hr_dev, &desc, 1); if (ret) return ret; - req_a = (struct hns_roce_pf_timer_res_a *)desc[0].data; + req_a = (struct hns_roce_pf_timer_res_a *)desc.data; hr_dev->caps.qpc_timer_bt_num = - roce_get_field(req_a->qpc_timer_bt_idx_num, - PF_RES_DATA_1_PF_QPC_TIMER_BT_NUM_M, - PF_RES_DATA_1_PF_QPC_TIMER_BT_NUM_S); + roce_get_field(req_a->qpc_timer_bt_idx_num, + PF_RES_DATA_1_PF_QPC_TIMER_BT_NUM_M, + PF_RES_DATA_1_PF_QPC_TIMER_BT_NUM_S); hr_dev->caps.cqc_timer_bt_num = - roce_get_field(req_a->cqc_timer_bt_idx_num, - PF_RES_DATA_2_PF_CQC_TIMER_BT_NUM_M, - PF_RES_DATA_2_PF_CQC_TIMER_BT_NUM_S); + roce_get_field(req_a->cqc_timer_bt_idx_num, + PF_RES_DATA_2_PF_CQC_TIMER_BT_NUM_M, + PF_RES_DATA_2_PF_CQC_TIMER_BT_NUM_S); return 0; } @@ -4564,7 +4556,7 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, qp_attr->path_mig_state = IB_MIG_ARMED; qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE; if (hr_qp->ibqp.qp_type == IB_QPT_UD) - qp_attr->qkey = V2_QKEY_VAL; + qp_attr->qkey = 
le32_to_cpu(context.qkey_xrcd); qp_attr->rq_psn = roce_get_field(context.byte_108_rx_reqepsn, V2_QPC_BYTE_108_RX_REQ_EPSN_M, diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index 61a1b0bdede0..b8274c6fc43e 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -781,7 +781,8 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port, props->ip_gids = true; props->gid_tbl_len = mdev->dev->caps.gid_table_len[port]; props->max_msg_sz = mdev->dev->caps.max_msg_sz; - props->pkey_tbl_len = 1; + if (mdev->dev->caps.pkey_table_len[port]) + props->pkey_tbl_len = 1; props->max_mtu = IB_MTU_4096; props->max_vl_num = 2; props->state = IB_PORT_DOWN; diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c index 6ae503cfc526..9114cb730769 100644 --- a/drivers/infiniband/hw/mlx4/mr.c +++ b/drivers/infiniband/hw/mlx4/mr.c @@ -439,7 +439,6 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key; mr->ibmr.length = length; - mr->ibmr.iova = virt_addr; mr->ibmr.page_size = 1U << shift; return &mr->ibmr; diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c index d609f4659afb..fd75a9043bf1 100644 --- a/drivers/infiniband/hw/mlx5/devx.c +++ b/drivers/infiniband/hw/mlx5/devx.c @@ -489,6 +489,10 @@ static u64 devx_get_obj_id(const void *in) obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP, MLX5_GET(rst2init_qp_in, in, qpn)); break; + case MLX5_CMD_OP_INIT2INIT_QP: + obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP, + MLX5_GET(init2init_qp_in, in, qpn)); + break; case MLX5_CMD_OP_INIT2RTR_QP: obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP, MLX5_GET(init2rtr_qp_in, in, qpn)); @@ -814,6 +818,7 @@ static bool devx_is_obj_modify_cmd(const void *in) case MLX5_CMD_OP_SET_L2_TABLE_ENTRY: case MLX5_CMD_OP_RST2INIT_QP: case MLX5_CMD_OP_INIT2RTR_QP: + case MLX5_CMD_OP_INIT2INIT_QP: case MLX5_CMD_OP_RTR2RTS_QP: case MLX5_CMD_OP_RTS2RTS_QP: case MLX5_CMD_OP_SQERR2RTS_QP: diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 4f44a731a48e..b781ad74e6de 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -517,7 +517,7 @@ static int mlx5_query_port_roce(struct ib_device *device, u8 port_num, mdev_port_num); if (err) goto out; - ext = MLX5_CAP_PCAM_FEATURE(dev->mdev, ptys_extended_ethernet); + ext = !!MLX5_GET_ETH_PROTO(ptys_reg, out, true, eth_proto_capability); eth_prot_oper = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, eth_proto_oper); props->active_width = IB_WIDTH_4X; diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index 96edc5c30204..09e29c6cb66d 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -1463,6 +1463,8 @@ static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, u16 uid = to_mpd(pd)->uid; u32 out[MLX5_ST_SZ_DW(create_tir_out)] = {}; + if (!qp->sq.wqe_cnt && !qp->rq.wqe_cnt) + return -EINVAL; if (qp->sq.wqe_cnt) { err = create_raw_packet_qp_tis(dev, qp, sq, tdn, pd); if (err) diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c index 4e7fde86c96b..c29c1f7da4a1 100644 --- a/drivers/infiniband/hw/mlx5/srq.c +++ b/drivers/infiniband/hw/mlx5/srq.c @@ -310,12 +310,18 @@ int mlx5_ib_create_srq(struct ib_srq *ib_srq, srq->msrq.event = mlx5_ib_srq_event; srq->ibsrq.ext.xrc.srq_num = srq->msrq.srqn; - if (udata) - if (ib_copy_to_udata(udata, &srq->msrq.srqn, 
sizeof(__u32))) { + if (udata) { + struct mlx5_ib_create_srq_resp resp = { + .srqn = srq->msrq.srqn, + }; + + if (ib_copy_to_udata(udata, &resp, min(udata->outlen, + sizeof(resp)))) { mlx5_ib_dbg(dev, "copy to user failed\n"); err = -EFAULT; goto err_core; } + } init_attr->attr.max_wr = srq->msrq.max - 1; diff --git a/drivers/infiniband/hw/mlx5/srq_cmd.c b/drivers/infiniband/hw/mlx5/srq_cmd.c index 8fc3630a9d4c..0224231a2e6f 100644 --- a/drivers/infiniband/hw/mlx5/srq_cmd.c +++ b/drivers/infiniband/hw/mlx5/srq_cmd.c @@ -83,11 +83,11 @@ struct mlx5_core_srq *mlx5_cmd_get_srq(struct mlx5_ib_dev *dev, u32 srqn) struct mlx5_srq_table *table = &dev->srq_table; struct mlx5_core_srq *srq; - xa_lock(&table->array); + xa_lock_irq(&table->array); srq = xa_load(&table->array, srqn); if (srq) refcount_inc(&srq->common.refcount); - xa_unlock(&table->array); + xa_unlock_irq(&table->array); return srq; } diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h index 8e927f6c1520..ed56df319d2d 100644 --- a/drivers/infiniband/hw/qedr/qedr.h +++ b/drivers/infiniband/hw/qedr/qedr.h @@ -349,10 +349,10 @@ struct qedr_srq_hwq_info { u32 wqe_prod; u32 sge_prod; u32 wr_prod_cnt; - u32 wr_cons_cnt; + atomic_t wr_cons_cnt; u32 num_elems; - u32 *virt_prod_pair_addr; + struct rdma_srq_producers *virt_prod_pair_addr; dma_addr_t phy_prod_pair_addr; }; diff --git a/drivers/infiniband/hw/qedr/qedr_iw_cm.c b/drivers/infiniband/hw/qedr/qedr_iw_cm.c index 5e9732990be5..a7a926b7b562 100644 --- a/drivers/infiniband/hw/qedr/qedr_iw_cm.c +++ b/drivers/infiniband/hw/qedr/qedr_iw_cm.c @@ -150,8 +150,17 @@ qedr_iw_issue_event(void *context, if (params->cm_info) { event.ird = params->cm_info->ird; event.ord = params->cm_info->ord; - event.private_data_len = params->cm_info->private_data_len; - event.private_data = (void *)params->cm_info->private_data; + /* Only connect_request and reply have valid private data; + * for the rest of the events it may be leftovers from + * connection establishment. CONNECT_REQUEST is issued via + * qedr_iw_mpa_request. + */ + if (event_type == IW_CM_EVENT_CONNECT_REPLY) { + event.private_data_len = + params->cm_info->private_data_len; + event.private_data = + (void *)params->cm_info->private_data; + } } if (ep->cm_id) diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index 8b4240c1cc76..16a994fd7d0a 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c @@ -3460,7 +3460,7 @@ static u32 qedr_srq_elem_left(struct qedr_srq_hwq_info *hw_srq) * count and consumer count and subtract it from max * work request supported so that we get elements left.
*/ - used = hw_srq->wr_prod_cnt - hw_srq->wr_cons_cnt; + used = hw_srq->wr_prod_cnt - (u32)atomic_read(&hw_srq->wr_cons_cnt); return hw_srq->max_wr - used; } @@ -3475,7 +3475,6 @@ int qedr_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr, unsigned long flags; int status = 0; u32 num_sge; - u32 offset; spin_lock_irqsave(&srq->lock, flags); @@ -3488,7 +3487,8 @@ int qedr_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr, if (!qedr_srq_elem_left(hw_srq) || wr->num_sge > srq->hw_srq.max_sges) { DP_ERR(dev, "Can't post WR (%d,%d) || (%d > %d)\n", - hw_srq->wr_prod_cnt, hw_srq->wr_cons_cnt, + hw_srq->wr_prod_cnt, + atomic_read(&hw_srq->wr_cons_cnt), wr->num_sge, srq->hw_srq.max_sges); status = -ENOMEM; *bad_wr = wr; @@ -3522,22 +3522,20 @@ int qedr_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr, hw_srq->sge_prod++; } - /* Flush WQE and SGE information before + /* Update WQE and SGE information before * updating producer. */ - wmb(); + dma_wmb(); /* SRQ producer is 8 bytes. Need to update SGE producer index * in first 4 bytes and need to update WQE producer in * next 4 bytes. */ - *srq->hw_srq.virt_prod_pair_addr = hw_srq->sge_prod; - offset = offsetof(struct rdma_srq_producers, wqe_prod); - *((u8 *)srq->hw_srq.virt_prod_pair_addr + offset) = - hw_srq->wqe_prod; + srq->hw_srq.virt_prod_pair_addr->sge_prod = hw_srq->sge_prod; + /* Make sure sge producer is updated first */ + dma_wmb(); + srq->hw_srq.virt_prod_pair_addr->wqe_prod = hw_srq->wqe_prod; - /* Flush producer after updating it. */ - wmb(); wr = wr->next; } @@ -3956,7 +3954,7 @@ static int process_resp_one_srq(struct qedr_dev *dev, struct qedr_qp *qp, } else { __process_resp_one(dev, qp, cq, wc, resp, wr_id); } - srq->hw_srq.wr_cons_cnt++; + atomic_inc(&srq->hw_srq.wr_cons_cnt); return 1; } diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c index d35465389357..d14ad523f96c 100644 --- a/drivers/infiniband/sw/rdmavt/qp.c +++ b/drivers/infiniband/sw/rdmavt/qp.c @@ -898,8 +898,6 @@ static void rvt_init_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, qp->s_tail_ack_queue = 0; qp->s_acked_ack_queue = 0; qp->s_num_rd_atomic = 0; - if (qp->r_rq.kwq) - qp->r_rq.kwq->count = qp->r_rq.size; qp->r_sge.num_sge = 0; atomic_set(&qp->s_reserved_used, 0); } @@ -1196,7 +1194,7 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd, err = alloc_ud_wq_attr(qp, rdi->dparms.node); if (err) { ret = (ERR_PTR(err)); - goto bail_driver_priv; + goto bail_rq_rvt; } err = alloc_qpn(rdi, &rdi->qp_dev->qpn_table, @@ -1300,9 +1298,11 @@ bail_qpn: rvt_free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num); bail_rq_wq: - rvt_free_rq(&qp->r_rq); free_ud_wq_attr(qp); +bail_rq_rvt: + rvt_free_rq(&qp->r_rq); + bail_driver_priv: rdi->driver_f.qp_priv_free(rdi, qp); @@ -2350,31 +2350,6 @@ bad_lkey: return 0; } -/** - * get_count - count numbers of request work queue entries - * in circular buffer - * @rq: data structure for request queue entry - * @tail: tail indices of the circular buffer - * @head: head indices of the circular buffer - * - * Return - total number of entries in the circular buffer - */ -static u32 get_count(struct rvt_rq *rq, u32 tail, u32 head) -{ - u32 count; - - count = head; - - if (count >= rq->size) - count = 0; - if (count < tail) - count += rq->size - tail; - else - count -= tail; - - return count; -} - /** * get_rvt_head - get head indices of the circular buffer * @rq: data structure for request queue entry @@ -2449,7 +2424,7 @@ int rvt_get_rwqe(struct rvt_qp *qp, bool wr_id_only) if 
(kwq->count < RVT_RWQ_COUNT_THRESHOLD) { head = get_rvt_head(rq, ip); - kwq->count = get_count(rq, tail, head); + kwq->count = rvt_get_rq_count(rq, head, tail); } if (unlikely(kwq->count == 0)) { ret = 0; @@ -2484,7 +2459,9 @@ int rvt_get_rwqe(struct rvt_qp *qp, bool wr_id_only) * the number of remaining WQEs. */ if (kwq->count < srq->limit) { - kwq->count = get_count(rq, tail, get_rvt_head(rq, ip)); + kwq->count = + rvt_get_rq_count(rq, + get_rvt_head(rq, ip), tail); if (kwq->count < srq->limit) { struct ib_event ev; diff --git a/drivers/infiniband/sw/rdmavt/rc.c b/drivers/infiniband/sw/rdmavt/rc.c index 890d7b760d2e..27415185d862 100644 --- a/drivers/infiniband/sw/rdmavt/rc.c +++ b/drivers/infiniband/sw/rdmavt/rc.c @@ -127,9 +127,7 @@ __be32 rvt_compute_aeth(struct rvt_qp *qp) * not atomic, which is OK, since the fuzziness is * resolved as further ACKs go out. */ - credits = head - tail; - if ((int)credits < 0) - credits += qp->r_rq.size; + credits = rvt_get_rq_count(&qp->r_rq, head, tail); } /* * Binary search the credit table to find the code to diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c index a8c11b5e1e94..70c4ea438664 100644 --- a/drivers/infiniband/sw/rxe/rxe.c +++ b/drivers/infiniband/sw/rxe/rxe.c @@ -48,6 +48,8 @@ static void rxe_cleanup_ports(struct rxe_dev *rxe) } +bool rxe_initialized; + /* free resources for a rxe device all objects created for this device must * have been destroyed */ @@ -157,9 +159,6 @@ static int rxe_init_ports(struct rxe_dev *rxe) rxe_init_port_param(port); - if (!port->attr.pkey_tbl_len || !port->attr.gid_tbl_len) - return -EINVAL; - port->pkey_tbl = kcalloc(port->attr.pkey_tbl_len, sizeof(*port->pkey_tbl), GFP_KERNEL); @@ -358,6 +357,7 @@ static int __init rxe_module_init(void) return err; rdma_link_register(&rxe_link_ops); + rxe_initialized = true; pr_info("loaded\n"); return 0; } @@ -369,6 +369,7 @@ static void __exit rxe_module_exit(void) rxe_net_exit(); rxe_cache_exit(); + rxe_initialized = false; pr_info("unloaded\n"); } diff --git a/drivers/infiniband/sw/rxe/rxe.h b/drivers/infiniband/sw/rxe/rxe.h index fb07eed9e402..cae1b0a24c85 100644 --- a/drivers/infiniband/sw/rxe/rxe.h +++ b/drivers/infiniband/sw/rxe/rxe.h @@ -67,6 +67,8 @@ #define RXE_ROCE_V2_SPORT (0xc000) +extern bool rxe_initialized; + static inline u32 rxe_crc32(struct rxe_dev *rxe, u32 crc, void *next, size_t len) { diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c index ea6a819b7167..ffbc50341a55 100644 --- a/drivers/infiniband/sw/rxe/rxe_mr.c +++ b/drivers/infiniband/sw/rxe/rxe_mr.c @@ -207,6 +207,7 @@ int rxe_mem_init_user(struct rxe_pd *pd, u64 start, vaddr = page_address(sg_page_iter_page(&sg_iter)); if (!vaddr) { pr_warn("null vaddr\n"); + ib_umem_release(umem); err = -ENOMEM; goto err1; } diff --git a/drivers/infiniband/sw/rxe/rxe_recv.c b/drivers/infiniband/sw/rxe/rxe_recv.c index 831ad578a7b2..46e111c218fd 100644 --- a/drivers/infiniband/sw/rxe/rxe_recv.c +++ b/drivers/infiniband/sw/rxe/rxe_recv.c @@ -330,10 +330,14 @@ err1: static int rxe_match_dgid(struct rxe_dev *rxe, struct sk_buff *skb) { + struct rxe_pkt_info *pkt = SKB_TO_PKT(skb); const struct ib_gid_attr *gid_attr; union ib_gid dgid; union ib_gid *pdgid; + if (pkt->mask & RXE_LOOPBACK_MASK) + return 0; + if (skb->protocol == htons(ETH_P_IP)) { ipv6_addr_set_v4mapped(ip_hdr(skb)->daddr, (struct in6_addr *)&dgid); @@ -366,7 +370,7 @@ void rxe_rcv(struct sk_buff *skb) if (unlikely(skb->len < pkt->offset + RXE_BTH_BYTES)) goto drop; - if 
(unlikely(rxe_match_dgid(rxe, skb) < 0)) { + if (rxe_match_dgid(rxe, skb) < 0) { pr_warn_ratelimited("failed matching dgid\n"); goto drop; } diff --git a/drivers/infiniband/sw/rxe/rxe_sysfs.c b/drivers/infiniband/sw/rxe/rxe_sysfs.c index ccda5f5a3bc0..2af31d421bfc 100644 --- a/drivers/infiniband/sw/rxe/rxe_sysfs.c +++ b/drivers/infiniband/sw/rxe/rxe_sysfs.c @@ -61,6 +61,11 @@ static int rxe_param_set_add(const char *val, const struct kernel_param *kp) struct net_device *ndev; struct rxe_dev *exists; + if (!rxe_initialized) { + pr_err("Module parameters are not supported, use rdma link add or rxe_cfg\n"); + return -EAGAIN; + } + len = sanitize_arg(val, intf, sizeof(intf)); if (!len) { pr_err("add: invalid interface name\n"); diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c index 623129f27f5a..d1fe57ac87f5 100644 --- a/drivers/infiniband/sw/rxe/rxe_verbs.c +++ b/drivers/infiniband/sw/rxe/rxe_verbs.c @@ -679,6 +679,7 @@ static int rxe_post_send_kernel(struct rxe_qp *qp, const struct ib_send_wr *wr, unsigned int mask; unsigned int length = 0; int i; + struct ib_send_wr *next; while (wr) { mask = wr_opcode_mask(wr->opcode, qp); @@ -695,6 +696,8 @@ static int rxe_post_send_kernel(struct rxe_qp *qp, const struct ib_send_wr *wr, break; } + next = wr->next; + length = 0; for (i = 0; i < wr->num_sge; i++) length += wr->sg_list[i].length; @@ -705,7 +708,7 @@ static int rxe_post_send_kernel(struct rxe_qp *qp, const struct ib_send_wr *wr, *bad_wr = wr; break; } - wr = wr->next; + wr = next; } rxe_run_task(&qp->req.task, 1); @@ -1075,7 +1078,7 @@ static ssize_t parent_show(struct device *device, struct rxe_dev *rxe = rdma_device_to_drv_device(device, struct rxe_dev, ib_dev); - return snprintf(buf, 16, "%s\n", rxe_parent_name(rxe, 1)); + return scnprintf(buf, PAGE_SIZE, "%s\n", rxe_parent_name(rxe, 1)); } static DEVICE_ATTR_RO(parent); diff --git a/drivers/infiniband/sw/siw/siw_main.c b/drivers/infiniband/sw/siw/siw_main.c index 130b1e31b978..fb66d6757278 100644 --- a/drivers/infiniband/sw/siw/siw_main.c +++ b/drivers/infiniband/sw/siw/siw_main.c @@ -66,12 +66,13 @@ static int siw_device_register(struct siw_device *sdev, const char *name) static int dev_id = 1; int rv; + sdev->vendor_part_id = dev_id++; + rv = ib_register_device(base_dev, name); if (rv) { pr_warn("siw: device registration error %d\n", rv); return rv; } - sdev->vendor_part_id = dev_id++; siw_dbg(base_dev, "HWaddr=%pM\n", sdev->netdev->dev_addr); diff --git a/drivers/infiniband/sw/siw/siw_qp_rx.c b/drivers/infiniband/sw/siw/siw_qp_rx.c index c0a887240325..0520e70084f9 100644 --- a/drivers/infiniband/sw/siw/siw_qp_rx.c +++ b/drivers/infiniband/sw/siw/siw_qp_rx.c @@ -139,7 +139,8 @@ static int siw_rx_pbl(struct siw_rx_stream *srx, int *pbl_idx, break; bytes = min(bytes, len); - if (siw_rx_kva(srx, (void *)buf_addr, bytes) == bytes) { + if (siw_rx_kva(srx, (void *)(uintptr_t)buf_addr, bytes) == + bytes) { copied += bytes; offset += bytes; len -= bytes; diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h index 0e5f27caf2b2..50a355738609 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib.h +++ b/drivers/infiniband/ulp/ipoib/ipoib.h @@ -515,7 +515,7 @@ void ipoib_ib_dev_cleanup(struct net_device *dev); int ipoib_ib_dev_open_default(struct net_device *dev); int ipoib_ib_dev_open(struct net_device *dev); -int ipoib_ib_dev_stop(struct net_device *dev); +void ipoib_ib_dev_stop(struct net_device *dev); void ipoib_ib_dev_up(struct net_device *dev); void 
ipoib_ib_dev_down(struct net_device *dev); int ipoib_ib_dev_stop_default(struct net_device *dev); diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c index da3c5315bbb5..494f413dc3c6 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c @@ -670,13 +670,12 @@ int ipoib_send(struct net_device *dev, struct sk_buff *skb, return rc; } -static void __ipoib_reap_ah(struct net_device *dev) +static void ipoib_reap_dead_ahs(struct ipoib_dev_priv *priv) { - struct ipoib_dev_priv *priv = ipoib_priv(dev); struct ipoib_ah *ah, *tah; unsigned long flags; - netif_tx_lock_bh(dev); + netif_tx_lock_bh(priv->dev); spin_lock_irqsave(&priv->lock, flags); list_for_each_entry_safe(ah, tah, &priv->dead_ahs, list) @@ -687,37 +686,37 @@ static void __ipoib_reap_ah(struct net_device *dev) } spin_unlock_irqrestore(&priv->lock, flags); - netif_tx_unlock_bh(dev); + netif_tx_unlock_bh(priv->dev); } void ipoib_reap_ah(struct work_struct *work) { struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv, ah_reap_task.work); - struct net_device *dev = priv->dev; - __ipoib_reap_ah(dev); + ipoib_reap_dead_ahs(priv); if (!test_bit(IPOIB_STOP_REAPER, &priv->flags)) queue_delayed_work(priv->wq, &priv->ah_reap_task, round_jiffies_relative(HZ)); } -static void ipoib_flush_ah(struct net_device *dev) +static void ipoib_start_ah_reaper(struct ipoib_dev_priv *priv) { - struct ipoib_dev_priv *priv = ipoib_priv(dev); - - cancel_delayed_work(&priv->ah_reap_task); - flush_workqueue(priv->wq); - ipoib_reap_ah(&priv->ah_reap_task.work); + clear_bit(IPOIB_STOP_REAPER, &priv->flags); + queue_delayed_work(priv->wq, &priv->ah_reap_task, + round_jiffies_relative(HZ)); } -static void ipoib_stop_ah(struct net_device *dev) +static void ipoib_stop_ah_reaper(struct ipoib_dev_priv *priv) { - struct ipoib_dev_priv *priv = ipoib_priv(dev); - set_bit(IPOIB_STOP_REAPER, &priv->flags); - ipoib_flush_ah(dev); + cancel_delayed_work(&priv->ah_reap_task); + /* + * After ipoib_stop_ah_reaper() we always go through + * ipoib_reap_dead_ahs() which ensures the work is really stopped and + * does a final flush out of the dead_ah's list + */ } static int recvs_pending(struct net_device *dev) @@ -846,18 +845,6 @@ timeout: return 0; } -int ipoib_ib_dev_stop(struct net_device *dev) -{ - struct ipoib_dev_priv *priv = ipoib_priv(dev); - - priv->rn_ops->ndo_stop(dev); - - clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags); - ipoib_flush_ah(dev); - - return 0; -} - int ipoib_ib_dev_open_default(struct net_device *dev) { struct ipoib_dev_priv *priv = ipoib_priv(dev); @@ -901,10 +888,7 @@ int ipoib_ib_dev_open(struct net_device *dev) return -1; } - clear_bit(IPOIB_STOP_REAPER, &priv->flags); - queue_delayed_work(priv->wq, &priv->ah_reap_task, - round_jiffies_relative(HZ)); - + ipoib_start_ah_reaper(priv); if (priv->rn_ops->ndo_open(dev)) { pr_warn("%s: Failed to open dev\n", dev->name); goto dev_stop; @@ -915,13 +899,20 @@ int ipoib_ib_dev_open(struct net_device *dev) return 0; dev_stop: - set_bit(IPOIB_STOP_REAPER, &priv->flags); - cancel_delayed_work(&priv->ah_reap_task); - set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags); - ipoib_ib_dev_stop(dev); + ipoib_stop_ah_reaper(priv); return -1; } +void ipoib_ib_dev_stop(struct net_device *dev) +{ + struct ipoib_dev_priv *priv = ipoib_priv(dev); + + priv->rn_ops->ndo_stop(dev); + + clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags); + ipoib_stop_ah_reaper(priv); +} + void ipoib_pkey_dev_check_presence(struct net_device *dev) { struct 
ipoib_dev_priv *priv = ipoib_priv(dev); @@ -1232,7 +1223,7 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv, ipoib_mcast_dev_flush(dev); if (oper_up) set_bit(IPOIB_FLAG_OPER_UP, &priv->flags); - ipoib_flush_ah(dev); + ipoib_reap_dead_ahs(priv); } if (level >= IPOIB_FLUSH_NORMAL) @@ -1307,7 +1298,7 @@ void ipoib_ib_dev_cleanup(struct net_device *dev) * the neighbor garbage collection is stopped and reaped. * That should all be done now, so make a final ah flush. */ - ipoib_stop_ah(dev); + ipoib_reap_dead_ahs(priv); clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags); diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index 4fd095fd63b6..044bcacad6e4 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c @@ -1979,6 +1979,8 @@ static void ipoib_ndo_uninit(struct net_device *dev) /* no more works over the priv->wq */ if (priv->wq) { + /* See ipoib_mcast_carrier_on_task() */ + WARN_ON(test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)); flush_workqueue(priv->wq); destroy_workqueue(priv->wq); priv->wq = NULL; diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index a1a035270cab..71268d61d2b8 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c @@ -182,15 +182,15 @@ isert_alloc_rx_descriptors(struct isert_conn *isert_conn) rx_desc = isert_conn->rx_descs; for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) { - dma_addr = ib_dma_map_single(ib_dev, (void *)rx_desc, - ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); + dma_addr = ib_dma_map_single(ib_dev, rx_desc->buf, + ISER_RX_SIZE, DMA_FROM_DEVICE); if (ib_dma_mapping_error(ib_dev, dma_addr)) goto dma_map_fail; rx_desc->dma_addr = dma_addr; rx_sg = &rx_desc->rx_sg; - rx_sg->addr = rx_desc->dma_addr; + rx_sg->addr = rx_desc->dma_addr + isert_get_hdr_offset(rx_desc); rx_sg->length = ISER_RX_PAYLOAD_SIZE; rx_sg->lkey = device->pd->local_dma_lkey; rx_desc->rx_cqe.done = isert_recv_done; @@ -202,7 +202,7 @@ dma_map_fail: rx_desc = isert_conn->rx_descs; for (j = 0; j < i; j++, rx_desc++) { ib_dma_unmap_single(ib_dev, rx_desc->dma_addr, - ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); + ISER_RX_SIZE, DMA_FROM_DEVICE); } kfree(isert_conn->rx_descs); isert_conn->rx_descs = NULL; @@ -223,7 +223,7 @@ isert_free_rx_descriptors(struct isert_conn *isert_conn) rx_desc = isert_conn->rx_descs; for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) { ib_dma_unmap_single(ib_dev, rx_desc->dma_addr, - ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); + ISER_RX_SIZE, DMA_FROM_DEVICE); } kfree(isert_conn->rx_descs); @@ -408,10 +408,9 @@ isert_free_login_buf(struct isert_conn *isert_conn) ISER_RX_PAYLOAD_SIZE, DMA_TO_DEVICE); kfree(isert_conn->login_rsp_buf); - ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma, - ISER_RX_PAYLOAD_SIZE, - DMA_FROM_DEVICE); - kfree(isert_conn->login_req_buf); + ib_dma_unmap_single(ib_dev, isert_conn->login_desc->dma_addr, + ISER_RX_SIZE, DMA_FROM_DEVICE); + kfree(isert_conn->login_desc); } static int @@ -420,25 +419,25 @@ isert_alloc_login_buf(struct isert_conn *isert_conn, { int ret; - isert_conn->login_req_buf = kzalloc(sizeof(*isert_conn->login_req_buf), + isert_conn->login_desc = kzalloc(sizeof(*isert_conn->login_desc), GFP_KERNEL); - if (!isert_conn->login_req_buf) + if (!isert_conn->login_desc) return -ENOMEM; - isert_conn->login_req_dma = ib_dma_map_single(ib_dev, - isert_conn->login_req_buf, - ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); - ret = 
ib_dma_mapping_error(ib_dev, isert_conn->login_req_dma); + isert_conn->login_desc->dma_addr = ib_dma_map_single(ib_dev, + isert_conn->login_desc->buf, + ISER_RX_SIZE, DMA_FROM_DEVICE); + ret = ib_dma_mapping_error(ib_dev, isert_conn->login_desc->dma_addr); if (ret) { - isert_err("login_req_dma mapping error: %d\n", ret); - isert_conn->login_req_dma = 0; - goto out_free_login_req_buf; + isert_err("login_desc dma mapping error: %d\n", ret); + isert_conn->login_desc->dma_addr = 0; + goto out_free_login_desc; } isert_conn->login_rsp_buf = kzalloc(ISER_RX_PAYLOAD_SIZE, GFP_KERNEL); if (!isert_conn->login_rsp_buf) { ret = -ENOMEM; - goto out_unmap_login_req_buf; + goto out_unmap_login_desc; } isert_conn->login_rsp_dma = ib_dma_map_single(ib_dev, @@ -455,11 +454,11 @@ isert_alloc_login_buf(struct isert_conn *isert_conn, out_free_login_rsp_buf: kfree(isert_conn->login_rsp_buf); -out_unmap_login_req_buf: - ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma, - ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); -out_free_login_req_buf: - kfree(isert_conn->login_req_buf); +out_unmap_login_desc: + ib_dma_unmap_single(ib_dev, isert_conn->login_desc->dma_addr, + ISER_RX_SIZE, DMA_FROM_DEVICE); +out_free_login_desc: + kfree(isert_conn->login_desc); return ret; } @@ -578,7 +577,7 @@ isert_connect_release(struct isert_conn *isert_conn) ib_destroy_qp(isert_conn->qp); } - if (isert_conn->login_req_buf) + if (isert_conn->login_desc) isert_free_login_buf(isert_conn); isert_device_put(device); @@ -964,17 +963,18 @@ isert_login_post_recv(struct isert_conn *isert_conn) int ret; memset(&sge, 0, sizeof(struct ib_sge)); - sge.addr = isert_conn->login_req_dma; + sge.addr = isert_conn->login_desc->dma_addr + + isert_get_hdr_offset(isert_conn->login_desc); sge.length = ISER_RX_PAYLOAD_SIZE; sge.lkey = isert_conn->device->pd->local_dma_lkey; isert_dbg("Setup sge: addr: %llx length: %d 0x%08x\n", sge.addr, sge.length, sge.lkey); - isert_conn->login_req_buf->rx_cqe.done = isert_login_recv_done; + isert_conn->login_desc->rx_cqe.done = isert_login_recv_done; memset(&rx_wr, 0, sizeof(struct ib_recv_wr)); - rx_wr.wr_cqe = &isert_conn->login_req_buf->rx_cqe; + rx_wr.wr_cqe = &isert_conn->login_desc->rx_cqe; rx_wr.sg_list = &sge; rx_wr.num_sge = 1; @@ -1051,7 +1051,7 @@ post_send: static void isert_rx_login_req(struct isert_conn *isert_conn) { - struct iser_rx_desc *rx_desc = isert_conn->login_req_buf; + struct iser_rx_desc *rx_desc = isert_conn->login_desc; int rx_buflen = isert_conn->login_req_len; struct iscsi_conn *conn = isert_conn->conn; struct iscsi_login *login = conn->conn_login; @@ -1063,7 +1063,7 @@ isert_rx_login_req(struct isert_conn *isert_conn) if (login->first_request) { struct iscsi_login_req *login_req = - (struct iscsi_login_req *)&rx_desc->iscsi_header; + (struct iscsi_login_req *)isert_get_iscsi_hdr(rx_desc); /* * Setup the initial iscsi_login values from the leading * login request PDU. 
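Editor's aside: the isert changes above and below replace the fixed-layout rx descriptor with one flat receive buffer and derive the iSER header, iSCSI header and payload pointers from a single PTR_ALIGN() computation (the helpers appear in the ib_isert.h hunk further down), so the payload always starts on a 512-byte boundary. A stand-alone sketch of that arithmetic, with illustrative constants standing in for ISER_HEADERS_LEN and ISER_RX_SIZE (not code from the patch):

#include <stdint.h>
#include <stdio.h>

#define HDRS_LEN   76      /* stand-in for ISER_HEADERS_LEN (iSER + iSCSI headers) */
#define DATA_ALIGN 512

/* Same idea as PTR_ALIGN(buf + HDRS_LEN, 512) - HDRS_LEN in the patch:
 * place the headers so that the payload right after them is 512-aligned. */
static char *hdr_start(char *buf)
{
        uintptr_t p = (uintptr_t)buf + HDRS_LEN;

        p = (p + DATA_ALIGN - 1) & ~(uintptr_t)(DATA_ALIGN - 1);
        return (char *)(p - HDRS_LEN);
}

int main(void)
{
        static char buf[8192 + 1024];   /* roughly models ISER_RX_SIZE */
        char *hdr  = hdr_start(buf);
        char *data = hdr + HDRS_LEN;

        printf("header offset %zu, data offset %zu, data 512-aligned: %d\n",
               (size_t)(hdr - buf), (size_t)(data - buf),
               ((uintptr_t)data % DATA_ALIGN) == 0);
        return 0;
}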
@@ -1082,13 +1082,13 @@ isert_rx_login_req(struct isert_conn *isert_conn) login->tsih = be16_to_cpu(login_req->tsih); } - memcpy(&login->req[0], (void *)&rx_desc->iscsi_header, ISCSI_HDR_LEN); + memcpy(&login->req[0], isert_get_iscsi_hdr(rx_desc), ISCSI_HDR_LEN); size = min(rx_buflen, MAX_KEY_VALUE_PAIRS); isert_dbg("Using login payload size: %d, rx_buflen: %d " "MAX_KEY_VALUE_PAIRS: %d\n", size, rx_buflen, MAX_KEY_VALUE_PAIRS); - memcpy(login->req_buf, &rx_desc->data[0], size); + memcpy(login->req_buf, isert_get_data(rx_desc), size); if (login->first_request) { complete(&isert_conn->login_comp); @@ -1153,14 +1153,15 @@ isert_handle_scsi_cmd(struct isert_conn *isert_conn, if (imm_data_len != data_len) { sg_nents = max(1UL, DIV_ROUND_UP(imm_data_len, PAGE_SIZE)); sg_copy_from_buffer(cmd->se_cmd.t_data_sg, sg_nents, - &rx_desc->data[0], imm_data_len); + isert_get_data(rx_desc), imm_data_len); isert_dbg("Copy Immediate sg_nents: %u imm_data_len: %d\n", sg_nents, imm_data_len); } else { sg_init_table(&isert_cmd->sg, 1); cmd->se_cmd.t_data_sg = &isert_cmd->sg; cmd->se_cmd.t_data_nents = 1; - sg_set_buf(&isert_cmd->sg, &rx_desc->data[0], imm_data_len); + sg_set_buf(&isert_cmd->sg, isert_get_data(rx_desc), + imm_data_len); isert_dbg("Transfer Immediate imm_data_len: %d\n", imm_data_len); } @@ -1229,9 +1230,9 @@ isert_handle_iscsi_dataout(struct isert_conn *isert_conn, } isert_dbg("Copying DataOut: sg_start: %p, sg_off: %u " "sg_nents: %u from %p %u\n", sg_start, sg_off, - sg_nents, &rx_desc->data[0], unsol_data_len); + sg_nents, isert_get_data(rx_desc), unsol_data_len); - sg_copy_from_buffer(sg_start, sg_nents, &rx_desc->data[0], + sg_copy_from_buffer(sg_start, sg_nents, isert_get_data(rx_desc), unsol_data_len); rc = iscsit_check_dataout_payload(cmd, hdr, false); @@ -1290,7 +1291,7 @@ isert_handle_text_cmd(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd } cmd->text_in_ptr = text_in; - memcpy(cmd->text_in_ptr, &rx_desc->data[0], payload_length); + memcpy(cmd->text_in_ptr, isert_get_data(rx_desc), payload_length); return iscsit_process_text_cmd(conn, cmd, hdr); } @@ -1300,7 +1301,7 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc, uint32_t read_stag, uint64_t read_va, uint32_t write_stag, uint64_t write_va) { - struct iscsi_hdr *hdr = &rx_desc->iscsi_header; + struct iscsi_hdr *hdr = isert_get_iscsi_hdr(rx_desc); struct iscsi_conn *conn = isert_conn->conn; struct iscsi_cmd *cmd; struct isert_cmd *isert_cmd; @@ -1398,8 +1399,8 @@ isert_recv_done(struct ib_cq *cq, struct ib_wc *wc) struct isert_conn *isert_conn = wc->qp->qp_context; struct ib_device *ib_dev = isert_conn->cm_id->device; struct iser_rx_desc *rx_desc = cqe_to_rx_desc(wc->wr_cqe); - struct iscsi_hdr *hdr = &rx_desc->iscsi_header; - struct iser_ctrl *iser_ctrl = &rx_desc->iser_header; + struct iscsi_hdr *hdr = isert_get_iscsi_hdr(rx_desc); + struct iser_ctrl *iser_ctrl = isert_get_iser_hdr(rx_desc); uint64_t read_va = 0, write_va = 0; uint32_t read_stag = 0, write_stag = 0; @@ -1413,7 +1414,7 @@ isert_recv_done(struct ib_cq *cq, struct ib_wc *wc) rx_desc->in_use = true; ib_dma_sync_single_for_cpu(ib_dev, rx_desc->dma_addr, - ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); + ISER_RX_SIZE, DMA_FROM_DEVICE); isert_dbg("DMA: 0x%llx, iSCSI opcode: 0x%02x, ITT: 0x%08x, flags: 0x%02x dlen: %d\n", rx_desc->dma_addr, hdr->opcode, hdr->itt, hdr->flags, @@ -1448,7 +1449,7 @@ isert_recv_done(struct ib_cq *cq, struct ib_wc *wc) read_stag, read_va, write_stag, write_va); ib_dma_sync_single_for_device(ib_dev, 
rx_desc->dma_addr, - ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); + ISER_RX_SIZE, DMA_FROM_DEVICE); } static void @@ -1462,8 +1463,8 @@ isert_login_recv_done(struct ib_cq *cq, struct ib_wc *wc) return; } - ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_req_dma, - ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); + ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_desc->dma_addr, + ISER_RX_SIZE, DMA_FROM_DEVICE); isert_conn->login_req_len = wc->byte_len - ISER_HEADERS_LEN; @@ -1478,8 +1479,8 @@ isert_login_recv_done(struct ib_cq *cq, struct ib_wc *wc) complete(&isert_conn->login_req_comp); mutex_unlock(&isert_conn->mutex); - ib_dma_sync_single_for_device(ib_dev, isert_conn->login_req_dma, - ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); + ib_dma_sync_single_for_device(ib_dev, isert_conn->login_desc->dma_addr, + ISER_RX_SIZE, DMA_FROM_DEVICE); } static void diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h index 3b296bac4f60..d267a6d60d87 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.h +++ b/drivers/infiniband/ulp/isert/ib_isert.h @@ -59,9 +59,11 @@ ISERT_MAX_TX_MISC_PDUS + \ ISERT_MAX_RX_MISC_PDUS) -#define ISER_RX_PAD_SIZE (ISCSI_DEF_MAX_RECV_SEG_LEN + 4096 - \ - (ISER_RX_PAYLOAD_SIZE + sizeof(u64) + sizeof(struct ib_sge) + \ - sizeof(struct ib_cqe) + sizeof(bool))) +/* + * RX size is default of 8k plus headers, but data needs to align to + * 512 boundary, so use 1024 to have the extra space for alignment. + */ +#define ISER_RX_SIZE (ISCSI_DEF_MAX_RECV_SEG_LEN + 1024) #define ISCSI_ISER_SG_TABLESIZE 256 @@ -80,21 +82,41 @@ enum iser_conn_state { }; struct iser_rx_desc { - struct iser_ctrl iser_header; - struct iscsi_hdr iscsi_header; - char data[ISCSI_DEF_MAX_RECV_SEG_LEN]; + char buf[ISER_RX_SIZE]; u64 dma_addr; struct ib_sge rx_sg; struct ib_cqe rx_cqe; bool in_use; - char pad[ISER_RX_PAD_SIZE]; -} __packed; +}; static inline struct iser_rx_desc *cqe_to_rx_desc(struct ib_cqe *cqe) { return container_of(cqe, struct iser_rx_desc, rx_cqe); } +static void *isert_get_iser_hdr(struct iser_rx_desc *desc) +{ + return PTR_ALIGN(desc->buf + ISER_HEADERS_LEN, 512) - ISER_HEADERS_LEN; +} + +static size_t isert_get_hdr_offset(struct iser_rx_desc *desc) +{ + return isert_get_iser_hdr(desc) - (void *)desc->buf; +} + +static void *isert_get_iscsi_hdr(struct iser_rx_desc *desc) +{ + return isert_get_iser_hdr(desc) + sizeof(struct iser_ctrl); +} + +static void *isert_get_data(struct iser_rx_desc *desc) +{ + void *data = isert_get_iser_hdr(desc) + ISER_HEADERS_LEN; + + WARN_ON((uintptr_t)data & 511); + return data; +} + struct iser_tx_desc { struct iser_ctrl iser_header; struct iscsi_hdr iscsi_header; @@ -141,9 +163,8 @@ struct isert_conn { u32 responder_resources; u32 initiator_depth; bool pi_support; - struct iser_rx_desc *login_req_buf; + struct iser_rx_desc *login_desc; char *login_rsp_buf; - u64 login_req_dma; int login_req_len; u64 login_rsp_dma; struct iser_rx_desc *rx_descs; diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c index 8719da540383..196e8505dd8d 100644 --- a/drivers/input/mouse/elan_i2c_core.c +++ b/drivers/input/mouse/elan_i2c_core.c @@ -951,6 +951,8 @@ static void elan_report_absolute(struct elan_tp_data *data, u8 *packet) u8 hover_info = packet[ETP_HOVER_INFO_OFFSET]; bool contact_valid, hover_event; + pm_wakeup_event(&data->client->dev, 0); + hover_event = hover_info & 0x40; for (i = 0; i < ETP_MAX_FINGERS; i++) { contact_valid = tp_info & (1U << (3 + i)); @@ -974,6 +976,8 @@ static void 
elan_report_trackpoint(struct elan_tp_data *data, u8 *report) u8 *packet = &report[ETP_REPORT_ID_OFFSET + 1]; int x, y; + pm_wakeup_event(&data->client->dev, 0); + if (!data->tp_input) { dev_warn_once(&data->client->dev, "received a trackpoint report while no trackpoint device has been created. Please report upstream.\n"); @@ -998,7 +1002,6 @@ static void elan_report_trackpoint(struct elan_tp_data *data, u8 *report) static irqreturn_t elan_isr(int irq, void *dev_id) { struct elan_tp_data *data = dev_id; - struct device *dev = &data->client->dev; int error; u8 report[ETP_MAX_REPORT_LEN]; @@ -1016,8 +1019,6 @@ static irqreturn_t elan_isr(int irq, void *dev_id) if (error) goto out; - pm_wakeup_event(dev, 0); - switch (report[ETP_REPORT_ID_OFFSET]) { case ETP_REPORT_ID: elan_report_absolute(data, report); @@ -1026,7 +1027,7 @@ static irqreturn_t elan_isr(int irq, void *dev_id) elan_report_trackpoint(data, report); break; default: - dev_err(dev, "invalid report id data (%x)\n", + dev_err(&data->client->dev, "invalid report id data (%x)\n", report[ETP_REPORT_ID_OFFSET]); } diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c index 527ae0b9a191..0b4a3039f312 100644 --- a/drivers/input/mouse/psmouse-base.c +++ b/drivers/input/mouse/psmouse-base.c @@ -2042,7 +2042,7 @@ static int psmouse_get_maxproto(char *buffer, const struct kernel_param *kp) { int type = *((unsigned int *)kp->arg); - return sprintf(buffer, "%s", psmouse_protocol_by_type(type)->name); + return sprintf(buffer, "%s\n", psmouse_protocol_by_type(type)->name); } static int __init psmouse_init(void) diff --git a/drivers/input/mouse/sentelic.c b/drivers/input/mouse/sentelic.c index e99d9bf1a267..e78c4c7eda34 100644 --- a/drivers/input/mouse/sentelic.c +++ b/drivers/input/mouse/sentelic.c @@ -441,7 +441,7 @@ static ssize_t fsp_attr_set_setreg(struct psmouse *psmouse, void *data, fsp_reg_write_enable(psmouse, false); - return count; + return retval; } PSMOUSE_DEFINE_WO_ATTR(setreg, S_IWUSR, NULL, fsp_attr_set_setreg); diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c index 758dae8d6500..4b81b2d0fe06 100644 --- a/drivers/input/mouse/synaptics.c +++ b/drivers/input/mouse/synaptics.c @@ -179,6 +179,7 @@ static const char * const smbus_pnp_ids[] = { "LEN0093", /* T480 */ "LEN0096", /* X280 */ "LEN0097", /* X280 -> ALPS trackpoint */ + "LEN0099", /* X1 Extreme 1st */ "LEN009b", /* T580 */ "LEN200f", /* T450s */ "LEN2044", /* L470 */ diff --git a/drivers/input/mouse/trackpoint.c b/drivers/input/mouse/trackpoint.c index 3eefee2ee2a1..854d5e758724 100644 --- a/drivers/input/mouse/trackpoint.c +++ b/drivers/input/mouse/trackpoint.c @@ -17,10 +17,12 @@ #include "trackpoint.h" static const char * const trackpoint_variants[] = { - [TP_VARIANT_IBM] = "IBM", - [TP_VARIANT_ALPS] = "ALPS", - [TP_VARIANT_ELAN] = "Elan", - [TP_VARIANT_NXP] = "NXP", + [TP_VARIANT_IBM] = "IBM", + [TP_VARIANT_ALPS] = "ALPS", + [TP_VARIANT_ELAN] = "Elan", + [TP_VARIANT_NXP] = "NXP", + [TP_VARIANT_JYT_SYNAPTICS] = "JYT_Synaptics", + [TP_VARIANT_SYNAPTICS] = "Synaptics", }; /* diff --git a/drivers/input/mouse/trackpoint.h b/drivers/input/mouse/trackpoint.h index 5cb93ed26085..eb5412904fe0 100644 --- a/drivers/input/mouse/trackpoint.h +++ b/drivers/input/mouse/trackpoint.h @@ -24,10 +24,12 @@ * 0x01 was the original IBM trackpoint, others implement very limited * subset of trackpoint features. 
*/ -#define TP_VARIANT_IBM 0x01 -#define TP_VARIANT_ALPS 0x02 -#define TP_VARIANT_ELAN 0x03 -#define TP_VARIANT_NXP 0x04 +#define TP_VARIANT_IBM 0x01 +#define TP_VARIANT_ALPS 0x02 +#define TP_VARIANT_ELAN 0x03 +#define TP_VARIANT_NXP 0x04 +#define TP_VARIANT_JYT_SYNAPTICS 0x05 +#define TP_VARIANT_SYNAPTICS 0x06 /* * Commands diff --git a/drivers/input/serio/i8042-ppcio.h b/drivers/input/serio/i8042-ppcio.h deleted file mode 100644 index 391f94d9e47d..000000000000 --- a/drivers/input/serio/i8042-ppcio.h +++ /dev/null @@ -1,57 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -#ifndef _I8042_PPCIO_H -#define _I8042_PPCIO_H - - -#if defined(CONFIG_WALNUT) - -#define I8042_KBD_IRQ 25 -#define I8042_AUX_IRQ 26 - -#define I8042_KBD_PHYS_DESC "walnutps2/serio0" -#define I8042_AUX_PHYS_DESC "walnutps2/serio1" -#define I8042_MUX_PHYS_DESC "walnutps2/serio%d" - -extern void *kb_cs; -extern void *kb_data; - -#define I8042_COMMAND_REG (*(int *)kb_cs) -#define I8042_DATA_REG (*(int *)kb_data) - -static inline int i8042_read_data(void) -{ - return readb(kb_data); -} - -static inline int i8042_read_status(void) -{ - return readb(kb_cs); -} - -static inline void i8042_write_data(int val) -{ - writeb(val, kb_data); -} - -static inline void i8042_write_command(int val) -{ - writeb(val, kb_cs); -} - -static inline int i8042_platform_init(void) -{ - i8042_reset = I8042_RESET_ALWAYS; - return 0; -} - -static inline void i8042_platform_exit(void) -{ -} - -#else - -#include "i8042-io.h" - -#endif - -#endif /* _I8042_PPCIO_H */ diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h index 7e048b557462..42771b9b10a0 100644 --- a/drivers/input/serio/i8042-x86ia64io.h +++ b/drivers/input/serio/i8042-x86ia64io.h @@ -425,6 +425,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "076804U"), }, }, + { + /* Lenovo XiaoXin Air 12 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "80UN"), + }, + }, { .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Acer"), @@ -541,6 +548,14 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5738"), }, }, + { + /* Entroware Proteus */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Entroware"), + DMI_MATCH(DMI_PRODUCT_NAME, "Proteus"), + DMI_MATCH(DMI_PRODUCT_VERSION, "EL07R4"), + }, + }, { } }; @@ -669,6 +684,14 @@ static const struct dmi_system_id __initconst i8042_dmi_reset_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "33474HU"), }, }, + { + /* Entroware Proteus */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Entroware"), + DMI_MATCH(DMI_PRODUCT_NAME, "Proteus"), + DMI_MATCH(DMI_PRODUCT_VERSION, "EL07R4"), + }, + }, { } }; diff --git a/drivers/input/serio/i8042.h b/drivers/input/serio/i8042.h index 38dc27ad3c18..eb376700dfff 100644 --- a/drivers/input/serio/i8042.h +++ b/drivers/input/serio/i8042.h @@ -17,8 +17,6 @@ #include "i8042-ip22io.h" #elif defined(CONFIG_SNI_RM) #include "i8042-snirm.h" -#elif defined(CONFIG_PPC) -#include "i8042-ppcio.h" #elif defined(CONFIG_SPARC) #include "i8042-sparcio.h" #elif defined(CONFIG_X86) || defined(CONFIG_IA64) diff --git a/drivers/input/touchscreen/edt-ft5x06.c b/drivers/input/touchscreen/edt-ft5x06.c index 240e8de24cd2..b41b97c962ed 100644 --- a/drivers/input/touchscreen/edt-ft5x06.c +++ b/drivers/input/touchscreen/edt-ft5x06.c @@ -935,19 +935,25 @@ static void edt_ft5x06_ts_get_defaults(struct device *dev, error = device_property_read_u32(dev, "offset", &val); if (!error) { 
- edt_ft5x06_register_write(tsdata, reg_addr->reg_offset, val); + if (reg_addr->reg_offset != NO_REGISTER) + edt_ft5x06_register_write(tsdata, + reg_addr->reg_offset, val); tsdata->offset = val; } error = device_property_read_u32(dev, "offset-x", &val); if (!error) { - edt_ft5x06_register_write(tsdata, reg_addr->reg_offset_x, val); + if (reg_addr->reg_offset_x != NO_REGISTER) + edt_ft5x06_register_write(tsdata, + reg_addr->reg_offset_x, val); tsdata->offset_x = val; } error = device_property_read_u32(dev, "offset-y", &val); if (!error) { - edt_ft5x06_register_write(tsdata, reg_addr->reg_offset_y, val); + if (reg_addr->reg_offset_y != NO_REGISTER) + edt_ft5x06_register_write(tsdata, + reg_addr->reg_offset_y, val); tsdata->offset_y = val; } } diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c index 0403102e807e..37b35ab97beb 100644 --- a/drivers/input/touchscreen/goodix.c +++ b/drivers/input/touchscreen/goodix.c @@ -168,6 +168,22 @@ static const struct dmi_system_id nine_bytes_report[] = { {} }; +/* + * Those tablets have their x coordinate inverted + */ +static const struct dmi_system_id inverted_x_screen[] = { +#if defined(CONFIG_DMI) && defined(CONFIG_X86) + { + .ident = "Cube I15-TC", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Cube"), + DMI_MATCH(DMI_PRODUCT_NAME, "I15-TC") + }, + }, +#endif + {} +}; + /** * goodix_i2c_read - read data from a register of the i2c slave device. * @@ -780,6 +796,12 @@ static int goodix_configure_dev(struct goodix_ts_data *ts) "Non-standard 9-bytes report format quirk\n"); } + if (dmi_check_system(inverted_x_screen)) { + ts->prop.invert_x = true; + dev_dbg(&ts->client->dev, + "Applying 'inverted x screen' quirk\n"); + } + error = input_mt_init_slots(ts->input_dev, ts->max_touch_num, INPUT_MT_DIRECT | INPUT_MT_DROP_UNUSED); if (error) { diff --git a/drivers/input/touchscreen/mms114.c b/drivers/input/touchscreen/mms114.c index fca908ba4841..fb28fd2d6f1c 100644 --- a/drivers/input/touchscreen/mms114.c +++ b/drivers/input/touchscreen/mms114.c @@ -54,6 +54,7 @@ enum mms_type { TYPE_MMS114 = 114, TYPE_MMS152 = 152, + TYPE_MMS345L = 345, }; struct mms114_data { @@ -250,6 +251,15 @@ static int mms114_get_version(struct mms114_data *data) int error; switch (data->type) { + case TYPE_MMS345L: + error = __mms114_read_reg(data, MMS152_FW_REV, 3, buf); + if (error) + return error; + + dev_info(dev, "TSP FW Rev: bootloader 0x%x / core 0x%x / config 0x%x\n", + buf[0], buf[1], buf[2]); + break; + case TYPE_MMS152: error = __mms114_read_reg(data, MMS152_FW_REV, 3, buf); if (error) @@ -287,8 +297,8 @@ static int mms114_setup_regs(struct mms114_data *data) if (error < 0) return error; - /* MMS152 has no configuration or power on registers */ - if (data->type == TYPE_MMS152) + /* Only MMS114 has configuration and power on registers */ + if (data->type != TYPE_MMS114) return 0; error = mms114_set_active(data, true); @@ -598,6 +608,9 @@ static const struct of_device_id mms114_dt_match[] = { }, { .compatible = "melfas,mms152", .data = (void *)TYPE_MMS152, + }, { + .compatible = "melfas,mms345l", + .data = (void *)TYPE_MMS345L, }, { } }; diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index e3842eabcfdd..390568afee9f 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig @@ -205,7 +205,7 @@ config INTEL_IOMMU_DEBUGFS config INTEL_IOMMU_SVM bool "Support for Shared Virtual Memory with Intel IOMMU" - depends on INTEL_IOMMU && X86 + depends on INTEL_IOMMU && X86_64 select PCI_PASID select MMU_NOTIFIER help diff --git 
a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index 32de8e7bb8b4..fa91d856a43e 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -4431,14 +4431,18 @@ int amd_iommu_deactivate_guest_mode(void *data) struct amd_ir_data *ir_data = (struct amd_ir_data *)data; struct irte_ga *entry = (struct irte_ga *) ir_data->entry; struct irq_cfg *cfg = ir_data->cfg; + u64 valid; if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) || !entry || !entry->lo.fields_vapic.guest_mode) return 0; + valid = entry->lo.fields_remap.valid; + entry->lo.val = 0; entry->hi.val = 0; + entry->lo.fields_remap.valid = valid; entry->lo.fields_remap.dm = apic->irq_dest_mode; entry->lo.fields_remap.int_type = apic->irq_delivery_mode; entry->hi.fields.vector = cfg->vector; @@ -4575,9 +4579,10 @@ int amd_iommu_create_irq_domain(struct amd_iommu *iommu) if (!fn) return -ENOMEM; iommu->ir_domain = irq_domain_create_tree(fn, &amd_ir_domain_ops, iommu); - irq_domain_free_fwnode(fn); - if (!iommu->ir_domain) + if (!iommu->ir_domain) { + irq_domain_free_fwnode(fn); return -ENOMEM; + } iommu->ir_domain->parent = arch_get_ir_parent_domain(); iommu->msi_domain = arch_create_remap_msi_irq_domain(iommu->ir_domain, diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c index d6d85debd01b..05f3d93cf480 100644 --- a/drivers/iommu/amd_iommu_v2.c +++ b/drivers/iommu/amd_iommu_v2.c @@ -741,6 +741,13 @@ int amd_iommu_init_device(struct pci_dev *pdev, int pasids) might_sleep(); + /* + * When memory encryption is active the device is likely not in a + * direct-mapped domain. Forbid using IOMMUv2 functionality for now. + */ + if (mem_encrypt_active()) + return -ENODEV; + if (!amd_iommu_v2_supported()) return -ENODEV; diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c index 9e393b9c5091..30ac0ba55864 100644 --- a/drivers/iommu/dmar.c +++ b/drivers/iommu/dmar.c @@ -898,7 +898,8 @@ int __init detect_intel_iommu(void) if (!ret) ret = dmar_walk_dmar_table((struct acpi_table_dmar *)dmar_tbl, &validate_drhd_cb); - if (!ret && !no_iommu && !iommu_detected && !dmar_disabled) { + if (!ret && !no_iommu && !iommu_detected && + (!dmar_disabled || dmar_platform_optin())) { iommu_detected = 1; /* Make sure ACS will be enabled */ pci_request_acs(); diff --git a/drivers/iommu/hyperv-iommu.c b/drivers/iommu/hyperv-iommu.c index a386b83e0e34..f0fe5030acd3 100644 --- a/drivers/iommu/hyperv-iommu.c +++ b/drivers/iommu/hyperv-iommu.c @@ -155,7 +155,10 @@ static int __init hyperv_prepare_irq_remapping(void) 0, IOAPIC_REMAPPING_ENTRY, fn, &hyperv_ir_domain_ops, NULL); - irq_domain_free_fwnode(fn); + if (!ioapic_ir_domain) { + irq_domain_free_fwnode(fn); + return -ENOMEM; + } /* * Hyper-V doesn't provide irq remapping function for diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index 773ac2b0d606..2ffec65df388 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -123,29 +123,29 @@ static inline unsigned int level_to_offset_bits(int level) return (level - 1) * LEVEL_STRIDE; } -static inline int pfn_level_offset(unsigned long pfn, int level) +static inline int pfn_level_offset(u64 pfn, int level) { return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK; } -static inline unsigned long level_mask(int level) +static inline u64 level_mask(int level) { - return -1UL << level_to_offset_bits(level); + return -1ULL << level_to_offset_bits(level); } -static inline unsigned long level_size(int level) +static inline u64 level_size(int level) { - return 1UL << 
level_to_offset_bits(level); + return 1ULL << level_to_offset_bits(level); } -static inline unsigned long align_to_level(unsigned long pfn, int level) +static inline u64 align_to_level(u64 pfn, int level) { return (pfn + level_size(level) - 1) & level_mask(level); } static inline unsigned long lvl_to_nr_pages(unsigned int lvl) { - return 1 << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH); + return 1UL << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH); } /* VT-d pages must always be _smaller_ than MM pages. Otherwise things @@ -611,6 +611,12 @@ struct intel_iommu *domain_get_iommu(struct dmar_domain *domain) return g_iommus[iommu_id]; } +static inline bool iommu_paging_structure_coherency(struct intel_iommu *iommu) +{ + return sm_supported(iommu) ? + ecap_smpwc(iommu->ecap) : ecap_coherent(iommu->ecap); +} + static void domain_update_iommu_coherency(struct dmar_domain *domain) { struct dmar_drhd_unit *drhd; @@ -622,7 +628,7 @@ static void domain_update_iommu_coherency(struct dmar_domain *domain) for_each_domain_iommu(i, domain) { found = true; - if (!ecap_coherent(g_iommus[i]->ecap)) { + if (!iommu_paging_structure_coherency(g_iommus[i])) { domain->iommu_coherency = 0; break; } @@ -633,7 +639,7 @@ static void domain_update_iommu_coherency(struct dmar_domain *domain) /* No hardware attached; use lowest common denominator */ rcu_read_lock(); for_each_active_iommu(iommu, drhd) { - if (!ecap_coherent(iommu->ecap)) { + if (!iommu_paging_structure_coherency(iommu)) { domain->iommu_coherency = 0; break; } @@ -2090,7 +2096,8 @@ static int domain_context_mapping_one(struct dmar_domain *domain, context_set_fault_enable(context); context_set_present(context); - domain_flush_cache(domain, context, sizeof(*context)); + if (!ecap_coherent(iommu->ecap)) + clflush_cache_range(context, sizeof(*context)); /* * It's a non-present to present mapping. If hardware doesn't cache @@ -5955,6 +5962,23 @@ static bool intel_iommu_is_attach_deferred(struct iommu_domain *domain, return dev->archdata.iommu == DEFER_DEVICE_DOMAIN_INFO; } +/* + * Check that the device does not live on an external facing PCI port that is + * marked as untrusted. Such devices should not be able to apply quirks and + * thus not be able to bypass the IOMMU restrictions. + */ +static bool risky_device(struct pci_dev *pdev) +{ + if (pdev->untrusted) { + pci_info(pdev, + "Skipping IOMMU quirk for dev [%04X:%04X] on untrusted PCI link\n", + pdev->vendor, pdev->device); + pci_info(pdev, "Please check with your BIOS/Platform vendor about this\n"); + return true; + } + return false; +} + const struct iommu_ops intel_iommu_ops = { .capable = intel_iommu_capable, .domain_alloc = intel_iommu_domain_alloc, @@ -5983,6 +6007,9 @@ const struct iommu_ops intel_iommu_ops = { static void quirk_iommu_igfx(struct pci_dev *dev) { + if (risky_device(dev)) + return; + pci_info(dev, "Disabling IOMMU for graphics on this chipset\n"); dmar_map_gfx = 0; } @@ -6024,6 +6051,9 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x163D, quirk_iommu_igfx); static void quirk_iommu_rwbf(struct pci_dev *dev) { + if (risky_device(dev)) + return; + /* * Mobile 4 Series Chipset neglects to set RWBF capability, * but needs it. Same seems to hold for the desktop versions. 
@@ -6054,6 +6084,9 @@ static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev) { unsigned short ggc; + if (risky_device(dev)) + return; + if (pci_read_config_word(dev, GGC, &ggc)) return; @@ -6087,6 +6120,12 @@ static void __init check_tylersburg_isoch(void) pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL); if (!pdev) return; + + if (risky_device(pdev)) { + pci_dev_put(pdev); + return; + } + pci_dev_put(pdev); /* System Management Registers. Might be hidden, in which case @@ -6096,6 +6135,11 @@ static void __init check_tylersburg_isoch(void) if (!pdev) return; + if (risky_device(pdev)) { + pci_dev_put(pdev); + return; + } + if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) { pci_dev_put(pdev); return; diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c index 81e43c1df7ec..f697f3a1d46b 100644 --- a/drivers/iommu/intel_irq_remapping.c +++ b/drivers/iommu/intel_irq_remapping.c @@ -507,12 +507,18 @@ static void iommu_enable_irq_remapping(struct intel_iommu *iommu) /* Enable interrupt-remapping */ iommu->gcmd |= DMA_GCMD_IRE; - iommu->gcmd &= ~DMA_GCMD_CFI; /* Block compatibility-format MSIs */ writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); - IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_IRES), sts); + /* Block compatibility-format MSIs */ + if (sts & DMA_GSTS_CFIS) { + iommu->gcmd &= ~DMA_GCMD_CFI; + writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); + IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, + readl, !(sts & DMA_GSTS_CFIS), sts); + } + /* * With CFI clear in the Global Command register, we should be * protected from dangerous (i.e. compatibility) interrupts @@ -563,8 +569,8 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu) 0, INTR_REMAP_TABLE_ENTRIES, fn, &intel_ir_domain_ops, iommu); - irq_domain_free_fwnode(fn); if (!iommu->ir_domain) { + irq_domain_free_fwnode(fn); pr_err("IR%d: failed to allocate irqdomain\n", iommu->seq_id); goto out_free_bitmap; } @@ -628,13 +634,21 @@ out_free_table: static void intel_teardown_irq_remapping(struct intel_iommu *iommu) { + struct fwnode_handle *fn; + if (iommu && iommu->ir_table) { if (iommu->ir_msi_domain) { + fn = iommu->ir_msi_domain->fwnode; + irq_domain_remove(iommu->ir_msi_domain); + irq_domain_free_fwnode(fn); iommu->ir_msi_domain = NULL; } if (iommu->ir_domain) { + fn = iommu->ir_domain->fwnode; + irq_domain_remove(iommu->ir_domain); + irq_domain_free_fwnode(fn); iommu->ir_domain = NULL; } free_pages((unsigned long)iommu->ir_table->base, diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c index 0e6a9536eca6..612cbf668adf 100644 --- a/drivers/iommu/iova.c +++ b/drivers/iommu/iova.c @@ -811,7 +811,9 @@ iova_magazine_free_pfns(struct iova_magazine *mag, struct iova_domain *iovad) for (i = 0 ; i < mag->size; ++i) { struct iova *iova = private_find_iova(iovad, mag->pfns[i]); - BUG_ON(!iova); + if (WARN_ON(!iova)) + continue; + private_free_iova(iovad, iova); } diff --git a/drivers/iommu/omap-iommu-debug.c b/drivers/iommu/omap-iommu-debug.c index 8e19bfa94121..a99afb5d9011 100644 --- a/drivers/iommu/omap-iommu-debug.c +++ b/drivers/iommu/omap-iommu-debug.c @@ -98,8 +98,11 @@ static ssize_t debug_read_regs(struct file *file, char __user *userbuf, mutex_lock(&iommu_debug_lock); bytes = omap_iommu_dump_ctx(obj, p, count); + if (bytes < 0) + goto err; bytes = simple_read_from_buffer(userbuf, count, ppos, buf, bytes); +err: mutex_unlock(&iommu_debug_lock); kfree(buf); diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index 
263cf9240b16..7966b19ceba7 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -2581,6 +2581,7 @@ static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, msi_alloc_info_t *info = args; struct its_device *its_dev = info->scratchpad[0].ptr; struct its_node *its = its_dev->its; + struct irq_data *irqd; irq_hw_number_t hwirq; int err; int i; @@ -2600,7 +2601,9 @@ static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i, &its_irq_chip, its_dev); - irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq + i))); + irqd = irq_get_irq_data(virq + i); + irqd_set_single_target(irqd); + irqd_set_affinity_on_activate(irqd); pr_debug("ID:%d pID:%d vID:%d\n", (int)(hwirq + i - its_dev->event_map.lpi_base), (int)(hwirq + i), virq + i); diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c index 30ab623343d3..882204d1ef4f 100644 --- a/drivers/irqchip/irq-gic.c +++ b/drivers/irqchip/irq-gic.c @@ -329,10 +329,8 @@ static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu) static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, bool force) { - void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3); - unsigned int cpu, shift = (gic_irq(d) % 4) * 8; - u32 val, mask, bit; - unsigned long flags; + void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + gic_irq(d); + unsigned int cpu; if (!force) cpu = cpumask_any_and(mask_val, cpu_online_mask); @@ -342,13 +340,7 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids) return -EINVAL; - gic_lock_irqsave(flags); - mask = 0xff << shift; - bit = gic_cpu_map[cpu] << shift; - val = readl_relaxed(reg) & ~mask; - writel_relaxed(val | bit, reg); - gic_unlock_irqrestore(flags); - + writeb_relaxed(gic_cpu_map[cpu], reg); irq_data_update_effective_affinity(d, cpumask_of(cpu)); return IRQ_SET_MASK_OK_DONE; diff --git a/drivers/irqchip/irq-mtk-sysirq.c b/drivers/irqchip/irq-mtk-sysirq.c index 73eae5966a40..6ff98b87e5c0 100644 --- a/drivers/irqchip/irq-mtk-sysirq.c +++ b/drivers/irqchip/irq-mtk-sysirq.c @@ -15,7 +15,7 @@ #include struct mtk_sysirq_chip_data { - spinlock_t lock; + raw_spinlock_t lock; u32 nr_intpol_bases; void __iomem **intpol_bases; u32 *intpol_words; @@ -37,7 +37,7 @@ static int mtk_sysirq_set_type(struct irq_data *data, unsigned int type) reg_index = chip_data->which_word[hwirq]; offset = hwirq & 0x1f; - spin_lock_irqsave(&chip_data->lock, flags); + raw_spin_lock_irqsave(&chip_data->lock, flags); value = readl_relaxed(base + reg_index * 4); if (type == IRQ_TYPE_LEVEL_LOW || type == IRQ_TYPE_EDGE_FALLING) { if (type == IRQ_TYPE_LEVEL_LOW) @@ -53,7 +53,7 @@ static int mtk_sysirq_set_type(struct irq_data *data, unsigned int type) data = data->parent_data; ret = data->chip->irq_set_type(data, type); - spin_unlock_irqrestore(&chip_data->lock, flags); + raw_spin_unlock_irqrestore(&chip_data->lock, flags); return ret; } @@ -212,7 +212,7 @@ static int __init mtk_sysirq_of_init(struct device_node *node, ret = -ENOMEM; goto out_free_which_word; } - spin_lock_init(&chip_data->lock); + raw_spin_lock_init(&chip_data->lock); return 0; diff --git a/drivers/irqchip/irq-stm32-exti.c b/drivers/irqchip/irq-stm32-exti.c index e00f2fa27f00..a8322a4e18d3 100644 --- a/drivers/irqchip/irq-stm32-exti.c +++ b/drivers/irqchip/irq-stm32-exti.c @@ -431,6 +431,16 @@ static void stm32_irq_ack(struct irq_data 
*d) irq_gc_unlock(gc); } +/* directly set the target bit without reading first. */ +static inline void stm32_exti_write_bit(struct irq_data *d, u32 reg) +{ + struct stm32_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d); + void __iomem *base = chip_data->host_data->base; + u32 val = BIT(d->hwirq % IRQS_PER_BANK); + + writel_relaxed(val, base + reg); +} + static inline u32 stm32_exti_set_bit(struct irq_data *d, u32 reg) { struct stm32_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d); @@ -464,9 +474,9 @@ static void stm32_exti_h_eoi(struct irq_data *d) raw_spin_lock(&chip_data->rlock); - stm32_exti_set_bit(d, stm32_bank->rpr_ofst); + stm32_exti_write_bit(d, stm32_bank->rpr_ofst); if (stm32_bank->fpr_ofst != UNDEF_REG) - stm32_exti_set_bit(d, stm32_bank->fpr_ofst); + stm32_exti_write_bit(d, stm32_bank->fpr_ofst); raw_spin_unlock(&chip_data->rlock); diff --git a/drivers/irqchip/irq-ti-sci-inta.c b/drivers/irqchip/irq-ti-sci-inta.c index fa7488863bd0..0a35499c4672 100644 --- a/drivers/irqchip/irq-ti-sci-inta.c +++ b/drivers/irqchip/irq-ti-sci-inta.c @@ -571,7 +571,7 @@ static int ti_sci_inta_irq_domain_probe(struct platform_device *pdev) res = platform_get_resource(pdev, IORESOURCE_MEM, 0); inta->base = devm_ioremap_resource(dev, res); if (IS_ERR(inta->base)) - return -ENODEV; + return PTR_ERR(inta->base); domain = irq_domain_add_linear(dev_of_node(dev), ti_sci_get_num_resources(inta->vint), diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c index d3e83c33783e..0a4823d9797a 100644 --- a/drivers/leds/led-class.c +++ b/drivers/leds/led-class.c @@ -172,6 +172,7 @@ void led_classdev_suspend(struct led_classdev *led_cdev) { led_cdev->flags |= LED_SUSPENDED; led_set_brightness_nopm(led_cdev, 0); + flush_work(&led_cdev->set_brightness_work); } EXPORT_SYMBOL_GPL(led_classdev_suspend); diff --git a/drivers/leds/leds-88pm860x.c b/drivers/leds/leds-88pm860x.c index b3044c9a8120..465c3755cf2e 100644 --- a/drivers/leds/leds-88pm860x.c +++ b/drivers/leds/leds-88pm860x.c @@ -203,21 +203,33 @@ static int pm860x_led_probe(struct platform_device *pdev) data->cdev.brightness_set_blocking = pm860x_led_set; mutex_init(&data->lock); - ret = devm_led_classdev_register(chip->dev, &data->cdev); + ret = led_classdev_register(chip->dev, &data->cdev); if (ret < 0) { dev_err(&pdev->dev, "Failed to register LED: %d\n", ret); return ret; } pm860x_led_set(&data->cdev, 0); + + platform_set_drvdata(pdev, data); + return 0; } +static int pm860x_led_remove(struct platform_device *pdev) +{ + struct pm860x_led *data = platform_get_drvdata(pdev); + + led_classdev_unregister(&data->cdev); + + return 0; +} static struct platform_driver pm860x_led_driver = { .driver = { .name = "88pm860x-led", }, .probe = pm860x_led_probe, + .remove = pm860x_led_remove, }; module_platform_driver(pm860x_led_driver); diff --git a/drivers/leds/leds-da903x.c b/drivers/leds/leds-da903x.c index ed1b303f699f..2b5fb00438a2 100644 --- a/drivers/leds/leds-da903x.c +++ b/drivers/leds/leds-da903x.c @@ -110,12 +110,23 @@ static int da903x_led_probe(struct platform_device *pdev) led->flags = pdata->flags; led->master = pdev->dev.parent; - ret = devm_led_classdev_register(led->master, &led->cdev); + ret = led_classdev_register(led->master, &led->cdev); if (ret) { dev_err(&pdev->dev, "failed to register LED %d\n", id); return ret; } + platform_set_drvdata(pdev, led); + + return 0; +} + +static int da903x_led_remove(struct platform_device *pdev) +{ + struct da903x_led *led = platform_get_drvdata(pdev); + + 
led_classdev_unregister(&led->cdev); + return 0; } @@ -124,6 +135,7 @@ static struct platform_driver da903x_led_driver = { .name = "da903x-led", }, .probe = da903x_led_probe, + .remove = da903x_led_remove, }; module_platform_driver(da903x_led_driver); diff --git a/drivers/leds/leds-lm3533.c b/drivers/leds/leds-lm3533.c index 9504ad405aef..b3edee703193 100644 --- a/drivers/leds/leds-lm3533.c +++ b/drivers/leds/leds-lm3533.c @@ -694,7 +694,7 @@ static int lm3533_led_probe(struct platform_device *pdev) platform_set_drvdata(pdev, led); - ret = devm_led_classdev_register(pdev->dev.parent, &led->cdev); + ret = led_classdev_register(pdev->dev.parent, &led->cdev); if (ret) { dev_err(&pdev->dev, "failed to register LED %d\n", pdev->id); return ret; @@ -704,13 +704,18 @@ static int lm3533_led_probe(struct platform_device *pdev) ret = lm3533_led_setup(led, pdata); if (ret) - return ret; + goto err_deregister; ret = lm3533_ctrlbank_enable(&led->cb); if (ret) - return ret; + goto err_deregister; return 0; + +err_deregister: + led_classdev_unregister(&led->cdev); + + return ret; } static int lm3533_led_remove(struct platform_device *pdev) @@ -720,6 +725,7 @@ static int lm3533_led_remove(struct platform_device *pdev) dev_dbg(&pdev->dev, "%s\n", __func__); lm3533_ctrlbank_disable(&led->cb); + led_classdev_unregister(&led->cdev); return 0; } diff --git a/drivers/leds/leds-lm355x.c b/drivers/leds/leds-lm355x.c index a5abb499574b..129f475aebf2 100644 --- a/drivers/leds/leds-lm355x.c +++ b/drivers/leds/leds-lm355x.c @@ -165,18 +165,19 @@ static int lm355x_chip_init(struct lm355x_chip_data *chip) /* input and output pins configuration */ switch (chip->type) { case CHIP_LM3554: - reg_val = pdata->pin_tx2 | pdata->ntc_pin; + reg_val = (u32)pdata->pin_tx2 | (u32)pdata->ntc_pin; ret = regmap_update_bits(chip->regmap, 0xE0, 0x28, reg_val); if (ret < 0) goto out; - reg_val = pdata->pass_mode; + reg_val = (u32)pdata->pass_mode; ret = regmap_update_bits(chip->regmap, 0xA0, 0x04, reg_val); if (ret < 0) goto out; break; case CHIP_LM3556: - reg_val = pdata->pin_tx2 | pdata->ntc_pin | pdata->pass_mode; + reg_val = (u32)pdata->pin_tx2 | (u32)pdata->ntc_pin | + (u32)pdata->pass_mode; ret = regmap_update_bits(chip->regmap, 0x0A, 0xC4, reg_val); if (ret < 0) goto out; diff --git a/drivers/leds/leds-lm36274.c b/drivers/leds/leds-lm36274.c index 836b60c9a2b8..db842eeb7ca2 100644 --- a/drivers/leds/leds-lm36274.c +++ b/drivers/leds/leds-lm36274.c @@ -133,7 +133,7 @@ static int lm36274_probe(struct platform_device *pdev) lm36274_data->pdev = pdev; lm36274_data->dev = lmu->dev; lm36274_data->regmap = lmu->regmap; - dev_set_drvdata(&pdev->dev, lm36274_data); + platform_set_drvdata(pdev, lm36274_data); ret = lm36274_parse_dt(lm36274_data); if (ret) { @@ -147,8 +147,16 @@ static int lm36274_probe(struct platform_device *pdev) return ret; } - return devm_led_classdev_register(lm36274_data->dev, - &lm36274_data->led_dev); + return led_classdev_register(lm36274_data->dev, &lm36274_data->led_dev); +} + +static int lm36274_remove(struct platform_device *pdev) +{ + struct lm36274 *lm36274_data = platform_get_drvdata(pdev); + + led_classdev_unregister(&lm36274_data->led_dev); + + return 0; } static const struct of_device_id of_lm36274_leds_match[] = { @@ -159,6 +167,7 @@ MODULE_DEVICE_TABLE(of, of_lm36274_leds_match); static struct platform_driver lm36274_driver = { .probe = lm36274_probe, + .remove = lm36274_remove, .driver = { .name = "lm36274-leds", }, diff --git a/drivers/leds/leds-wm831x-status.c b/drivers/leds/leds-wm831x-status.c 
index 082df7f1dd90..67f4235cb28a 100644 --- a/drivers/leds/leds-wm831x-status.c +++ b/drivers/leds/leds-wm831x-status.c @@ -269,12 +269,23 @@ static int wm831x_status_probe(struct platform_device *pdev) drvdata->cdev.blink_set = wm831x_status_blink_set; drvdata->cdev.groups = wm831x_status_groups; - ret = devm_led_classdev_register(wm831x->dev, &drvdata->cdev); + ret = led_classdev_register(wm831x->dev, &drvdata->cdev); if (ret < 0) { dev_err(&pdev->dev, "Failed to register LED: %d\n", ret); return ret; } + platform_set_drvdata(pdev, drvdata); + + return 0; +} + +static int wm831x_status_remove(struct platform_device *pdev) +{ + struct wm831x_status *drvdata = platform_get_drvdata(pdev); + + led_classdev_unregister(&drvdata->cdev); + return 0; } @@ -283,6 +294,7 @@ static struct platform_driver wm831x_status_driver = { .name = "wm831x-status", }, .probe = wm831x_status_probe, + .remove = wm831x_status_remove, }; module_platform_driver(wm831x_status_driver); diff --git a/drivers/macintosh/via-macii.c b/drivers/macintosh/via-macii.c index ac824d7b2dcf..6aa903529570 100644 --- a/drivers/macintosh/via-macii.c +++ b/drivers/macintosh/via-macii.c @@ -270,15 +270,12 @@ static int macii_autopoll(int devs) unsigned long flags; int err = 0; + local_irq_save(flags); + /* bit 1 == device 1, and so on. */ autopoll_devs = devs & 0xFFFE; - if (!autopoll_devs) - return 0; - - local_irq_save(flags); - - if (current_req == NULL) { + if (autopoll_devs && !current_req) { /* Send a Talk Reg 0. The controller will repeatedly transmit * this as long as it is idle. */ diff --git a/drivers/macintosh/windfarm_pm112.c b/drivers/macintosh/windfarm_pm112.c index 4150301a89a5..e8377ce0a95a 100644 --- a/drivers/macintosh/windfarm_pm112.c +++ b/drivers/macintosh/windfarm_pm112.c @@ -132,14 +132,6 @@ static int create_cpu_loop(int cpu) s32 tmax; int fmin; - /* Get PID params from the appropriate SAT */ - hdr = smu_sat_get_sdb_partition(chip, 0xC8 + core, NULL); - if (hdr == NULL) { - printk(KERN_WARNING"windfarm: can't get CPU PID fan config\n"); - return -EINVAL; - } - piddata = (struct smu_sdbp_cpupiddata *)&hdr[1]; - /* Get FVT params to get Tmax; if not found, assume default */ hdr = smu_sat_get_sdb_partition(chip, 0xC4 + core, NULL); if (hdr) { @@ -152,6 +144,16 @@ static int create_cpu_loop(int cpu) if (tmax < cpu_all_tmax) cpu_all_tmax = tmax; + kfree(hdr); + + /* Get PID params from the appropriate SAT */ + hdr = smu_sat_get_sdb_partition(chip, 0xC8 + core, NULL); + if (hdr == NULL) { + printk(KERN_WARNING"windfarm: can't get CPU PID fan config\n"); + return -EINVAL; + } + piddata = (struct smu_sdbp_cpupiddata *)&hdr[1]; + /* * Darwin has a minimum fan speed of 1000 rpm for the 4-way and * 515 for the 2-way. 
That appears to be overkill, so for now, @@ -174,6 +176,9 @@ static int create_cpu_loop(int cpu) pid.min = fmin; wf_cpu_pid_init(&cpu_pid[cpu], &pid); + + kfree(hdr); + return 0; } diff --git a/drivers/mailbox/zynqmp-ipi-mailbox.c b/drivers/mailbox/zynqmp-ipi-mailbox.c index 86887c9a349a..f9cc674ba9b7 100644 --- a/drivers/mailbox/zynqmp-ipi-mailbox.c +++ b/drivers/mailbox/zynqmp-ipi-mailbox.c @@ -504,10 +504,9 @@ static int zynqmp_ipi_mbox_probe(struct zynqmp_ipi_mbox *ipi_mbox, mchan->req_buf_size = resource_size(&res); mchan->req_buf = devm_ioremap(mdev, res.start, mchan->req_buf_size); - if (IS_ERR(mchan->req_buf)) { + if (!mchan->req_buf) { dev_err(mdev, "Unable to map IPI buffer I/O memory\n"); - ret = PTR_ERR(mchan->req_buf); - return ret; + return -ENOMEM; } } else if (ret != -ENODEV) { dev_err(mdev, "Unmatched resource %s, %d.\n", name, ret); @@ -520,10 +519,9 @@ static int zynqmp_ipi_mbox_probe(struct zynqmp_ipi_mbox *ipi_mbox, mchan->resp_buf_size = resource_size(&res); mchan->resp_buf = devm_ioremap(mdev, res.start, mchan->resp_buf_size); - if (IS_ERR(mchan->resp_buf)) { + if (!mchan->resp_buf) { dev_err(mdev, "Unable to map IPI buffer I/O memory\n"); - ret = PTR_ERR(mchan->resp_buf); - return ret; + return -ENOMEM; } } else if (ret != -ENODEV) { dev_err(mdev, "Unmatched resource %s.\n", name); @@ -543,10 +541,9 @@ static int zynqmp_ipi_mbox_probe(struct zynqmp_ipi_mbox *ipi_mbox, mchan->req_buf_size = resource_size(&res); mchan->req_buf = devm_ioremap(mdev, res.start, mchan->req_buf_size); - if (IS_ERR(mchan->req_buf)) { + if (!mchan->req_buf) { dev_err(mdev, "Unable to map IPI buffer I/O memory\n"); - ret = PTR_ERR(mchan->req_buf); - return ret; + return -ENOMEM; } } else if (ret != -ENODEV) { dev_err(mdev, "Unmatched resource %s.\n", name); @@ -559,10 +556,9 @@ static int zynqmp_ipi_mbox_probe(struct zynqmp_ipi_mbox *ipi_mbox, mchan->resp_buf_size = resource_size(&res); mchan->resp_buf = devm_ioremap(mdev, res.start, mchan->resp_buf_size); - if (IS_ERR(mchan->resp_buf)) { + if (!mchan->resp_buf) { dev_err(mdev, "Unable to map IPI buffer I/O memory\n"); - ret = PTR_ERR(mchan->resp_buf); - return ret; + return -ENOMEM; } } else if (ret != -ENODEV) { dev_err(mdev, "Unmatched resource %s.\n", name); diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h index 3d2b63585da9..217c838a1b40 100644 --- a/drivers/md/bcache/bcache.h +++ b/drivers/md/bcache/bcache.h @@ -264,7 +264,7 @@ struct bcache_device { #define BCACHE_DEV_UNLINK_DONE 2 #define BCACHE_DEV_WB_RUNNING 3 #define BCACHE_DEV_RATE_DW_RUNNING 4 - unsigned int nr_stripes; + int nr_stripes; unsigned int stripe_size; atomic_t *stripe_sectors_dirty; unsigned long *full_dirty_stripes; diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c index 08768796b543..fda68c00ddd5 100644 --- a/drivers/md/bcache/bset.c +++ b/drivers/md/bcache/bset.c @@ -321,7 +321,7 @@ int bch_btree_keys_alloc(struct btree_keys *b, b->page_order = page_order; - t->data = (void *) __get_free_pages(gfp, b->page_order); + t->data = (void *) __get_free_pages(__GFP_COMP|gfp, b->page_order); if (!t->data) goto err; diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c index 46a8b5a91c38..46556bde032e 100644 --- a/drivers/md/bcache/btree.c +++ b/drivers/md/bcache/btree.c @@ -840,7 +840,7 @@ int bch_btree_cache_alloc(struct cache_set *c) mutex_init(&c->verify_lock); c->verify_ondisk = (void *) - __get_free_pages(GFP_KERNEL, ilog2(bucket_pages(c))); + __get_free_pages(GFP_KERNEL|__GFP_COMP, ilog2(bucket_pages(c))); c->verify_data 
= mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL); @@ -1442,7 +1442,7 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op, if (__set_blocks(n1, n1->keys + n2->keys, block_bytes(b->c)) > btree_blocks(new_nodes[i])) - goto out_nocoalesce; + goto out_unlock_nocoalesce; keys = n2->keys; /* Take the key of the node we're getting rid of */ @@ -1471,7 +1471,7 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op, if (__bch_keylist_realloc(&keylist, bkey_u64s(&new_nodes[i]->key))) - goto out_nocoalesce; + goto out_unlock_nocoalesce; bch_btree_node_write(new_nodes[i], &cl); bch_keylist_add(&keylist, &new_nodes[i]->key); @@ -1517,6 +1517,10 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op, /* Invalidated our iterator */ return -EINTR; +out_unlock_nocoalesce: + for (i = 0; i < nodes; i++) + mutex_unlock(&new_nodes[i]->write_lock); + out_nocoalesce: closure_sync(&cl); diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c index 6730820780b0..8250d2d1d780 100644 --- a/drivers/md/bcache/journal.c +++ b/drivers/md/bcache/journal.c @@ -1002,8 +1002,8 @@ int bch_journal_alloc(struct cache_set *c) j->w[1].c = c; if (!(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL)) || - !(j->w[0].data = (void *) __get_free_pages(GFP_KERNEL, JSET_BITS)) || - !(j->w[1].data = (void *) __get_free_pages(GFP_KERNEL, JSET_BITS))) + !(j->w[0].data = (void *) __get_free_pages(GFP_KERNEL|__GFP_COMP, JSET_BITS)) || + !(j->w[1].data = (void *) __get_free_pages(GFP_KERNEL|__GFP_COMP, JSET_BITS))) return -ENOMEM; return 0; diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index 658b0f4a01f5..2cbfcd99b7ee 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -789,7 +789,9 @@ static void bcache_device_free(struct bcache_device *d) bcache_device_detach(d); if (disk) { - if (disk->flags & GENHD_FL_UP) + bool disk_added = (disk->flags & GENHD_FL_UP) != 0; + + if (disk_added) del_gendisk(disk); if (disk->queue) @@ -797,7 +799,8 @@ static void bcache_device_free(struct bcache_device *d) ida_simple_remove(&bcache_device_idx, first_minor_to_idx(disk->first_minor)); - put_disk(disk); + if (disk_added) + put_disk(disk); } bioset_exit(&d->bio_split); @@ -813,19 +816,19 @@ static int bcache_device_init(struct bcache_device *d, unsigned int block_size, struct request_queue *q; const size_t max_stripes = min_t(size_t, INT_MAX, SIZE_MAX / sizeof(atomic_t)); - size_t n; + uint64_t n; int idx; if (!d->stripe_size) d->stripe_size = 1 << 31; - d->nr_stripes = DIV_ROUND_UP_ULL(sectors, d->stripe_size); - - if (!d->nr_stripes || d->nr_stripes > max_stripes) { - pr_err("nr_stripes too large or invalid: %u (start sector beyond end of disk?)", - (unsigned int)d->nr_stripes); + n = DIV_ROUND_UP_ULL(sectors, d->stripe_size); + if (!n || n > max_stripes) { + pr_err("nr_stripes too large or invalid: %llu (start sector beyond end of disk?)\n", + n); return -ENOMEM; } + d->nr_stripes = n; n = d->nr_stripes * sizeof(atomic_t); d->stripe_sectors_dirty = kvzalloc(n, GFP_KERNEL); @@ -1751,7 +1754,7 @@ void bch_cache_set_unregister(struct cache_set *c) } #define alloc_bucket_pages(gfp, c) \ - ((void *) __get_free_pages(__GFP_ZERO|gfp, ilog2(bucket_pages(c)))) + ((void *) __get_free_pages(__GFP_ZERO|__GFP_COMP|gfp, ilog2(bucket_pages(c)))) struct cache_set *bch_cache_set_alloc(struct cache_sb *sb) { @@ -2088,7 +2091,14 @@ found: sysfs_create_link(&c->kobj, &ca->kobj, buf)) goto err; - if (ca->sb.seq > c->sb.seq) { + /* + * A special case is both ca->sb.seq and c->sb.seq are 0, 
+ * such condition happens on a new created cache device whose + * super block is never flushed yet. In this case c->sb.version + * and other members should be updated too, otherwise we will + * have a mistaken super block version in cache set. + */ + if (ca->sb.seq > c->sb.seq || c->sb.seq == 0) { c->sb.version = ca->sb.version; memcpy(c->sb.set_uuid, ca->sb.set_uuid, 16); c->sb.flags = ca->sb.flags; diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c index d60268fe49e1..0b02210ab435 100644 --- a/drivers/md/bcache/writeback.c +++ b/drivers/md/bcache/writeback.c @@ -519,15 +519,19 @@ void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned int inode, uint64_t offset, int nr_sectors) { struct bcache_device *d = c->devices[inode]; - unsigned int stripe_offset, stripe, sectors_dirty; + unsigned int stripe_offset, sectors_dirty; + int stripe; if (!d) return; + stripe = offset_to_stripe(d, offset); + if (stripe < 0) + return; + if (UUID_FLASH_ONLY(&c->uuids[inode])) atomic_long_add(nr_sectors, &c->flash_dev_dirty_sectors); - stripe = offset_to_stripe(d, offset); stripe_offset = offset & (d->stripe_size - 1); while (nr_sectors) { @@ -567,12 +571,12 @@ static bool dirty_pred(struct keybuf *buf, struct bkey *k) static void refill_full_stripes(struct cached_dev *dc) { struct keybuf *buf = &dc->writeback_keys; - unsigned int start_stripe, stripe, next_stripe; + unsigned int start_stripe, next_stripe; + int stripe; bool wrapped = false; stripe = offset_to_stripe(&dc->disk, KEY_OFFSET(&buf->last_scanned)); - - if (stripe >= dc->disk.nr_stripes) + if (stripe < 0) stripe = 0; start_stripe = stripe; diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h index 4e4c6810dc3c..c4ff76037227 100644 --- a/drivers/md/bcache/writeback.h +++ b/drivers/md/bcache/writeback.h @@ -33,10 +33,22 @@ static inline uint64_t bcache_dev_sectors_dirty(struct bcache_device *d) return ret; } -static inline unsigned int offset_to_stripe(struct bcache_device *d, +static inline int offset_to_stripe(struct bcache_device *d, uint64_t offset) { do_div(offset, d->stripe_size); + + /* d->nr_stripes is in range [1, INT_MAX] */ + if (unlikely(offset >= d->nr_stripes)) { + pr_err("Invalid stripe %llu (>= nr_stripes %d).\n", + offset, d->nr_stripes); + return -EINVAL; + } + + /* + * Here offset is definitly smaller than INT_MAX, + * return it as int will never overflow. 
+ */ return offset; } @@ -44,7 +56,10 @@ static inline bool bcache_dev_stripe_dirty(struct cached_dev *dc, uint64_t offset, unsigned int nr_sectors) { - unsigned int stripe = offset_to_stripe(&dc->disk, offset); + int stripe = offset_to_stripe(&dc->disk, offset); + + if (stripe < 0) + return false; while (1) { if (atomic_read(dc->disk.stripe_sectors_dirty + stripe)) diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c index 151aa95775be..af6d4f898e4c 100644 --- a/drivers/md/dm-cache-metadata.c +++ b/drivers/md/dm-cache-metadata.c @@ -537,12 +537,16 @@ static int __create_persistent_data_objects(struct dm_cache_metadata *cmd, CACHE_MAX_CONCURRENT_LOCKS); if (IS_ERR(cmd->bm)) { DMERR("could not create block manager"); - return PTR_ERR(cmd->bm); + r = PTR_ERR(cmd->bm); + cmd->bm = NULL; + return r; } r = __open_or_format_metadata(cmd, may_format_device); - if (r) + if (r) { dm_block_manager_destroy(cmd->bm); + cmd->bm = NULL; + } return r; } diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 492bbe0584d9..1af82fbbac0c 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -720,7 +720,7 @@ static int crypt_iv_eboiv_gen(struct crypt_config *cc, u8 *iv, u8 buf[MAX_CIPHER_BLOCKSIZE] __aligned(__alignof__(__le64)); struct skcipher_request *req; struct scatterlist src, dst; - struct crypto_wait wait; + DECLARE_CRYPTO_WAIT(wait); int err; req = skcipher_request_alloc(any_tfm(cc), GFP_NOIO); @@ -2957,7 +2957,7 @@ static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits) limits->max_segment_size = PAGE_SIZE; limits->logical_block_size = - max_t(unsigned short, limits->logical_block_size, cc->sector_size); + max_t(unsigned, limits->logical_block_size, cc->sector_size); limits->physical_block_size = max_t(unsigned, limits->physical_block_size, cc->sector_size); limits->io_min = max_t(unsigned, limits->io_min, cc->sector_size); diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c index 56248773a9e0..d6edfe84e749 100644 --- a/drivers/md/dm-integrity.c +++ b/drivers/md/dm-integrity.c @@ -2298,7 +2298,7 @@ static void integrity_writer(struct work_struct *w) unsigned prev_free_sectors; /* the following test is not needed, but it tests the replay code */ - if (unlikely(dm_suspended(ic->ti)) && !ic->meta_dev) + if (unlikely(dm_post_suspending(ic->ti)) && !ic->meta_dev) return; spin_lock_irq(&ic->endio_wait.lock); @@ -2359,12 +2359,13 @@ static void integrity_recalc(struct work_struct *w) next_chunk: - if (unlikely(dm_suspended(ic->ti))) + if (unlikely(dm_post_suspending(ic->ti))) goto unlock_ret; range.logical_sector = le64_to_cpu(ic->sb->recalc_sector); if (unlikely(range.logical_sector >= ic->provided_data_sectors)) { if (ic->mode == 'B') { + block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR); DEBUG_print("queue_delayed_work: bitmap_flush_work\n"); queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, 0); } @@ -2442,6 +2443,17 @@ next_chunk: goto err; } + if (ic->mode == 'B') { + sector_t start, end; + start = (range.logical_sector >> + (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit)) << + (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit); + end = ((range.logical_sector + range.n_sectors) >> + (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit)) << + (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit); + block_bitmap_op(ic, ic->recalc_bitmap, start, end - start, BITMAP_OP_CLEAR); + } + advance_and_next: cond_resched(); diff 
--git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index 456d790c918c..54ecfea2cf47 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c @@ -1190,17 +1190,25 @@ static void multipath_wait_for_pg_init_completion(struct multipath *m) static void flush_multipath_work(struct multipath *m) { if (m->hw_handler_name) { - set_bit(MPATHF_PG_INIT_DISABLED, &m->flags); - smp_mb__after_atomic(); + unsigned long flags; + + if (!atomic_read(&m->pg_init_in_progress)) + goto skip; + + spin_lock_irqsave(&m->lock, flags); + if (atomic_read(&m->pg_init_in_progress) && + !test_and_set_bit(MPATHF_PG_INIT_DISABLED, &m->flags)) { + spin_unlock_irqrestore(&m->lock, flags); - if (atomic_read(&m->pg_init_in_progress)) flush_workqueue(kmpath_handlerd); - multipath_wait_for_pg_init_completion(m); + multipath_wait_for_pg_init_completion(m); - clear_bit(MPATHF_PG_INIT_DISABLED, &m->flags); - smp_mb__after_atomic(); + spin_lock_irqsave(&m->lock, flags); + clear_bit(MPATHF_PG_INIT_DISABLED, &m->flags); + } + spin_unlock_irqrestore(&m->lock, flags); } - +skip: if (m->queue_mode == DM_TYPE_BIO_BASED) flush_work(&m->process_queued_bios); flush_work(&m->trigger_event); @@ -1856,7 +1864,7 @@ static int multipath_prepare_ioctl(struct dm_target *ti, int r; current_pgpath = READ_ONCE(m->current_pgpath); - if (!current_pgpath) + if (!current_pgpath || !test_bit(MPATHF_QUEUE_IO, &m->flags)) current_pgpath = choose_pgpath(m, 0); if (current_pgpath) { diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c index 3f8577e2c13b..2bd2444ad99c 100644 --- a/drivers/md/dm-rq.c +++ b/drivers/md/dm-rq.c @@ -70,9 +70,6 @@ void dm_start_queue(struct request_queue *q) void dm_stop_queue(struct request_queue *q) { - if (blk_mq_queue_stopped(q)) - return; - blk_mq_quiesce_queue(q); } diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 52e049554f5c..edc366075913 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -882,10 +882,14 @@ EXPORT_SYMBOL_GPL(dm_table_set_type); int device_supports_dax(struct dm_target *ti, struct dm_dev *dev, sector_t start, sector_t len, void *data) { - int blocksize = *(int *) data; + int blocksize = *(int *) data, id; + bool rc; - return generic_fsdax_supported(dev->dax_dev, dev->bdev, blocksize, - start, len); + id = dax_read_lock(); + rc = dax_supported(dev->dax_dev, dev->bdev, blocksize, start, len); + dax_read_unlock(id); + + return rc; } /* Check devices support synchronous DAX */ diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c index 4cd8868f8004..a5ed59eafdc5 100644 --- a/drivers/md/dm-thin-metadata.c +++ b/drivers/md/dm-thin-metadata.c @@ -739,12 +739,16 @@ static int __create_persistent_data_objects(struct dm_pool_metadata *pmd, bool f THIN_MAX_CONCURRENT_LOCKS); if (IS_ERR(pmd->bm)) { DMERR("could not create block manager"); - return PTR_ERR(pmd->bm); + r = PTR_ERR(pmd->bm); + pmd->bm = NULL; + return r; } r = __open_or_format_metadata(pmd, format_device); - if (r) + if (r) { dm_block_manager_destroy(pmd->bm); + pmd->bm = NULL; + } return r; } @@ -954,7 +958,7 @@ int dm_pool_metadata_close(struct dm_pool_metadata *pmd) } pmd_write_lock_in_core(pmd); - if (!dm_bm_is_read_only(pmd->bm) && !pmd->fail_io) { + if (!pmd->fail_io && !dm_bm_is_read_only(pmd->bm)) { r = __commit_transaction(pmd); if (r < 0) DMWARN("%s: __commit_transaction() failed, error = %d", diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c index 0d6ca723257f..4e414b06192e 100644 --- a/drivers/md/dm-writecache.c +++ b/drivers/md/dm-writecache.c @@ -224,6 +224,7 @@ 
static int persistent_memory_claim(struct dm_writecache *wc) pfn_t pfn; int id; struct page **pages; + sector_t offset; wc->memory_vmapped = false; @@ -242,9 +243,16 @@ static int persistent_memory_claim(struct dm_writecache *wc) goto err1; } + offset = get_start_sect(wc->ssd_dev->bdev); + if (offset & (PAGE_SIZE / 512 - 1)) { + r = -EINVAL; + goto err1; + } + offset >>= PAGE_SHIFT - 9; + id = dax_read_lock(); - da = dax_direct_access(wc->ssd_dev->dax_dev, 0, p, &wc->memory_map, &pfn); + da = dax_direct_access(wc->ssd_dev->dax_dev, offset, p, &wc->memory_map, &pfn); if (da < 0) { wc->memory_map = NULL; r = da; @@ -266,7 +274,7 @@ static int persistent_memory_claim(struct dm_writecache *wc) i = 0; do { long daa; - daa = dax_direct_access(wc->ssd_dev->dax_dev, i, p - i, + daa = dax_direct_access(wc->ssd_dev->dax_dev, offset + i, p - i, NULL, &pfn); if (daa <= 0) { r = daa ? daa : -EINVAL; @@ -279,6 +287,8 @@ static int persistent_memory_claim(struct dm_writecache *wc) while (daa-- && i < p) { pages[i++] = pfn_t_to_page(pfn); pfn.val++; + if (!(i & 15)) + cond_resched(); } } while (i < p); wc->memory_map = vmap(pages, p, VM_MAP, PAGE_KERNEL); @@ -805,6 +815,8 @@ static void writecache_discard(struct dm_writecache *wc, sector_t start, sector_ writecache_wait_for_ios(wc, WRITE); discarded_something = true; } + if (!writecache_entry_is_committed(wc, e)) + wc->uncommitted_blocks--; writecache_free_entry(wc, e); } @@ -2100,6 +2112,12 @@ invalid_optional: } if (WC_MODE_PMEM(wc)) { + if (!dax_synchronous(wc->ssd_dev->dax_dev)) { + r = -EOPNOTSUPP; + ti->error = "Asynchronous persistent memory not supported as pmem cache"; + goto bad; + } + r = persistent_memory_claim(wc); if (r) { ti->error = "Unable to map persistent memory for cache"; diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c index e0a6cf9239f1..e6b0039d07aa 100644 --- a/drivers/md/dm-zoned-metadata.c +++ b/drivers/md/dm-zoned-metadata.c @@ -1589,7 +1589,7 @@ static struct dm_zone *dmz_get_rnd_zone_for_reclaim(struct dmz_metadata *zmd) return dzone; } - return ERR_PTR(-EBUSY); + return NULL; } /* @@ -1609,7 +1609,7 @@ static struct dm_zone *dmz_get_seq_zone_for_reclaim(struct dmz_metadata *zmd) return zone; } - return ERR_PTR(-EBUSY); + return NULL; } /* diff --git a/drivers/md/dm-zoned-reclaim.c b/drivers/md/dm-zoned-reclaim.c index e7ace908a9b7..d50817320e8e 100644 --- a/drivers/md/dm-zoned-reclaim.c +++ b/drivers/md/dm-zoned-reclaim.c @@ -349,8 +349,8 @@ static int dmz_do_reclaim(struct dmz_reclaim *zrc) /* Get a data zone */ dzone = dmz_get_zone_for_reclaim(zmd); - if (IS_ERR(dzone)) - return PTR_ERR(dzone); + if (!dzone) + return -EBUSY; start = jiffies; diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c index 03267609b515..6e4f3ef2dd50 100644 --- a/drivers/md/dm-zoned-target.c +++ b/drivers/md/dm-zoned-target.c @@ -790,7 +790,7 @@ static int dmz_ctr(struct dm_target *ti, unsigned int argc, char **argv) } /* Set target (no write same support) */ - ti->max_io_len = dev->zone_nr_sectors << 9; + ti->max_io_len = dev->zone_nr_sectors; ti->num_flush_bios = 1; ti->num_discard_bios = 1; ti->num_write_zeroes_bios = 1; diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 1e6e0c970e19..63d59e2ed158 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include #include @@ -140,6 +141,7 @@ EXPORT_SYMBOL_GPL(dm_bio_get_target_bio_nr); #define DMF_NOFLUSH_SUSPENDING 5 #define DMF_DEFERRED_REMOVE 6 #define DMF_SUSPENDED_INTERNALLY 7 
+#define DMF_POST_SUSPENDING 8 #define DM_NUMA_NODE NUMA_NO_NODE static int dm_numa_node = DM_NUMA_NODE; @@ -1110,15 +1112,16 @@ static bool dm_dax_supported(struct dax_device *dax_dev, struct block_device *bd { struct mapped_device *md = dax_get_private(dax_dev); struct dm_table *map; + bool ret = false; int srcu_idx; - bool ret; map = dm_get_live_table(md, &srcu_idx); if (!map) - return false; + goto out; ret = dm_table_supports_dax(map, device_supports_dax, &blocksize); +out: dm_put_live_table(md, srcu_idx); return ret; @@ -1436,9 +1439,6 @@ static int __send_empty_flush(struct clone_info *ci) BUG_ON(bio_has_data(ci->bio)); while ((ti = dm_table_get_target(ci->map, target_nr++))) __send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL); - - bio_disassociate_blkg(ci->bio); - return 0; } @@ -1626,6 +1626,7 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md, ci.bio = &flush_bio; ci.sector_count = 0; error = __send_empty_flush(&ci); + bio_uninit(ci.bio); /* dec_pending submits any data associated with flush */ } else if (bio_op(bio) == REQ_OP_ZONE_RESET) { ci.bio = bio; @@ -1700,6 +1701,7 @@ static blk_qc_t __process_bio(struct mapped_device *md, struct dm_table *map, ci.bio = &flush_bio; ci.sector_count = 0; error = __send_empty_flush(&ci); + bio_uninit(ci.bio); /* dec_pending submits any data associated with flush */ } else { struct dm_target_io *tio; @@ -2390,6 +2392,7 @@ static void __dm_destroy(struct mapped_device *md, bool wait) if (!dm_suspended_md(md)) { dm_table_presuspend_targets(map); set_bit(DMF_SUSPENDED, &md->flags); + set_bit(DMF_POST_SUSPENDING, &md->flags); dm_table_postsuspend_targets(map); } /* dm_put_live_table must be before msleep, otherwise deadlock is possible */ @@ -2712,7 +2715,9 @@ retry: if (r) goto out_unlock; + set_bit(DMF_POST_SUSPENDING, &md->flags); dm_table_postsuspend_targets(map); + clear_bit(DMF_POST_SUSPENDING, &md->flags); out_unlock: mutex_unlock(&md->suspend_lock); @@ -2809,7 +2814,9 @@ static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_fla (void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE, DMF_SUSPENDED_INTERNALLY); + set_bit(DMF_POST_SUSPENDING, &md->flags); dm_table_postsuspend_targets(map); + clear_bit(DMF_POST_SUSPENDING, &md->flags); } static void __dm_internal_resume(struct mapped_device *md) @@ -2886,17 +2893,25 @@ EXPORT_SYMBOL_GPL(dm_internal_resume_fast); int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action, unsigned cookie) { + int r; + unsigned noio_flag; char udev_cookie[DM_COOKIE_LENGTH]; char *envp[] = { udev_cookie, NULL }; + noio_flag = memalloc_noio_save(); + if (!cookie) - return kobject_uevent(&disk_to_dev(md->disk)->kobj, action); + r = kobject_uevent(&disk_to_dev(md->disk)->kobj, action); else { snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u", DM_COOKIE_ENV_VAR_NAME, cookie); - return kobject_uevent_env(&disk_to_dev(md->disk)->kobj, - action, envp); + r = kobject_uevent_env(&disk_to_dev(md->disk)->kobj, + action, envp); } + + memalloc_noio_restore(noio_flag); + + return r; } uint32_t dm_next_uevent_seq(struct mapped_device *md) @@ -2962,6 +2977,11 @@ int dm_suspended_md(struct mapped_device *md) return test_bit(DMF_SUSPENDED, &md->flags); } +static int dm_post_suspending_md(struct mapped_device *md) +{ + return test_bit(DMF_POST_SUSPENDING, &md->flags); +} + int dm_suspended_internally_md(struct mapped_device *md) { return test_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); @@ -2978,6 +2998,12 @@ int dm_suspended(struct dm_target *ti) } 
EXPORT_SYMBOL_GPL(dm_suspended); +int dm_post_suspending(struct dm_target *ti) +{ + return dm_post_suspending_md(dm_table_get_md(ti->table)); +} +EXPORT_SYMBOL_GPL(dm_post_suspending); + int dm_noflush_suspending(struct dm_target *ti) { return __noflush_suspending(dm_table_get_md(ti->table)); diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c index 813a99ffa86f..d50737ec4039 100644 --- a/drivers/md/md-cluster.c +++ b/drivers/md/md-cluster.c @@ -1139,6 +1139,7 @@ static int resize_bitmaps(struct mddev *mddev, sector_t newsize, sector_t oldsiz bitmap = get_bitmap_from_slot(mddev, i); if (IS_ERR(bitmap)) { pr_err("can't get bitmap from slot %d\n", i); + bitmap = NULL; goto out; } counts = &bitmap->counts; @@ -1518,6 +1519,7 @@ static void unlock_all_bitmaps(struct mddev *mddev) } } kfree(cinfo->other_bitmap_lockres); + cinfo->other_bitmap_lockres = NULL; } } diff --git a/drivers/md/md.c b/drivers/md/md.c index 6b69a12ca2d8..acef01e519d0 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -376,6 +376,11 @@ static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio) struct mddev *mddev = q->queuedata; unsigned int sectors; + if (mddev == NULL || mddev->pers == NULL) { + bio_io_error(bio); + return BLK_QC_T_NONE; + } + if (unlikely(test_bit(MD_BROKEN, &mddev->flags)) && (rw == WRITE)) { bio_io_error(bio); return BLK_QC_T_NONE; @@ -383,10 +388,6 @@ static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio) blk_queue_split(q, &bio); - if (mddev == NULL || mddev->pers == NULL) { - bio_io_error(bio); - return BLK_QC_T_NONE; - } if (mddev->ro == 1 && unlikely(rw == WRITE)) { if (bio_sectors(bio) != 0) bio->bi_status = BLK_STS_IOERR; @@ -7607,7 +7608,8 @@ static int md_open(struct block_device *bdev, fmode_t mode) */ mddev_put(mddev); /* Wait until bdev->bd_disk is definitely gone */ - flush_workqueue(md_misc_wq); + if (work_pending(&mddev->del_work)) + flush_workqueue(md_misc_wq); /* Then retry the open from the top */ return -ERESTARTSYS; } diff --git a/drivers/md/persistent-data/dm-block-manager.c b/drivers/md/persistent-data/dm-block-manager.c index 749ec268d957..54c089a50b15 100644 --- a/drivers/md/persistent-data/dm-block-manager.c +++ b/drivers/md/persistent-data/dm-block-manager.c @@ -493,7 +493,7 @@ int dm_bm_write_lock(struct dm_block_manager *bm, void *p; int r; - if (bm->read_only) + if (dm_bm_is_read_only(bm)) return -EPERM; p = dm_bufio_read(bm->bufio, b, (struct dm_buffer **) result); @@ -562,7 +562,7 @@ int dm_bm_write_lock_zero(struct dm_block_manager *bm, struct buffer_aux *aux; void *p; - if (bm->read_only) + if (dm_bm_is_read_only(bm)) return -EPERM; p = dm_bufio_new(bm->bufio, b, (struct dm_buffer **) result); @@ -602,7 +602,7 @@ EXPORT_SYMBOL_GPL(dm_bm_unlock); int dm_bm_flush(struct dm_block_manager *bm) { - if (bm->read_only) + if (dm_bm_is_read_only(bm)) return -EPERM; return dm_bufio_write_dirty_buffers(bm->bufio); @@ -616,19 +616,21 @@ void dm_bm_prefetch(struct dm_block_manager *bm, dm_block_t b) bool dm_bm_is_read_only(struct dm_block_manager *bm) { - return bm->read_only; + return (bm ? 
bm->read_only : true); } EXPORT_SYMBOL_GPL(dm_bm_is_read_only); void dm_bm_set_read_only(struct dm_block_manager *bm) { - bm->read_only = true; + if (bm) + bm->read_only = true; } EXPORT_SYMBOL_GPL(dm_bm_set_read_only); void dm_bm_set_read_write(struct dm_block_manager *bm) { - bm->read_only = false; + if (bm) + bm->read_only = false; } EXPORT_SYMBOL_GPL(dm_bm_set_read_write); diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 36cd7c2fbf40..02acd5d5a848 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -2228,14 +2228,19 @@ static int grow_stripes(struct r5conf *conf, int num) * of the P and Q blocks. */ static int scribble_alloc(struct raid5_percpu *percpu, - int num, int cnt, gfp_t flags) + int num, int cnt) { size_t obj_size = sizeof(struct page *) * (num+2) + sizeof(addr_conv_t) * (num+2); void *scribble; - scribble = kvmalloc_array(cnt, obj_size, flags); + /* + * If here is in raid array suspend context, it is in memalloc noio + * context as well, there is no potential recursive memory reclaim + * I/Os with the GFP_KERNEL flag. + */ + scribble = kvmalloc_array(cnt, obj_size, GFP_KERNEL); if (!scribble) return -ENOMEM; @@ -2267,8 +2272,7 @@ static int resize_chunks(struct r5conf *conf, int new_disks, int new_sectors) percpu = per_cpu_ptr(conf->percpu, cpu); err = scribble_alloc(percpu, new_disks, - new_sectors / STRIPE_SECTORS, - GFP_NOIO); + new_sectors / STRIPE_SECTORS); if (err) break; } @@ -3600,6 +3604,7 @@ static int need_this_block(struct stripe_head *sh, struct stripe_head_state *s, * is missing/faulty, then we need to read everything we can. */ if (sh->raid_conf->level != 6 && + sh->raid_conf->rmw_level != PARITY_DISABLE_RMW && sh->sector < sh->raid_conf->mddev->recovery_cp) /* reconstruct-write isn't being forced */ return 0; @@ -4835,7 +4840,7 @@ static void handle_stripe(struct stripe_head *sh) * or to load a block that is being partially written. 
*/ if (s.to_read || s.non_overwrite - || (conf->level == 6 && s.to_write && s.failed) + || (s.to_write && s.failed) || (s.syncing && (s.uptodate + s.compute < disks)) || s.replacing || s.expanding) @@ -6765,8 +6770,7 @@ static int alloc_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu conf->previous_raid_disks), max(conf->chunk_sectors, conf->prev_chunk_sectors) - / STRIPE_SECTORS, - GFP_KERNEL)) { + / STRIPE_SECTORS)) { free_scratch_buffer(conf, percpu); return -ENOMEM; } diff --git a/drivers/media/cec/cec-adap.c b/drivers/media/cec/cec-adap.c index b14c09cd9593..06383b26712b 100644 --- a/drivers/media/cec/cec-adap.c +++ b/drivers/media/cec/cec-adap.c @@ -1732,6 +1732,10 @@ int __cec_s_log_addrs(struct cec_adapter *adap, unsigned j; log_addrs->log_addr[i] = CEC_LOG_ADDR_INVALID; + if (log_addrs->log_addr_type[i] > CEC_LOG_ADDR_TYPE_UNREGISTERED) { + dprintk(1, "unknown logical address type\n"); + return -EINVAL; + } if (type_mask & (1 << log_addrs->log_addr_type[i])) { dprintk(1, "duplicate logical address type\n"); return -EINVAL; @@ -1752,10 +1756,6 @@ int __cec_s_log_addrs(struct cec_adapter *adap, dprintk(1, "invalid primary device type\n"); return -EINVAL; } - if (log_addrs->log_addr_type[i] > CEC_LOG_ADDR_TYPE_UNREGISTERED) { - dprintk(1, "unknown logical address type\n"); - return -EINVAL; - } for (j = 0; j < feature_sz; j++) { if ((features[j] & 0x80) == 0) { if (op_is_dev_features) diff --git a/drivers/media/cec/cec-api.c b/drivers/media/cec/cec-api.c index 12d676484472..ed75636a6fb3 100644 --- a/drivers/media/cec/cec-api.c +++ b/drivers/media/cec/cec-api.c @@ -147,7 +147,13 @@ static long cec_adap_g_log_addrs(struct cec_adapter *adap, struct cec_log_addrs log_addrs; mutex_lock(&adap->lock); - log_addrs = adap->log_addrs; + /* + * We use memcpy here instead of assignment since there is a + * hole at the end of struct cec_log_addrs that an assignment + * might ignore. So when we do copy_to_user() we could leak + * one byte of memory. 
+ */ + memcpy(&log_addrs, &adap->log_addrs, sizeof(log_addrs)); if (!adap->is_configured) memset(log_addrs.log_addr, CEC_LOG_ADDR_INVALID, sizeof(log_addrs.log_addr)); diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c index 917fe034af37..032b6d7dd582 100644 --- a/drivers/media/dvb-core/dvbdev.c +++ b/drivers/media/dvb-core/dvbdev.c @@ -707,9 +707,10 @@ int dvb_create_media_graph(struct dvb_adapter *adap, } if (ntuner && ndemod) { - pad_source = media_get_pad_index(tuner, true, + /* NOTE: first found tuner source pad presumed correct */ + pad_source = media_get_pad_index(tuner, false, PAD_SIGNAL_ANALOG); - if (pad_source) + if (pad_source < 0) return -EINVAL; ret = media_create_pad_links(mdev, MEDIA_ENT_F_TUNER, diff --git a/drivers/media/firewire/firedtv-fw.c b/drivers/media/firewire/firedtv-fw.c index 97144734eb05..3f1ca40b9b98 100644 --- a/drivers/media/firewire/firedtv-fw.c +++ b/drivers/media/firewire/firedtv-fw.c @@ -272,6 +272,8 @@ static int node_probe(struct fw_unit *unit, const struct ieee1394_device_id *id) name_len = fw_csr_string(unit->directory, CSR_MODEL, name, sizeof(name)); + if (name_len < 0) + return name_len; for (i = ARRAY_SIZE(model_names); --i; ) if (strlen(model_names[i]) <= name_len && strncmp(name, model_names[i], name_len) == 0) diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c index d6bb8aad4654..a644f9ab4e05 100644 --- a/drivers/media/i2c/ov5640.c +++ b/drivers/media/i2c/ov5640.c @@ -3171,8 +3171,8 @@ static int ov5640_probe(struct i2c_client *client) free_ctrls: v4l2_ctrl_handler_free(&sensor->ctrls.handler); entity_cleanup: - mutex_destroy(&sensor->lock); media_entity_cleanup(&sensor->sd.entity); + mutex_destroy(&sensor->lock); return ret; } @@ -3182,9 +3182,9 @@ static int ov5640_remove(struct i2c_client *client) struct ov5640_dev *sensor = to_ov5640_dev(sd); v4l2_async_unregister_subdev(&sensor->sd); - mutex_destroy(&sensor->lock); media_entity_cleanup(&sensor->sd.entity); v4l2_ctrl_handler_free(&sensor->ctrls.handler); + mutex_destroy(&sensor->lock); return 0; } diff --git a/drivers/media/mc/mc-request.c b/drivers/media/mc/mc-request.c index e3fca436c75b..c0782fd96c59 100644 --- a/drivers/media/mc/mc-request.c +++ b/drivers/media/mc/mc-request.c @@ -296,9 +296,18 @@ int media_request_alloc(struct media_device *mdev, int *alloc_fd) if (WARN_ON(!mdev->ops->req_alloc ^ !mdev->ops->req_free)) return -ENOMEM; + if (mdev->ops->req_alloc) + req = mdev->ops->req_alloc(mdev); + else + req = kzalloc(sizeof(*req), GFP_KERNEL); + if (!req) + return -ENOMEM; + fd = get_unused_fd_flags(O_CLOEXEC); - if (fd < 0) - return fd; + if (fd < 0) { + ret = fd; + goto err_free_req; + } filp = anon_inode_getfile("request", &request_fops, NULL, O_CLOEXEC); if (IS_ERR(filp)) { @@ -306,15 +315,6 @@ int media_request_alloc(struct media_device *mdev, int *alloc_fd) goto err_put_fd; } - if (mdev->ops->req_alloc) - req = mdev->ops->req_alloc(mdev); - else - req = kzalloc(sizeof(*req), GFP_KERNEL); - if (!req) { - ret = -ENOMEM; - goto err_fput; - } - filp->private_data = req; req->mdev = mdev; req->state = MEDIA_REQUEST_STATE_IDLE; @@ -336,12 +336,15 @@ int media_request_alloc(struct media_device *mdev, int *alloc_fd) return 0; -err_fput: - fput(filp); - err_put_fd: put_unused_fd(fd); +err_free_req: + if (mdev->ops->req_free) + mdev->ops->req_free(req); + else + kfree(req); + return ret; } diff --git a/drivers/media/pci/cx23885/cx23888-ir.c b/drivers/media/pci/cx23885/cx23888-ir.c index e880afe37f15..d59ca3601785 100644 --- 
a/drivers/media/pci/cx23885/cx23888-ir.c +++ b/drivers/media/pci/cx23885/cx23888-ir.c @@ -1167,8 +1167,11 @@ int cx23888_ir_probe(struct cx23885_dev *dev) return -ENOMEM; spin_lock_init(&state->rx_kfifo_lock); - if (kfifo_alloc(&state->rx_kfifo, CX23888_IR_RX_KFIFO_SIZE, GFP_KERNEL)) + if (kfifo_alloc(&state->rx_kfifo, CX23888_IR_RX_KFIFO_SIZE, + GFP_KERNEL)) { + kfree(state); return -ENOMEM; + } state->dev = dev; sd = &state->sd; diff --git a/drivers/media/pci/ttpci/av7110.c b/drivers/media/pci/ttpci/av7110.c index d0cdee1c6eb0..bf36b1e22b63 100644 --- a/drivers/media/pci/ttpci/av7110.c +++ b/drivers/media/pci/ttpci/av7110.c @@ -406,14 +406,15 @@ static void debiirq(unsigned long cookie) case DATA_CI_GET: { u8 *data = av7110->debi_virt; + u8 data_0 = data[0]; - if ((data[0] < 2) && data[2] == 0xff) { + if (data_0 < 2 && data[2] == 0xff) { int flags = 0; if (data[5] > 0) flags |= CA_CI_MODULE_PRESENT; if (data[5] > 5) flags |= CA_CI_MODULE_READY; - av7110->ci_slot[data[0]].flags = flags; + av7110->ci_slot[data_0].flags = flags; } else ci_get_data(&av7110->ci_rbuffer, av7110->debi_virt, diff --git a/drivers/media/pci/ttpci/budget-core.c b/drivers/media/pci/ttpci/budget-core.c index fadbdeeb4495..293867b9e796 100644 --- a/drivers/media/pci/ttpci/budget-core.c +++ b/drivers/media/pci/ttpci/budget-core.c @@ -369,20 +369,25 @@ static int budget_register(struct budget *budget) ret = dvbdemux->dmx.add_frontend(&dvbdemux->dmx, &budget->hw_frontend); if (ret < 0) - return ret; + goto err_release_dmx; budget->mem_frontend.source = DMX_MEMORY_FE; ret = dvbdemux->dmx.add_frontend(&dvbdemux->dmx, &budget->mem_frontend); if (ret < 0) - return ret; + goto err_release_dmx; ret = dvbdemux->dmx.connect_frontend(&dvbdemux->dmx, &budget->hw_frontend); if (ret < 0) - return ret; + goto err_release_dmx; dvb_net_init(&budget->dvb_adapter, &budget->dvb_net, &dvbdemux->dmx); return 0; + +err_release_dmx: + dvb_dmxdev_release(&budget->dmxdev); + dvb_dmx_release(&budget->demux); + return ret; } static void budget_unregister(struct budget *budget) diff --git a/drivers/media/platform/cros-ec-cec/cros-ec-cec.c b/drivers/media/platform/cros-ec-cec/cros-ec-cec.c index 4a3b3810fd89..31390ce2dbf2 100644 --- a/drivers/media/platform/cros-ec-cec/cros-ec-cec.c +++ b/drivers/media/platform/cros-ec-cec/cros-ec-cec.c @@ -278,11 +278,7 @@ static int cros_ec_cec_probe(struct platform_device *pdev) platform_set_drvdata(pdev, cros_ec_cec); cros_ec_cec->cros_ec = cros_ec; - ret = device_init_wakeup(&pdev->dev, 1); - if (ret) { - dev_err(&pdev->dev, "failed to initialize wakeup\n"); - return ret; - } + device_init_wakeup(&pdev->dev, 1); cros_ec_cec->adap = cec_allocate_adapter(&cros_ec_cec_ops, cros_ec_cec, DRV_NAME, diff --git a/drivers/media/platform/davinci/vpif_capture.c b/drivers/media/platform/davinci/vpif_capture.c index 71f4fe882d13..74f68ac3c9a7 100644 --- a/drivers/media/platform/davinci/vpif_capture.c +++ b/drivers/media/platform/davinci/vpif_capture.c @@ -1482,8 +1482,6 @@ probe_out: /* Unregister video device */ video_unregister_device(&ch->video_dev); } - kfree(vpif_obj.sd); - v4l2_device_unregister(&vpif_obj.v4l2_dev); return err; } diff --git a/drivers/media/platform/davinci/vpss.c b/drivers/media/platform/davinci/vpss.c index d38d2bbb6f0f..7000f0bf0b35 100644 --- a/drivers/media/platform/davinci/vpss.c +++ b/drivers/media/platform/davinci/vpss.c @@ -505,19 +505,31 @@ static void vpss_exit(void) static int __init vpss_init(void) { + int ret; + if (!request_mem_region(VPSS_CLK_CTRL, 4, "vpss_clock_control")) return 
-EBUSY; oper_cfg.vpss_regs_base2 = ioremap(VPSS_CLK_CTRL, 4); if (unlikely(!oper_cfg.vpss_regs_base2)) { - release_mem_region(VPSS_CLK_CTRL, 4); - return -ENOMEM; + ret = -ENOMEM; + goto err_ioremap; } writel(VPSS_CLK_CTRL_VENCCLKEN | - VPSS_CLK_CTRL_DACCLKEN, oper_cfg.vpss_regs_base2); + VPSS_CLK_CTRL_DACCLKEN, oper_cfg.vpss_regs_base2); - return platform_driver_register(&vpss_driver); + ret = platform_driver_register(&vpss_driver); + if (ret) + goto err_pd_register; + + return 0; + +err_pd_register: + iounmap(oper_cfg.vpss_regs_base2); +err_ioremap: + release_mem_region(VPSS_CLK_CTRL, 4); + return ret; } subsys_initcall(vpss_init); module_exit(vpss_exit); diff --git a/drivers/media/platform/exynos4-is/media-dev.c b/drivers/media/platform/exynos4-is/media-dev.c index 9aaf3b8060d5..9c31d950cddf 100644 --- a/drivers/media/platform/exynos4-is/media-dev.c +++ b/drivers/media/platform/exynos4-is/media-dev.c @@ -1270,6 +1270,9 @@ static int fimc_md_get_pinctrl(struct fimc_md *fmd) pctl->state_idle = pinctrl_lookup_state(pctl->pinctrl, PINCTRL_STATE_IDLE); + if (IS_ERR(pctl->state_idle)) + return PTR_ERR(pctl->state_idle); + return 0; } diff --git a/drivers/media/platform/marvell-ccic/mcam-core.c b/drivers/media/platform/marvell-ccic/mcam-core.c index 803baf97f06e..6de8b3d99fb9 100644 --- a/drivers/media/platform/marvell-ccic/mcam-core.c +++ b/drivers/media/platform/marvell-ccic/mcam-core.c @@ -1940,6 +1940,7 @@ int mccic_register(struct mcam_camera *cam) out: v4l2_async_notifier_unregister(&cam->notifier); v4l2_device_unregister(&cam->v4l2_dev); + v4l2_async_notifier_cleanup(&cam->notifier); return ret; } EXPORT_SYMBOL_GPL(mccic_register); @@ -1961,6 +1962,7 @@ void mccic_shutdown(struct mcam_camera *cam) v4l2_ctrl_handler_free(&cam->ctrl_handler); v4l2_async_notifier_unregister(&cam->notifier); v4l2_device_unregister(&cam->v4l2_dev); + v4l2_async_notifier_cleanup(&cam->notifier); } EXPORT_SYMBOL_GPL(mccic_shutdown); diff --git a/drivers/media/platform/omap3isp/isppreview.c b/drivers/media/platform/omap3isp/isppreview.c index 97d660606d98..b8c2b8bba826 100644 --- a/drivers/media/platform/omap3isp/isppreview.c +++ b/drivers/media/platform/omap3isp/isppreview.c @@ -2287,7 +2287,7 @@ static int preview_init_entities(struct isp_prev_device *prev) me->ops = &preview_media_ops; ret = media_entity_pads_init(me, PREV_PADS_NUM, pads); if (ret < 0) - return ret; + goto error_handler_free; preview_init_formats(sd, NULL); @@ -2320,6 +2320,8 @@ error_video_out: omap3isp_video_cleanup(&prev->video_in); error_video_in: media_entity_cleanup(&prev->subdev.entity); +error_handler_free: + v4l2_ctrl_handler_free(&prev->ctrls); return ret; } diff --git a/drivers/media/platform/qcom/camss/camss.c b/drivers/media/platform/qcom/camss/camss.c index 3fdc9f964a3c..2483641799df 100644 --- a/drivers/media/platform/qcom/camss/camss.c +++ b/drivers/media/platform/qcom/camss/camss.c @@ -504,7 +504,6 @@ static int camss_of_parse_ports(struct camss *camss) return num_subdevs; err_cleanup: - v4l2_async_notifier_cleanup(&camss->notifier); of_node_put(node); return ret; } @@ -835,29 +834,38 @@ static int camss_probe(struct platform_device *pdev) camss->csid_num = 4; camss->vfe_num = 2; } else { - return -EINVAL; + ret = -EINVAL; + goto err_free; } camss->csiphy = devm_kcalloc(dev, camss->csiphy_num, sizeof(*camss->csiphy), GFP_KERNEL); - if (!camss->csiphy) - return -ENOMEM; + if (!camss->csiphy) { + ret = -ENOMEM; + goto err_free; + } camss->csid = devm_kcalloc(dev, camss->csid_num, sizeof(*camss->csid), GFP_KERNEL); - if 
(!camss->csid) - return -ENOMEM; + if (!camss->csid) { + ret = -ENOMEM; + goto err_free; + } camss->vfe = devm_kcalloc(dev, camss->vfe_num, sizeof(*camss->vfe), GFP_KERNEL); - if (!camss->vfe) - return -ENOMEM; + if (!camss->vfe) { + ret = -ENOMEM; + goto err_free; + } v4l2_async_notifier_init(&camss->notifier); num_subdevs = camss_of_parse_ports(camss); - if (num_subdevs < 0) - return num_subdevs; + if (num_subdevs < 0) { + ret = num_subdevs; + goto err_cleanup; + } ret = camss_init_subdevices(camss); if (ret < 0) @@ -936,6 +944,8 @@ err_register_entities: v4l2_device_unregister(&camss->v4l2_dev); err_cleanup: v4l2_async_notifier_cleanup(&camss->notifier); +err_free: + kfree(camss); return ret; } diff --git a/drivers/media/platform/rcar-fcp.c b/drivers/media/platform/rcar-fcp.c index 43c78620c9d8..5c6b00737fe7 100644 --- a/drivers/media/platform/rcar-fcp.c +++ b/drivers/media/platform/rcar-fcp.c @@ -8,6 +8,7 @@ */ #include +#include #include #include #include @@ -21,6 +22,7 @@ struct rcar_fcp_device { struct list_head list; struct device *dev; + struct device_dma_parameters dma_parms; }; static LIST_HEAD(fcp_devices); @@ -136,6 +138,9 @@ static int rcar_fcp_probe(struct platform_device *pdev) fcp->dev = &pdev->dev; + fcp->dev->dma_parms = &fcp->dma_parms; + dma_set_max_seg_size(fcp->dev, DMA_BIT_MASK(32)); + pm_runtime_enable(&pdev->dev); mutex_lock(&fcp_lock); diff --git a/drivers/media/platform/rockchip/rga/rga-hw.c b/drivers/media/platform/rockchip/rga/rga-hw.c index 4be6dcf292ff..aaa96f256356 100644 --- a/drivers/media/platform/rockchip/rga/rga-hw.c +++ b/drivers/media/platform/rockchip/rga/rga-hw.c @@ -200,22 +200,25 @@ static void rga_cmd_set_trans_info(struct rga_ctx *ctx) dst_info.data.format = ctx->out.fmt->hw_format; dst_info.data.swap = ctx->out.fmt->color_swap; - if (ctx->in.fmt->hw_format >= RGA_COLOR_FMT_YUV422SP) { - if (ctx->out.fmt->hw_format < RGA_COLOR_FMT_YUV422SP) { - switch (ctx->in.colorspace) { - case V4L2_COLORSPACE_REC709: - src_info.data.csc_mode = - RGA_SRC_CSC_MODE_BT709_R0; - break; - default: - src_info.data.csc_mode = - RGA_SRC_CSC_MODE_BT601_R0; - break; - } + /* + * CSC mode must only be set when the colorspace families differ between + * input and output. It must remain unset (zeroed) if both are the same. 
+ */ + + if (RGA_COLOR_FMT_IS_YUV(ctx->in.fmt->hw_format) && + RGA_COLOR_FMT_IS_RGB(ctx->out.fmt->hw_format)) { + switch (ctx->in.colorspace) { + case V4L2_COLORSPACE_REC709: + src_info.data.csc_mode = RGA_SRC_CSC_MODE_BT709_R0; + break; + default: + src_info.data.csc_mode = RGA_SRC_CSC_MODE_BT601_R0; + break; } } - if (ctx->out.fmt->hw_format >= RGA_COLOR_FMT_YUV422SP) { + if (RGA_COLOR_FMT_IS_RGB(ctx->in.fmt->hw_format) && + RGA_COLOR_FMT_IS_YUV(ctx->out.fmt->hw_format)) { switch (ctx->out.colorspace) { case V4L2_COLORSPACE_REC709: dst_info.data.csc_mode = RGA_SRC_CSC_MODE_BT709_R0; diff --git a/drivers/media/platform/rockchip/rga/rga-hw.h b/drivers/media/platform/rockchip/rga/rga-hw.h index 96cb0314dfa7..e8917e5630a4 100644 --- a/drivers/media/platform/rockchip/rga/rga-hw.h +++ b/drivers/media/platform/rockchip/rga/rga-hw.h @@ -95,6 +95,11 @@ #define RGA_COLOR_FMT_CP_8BPP 15 #define RGA_COLOR_FMT_MASK 15 +#define RGA_COLOR_FMT_IS_YUV(fmt) \ + (((fmt) >= RGA_COLOR_FMT_YUV422SP) && ((fmt) < RGA_COLOR_FMT_CP_1BPP)) +#define RGA_COLOR_FMT_IS_RGB(fmt) \ + ((fmt) < RGA_COLOR_FMT_YUV422SP) + #define RGA_COLOR_NONE_SWAP 0 #define RGA_COLOR_RB_SWAP 1 #define RGA_COLOR_ALPHA_SWAP 2 diff --git a/drivers/media/platform/vicodec/vicodec-core.c b/drivers/media/platform/vicodec/vicodec-core.c index 82350097503e..c77281d43f89 100644 --- a/drivers/media/platform/vicodec/vicodec-core.c +++ b/drivers/media/platform/vicodec/vicodec-core.c @@ -2052,6 +2052,7 @@ static int vicodec_request_validate(struct media_request *req) } ctrl = v4l2_ctrl_request_hdl_ctrl_find(hdl, vicodec_ctrl_stateless_state.id); + v4l2_ctrl_request_hdl_put(hdl); if (!ctrl) { v4l2_info(&ctx->dev->v4l2_dev, "Missing required codec control\n"); @@ -2172,16 +2173,19 @@ static int vicodec_probe(struct platform_device *pdev) platform_set_drvdata(pdev, dev); - if (register_instance(dev, &dev->stateful_enc, - "stateful-encoder", true)) + ret = register_instance(dev, &dev->stateful_enc, "stateful-encoder", + true); + if (ret) goto unreg_dev; - if (register_instance(dev, &dev->stateful_dec, - "stateful-decoder", false)) + ret = register_instance(dev, &dev->stateful_dec, "stateful-decoder", + false); + if (ret) goto unreg_sf_enc; - if (register_instance(dev, &dev->stateless_dec, - "stateless-decoder", false)) + ret = register_instance(dev, &dev->stateless_dec, "stateless-decoder", + false); + if (ret) goto unreg_sf_dec; #ifdef CONFIG_MEDIA_CONTROLLER diff --git a/drivers/media/platform/vsp1/vsp1_dl.c b/drivers/media/platform/vsp1/vsp1_dl.c index d7b43037e500..e07b135613eb 100644 --- a/drivers/media/platform/vsp1/vsp1_dl.c +++ b/drivers/media/platform/vsp1/vsp1_dl.c @@ -431,6 +431,8 @@ vsp1_dl_cmd_pool_create(struct vsp1_device *vsp1, enum vsp1_extcmd_type type, if (!pool) return NULL; + pool->vsp1 = vsp1; + spin_lock_init(&pool->lock); INIT_LIST_HEAD(&pool->free); diff --git a/drivers/media/rc/gpio-ir-tx.c b/drivers/media/rc/gpio-ir-tx.c index 18ca12d78314..66703989ae18 100644 --- a/drivers/media/rc/gpio-ir-tx.c +++ b/drivers/media/rc/gpio-ir-tx.c @@ -79,13 +79,8 @@ static int gpio_ir_tx(struct rc_dev *dev, unsigned int *txbuf, // space edge = ktime_add_us(edge, txbuf[i]); delta = ktime_us_delta(edge, ktime_get()); - if (delta > 10) { - spin_unlock_irqrestore(&gpio_ir->lock, flags); - usleep_range(delta, delta + 10); - spin_lock_irqsave(&gpio_ir->lock, flags); - } else if (delta > 0) { + if (delta > 0) udelay(delta); - } } else { // pulse ktime_t last = ktime_add_us(edge, txbuf[i]); diff --git a/drivers/media/rc/rc-main.c 
b/drivers/media/rc/rc-main.c index 6f80c251f641..e84f9dccf448 100644 --- a/drivers/media/rc/rc-main.c +++ b/drivers/media/rc/rc-main.c @@ -1256,6 +1256,10 @@ static ssize_t store_protocols(struct device *device, } mutex_lock(&dev->lock); + if (!dev->registered) { + mutex_unlock(&dev->lock); + return -ENODEV; + } old_protocols = *current_protocols; new_protocols = old_protocols; @@ -1394,6 +1398,10 @@ static ssize_t store_filter(struct device *device, return -EINVAL; mutex_lock(&dev->lock); + if (!dev->registered) { + mutex_unlock(&dev->lock); + return -ENODEV; + } new_filter = *filter; if (fattr->mask) @@ -1508,6 +1516,10 @@ static ssize_t store_wakeup_protocols(struct device *device, int i; mutex_lock(&dev->lock); + if (!dev->registered) { + mutex_unlock(&dev->lock); + return -ENODEV; + } allowed = dev->allowed_wakeup_protocols; @@ -1565,25 +1577,25 @@ static void rc_dev_release(struct device *device) kfree(dev); } -#define ADD_HOTPLUG_VAR(fmt, val...) \ - do { \ - int err = add_uevent_var(env, fmt, val); \ - if (err) \ - return err; \ - } while (0) - static int rc_dev_uevent(struct device *device, struct kobj_uevent_env *env) { struct rc_dev *dev = to_rc_dev(device); + int ret = 0; - if (dev->rc_map.name) - ADD_HOTPLUG_VAR("NAME=%s", dev->rc_map.name); - if (dev->driver_name) - ADD_HOTPLUG_VAR("DRV_NAME=%s", dev->driver_name); - if (dev->device_name) - ADD_HOTPLUG_VAR("DEV_NAME=%s", dev->device_name); + mutex_lock(&dev->lock); - return 0; + if (!dev->registered) + ret = -ENODEV; + if (ret == 0 && dev->rc_map.name) + ret = add_uevent_var(env, "NAME=%s", dev->rc_map.name); + if (ret == 0 && dev->driver_name) + ret = add_uevent_var(env, "DRV_NAME=%s", dev->driver_name); + if (ret == 0 && dev->device_name) + ret = add_uevent_var(env, "DEV_NAME=%s", dev->device_name); + + mutex_unlock(&dev->lock); + + return ret; } /* @@ -1975,14 +1987,14 @@ void rc_unregister_device(struct rc_dev *dev) del_timer_sync(&dev->timer_keyup); del_timer_sync(&dev->timer_repeat); - rc_free_rx_device(dev); - mutex_lock(&dev->lock); if (dev->users && dev->close) dev->close(dev); dev->registered = false; mutex_unlock(&dev->lock); + rc_free_rx_device(dev); + /* * lirc device should be freed with dev->registered = false, so * that userspace polling will get notified. 
diff --git a/drivers/media/tuners/si2157.c b/drivers/media/tuners/si2157.c index e87040d6eca7..a39e1966816b 100644 --- a/drivers/media/tuners/si2157.c +++ b/drivers/media/tuners/si2157.c @@ -75,24 +75,23 @@ static int si2157_init(struct dvb_frontend *fe) struct si2157_cmd cmd; const struct firmware *fw; const char *fw_name; - unsigned int uitmp, chip_id; + unsigned int chip_id, xtal_trim; dev_dbg(&client->dev, "\n"); - /* Returned IF frequency is garbage when firmware is not running */ - memcpy(cmd.args, "\x15\x00\x06\x07", 4); + /* Try to get Xtal trim property, to verify tuner still running */ + memcpy(cmd.args, "\x15\x00\x04\x02", 4); cmd.wlen = 4; cmd.rlen = 4; ret = si2157_cmd_execute(client, &cmd); - if (ret) - goto err; - uitmp = cmd.args[2] << 0 | cmd.args[3] << 8; - dev_dbg(&client->dev, "if_frequency kHz=%u\n", uitmp); + xtal_trim = cmd.args[2] | (cmd.args[3] << 8); - if (uitmp == dev->if_frequency / 1000) + if (ret == 0 && xtal_trim < 16) goto warm; + dev->if_frequency = 0; /* we no longer know current tuner state */ + /* power up */ if (dev->chiptype == SI2157_CHIPTYPE_SI2146) { memcpy(cmd.args, "\xc0\x05\x01\x00\x00\x0b\x00\x00\x01", 9); diff --git a/drivers/media/usb/dvb-usb/Kconfig b/drivers/media/usb/dvb-usb/Kconfig index 1a3e5f965ae4..2d7a5c1c84af 100644 --- a/drivers/media/usb/dvb-usb/Kconfig +++ b/drivers/media/usb/dvb-usb/Kconfig @@ -150,6 +150,7 @@ config DVB_USB_CXUSB config DVB_USB_CXUSB_ANALOG bool "Analog support for the Conexant USB2.0 hybrid reference design" depends on DVB_USB_CXUSB && VIDEO_V4L2 + depends on VIDEO_V4L2=y || VIDEO_V4L2=DVB_USB_CXUSB select VIDEO_CX25840 select VIDEOBUF2_VMALLOC help diff --git a/drivers/media/usb/dvb-usb/dibusb-mb.c b/drivers/media/usb/dvb-usb/dibusb-mb.c index d4ea72bf09c5..5131c8d4c632 100644 --- a/drivers/media/usb/dvb-usb/dibusb-mb.c +++ b/drivers/media/usb/dvb-usb/dibusb-mb.c @@ -81,7 +81,7 @@ static int dibusb_tuner_probe_and_attach(struct dvb_usb_adapter *adap) if (i2c_transfer(&adap->dev->i2c_adap, msg, 2) != 2) { err("tuner i2c write failed."); - ret = -EREMOTEIO; + return -EREMOTEIO; } if (adap->fe_adap[0].fe->ops.i2c_gate_ctrl) diff --git a/drivers/media/usb/go7007/snd-go7007.c b/drivers/media/usb/go7007/snd-go7007.c index b05fa227ffb2..95756cbb722f 100644 --- a/drivers/media/usb/go7007/snd-go7007.c +++ b/drivers/media/usb/go7007/snd-go7007.c @@ -236,22 +236,18 @@ int go7007_snd_init(struct go7007 *go) gosnd->capturing = 0; ret = snd_card_new(go->dev, index[dev], id[dev], THIS_MODULE, 0, &gosnd->card); - if (ret < 0) { - kfree(gosnd); - return ret; - } + if (ret < 0) + goto free_snd; + ret = snd_device_new(gosnd->card, SNDRV_DEV_LOWLEVEL, go, &go7007_snd_device_ops); - if (ret < 0) { - kfree(gosnd); - return ret; - } + if (ret < 0) + goto free_card; + ret = snd_pcm_new(gosnd->card, "go7007", 0, 0, 1, &gosnd->pcm); - if (ret < 0) { - snd_card_free(gosnd->card); - kfree(gosnd); - return ret; - } + if (ret < 0) + goto free_card; + strscpy(gosnd->card->driver, "go7007", sizeof(gosnd->card->driver)); strscpy(gosnd->card->shortname, go->name, sizeof(gosnd->card->shortname)); strscpy(gosnd->card->longname, gosnd->card->shortname, @@ -262,11 +258,8 @@ int go7007_snd_init(struct go7007 *go) &go7007_snd_capture_ops); ret = snd_card_register(gosnd->card); - if (ret < 0) { - snd_card_free(gosnd->card); - kfree(gosnd); - return ret; - } + if (ret < 0) + goto free_card; gosnd->substream = NULL; go->snd_context = gosnd; @@ -274,6 +267,12 @@ int go7007_snd_init(struct go7007 *go) ++dev; return 0; + +free_card: + 
snd_card_free(gosnd->card); +free_snd: + kfree(gosnd); + return ret; } EXPORT_SYMBOL(go7007_snd_init); diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c index f0737c57ed5f..1491561d2e5c 100644 --- a/drivers/message/fusion/mptscsih.c +++ b/drivers/message/fusion/mptscsih.c @@ -118,8 +118,6 @@ int mptscsih_suspend(struct pci_dev *pdev, pm_message_t state); int mptscsih_resume(struct pci_dev *pdev); #endif -#define SNS_LEN(scp) SCSI_SENSE_BUFFERSIZE - /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /* @@ -2422,7 +2420,7 @@ mptscsih_copy_sense_data(struct scsi_cmnd *sc, MPT_SCSI_HOST *hd, MPT_FRAME_HDR /* Copy the sense received into the scsi command block. */ req_index = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx); sense_data = ((u8 *)ioc->sense_buf_pool + (req_index * MPT_SENSE_BUFFER_ALLOC)); - memcpy(sc->sense_buffer, sense_data, SNS_LEN(sc)); + memcpy(sc->sense_buffer, sense_data, MPT_SENSE_BUFFER_ALLOC); /* Log SMART data (asc = 0x5D, non-IM case only) if required. */ diff --git a/drivers/mfd/arizona-core.c b/drivers/mfd/arizona-core.c index 4a31907a4525..3ff872c205ee 100644 --- a/drivers/mfd/arizona-core.c +++ b/drivers/mfd/arizona-core.c @@ -1430,6 +1430,15 @@ err_irq: arizona_irq_exit(arizona); err_pm: pm_runtime_disable(arizona->dev); + + switch (arizona->pdata.clk32k_src) { + case ARIZONA_32KZ_MCLK1: + case ARIZONA_32KZ_MCLK2: + arizona_clk32k_disable(arizona); + break; + default: + break; + } err_reset: arizona_enable_reset(arizona); regulator_disable(arizona->dcvdd); @@ -1452,6 +1461,15 @@ int arizona_dev_exit(struct arizona *arizona) regulator_disable(arizona->dcvdd); regulator_put(arizona->dcvdd); + switch (arizona->pdata.clk32k_src) { + case ARIZONA_32KZ_MCLK1: + case ARIZONA_32KZ_MCLK2: + arizona_clk32k_disable(arizona); + break; + default: + break; + } + mfd_remove_devices(arizona->dev); arizona_free_irq(arizona, ARIZONA_IRQ_UNDERCLOCKED, arizona); arizona_free_irq(arizona, ARIZONA_IRQ_OVERCLOCKED, arizona); diff --git a/drivers/mfd/dln2.c b/drivers/mfd/dln2.c index 4faa8d2e5d04..707f4287ab4a 100644 --- a/drivers/mfd/dln2.c +++ b/drivers/mfd/dln2.c @@ -287,7 +287,11 @@ static void dln2_rx(struct urb *urb) len = urb->actual_length - sizeof(struct dln2_header); if (handle == DLN2_HANDLE_EVENT) { + unsigned long flags; + + spin_lock_irqsave(&dln2->event_cb_lock, flags); dln2_run_event_callbacks(dln2, id, echo, data, len); + spin_unlock_irqrestore(&dln2->event_cb_lock, flags); } else { /* URB will be re-submitted in _dln2_transfer (free_rx_slot) */ if (dln2_transfer_complete(dln2, urb, handle, echo)) diff --git a/drivers/mfd/intel-lpss-pci.c b/drivers/mfd/intel-lpss-pci.c index b33030e3385c..da91965b8f7b 100644 --- a/drivers/mfd/intel-lpss-pci.c +++ b/drivers/mfd/intel-lpss-pci.c @@ -196,6 +196,9 @@ static const struct pci_device_id intel_lpss_pci_ids[] = { { PCI_VDEVICE(INTEL, 0x1ac4), (kernel_ulong_t)&bxt_info }, { PCI_VDEVICE(INTEL, 0x1ac6), (kernel_ulong_t)&bxt_info }, { PCI_VDEVICE(INTEL, 0x1aee), (kernel_ulong_t)&bxt_uart_info }, + /* EBG */ + { PCI_VDEVICE(INTEL, 0x1bad), (kernel_ulong_t)&bxt_uart_info }, + { PCI_VDEVICE(INTEL, 0x1bae), (kernel_ulong_t)&bxt_uart_info }, /* GLK */ { PCI_VDEVICE(INTEL, 0x31ac), (kernel_ulong_t)&glk_i2c_info }, { PCI_VDEVICE(INTEL, 0x31ae), (kernel_ulong_t)&glk_i2c_info }, @@ -225,6 +228,22 @@ static const struct pci_device_id intel_lpss_pci_ids[] = { { PCI_VDEVICE(INTEL, 0x34ea), (kernel_ulong_t)&bxt_i2c_info }, { PCI_VDEVICE(INTEL, 0x34eb), (kernel_ulong_t)&bxt_i2c_info 
}, { PCI_VDEVICE(INTEL, 0x34fb), (kernel_ulong_t)&spt_info }, + /* TGL-H */ + { PCI_VDEVICE(INTEL, 0x43a7), (kernel_ulong_t)&bxt_uart_info }, + { PCI_VDEVICE(INTEL, 0x43a8), (kernel_ulong_t)&bxt_uart_info }, + { PCI_VDEVICE(INTEL, 0x43a9), (kernel_ulong_t)&bxt_uart_info }, + { PCI_VDEVICE(INTEL, 0x43aa), (kernel_ulong_t)&bxt_info }, + { PCI_VDEVICE(INTEL, 0x43ab), (kernel_ulong_t)&bxt_info }, + { PCI_VDEVICE(INTEL, 0x43ad), (kernel_ulong_t)&bxt_i2c_info }, + { PCI_VDEVICE(INTEL, 0x43ae), (kernel_ulong_t)&bxt_i2c_info }, + { PCI_VDEVICE(INTEL, 0x43d8), (kernel_ulong_t)&bxt_i2c_info }, + { PCI_VDEVICE(INTEL, 0x43da), (kernel_ulong_t)&bxt_uart_info }, + { PCI_VDEVICE(INTEL, 0x43e8), (kernel_ulong_t)&bxt_i2c_info }, + { PCI_VDEVICE(INTEL, 0x43e9), (kernel_ulong_t)&bxt_i2c_info }, + { PCI_VDEVICE(INTEL, 0x43ea), (kernel_ulong_t)&bxt_i2c_info }, + { PCI_VDEVICE(INTEL, 0x43eb), (kernel_ulong_t)&bxt_i2c_info }, + { PCI_VDEVICE(INTEL, 0x43fb), (kernel_ulong_t)&bxt_info }, + { PCI_VDEVICE(INTEL, 0x43fd), (kernel_ulong_t)&bxt_info }, /* EHL */ { PCI_VDEVICE(INTEL, 0x4b28), (kernel_ulong_t)&bxt_uart_info }, { PCI_VDEVICE(INTEL, 0x4b29), (kernel_ulong_t)&bxt_uart_info }, diff --git a/drivers/mfd/stmfx.c b/drivers/mfd/stmfx.c index 857991cb3cbb..711979afd90a 100644 --- a/drivers/mfd/stmfx.c +++ b/drivers/mfd/stmfx.c @@ -287,14 +287,21 @@ static int stmfx_irq_init(struct i2c_client *client) ret = regmap_write(stmfx->map, STMFX_REG_IRQ_OUT_PIN, irqoutpin); if (ret) - return ret; + goto irq_exit; ret = devm_request_threaded_irq(stmfx->dev, client->irq, NULL, stmfx_irq_handler, irqtrigger | IRQF_ONESHOT, "stmfx", stmfx); if (ret) - stmfx_irq_exit(client); + goto irq_exit; + + stmfx->irq = client->irq; + + return 0; + +irq_exit: + stmfx_irq_exit(client); return ret; } @@ -481,6 +488,8 @@ static int stmfx_suspend(struct device *dev) if (ret) return ret; + disable_irq(stmfx->irq); + if (stmfx->vdd) return regulator_disable(stmfx->vdd); @@ -501,6 +510,13 @@ static int stmfx_resume(struct device *dev) } } + /* Reset STMFX - supply has been stopped during suspend */ + ret = stmfx_chip_reset(stmfx); + if (ret) { + dev_err(stmfx->dev, "Failed to reset chip: %d\n", ret); + return ret; + } + ret = regmap_raw_write(stmfx->map, STMFX_REG_SYS_CTRL, &stmfx->bkp_sysctrl, sizeof(stmfx->bkp_sysctrl)); if (ret) @@ -517,6 +533,8 @@ static int stmfx_resume(struct device *dev) if (ret) return ret; + enable_irq(stmfx->irq); + return 0; } #endif diff --git a/drivers/mfd/wm8994-core.c b/drivers/mfd/wm8994-core.c index 1e9fe7d92597..737dede4a95c 100644 --- a/drivers/mfd/wm8994-core.c +++ b/drivers/mfd/wm8994-core.c @@ -690,3 +690,4 @@ module_i2c_driver(wm8994_i2c_driver); MODULE_DESCRIPTION("Core support for the WM8994 audio CODEC"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Mark Brown "); +MODULE_SOFTDEP("pre: wm8994_regulator"); diff --git a/drivers/misc/atmel-ssc.c b/drivers/misc/atmel-ssc.c index ab4144ea1f11..d6cd5537126c 100644 --- a/drivers/misc/atmel-ssc.c +++ b/drivers/misc/atmel-ssc.c @@ -10,7 +10,7 @@ #include #include #include -#include +#include #include #include #include @@ -20,7 +20,7 @@ #include "../../sound/soc/atmel/atmel_ssc_dai.h" /* Serialize access to ssc_list and user count */ -static DEFINE_SPINLOCK(user_lock); +static DEFINE_MUTEX(user_lock); static LIST_HEAD(ssc_list); struct ssc_device *ssc_request(unsigned int ssc_num) @@ -28,7 +28,7 @@ struct ssc_device *ssc_request(unsigned int ssc_num) int ssc_valid = 0; struct ssc_device *ssc; - spin_lock(&user_lock); + mutex_lock(&user_lock); list_for_each_entry(ssc, 
&ssc_list, list) { if (ssc->pdev->dev.of_node) { if (of_alias_get_id(ssc->pdev->dev.of_node, "ssc") @@ -44,18 +44,18 @@ struct ssc_device *ssc_request(unsigned int ssc_num) } if (!ssc_valid) { - spin_unlock(&user_lock); + mutex_unlock(&user_lock); pr_err("ssc: ssc%d platform device is missing\n", ssc_num); return ERR_PTR(-ENODEV); } if (ssc->user) { - spin_unlock(&user_lock); + mutex_unlock(&user_lock); dev_dbg(&ssc->pdev->dev, "module busy\n"); return ERR_PTR(-EBUSY); } ssc->user++; - spin_unlock(&user_lock); + mutex_unlock(&user_lock); clk_prepare(ssc->clk); @@ -67,14 +67,14 @@ void ssc_free(struct ssc_device *ssc) { bool disable_clk = true; - spin_lock(&user_lock); + mutex_lock(&user_lock); if (ssc->user) ssc->user--; else { disable_clk = false; dev_dbg(&ssc->pdev->dev, "device already free\n"); } - spin_unlock(&user_lock); + mutex_unlock(&user_lock); if (disable_clk) clk_unprepare(ssc->clk); @@ -237,9 +237,9 @@ static int ssc_probe(struct platform_device *pdev) return -ENXIO; } - spin_lock(&user_lock); + mutex_lock(&user_lock); list_add_tail(&ssc->list, &ssc_list); - spin_unlock(&user_lock); + mutex_unlock(&user_lock); platform_set_drvdata(pdev, ssc); @@ -258,9 +258,9 @@ static int ssc_remove(struct platform_device *pdev) ssc_sound_dai_remove(ssc); - spin_lock(&user_lock); + mutex_lock(&user_lock); list_del(&ssc->list); - spin_unlock(&user_lock); + mutex_unlock(&user_lock); return 0; } diff --git a/drivers/misc/cxl/sysfs.c b/drivers/misc/cxl/sysfs.c index f0263d1a1fdf..d97a243ad30c 100644 --- a/drivers/misc/cxl/sysfs.c +++ b/drivers/misc/cxl/sysfs.c @@ -624,7 +624,7 @@ static struct afu_config_record *cxl_sysfs_afu_new_cr(struct cxl_afu *afu, int c rc = kobject_init_and_add(&cr->kobj, &afu_config_record_type, &afu->dev.kobj, "cr%i", cr->cr); if (rc) - goto err; + goto err1; rc = sysfs_create_bin_file(&cr->kobj, &cr->config_attr); if (rc) diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c index 842f2210dc7e..3a5d2890fe2a 100644 --- a/drivers/misc/fastrpc.c +++ b/drivers/misc/fastrpc.c @@ -886,6 +886,7 @@ static int fastrpc_invoke_send(struct fastrpc_session_ctx *sctx, struct fastrpc_channel_ctx *cctx; struct fastrpc_user *fl = ctx->fl; struct fastrpc_msg *msg = &ctx->msg; + int ret; cctx = fl->cctx; msg->pid = fl->tgid; @@ -901,7 +902,13 @@ static int fastrpc_invoke_send(struct fastrpc_session_ctx *sctx, msg->size = roundup(ctx->msg_sz, PAGE_SIZE); fastrpc_context_get(ctx); - return rpmsg_send(cctx->rpdev->ept, (void *)msg, sizeof(*msg)); + ret = rpmsg_send(cctx->rpdev->ept, (void *)msg, sizeof(*msg)); + + if (ret) + fastrpc_context_put(ctx); + + return ret; + } static int fastrpc_internal_invoke(struct fastrpc_user *fl, u32 kernel, @@ -1434,8 +1441,10 @@ static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev) domains[domain_id]); data->miscdev.fops = &fastrpc_fops; err = misc_register(&data->miscdev); - if (err) + if (err) { + kfree(data); return err; + } kref_init(&data->refcount); diff --git a/drivers/misc/habanalabs/firmware_if.c b/drivers/misc/habanalabs/firmware_if.c index ea2ca67fbfbf..153858475abc 100644 --- a/drivers/misc/habanalabs/firmware_if.c +++ b/drivers/misc/habanalabs/firmware_if.c @@ -11,6 +11,7 @@ #include #include +#define FW_FILE_MAX_SIZE 0x1400000 /* maximum size of 20MB */ /** * hl_fw_push_fw_to_device() - Push FW code to device. * @hdev: pointer to hl_device structure. 
@@ -43,6 +44,14 @@ int hl_fw_push_fw_to_device(struct hl_device *hdev, const char *fw_name, dev_dbg(hdev->dev, "%s firmware size == %zu\n", fw_name, fw_size); + if (fw_size > FW_FILE_MAX_SIZE) { + dev_err(hdev->dev, + "FW file size %zu exceeds maximum of %u bytes\n", + fw_size, FW_FILE_MAX_SIZE); + rc = -EINVAL; + goto out; + } + fw_data = (const u64 *) fw->data; memcpy_toio(dst, fw_data, fw_size); diff --git a/drivers/misc/habanalabs/goya/goya_security.c b/drivers/misc/habanalabs/goya/goya_security.c index d6ec12b3e692..08fc89ea0a0c 100644 --- a/drivers/misc/habanalabs/goya/goya_security.c +++ b/drivers/misc/habanalabs/goya/goya_security.c @@ -695,7 +695,6 @@ static void goya_init_tpc_protection_bits(struct hl_device *hdev) mask |= 1 << ((mmTPC0_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2); mask |= 1 << ((mmTPC0_CFG_SM_BASE_ADDRESS_LOW & 0x7F) >> 2); mask |= 1 << ((mmTPC0_CFG_SM_BASE_ADDRESS_HIGH & 0x7F) >> 2); - mask |= 1 << ((mmTPC0_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2); mask |= 1 << ((mmTPC0_CFG_TPC_STALL & 0x7F) >> 2); mask |= 1 << ((mmTPC0_CFG_MSS_CONFIG & 0x7F) >> 2); mask |= 1 << ((mmTPC0_CFG_TPC_INTR_CAUSE & 0x7F) >> 2); @@ -875,6 +874,16 @@ static void goya_init_tpc_protection_bits(struct hl_device *hdev) goya_pb_set_block(hdev, mmTPC1_RD_REGULATOR_BASE); goya_pb_set_block(hdev, mmTPC1_WR_REGULATOR_BASE); + pb_addr = (mmTPC1_CFG_SEMAPHORE & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC1_CFG_SEMAPHORE & PROT_BITS_OFFS) >> 7) << 2; + + mask = 1 << ((mmTPC1_CFG_SEMAPHORE & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CFG_VFLAGS & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CFG_SFLAGS & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CFG_STATUS & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + pb_addr = (mmTPC1_CFG_CFG_BASE_ADDRESS_HIGH & ~0xFFF) + PROT_BITS_OFFS; word_offset = ((mmTPC1_CFG_CFG_BASE_ADDRESS_HIGH & PROT_BITS_OFFS) >> 7) << 2; @@ -882,6 +891,10 @@ static void goya_init_tpc_protection_bits(struct hl_device *hdev) mask |= 1 << ((mmTPC1_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2); mask |= 1 << ((mmTPC1_CFG_SM_BASE_ADDRESS_LOW & 0x7F) >> 2); mask |= 1 << ((mmTPC1_CFG_SM_BASE_ADDRESS_HIGH & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CFG_TPC_STALL & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CFG_MSS_CONFIG & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CFG_TPC_INTR_CAUSE & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CFG_TPC_INTR_MASK & 0x7F) >> 2); WREG32(pb_addr + word_offset, ~mask); @@ -1057,6 +1070,16 @@ static void goya_init_tpc_protection_bits(struct hl_device *hdev) goya_pb_set_block(hdev, mmTPC2_RD_REGULATOR_BASE); goya_pb_set_block(hdev, mmTPC2_WR_REGULATOR_BASE); + pb_addr = (mmTPC2_CFG_SEMAPHORE & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC2_CFG_SEMAPHORE & PROT_BITS_OFFS) >> 7) << 2; + + mask = 1 << ((mmTPC2_CFG_SEMAPHORE & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CFG_VFLAGS & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CFG_SFLAGS & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CFG_STATUS & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + pb_addr = (mmTPC2_CFG_CFG_BASE_ADDRESS_HIGH & ~0xFFF) + PROT_BITS_OFFS; word_offset = ((mmTPC2_CFG_CFG_BASE_ADDRESS_HIGH & PROT_BITS_OFFS) >> 7) << 2; @@ -1064,6 +1087,10 @@ static void goya_init_tpc_protection_bits(struct hl_device *hdev) mask |= 1 << ((mmTPC2_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2); mask |= 1 << ((mmTPC2_CFG_SM_BASE_ADDRESS_LOW & 0x7F) >> 2); mask |= 1 << ((mmTPC2_CFG_SM_BASE_ADDRESS_HIGH & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CFG_TPC_STALL & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CFG_MSS_CONFIG & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CFG_TPC_INTR_CAUSE & 0x7F) >> 2); + mask 
|= 1 << ((mmTPC2_CFG_TPC_INTR_MASK & 0x7F) >> 2); WREG32(pb_addr + word_offset, ~mask); @@ -1239,6 +1266,16 @@ static void goya_init_tpc_protection_bits(struct hl_device *hdev) goya_pb_set_block(hdev, mmTPC3_RD_REGULATOR_BASE); goya_pb_set_block(hdev, mmTPC3_WR_REGULATOR_BASE); + pb_addr = (mmTPC3_CFG_SEMAPHORE & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC3_CFG_SEMAPHORE & PROT_BITS_OFFS) >> 7) << 2; + + mask = 1 << ((mmTPC3_CFG_SEMAPHORE & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CFG_VFLAGS & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CFG_SFLAGS & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CFG_STATUS & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + pb_addr = (mmTPC3_CFG_CFG_BASE_ADDRESS_HIGH & ~0xFFF) + PROT_BITS_OFFS; word_offset = ((mmTPC3_CFG_CFG_BASE_ADDRESS_HIGH & PROT_BITS_OFFS) >> 7) << 2; @@ -1246,6 +1283,10 @@ static void goya_init_tpc_protection_bits(struct hl_device *hdev) mask |= 1 << ((mmTPC3_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2); mask |= 1 << ((mmTPC3_CFG_SM_BASE_ADDRESS_LOW & 0x7F) >> 2); mask |= 1 << ((mmTPC3_CFG_SM_BASE_ADDRESS_HIGH & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CFG_TPC_STALL & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CFG_MSS_CONFIG & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CFG_TPC_INTR_CAUSE & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CFG_TPC_INTR_MASK & 0x7F) >> 2); WREG32(pb_addr + word_offset, ~mask); @@ -1421,6 +1462,16 @@ static void goya_init_tpc_protection_bits(struct hl_device *hdev) goya_pb_set_block(hdev, mmTPC4_RD_REGULATOR_BASE); goya_pb_set_block(hdev, mmTPC4_WR_REGULATOR_BASE); + pb_addr = (mmTPC4_CFG_SEMAPHORE & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC4_CFG_SEMAPHORE & PROT_BITS_OFFS) >> 7) << 2; + + mask = 1 << ((mmTPC4_CFG_SEMAPHORE & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CFG_VFLAGS & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CFG_SFLAGS & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CFG_STATUS & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + pb_addr = (mmTPC4_CFG_CFG_BASE_ADDRESS_HIGH & ~0xFFF) + PROT_BITS_OFFS; word_offset = ((mmTPC4_CFG_CFG_BASE_ADDRESS_HIGH & PROT_BITS_OFFS) >> 7) << 2; @@ -1428,6 +1479,10 @@ static void goya_init_tpc_protection_bits(struct hl_device *hdev) mask |= 1 << ((mmTPC4_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2); mask |= 1 << ((mmTPC4_CFG_SM_BASE_ADDRESS_LOW & 0x7F) >> 2); mask |= 1 << ((mmTPC4_CFG_SM_BASE_ADDRESS_HIGH & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CFG_TPC_STALL & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CFG_MSS_CONFIG & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CFG_TPC_INTR_CAUSE & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CFG_TPC_INTR_MASK & 0x7F) >> 2); WREG32(pb_addr + word_offset, ~mask); @@ -1603,6 +1658,16 @@ static void goya_init_tpc_protection_bits(struct hl_device *hdev) goya_pb_set_block(hdev, mmTPC5_RD_REGULATOR_BASE); goya_pb_set_block(hdev, mmTPC5_WR_REGULATOR_BASE); + pb_addr = (mmTPC5_CFG_SEMAPHORE & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC5_CFG_SEMAPHORE & PROT_BITS_OFFS) >> 7) << 2; + + mask = 1 << ((mmTPC5_CFG_SEMAPHORE & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CFG_VFLAGS & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CFG_SFLAGS & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CFG_STATUS & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + pb_addr = (mmTPC5_CFG_CFG_BASE_ADDRESS_HIGH & ~0xFFF) + PROT_BITS_OFFS; word_offset = ((mmTPC5_CFG_CFG_BASE_ADDRESS_HIGH & PROT_BITS_OFFS) >> 7) << 2; @@ -1610,6 +1675,10 @@ static void goya_init_tpc_protection_bits(struct hl_device *hdev) mask |= 1 << ((mmTPC5_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2); mask |= 1 << ((mmTPC5_CFG_SM_BASE_ADDRESS_LOW & 0x7F) >> 2); mask |= 1 << 
((mmTPC5_CFG_SM_BASE_ADDRESS_HIGH & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CFG_TPC_STALL & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CFG_MSS_CONFIG & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CFG_TPC_INTR_CAUSE & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CFG_TPC_INTR_MASK & 0x7F) >> 2); WREG32(pb_addr + word_offset, ~mask); @@ -1785,6 +1854,16 @@ static void goya_init_tpc_protection_bits(struct hl_device *hdev) goya_pb_set_block(hdev, mmTPC6_RD_REGULATOR_BASE); goya_pb_set_block(hdev, mmTPC6_WR_REGULATOR_BASE); + pb_addr = (mmTPC6_CFG_SEMAPHORE & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC6_CFG_SEMAPHORE & PROT_BITS_OFFS) >> 7) << 2; + + mask = 1 << ((mmTPC6_CFG_SEMAPHORE & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CFG_VFLAGS & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CFG_SFLAGS & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CFG_STATUS & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + pb_addr = (mmTPC6_CFG_CFG_BASE_ADDRESS_HIGH & ~0xFFF) + PROT_BITS_OFFS; word_offset = ((mmTPC6_CFG_CFG_BASE_ADDRESS_HIGH & PROT_BITS_OFFS) >> 7) << 2; @@ -1792,6 +1871,10 @@ static void goya_init_tpc_protection_bits(struct hl_device *hdev) mask |= 1 << ((mmTPC6_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2); mask |= 1 << ((mmTPC6_CFG_SM_BASE_ADDRESS_LOW & 0x7F) >> 2); mask |= 1 << ((mmTPC6_CFG_SM_BASE_ADDRESS_HIGH & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CFG_TPC_STALL & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CFG_MSS_CONFIG & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CFG_TPC_INTR_CAUSE & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CFG_TPC_INTR_MASK & 0x7F) >> 2); WREG32(pb_addr + word_offset, ~mask); @@ -1967,6 +2050,16 @@ static void goya_init_tpc_protection_bits(struct hl_device *hdev) goya_pb_set_block(hdev, mmTPC7_RD_REGULATOR_BASE); goya_pb_set_block(hdev, mmTPC7_WR_REGULATOR_BASE); + pb_addr = (mmTPC7_CFG_SEMAPHORE & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC7_CFG_SEMAPHORE & PROT_BITS_OFFS) >> 7) << 2; + + mask = 1 << ((mmTPC7_CFG_SEMAPHORE & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_CFG_VFLAGS & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_CFG_SFLAGS & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_CFG_STATUS & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + pb_addr = (mmTPC7_CFG_CFG_BASE_ADDRESS_HIGH & ~0xFFF) + PROT_BITS_OFFS; word_offset = ((mmTPC7_CFG_CFG_BASE_ADDRESS_HIGH & PROT_BITS_OFFS) >> 7) << 2; @@ -1974,6 +2067,10 @@ static void goya_init_tpc_protection_bits(struct hl_device *hdev) mask |= 1 << ((mmTPC7_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2); mask |= 1 << ((mmTPC7_CFG_SM_BASE_ADDRESS_LOW & 0x7F) >> 2); mask |= 1 << ((mmTPC7_CFG_SM_BASE_ADDRESS_HIGH & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_CFG_TPC_STALL & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_CFG_MSS_CONFIG & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_CFG_TPC_INTR_CAUSE & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_CFG_TPC_INTR_MASK & 0x7F) >> 2); WREG32(pb_addr + word_offset, ~mask); diff --git a/drivers/misc/habanalabs/habanalabs.h b/drivers/misc/habanalabs/habanalabs.h index 75862be53c60..30addffd76f5 100644 --- a/drivers/misc/habanalabs/habanalabs.h +++ b/drivers/misc/habanalabs/habanalabs.h @@ -23,7 +23,7 @@ #define HL_MMAP_CB_MASK (0x8000000000000000ull >> PAGE_SHIFT) -#define HL_PENDING_RESET_PER_SEC 5 +#define HL_PENDING_RESET_PER_SEC 30 #define HL_DEVICE_TIMEOUT_USEC 1000000 /* 1 s */ diff --git a/drivers/misc/habanalabs/memory.c b/drivers/misc/habanalabs/memory.c index 22566b75ca50..acfccf32be6b 100644 --- a/drivers/misc/habanalabs/memory.c +++ b/drivers/misc/habanalabs/memory.c @@ -67,6 +67,11 @@ static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args, num_pgs = 
(args->alloc.mem_size + (page_size - 1)) >> page_shift; total_size = num_pgs << page_shift; + if (!total_size) { + dev_err(hdev->dev, "Cannot allocate 0 bytes\n"); + return -EINVAL; + } + contiguous = args->flags & HL_MEM_CONTIGUOUS; if (contiguous) { @@ -94,7 +99,7 @@ static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args, phys_pg_pack->contiguous = contiguous; phys_pg_pack->pages = kvmalloc_array(num_pgs, sizeof(u64), GFP_KERNEL); - if (!phys_pg_pack->pages) { + if (ZERO_OR_NULL_PTR(phys_pg_pack->pages)) { rc = -ENOMEM; goto pages_arr_err; } @@ -689,7 +694,7 @@ static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx, phys_pg_pack->pages = kvmalloc_array(total_npages, sizeof(u64), GFP_KERNEL); - if (!phys_pg_pack->pages) { + if (ZERO_OR_NULL_PTR(phys_pg_pack->pages)) { rc = -ENOMEM; goto page_pack_arr_mem_err; } diff --git a/drivers/misc/habanalabs/mmu.c b/drivers/misc/habanalabs/mmu.c index 176c315836f1..d66e16de4cda 100644 --- a/drivers/misc/habanalabs/mmu.c +++ b/drivers/misc/habanalabs/mmu.c @@ -422,7 +422,7 @@ int hl_mmu_init(struct hl_device *hdev) hdev->mmu_shadow_hop0 = kvmalloc_array(prop->max_asid, prop->mmu_hop_table_size, GFP_KERNEL | __GFP_ZERO); - if (!hdev->mmu_shadow_hop0) { + if (ZERO_OR_NULL_PTR(hdev->mmu_shadow_hop0)) { rc = -ENOMEM; goto err_pool_add; } diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c index 53bb394ccba6..cef97a7eb8b6 100644 --- a/drivers/misc/mei/bus.c +++ b/drivers/misc/mei/bus.c @@ -745,9 +745,8 @@ static int mei_cl_device_remove(struct device *dev) mei_cl_bus_module_put(cldev); module_put(THIS_MODULE); - dev->driver = NULL; - return ret; + return ret; } static ssize_t name_show(struct device *dev, struct device_attribute *a, diff --git a/drivers/misc/xilinx_sdfec.c b/drivers/misc/xilinx_sdfec.c index 48ba7e02bed7..d4c14b617201 100644 --- a/drivers/misc/xilinx_sdfec.c +++ b/drivers/misc/xilinx_sdfec.c @@ -602,10 +602,10 @@ static int xsdfec_table_write(struct xsdfec_dev *xsdfec, u32 offset, const u32 depth) { u32 reg = 0; - u32 res; - u32 n, i; + int res, i, nr_pages; + u32 n; u32 *addr = NULL; - struct page *page[MAX_NUM_PAGES]; + struct page *pages[MAX_NUM_PAGES]; /* * Writes that go beyond the length of @@ -622,15 +622,22 @@ static int xsdfec_table_write(struct xsdfec_dev *xsdfec, u32 offset, if ((len * XSDFEC_REG_WIDTH_JUMP) % PAGE_SIZE) n += 1; - res = get_user_pages_fast((unsigned long)src_ptr, n, 0, page); - if (res < n) { - for (i = 0; i < res; i++) - put_page(page[i]); + if (WARN_ON_ONCE(n > INT_MAX)) + return -EINVAL; + + nr_pages = n; + + res = get_user_pages_fast((unsigned long)src_ptr, nr_pages, 0, pages); + if (res < nr_pages) { + if (res > 0) { + for (i = 0; i < res; i++) + put_page(pages[i]); + } return -EINVAL; } - for (i = 0; i < n; i++) { - addr = kmap(page[i]); + for (i = 0; i < nr_pages; i++) { + addr = kmap(pages[i]); do { xsdfec_regwrite(xsdfec, base_addr + ((offset + reg) * @@ -639,7 +646,7 @@ static int xsdfec_table_write(struct xsdfec_dev *xsdfec, u32 offset, reg++; } while ((reg < len) && ((reg * XSDFEC_REG_WIDTH_JUMP) % PAGE_SIZE)); - put_page(page[i]); + put_page(pages[i]); } return reg; } diff --git a/drivers/mmc/core/sdio_ops.c b/drivers/mmc/core/sdio_ops.c index 93d346c01110..4c229dd2b6e5 100644 --- a/drivers/mmc/core/sdio_ops.c +++ b/drivers/mmc/core/sdio_ops.c @@ -121,6 +121,7 @@ int mmc_io_rw_extended(struct mmc_card *card, int write, unsigned fn, struct sg_table sgtable; unsigned int nents, left_size, i; unsigned int seg_size = card->host->max_seg_size; + int err; WARN_ON(blksz 
== 0); @@ -170,28 +171,32 @@ int mmc_io_rw_extended(struct mmc_card *card, int write, unsigned fn, mmc_set_data_timeout(&data, card); + mmc_pre_req(card->host, &mrq); + mmc_wait_for_req(card->host, &mrq); + if (cmd.error) + err = cmd.error; + else if (data.error) + err = data.error; + else if (mmc_host_is_spi(card->host)) + /* host driver already reported errors */ + err = 0; + else if (cmd.resp[0] & R5_ERROR) + err = -EIO; + else if (cmd.resp[0] & R5_FUNCTION_NUMBER) + err = -EINVAL; + else if (cmd.resp[0] & R5_OUT_OF_RANGE) + err = -ERANGE; + else + err = 0; + + mmc_post_req(card->host, &mrq, err); + if (nents > 1) sg_free_table(&sgtable); - if (cmd.error) - return cmd.error; - if (data.error) - return data.error; - - if (mmc_host_is_spi(card->host)) { - /* host driver already reported errors */ - } else { - if (cmd.resp[0] & R5_ERROR) - return -EIO; - if (cmd.resp[0] & R5_FUNCTION_NUMBER) - return -EINVAL; - if (cmd.resp[0] & R5_OUT_OF_RANGE) - return -ERANGE; - } - - return 0; + return err; } int sdio_reset(struct mmc_host *host) diff --git a/drivers/mmc/host/cqhci.c b/drivers/mmc/host/cqhci.c index c19f4c3f115a..2d65b32d205a 100644 --- a/drivers/mmc/host/cqhci.c +++ b/drivers/mmc/host/cqhci.c @@ -299,16 +299,16 @@ static void __cqhci_disable(struct cqhci_host *cq_host) cq_host->activated = false; } -int cqhci_suspend(struct mmc_host *mmc) +int cqhci_deactivate(struct mmc_host *mmc) { struct cqhci_host *cq_host = mmc->cqe_private; - if (cq_host->enabled) + if (cq_host->enabled && cq_host->activated) __cqhci_disable(cq_host); return 0; } -EXPORT_SYMBOL(cqhci_suspend); +EXPORT_SYMBOL(cqhci_deactivate); int cqhci_resume(struct mmc_host *mmc) { diff --git a/drivers/mmc/host/cqhci.h b/drivers/mmc/host/cqhci.h index def76e9b5cac..437700179de4 100644 --- a/drivers/mmc/host/cqhci.h +++ b/drivers/mmc/host/cqhci.h @@ -230,7 +230,11 @@ irqreturn_t cqhci_irq(struct mmc_host *mmc, u32 intmask, int cmd_error, int data_error); int cqhci_init(struct cqhci_host *cq_host, struct mmc_host *mmc, bool dma64); struct cqhci_host *cqhci_pltfm_init(struct platform_device *pdev); -int cqhci_suspend(struct mmc_host *mmc); +int cqhci_deactivate(struct mmc_host *mmc); +static inline int cqhci_suspend(struct mmc_host *mmc) +{ + return cqhci_deactivate(mmc); +} int cqhci_resume(struct mmc_host *mmc); #endif diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c index e712315c7e8d..545c3f2f8a06 100644 --- a/drivers/mmc/host/meson-gx-mmc.c +++ b/drivers/mmc/host/meson-gx-mmc.c @@ -1151,9 +1151,11 @@ static int meson_mmc_probe(struct platform_device *pdev) mmc->caps |= MMC_CAP_CMD23; if (host->dram_access_quirk) { + /* Limit segments to 1 due to low available sram memory */ + mmc->max_segs = 1; /* Limit to the available sram memory */ - mmc->max_segs = SD_EMMC_SRAM_DATA_BUF_LEN / mmc->max_blk_size; - mmc->max_blk_count = mmc->max_segs; + mmc->max_blk_count = SD_EMMC_SRAM_DATA_BUF_LEN / + mmc->max_blk_size; } else { mmc->max_blk_count = CMD_CFG_LENGTH_MASK; mmc->max_segs = SD_EMMC_DESC_BUF_LEN / diff --git a/drivers/mmc/host/meson-mx-sdio.c b/drivers/mmc/host/meson-mx-sdio.c index 999214e8cf2b..360d523132bd 100644 --- a/drivers/mmc/host/meson-mx-sdio.c +++ b/drivers/mmc/host/meson-mx-sdio.c @@ -246,6 +246,9 @@ static void meson_mx_mmc_request_done(struct meson_mx_mmc_host *host) mrq = host->mrq; + if (host->cmd->error) + meson_mx_mmc_soft_reset(host); + host->mrq = NULL; host->cmd = NULL; diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c index c37e70dbe250..7e4bc9124efd 100644 --- 
a/drivers/mmc/host/mmci.c +++ b/drivers/mmc/host/mmci.c @@ -168,6 +168,8 @@ static struct variant_data variant_ux500 = { .cmdreg_srsp = MCI_CPSM_RESPONSE, .datalength_bits = 24, .datactrl_blocksz = 11, + .datactrl_any_blocksz = true, + .dma_power_of_2 = true, .datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN, .st_sdio = true, .st_clkdiv = true, @@ -201,6 +203,8 @@ static struct variant_data variant_ux500v2 = { .datactrl_mask_ddrmode = MCI_DPSM_ST_DDRMODE, .datalength_bits = 24, .datactrl_blocksz = 11, + .datactrl_any_blocksz = true, + .dma_power_of_2 = true, .datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN, .st_sdio = true, .st_clkdiv = true, @@ -260,6 +264,7 @@ static struct variant_data variant_stm32_sdmmc = { .datacnt_useless = true, .datalength_bits = 25, .datactrl_blocksz = 14, + .datactrl_any_blocksz = true, .stm32_idmabsize_mask = GENMASK(12, 5), .init = sdmmc_variant_init, }; @@ -279,6 +284,7 @@ static struct variant_data variant_qcom = { .data_cmd_enable = MCI_CPSM_QCOM_DATCMD, .datalength_bits = 24, .datactrl_blocksz = 11, + .datactrl_any_blocksz = true, .pwrreg_powerup = MCI_PWR_UP, .f_max = 208000000, .explicit_mclk_control = true, @@ -447,10 +453,11 @@ void mmci_dma_setup(struct mmci_host *host) static int mmci_validate_data(struct mmci_host *host, struct mmc_data *data) { + struct variant_data *variant = host->variant; + if (!data) return 0; - - if (!is_power_of_2(data->blksz)) { + if (!is_power_of_2(data->blksz) && !variant->datactrl_any_blocksz) { dev_err(mmc_dev(host->mmc), "unsupported block size (%d bytes)\n", data->blksz); return -EINVAL; @@ -515,7 +522,9 @@ int mmci_dma_start(struct mmci_host *host, unsigned int datactrl) "Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n", data->sg_len, data->blksz, data->blocks, data->flags); - host->ops->dma_start(host, &datactrl); + ret = host->ops->dma_start(host, &datactrl); + if (ret) + return ret; /* Trigger the DMA transfer */ mmci_write_datactrlreg(host, datactrl); @@ -822,6 +831,18 @@ static int _mmci_dmae_prep_data(struct mmci_host *host, struct mmc_data *data, if (data->blksz * data->blocks <= variant->fifosize) return -EINVAL; + /* + * This is necessary to get SDIO working on the Ux500. We do not yet + * know if this is a bug in: + * - The Ux500 DMA controller (DMA40) + * - The MMCI DMA interface on the Ux500 + * some power of two blocks (such as 64 bytes) are sent regularly + * during SDIO traffic and those work fine so for these we enable DMA + * transfers. + */ + if (host->variant->dma_power_of_2 && !is_power_of_2(data->blksz)) + return -EINVAL; + device = chan->device; nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len, mmc_get_dma_dir(data)); @@ -872,9 +893,14 @@ int mmci_dmae_prep_data(struct mmci_host *host, int mmci_dmae_start(struct mmci_host *host, unsigned int *datactrl) { struct mmci_dmae_priv *dmae = host->dma_priv; + int ret; host->dma_in_progress = true; - dmaengine_submit(dmae->desc_current); + ret = dma_submit_error(dmaengine_submit(dmae->desc_current)); + if (ret < 0) { + host->dma_in_progress = false; + return ret; + } dma_async_issue_pending(dmae->cur); *datactrl |= MCI_DPSM_DMAENABLE; diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h index 833236ecb31e..89ab73343cf3 100644 --- a/drivers/mmc/host/mmci.h +++ b/drivers/mmc/host/mmci.h @@ -278,7 +278,11 @@ struct mmci_host; * @stm32_clkdiv: true if using a STM32-specific clock divider algorithm * @datactrl_mask_ddrmode: ddr mode mask in datactrl register. 
* @datactrl_mask_sdio: SDIO enable mask in datactrl register - * @datactrl_blksz: block size in power of two + * @datactrl_blocksz: block size in power of two + * @datactrl_any_blocksz: true if any block sizes are accepted by + * hardware, such as with some SDIO traffic that sends + * odd packets. + * @dma_power_of_2: DMA only works with blocks that are a power of 2. * @datactrl_first: true if data must be setup before send command * @datacnt_useless: true if you could not use datacnt register to read * remaining data @@ -323,6 +327,8 @@ struct variant_data { unsigned int datactrl_mask_ddrmode; unsigned int datactrl_mask_sdio; unsigned int datactrl_blocksz; + u8 datactrl_any_blocksz:1; + u8 dma_power_of_2:1; u8 datactrl_first:1; u8 datacnt_useless:1; u8 st_sdio:1; diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c index 010fe29a4888..9d47a2bd2546 100644 --- a/drivers/mmc/host/mtk-sd.c +++ b/drivers/mmc/host/mtk-sd.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include @@ -412,6 +413,7 @@ struct msdc_host { struct pinctrl_state *pins_uhs; struct delayed_work req_timeout; int irq; /* host interrupt */ + struct reset_control *reset; struct clk *src_clk; /* msdc source clock */ struct clk *h_clk; /* msdc h_clk */ @@ -1474,6 +1476,12 @@ static void msdc_init_hw(struct msdc_host *host) u32 val; u32 tune_reg = host->dev_comp->pad_tune_reg; + if (host->reset) { + reset_control_assert(host->reset); + usleep_range(10, 50); + reset_control_deassert(host->reset); + } + /* Configure to MMC/SD mode, clock free running */ sdr_set_bits(host->base + MSDC_CFG, MSDC_CFG_MODE | MSDC_CFG_CKPDN); @@ -2232,6 +2240,11 @@ static int msdc_drv_probe(struct platform_device *pdev) if (IS_ERR(host->src_clk_cg)) host->src_clk_cg = NULL; + host->reset = devm_reset_control_get_optional_exclusive(&pdev->dev, + "hrst"); + if (IS_ERR(host->reset)) + return PTR_ERR(host->reset); + host->irq = platform_get_irq(pdev, 0); if (host->irq < 0) { ret = -EINVAL; diff --git a/drivers/mmc/host/renesas_sdhi_internal_dmac.c b/drivers/mmc/host/renesas_sdhi_internal_dmac.c index a66f8d6d61d1..cb89f0578d42 100644 --- a/drivers/mmc/host/renesas_sdhi_internal_dmac.c +++ b/drivers/mmc/host/renesas_sdhi_internal_dmac.c @@ -229,15 +229,12 @@ static void renesas_sdhi_internal_dmac_issue_tasklet_fn(unsigned long arg) DTRAN_CTRL_DM_START); } -static void renesas_sdhi_internal_dmac_complete_tasklet_fn(unsigned long arg) +static bool renesas_sdhi_internal_dmac_complete(struct tmio_mmc_host *host) { - struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg; enum dma_data_direction dir; - spin_lock_irq(&host->lock); - if (!host->data) - goto out; + return false; if (host->data->flags & MMC_DATA_READ) dir = DMA_FROM_DEVICE; @@ -250,6 +247,17 @@ static void renesas_sdhi_internal_dmac_complete_tasklet_fn(unsigned long arg) if (dir == DMA_FROM_DEVICE) clear_bit(SDHI_INTERNAL_DMAC_RX_IN_USE, &global_flags); + return true; +} + +static void renesas_sdhi_internal_dmac_complete_tasklet_fn(unsigned long arg) +{ + struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg; + + spin_lock_irq(&host->lock); + if (!renesas_sdhi_internal_dmac_complete(host)) + goto out; + tmio_mmc_do_data_irq(host); out: spin_unlock_irq(&host->lock); diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c index 01fc437ed965..b2d924c5e82e 100644 --- a/drivers/mmc/host/sdhci-acpi.c +++ b/drivers/mmc/host/sdhci-acpi.c @@ -532,6 +532,11 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_qcom_sd = { .caps = 
MMC_CAP_NONREMOVABLE, }; +struct amd_sdhci_host { + bool tuned_clock; + bool dll_enabled; +}; + /* AMD sdhci reset dll register. */ #define SDHCI_AMD_RESET_DLL_REGISTER 0x908 @@ -542,39 +547,96 @@ static int amd_select_drive_strength(struct mmc_card *card, return MMC_SET_DRIVER_TYPE_A; } -static void sdhci_acpi_amd_hs400_dll(struct sdhci_host *host) +static void sdhci_acpi_amd_hs400_dll(struct sdhci_host *host, bool enable) { + struct sdhci_acpi_host *acpi_host = sdhci_priv(host); + struct amd_sdhci_host *amd_host = sdhci_acpi_priv(acpi_host); + /* AMD Platform requires dll setting */ sdhci_writel(host, 0x40003210, SDHCI_AMD_RESET_DLL_REGISTER); usleep_range(10, 20); - sdhci_writel(host, 0x40033210, SDHCI_AMD_RESET_DLL_REGISTER); + if (enable) + sdhci_writel(host, 0x40033210, SDHCI_AMD_RESET_DLL_REGISTER); + + amd_host->dll_enabled = enable; } /* - * For AMD Platform it is required to disable the tuning - * bit first controller to bring to HS Mode from HS200 - * mode, later enable to tune to HS400 mode. + * The initialization sequence for HS400 is: + * HS->HS200->Perform Tuning->HS->HS400 + * + * The re-tuning sequence is: + * HS400->DDR52->HS->HS200->Perform Tuning->HS->HS400 + * + * The AMD eMMC Controller can only use the tuned clock while in HS200 and HS400 + * mode. If we switch to a different mode, we need to disable the tuned clock. + * If we have previously performed tuning and switch back to HS200 or + * HS400, we can re-enable the tuned clock. + * */ static void amd_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) { struct sdhci_host *host = mmc_priv(mmc); + struct sdhci_acpi_host *acpi_host = sdhci_priv(host); + struct amd_sdhci_host *amd_host = sdhci_acpi_priv(acpi_host); unsigned int old_timing = host->timing; + u16 val; sdhci_set_ios(mmc, ios); - if (old_timing == MMC_TIMING_MMC_HS200 && - ios->timing == MMC_TIMING_MMC_HS) - sdhci_writew(host, 0x9, SDHCI_HOST_CONTROL2); - if (old_timing != MMC_TIMING_MMC_HS400 && - ios->timing == MMC_TIMING_MMC_HS400) { - sdhci_writew(host, 0x80, SDHCI_HOST_CONTROL2); - sdhci_acpi_amd_hs400_dll(host); + + if (old_timing != host->timing && amd_host->tuned_clock) { + if (host->timing == MMC_TIMING_MMC_HS400 || + host->timing == MMC_TIMING_MMC_HS200) { + val = sdhci_readw(host, SDHCI_HOST_CONTROL2); + val |= SDHCI_CTRL_TUNED_CLK; + sdhci_writew(host, val, SDHCI_HOST_CONTROL2); + } else { + val = sdhci_readw(host, SDHCI_HOST_CONTROL2); + val &= ~SDHCI_CTRL_TUNED_CLK; + sdhci_writew(host, val, SDHCI_HOST_CONTROL2); + } + + /* DLL is only required for HS400 */ + if (host->timing == MMC_TIMING_MMC_HS400 && + !amd_host->dll_enabled) + sdhci_acpi_amd_hs400_dll(host, true); } } +static int amd_sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode) +{ + int err; + struct sdhci_host *host = mmc_priv(mmc); + struct sdhci_acpi_host *acpi_host = sdhci_priv(host); + struct amd_sdhci_host *amd_host = sdhci_acpi_priv(acpi_host); + + amd_host->tuned_clock = false; + + err = sdhci_execute_tuning(mmc, opcode); + + if (!err && !host->tuning_err) + amd_host->tuned_clock = true; + + return err; +} + +static void amd_sdhci_reset(struct sdhci_host *host, u8 mask) +{ + struct sdhci_acpi_host *acpi_host = sdhci_priv(host); + struct amd_sdhci_host *amd_host = sdhci_acpi_priv(acpi_host); + + if (mask & SDHCI_RESET_ALL) { + amd_host->tuned_clock = false; + sdhci_acpi_amd_hs400_dll(host, false); + } + + sdhci_reset(host, mask); +} + static const struct sdhci_ops sdhci_acpi_ops_amd = { .set_clock = sdhci_set_clock, .set_bus_width = sdhci_set_bus_width, - .reset = 
sdhci_reset, + .reset = amd_sdhci_reset, .set_uhs_signaling = sdhci_set_uhs_signaling, }; @@ -598,6 +660,7 @@ static int sdhci_acpi_emmc_amd_probe_slot(struct platform_device *pdev, host->mmc_host_ops.select_drive_strength = amd_select_drive_strength; host->mmc_host_ops.set_ios = amd_set_ios; + host->mmc_host_ops.execute_tuning = amd_sdhci_execute_tuning; return 0; } @@ -609,6 +672,7 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_amd_emmc = { SDHCI_QUIRK_32BIT_ADMA_SIZE, .quirks2 = SDHCI_QUIRK2_BROKEN_64_BIT_DMA, .probe_slot = sdhci_acpi_emmc_amd_probe_slot, + .priv_size = sizeof(struct amd_sdhci_host), }; struct sdhci_acpi_uid_slot { diff --git a/drivers/mmc/host/sdhci-cadence.c b/drivers/mmc/host/sdhci-cadence.c index 5f2e9696ee4d..0c2489446bd7 100644 --- a/drivers/mmc/host/sdhci-cadence.c +++ b/drivers/mmc/host/sdhci-cadence.c @@ -194,6 +194,79 @@ static u32 sdhci_cdns_get_emmc_mode(struct sdhci_cdns_priv *priv) return FIELD_GET(SDHCI_CDNS_HRS06_MODE, tmp); } +static int sdhci_cdns_set_tune_val(struct sdhci_host *host, unsigned int val) +{ + struct sdhci_cdns_priv *priv = sdhci_cdns_priv(host); + void __iomem *reg = priv->hrs_addr + SDHCI_CDNS_HRS06; + u32 tmp; + int i, ret; + + if (WARN_ON(!FIELD_FIT(SDHCI_CDNS_HRS06_TUNE, val))) + return -EINVAL; + + tmp = readl(reg); + tmp &= ~SDHCI_CDNS_HRS06_TUNE; + tmp |= FIELD_PREP(SDHCI_CDNS_HRS06_TUNE, val); + + /* + * Workaround for IP errata: + * The IP6116 SD/eMMC PHY design has a timing issue on receive data + * path. Send tune request twice. + */ + for (i = 0; i < 2; i++) { + tmp |= SDHCI_CDNS_HRS06_TUNE_UP; + writel(tmp, reg); + + ret = readl_poll_timeout(reg, tmp, + !(tmp & SDHCI_CDNS_HRS06_TUNE_UP), + 0, 1); + if (ret) + return ret; + } + + return 0; +} + +/* + * In SD mode, software must not use the hardware tuning and instead perform + * an almost identical procedure to eMMC. + */ +static int sdhci_cdns_execute_tuning(struct sdhci_host *host, u32 opcode) +{ + int cur_streak = 0; + int max_streak = 0; + int end_of_streak = 0; + int i; + + /* + * Do not execute tuning for UHS_SDR50 or UHS_DDR50. + * The delay is set by probe, based on the DT properties. 
+ */ + if (host->timing != MMC_TIMING_MMC_HS200 && + host->timing != MMC_TIMING_UHS_SDR104) + return 0; + + for (i = 0; i < SDHCI_CDNS_MAX_TUNING_LOOP; i++) { + if (sdhci_cdns_set_tune_val(host, i) || + mmc_send_tuning(host->mmc, opcode, NULL)) { /* bad */ + cur_streak = 0; + } else { /* good */ + cur_streak++; + if (cur_streak > max_streak) { + max_streak = cur_streak; + end_of_streak = i; + } + } + } + + if (!max_streak) { + dev_err(mmc_dev(host->mmc), "no tuning point found\n"); + return -EIO; + } + + return sdhci_cdns_set_tune_val(host, end_of_streak - max_streak / 2); +} + static void sdhci_cdns_set_uhs_signaling(struct sdhci_host *host, unsigned int timing) { @@ -233,6 +306,7 @@ static const struct sdhci_ops sdhci_cdns_ops = { .get_timeout_clock = sdhci_cdns_get_timeout_clock, .set_bus_width = sdhci_set_bus_width, .reset = sdhci_reset, + .platform_execute_tuning = sdhci_cdns_execute_tuning, .set_uhs_signaling = sdhci_cdns_set_uhs_signaling, }; @@ -245,78 +319,6 @@ static const struct sdhci_pltfm_data sdhci_cdns_pltfm_data = { .ops = &sdhci_cdns_ops, }; -static int sdhci_cdns_set_tune_val(struct sdhci_host *host, unsigned int val) -{ - struct sdhci_cdns_priv *priv = sdhci_cdns_priv(host); - void __iomem *reg = priv->hrs_addr + SDHCI_CDNS_HRS06; - u32 tmp; - int i, ret; - - if (WARN_ON(!FIELD_FIT(SDHCI_CDNS_HRS06_TUNE, val))) - return -EINVAL; - - tmp = readl(reg); - tmp &= ~SDHCI_CDNS_HRS06_TUNE; - tmp |= FIELD_PREP(SDHCI_CDNS_HRS06_TUNE, val); - - /* - * Workaround for IP errata: - * The IP6116 SD/eMMC PHY design has a timing issue on receive data - * path. Send tune request twice. - */ - for (i = 0; i < 2; i++) { - tmp |= SDHCI_CDNS_HRS06_TUNE_UP; - writel(tmp, reg); - - ret = readl_poll_timeout(reg, tmp, - !(tmp & SDHCI_CDNS_HRS06_TUNE_UP), - 0, 1); - if (ret) - return ret; - } - - return 0; -} - -static int sdhci_cdns_execute_tuning(struct mmc_host *mmc, u32 opcode) -{ - struct sdhci_host *host = mmc_priv(mmc); - int cur_streak = 0; - int max_streak = 0; - int end_of_streak = 0; - int i; - - /* - * This handler only implements the eMMC tuning that is specific to - * this controller. Fall back to the standard method for SD timing. 
- */ - if (host->timing != MMC_TIMING_MMC_HS200) - return sdhci_execute_tuning(mmc, opcode); - - if (WARN_ON(opcode != MMC_SEND_TUNING_BLOCK_HS200)) - return -EINVAL; - - for (i = 0; i < SDHCI_CDNS_MAX_TUNING_LOOP; i++) { - if (sdhci_cdns_set_tune_val(host, i) || - mmc_send_tuning(host->mmc, opcode, NULL)) { /* bad */ - cur_streak = 0; - } else { /* good */ - cur_streak++; - if (cur_streak > max_streak) { - max_streak = cur_streak; - end_of_streak = i; - } - } - } - - if (!max_streak) { - dev_err(mmc_dev(host->mmc), "no tuning point found\n"); - return -EIO; - } - - return sdhci_cdns_set_tune_val(host, end_of_streak - max_streak / 2); -} - static void sdhci_cdns_hs400_enhanced_strobe(struct mmc_host *mmc, struct mmc_ios *ios) { @@ -377,7 +379,6 @@ static int sdhci_cdns_probe(struct platform_device *pdev) priv->hrs_addr = host->ioaddr; priv->enhanced_strobe = false; host->ioaddr += SDHCI_CDNS_SRS_BASE; - host->mmc_host_ops.execute_tuning = sdhci_cdns_execute_tuning; host->mmc_host_ops.hs400_enhanced_strobe = sdhci_cdns_hs400_enhanced_strobe; sdhci_enable_v4_mode(host); diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c index 8b2a6a362c60..8bed81cf03ad 100644 --- a/drivers/mmc/host/sdhci-msm.c +++ b/drivers/mmc/host/sdhci-msm.c @@ -1096,7 +1096,7 @@ static void sdhci_msm_set_cdr(struct sdhci_host *host, bool enable) static int sdhci_msm_execute_tuning(struct mmc_host *mmc, u32 opcode) { struct sdhci_host *host = mmc_priv(mmc); - int tuning_seq_cnt = 3; + int tuning_seq_cnt = 10; u8 phase, tuned_phases[16], tuned_phase_cnt = 0; int rc; struct mmc_ios ios = host->mmc->ios; @@ -1152,6 +1152,22 @@ retry: } while (++phase < ARRAY_SIZE(tuned_phases)); if (tuned_phase_cnt) { + if (tuned_phase_cnt == ARRAY_SIZE(tuned_phases)) { + /* + * All phases valid is _almost_ as bad as no phases + * valid. Probably all phases are not really reliable + * but we didn't detect where the unreliable place is. + * That means we'll essentially be guessing and hoping + * we get a good phase. Better to try a few times. 
+ */ + dev_dbg(mmc_dev(mmc), "%s: All phases valid; try again\n", + mmc_hostname(mmc)); + if (--tuning_seq_cnt) { + tuned_phase_cnt = 0; + goto retry; + } + } + rc = msm_find_most_appropriate_phase(host, tuned_phases, tuned_phase_cnt); if (rc < 0) @@ -1742,7 +1758,9 @@ static const struct sdhci_ops sdhci_msm_ops = { static const struct sdhci_pltfm_data sdhci_msm_pdata = { .quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION | SDHCI_QUIRK_SINGLE_POWER_WRITE | - SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN, + SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN | + SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12, + .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN, .ops = &sdhci_msm_ops, }; diff --git a/drivers/mmc/host/sdhci-of-aspeed.c b/drivers/mmc/host/sdhci-of-aspeed.c index 8962f6664381..47ddded57000 100644 --- a/drivers/mmc/host/sdhci-of-aspeed.c +++ b/drivers/mmc/host/sdhci-of-aspeed.c @@ -68,7 +68,7 @@ static void aspeed_sdhci_set_clock(struct sdhci_host *host, unsigned int clock) if (WARN_ON(clock > host->max_clk)) clock = host->max_clk; - for (div = 1; div < 256; div *= 2) { + for (div = 2; div < 256; div *= 2) { if ((parent / div) <= clock) break; } diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c index 1e52a100b620..561231a9809f 100644 --- a/drivers/mmc/host/sdhci-of-esdhc.c +++ b/drivers/mmc/host/sdhci-of-esdhc.c @@ -81,6 +81,7 @@ struct sdhci_esdhc { bool quirk_tuning_erratum_type2; bool quirk_ignore_data_inhibit; bool quirk_delay_before_data_reset; + bool quirk_trans_complete_erratum; bool in_sw_tuning; unsigned int peripheral_clock; const struct esdhc_clk_fixup *clk_fixup; @@ -1103,10 +1104,11 @@ static void esdhc_set_uhs_signaling(struct sdhci_host *host, static u32 esdhc_irq(struct sdhci_host *host, u32 intmask) { + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host); u32 command; - if (of_find_compatible_node(NULL, NULL, - "fsl,p2020-esdhc")) { + if (esdhc->quirk_trans_complete_erratum) { command = SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND)); if (command == MMC_WRITE_MULTIPLE_BLOCK && @@ -1260,8 +1262,10 @@ static void esdhc_init(struct platform_device *pdev, struct sdhci_host *host) esdhc->clk_fixup = match->data; np = pdev->dev.of_node; - if (of_device_is_compatible(np, "fsl,p2020-esdhc")) + if (of_device_is_compatible(np, "fsl,p2020-esdhc")) { esdhc->quirk_delay_before_data_reset = true; + esdhc->quirk_trans_complete_erratum = true; + } clk = of_clk_get(np, 0); if (!IS_ERR(clk)) { diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c index 9b66e8b374ed..425aa898e797 100644 --- a/drivers/mmc/host/sdhci-pci-core.c +++ b/drivers/mmc/host/sdhci-pci-core.c @@ -232,6 +232,14 @@ static void sdhci_pci_dumpregs(struct mmc_host *mmc) sdhci_dumpregs(mmc_priv(mmc)); } +static void sdhci_cqhci_reset(struct sdhci_host *host, u8 mask) +{ + if ((host->mmc->caps2 & MMC_CAP2_CQE) && (mask & SDHCI_RESET_ALL) && + host->mmc->cqe_private) + cqhci_deactivate(host->mmc); + sdhci_reset(host, mask); +} + /*****************************************************************************\ * * * Hardware specific quirk handling * @@ -722,7 +730,7 @@ static const struct sdhci_ops sdhci_intel_glk_ops = { .set_power = sdhci_intel_set_power, .enable_dma = sdhci_pci_enable_dma, .set_bus_width = sdhci_set_bus_width, - .reset = sdhci_reset, + .reset = sdhci_cqhci_reset, .set_uhs_signaling = sdhci_set_uhs_signaling, .hw_reset = sdhci_pci_hw_reset, .irq = sdhci_cqhci_irq, diff --git a/drivers/mmc/host/sdhci-pci-o2micro.c 
b/drivers/mmc/host/sdhci-pci-o2micro.c index fa8105087d68..41a2394313dd 100644 --- a/drivers/mmc/host/sdhci-pci-o2micro.c +++ b/drivers/mmc/host/sdhci-pci-o2micro.c @@ -561,6 +561,12 @@ int sdhci_pci_o2_probe_slot(struct sdhci_pci_slot *slot) slot->host->mmc_host_ops.get_cd = sdhci_o2_get_cd; } + if (chip->pdev->device == PCI_DEVICE_ID_O2_SEABIRD1) { + slot->host->mmc_host_ops.get_cd = sdhci_o2_get_cd; + host->mmc->caps2 |= MMC_CAP2_NO_SDIO; + host->quirks2 |= SDHCI_QUIRK2_PRESET_VALUE_BROKEN; + } + host->mmc_host_ops.execute_tuning = sdhci_o2_execute_tuning; if (chip->pdev->device != PCI_DEVICE_ID_O2_FUJIN2) diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c index a25c3a4d3f6c..c105356ad4cb 100644 --- a/drivers/mmc/host/sdhci-tegra.c +++ b/drivers/mmc/host/sdhci-tegra.c @@ -100,6 +100,12 @@ #define NVQUIRK_DIS_CARD_CLK_CONFIG_TAP BIT(8) #define NVQUIRK_CQHCI_DCMD_R1B_CMD_TIMING BIT(9) +/* + * NVQUIRK_HAS_TMCLK is for SoC's having separate timeout clock for Tegra + * SDMMC hardware data timeout. + */ +#define NVQUIRK_HAS_TMCLK BIT(10) + /* SDMMC CQE Base Address for Tegra Host Ver 4.1 and Higher */ #define SDHCI_TEGRA_CQE_BASE_ADDR 0xF000 @@ -130,6 +136,7 @@ struct sdhci_tegra_autocal_offsets { struct sdhci_tegra { const struct sdhci_tegra_soc_data *soc_data; struct gpio_desc *power_gpio; + struct clk *tmclk; bool ddr_signaling; bool pad_calib_required; bool pad_control_available; @@ -1370,7 +1377,6 @@ static const struct sdhci_ops tegra210_sdhci_ops = { static const struct sdhci_pltfm_data sdhci_tegra210_pdata = { .quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL | - SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | SDHCI_QUIRK_SINGLE_POWER_WRITE | SDHCI_QUIRK_NO_HISPD_BIT | SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC | @@ -1386,7 +1392,8 @@ static const struct sdhci_tegra_soc_data soc_data_tegra210 = { NVQUIRK_HAS_PADCALIB | NVQUIRK_DIS_CARD_CLK_CONFIG_TAP | NVQUIRK_ENABLE_SDR50 | - NVQUIRK_ENABLE_SDR104, + NVQUIRK_ENABLE_SDR104 | + NVQUIRK_HAS_TMCLK, .min_tap_delay = 106, .max_tap_delay = 185, }; @@ -1407,7 +1414,6 @@ static const struct sdhci_ops tegra186_sdhci_ops = { static const struct sdhci_pltfm_data sdhci_tegra186_pdata = { .quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL | - SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | SDHCI_QUIRK_SINGLE_POWER_WRITE | SDHCI_QUIRK_NO_HISPD_BIT | SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC | @@ -1424,6 +1430,7 @@ static const struct sdhci_tegra_soc_data soc_data_tegra186 = { NVQUIRK_DIS_CARD_CLK_CONFIG_TAP | NVQUIRK_ENABLE_SDR50 | NVQUIRK_ENABLE_SDR104 | + NVQUIRK_HAS_TMCLK | NVQUIRK_CQHCI_DCMD_R1B_CMD_TIMING, .min_tap_delay = 84, .max_tap_delay = 136, @@ -1436,7 +1443,8 @@ static const struct sdhci_tegra_soc_data soc_data_tegra194 = { NVQUIRK_HAS_PADCALIB | NVQUIRK_DIS_CARD_CLK_CONFIG_TAP | NVQUIRK_ENABLE_SDR50 | - NVQUIRK_ENABLE_SDR104, + NVQUIRK_ENABLE_SDR104 | + NVQUIRK_HAS_TMCLK, .min_tap_delay = 96, .max_tap_delay = 139, }; @@ -1564,6 +1572,43 @@ static int sdhci_tegra_probe(struct platform_device *pdev) goto err_power_req; } + /* + * Tegra210 has a separate SDMMC_LEGACY_TM clock used for host + * timeout clock and SW can choose TMCLK or SDCLK for hardware + * data timeout through the bit USE_TMCLK_FOR_DATA_TIMEOUT of + * the register SDHCI_TEGRA_VENDOR_SYS_SW_CTRL. + * + * USE_TMCLK_FOR_DATA_TIMEOUT bit default is set to 1 and SDMMC uses + * 12Mhz TMCLK which is advertised in host capability register. + * A TMCLK of 12Mhz provides a maximum data timeout period of 11s, which + * is better than what can be achieved using SDCLK for the data timeout. 
+ * + * So, TMCLK is set to 12Mhz and kept enabled all the time on SoC's + * supporting separate TMCLK. + */ + + if (soc_data->nvquirks & NVQUIRK_HAS_TMCLK) { + clk = devm_clk_get(&pdev->dev, "tmclk"); + if (IS_ERR(clk)) { + rc = PTR_ERR(clk); + if (rc == -EPROBE_DEFER) + goto err_power_req; + + dev_warn(&pdev->dev, "failed to get tmclk: %d\n", rc); + clk = NULL; + } + + clk_set_rate(clk, 12000000); + rc = clk_prepare_enable(clk); + if (rc) { + dev_err(&pdev->dev, + "failed to enable tmclk: %d\n", rc); + goto err_power_req; + } + + tegra_host->tmclk = clk; + } + clk = devm_clk_get(mmc_dev(host->mmc), NULL); if (IS_ERR(clk)) { rc = PTR_ERR(clk); @@ -1607,6 +1652,7 @@ err_add_host: err_rst_get: clk_disable_unprepare(pltfm_host->clk); err_clk_get: + clk_disable_unprepare(tegra_host->tmclk); err_power_req: err_parse_dt: sdhci_pltfm_free(pdev); @@ -1624,6 +1670,7 @@ static int sdhci_tegra_remove(struct platform_device *pdev) reset_control_assert(tegra_host->rst); usleep_range(2000, 4000); clk_disable_unprepare(pltfm_host->clk); + clk_disable_unprepare(tegra_host->tmclk); sdhci_pltfm_free(pdev); diff --git a/drivers/mmc/host/via-sdmmc.c b/drivers/mmc/host/via-sdmmc.c index f4ac064ff471..8d96ecba1b55 100644 --- a/drivers/mmc/host/via-sdmmc.c +++ b/drivers/mmc/host/via-sdmmc.c @@ -319,6 +319,8 @@ struct via_crdr_mmc_host { /* some devices need a very long delay for power to stabilize */ #define VIA_CRDR_QUIRK_300MS_PWRDELAY 0x0001 +#define VIA_CMD_TIMEOUT_MS 1000 + static const struct pci_device_id via_ids[] = { {PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_9530, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0,}, @@ -551,14 +553,17 @@ static void via_sdc_send_command(struct via_crdr_mmc_host *host, { void __iomem *addrbase; struct mmc_data *data; + unsigned int timeout_ms; u32 cmdctrl = 0; WARN_ON(host->cmd); data = cmd->data; - mod_timer(&host->timer, jiffies + HZ); host->cmd = cmd; + timeout_ms = cmd->busy_timeout ? cmd->busy_timeout : VIA_CMD_TIMEOUT_MS; + mod_timer(&host->timer, jiffies + msecs_to_jiffies(timeout_ms)); + /*Command index*/ cmdctrl = cmd->opcode << 8; diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c index 975aed94f06c..48832f9b215c 100644 --- a/drivers/mtd/mtdchar.c +++ b/drivers/mtd/mtdchar.c @@ -354,9 +354,6 @@ static int mtdchar_writeoob(struct file *file, struct mtd_info *mtd, uint32_t retlen; int ret = 0; - if (!(file->f_mode & FMODE_WRITE)) - return -EPERM; - if (length > 4096) return -EINVAL; @@ -641,6 +638,48 @@ static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg) pr_debug("MTD_ioctl\n"); + /* + * Check the file mode to require "dangerous" commands to have write + * permissions. 
+ */ + switch (cmd) { + /* "safe" commands */ + case MEMGETREGIONCOUNT: + case MEMGETREGIONINFO: + case MEMGETINFO: + case MEMREADOOB: + case MEMREADOOB64: + case MEMLOCK: + case MEMUNLOCK: + case MEMISLOCKED: + case MEMGETOOBSEL: + case MEMGETBADBLOCK: + case MEMSETBADBLOCK: + case OTPSELECT: + case OTPGETREGIONCOUNT: + case OTPGETREGIONINFO: + case OTPLOCK: + case ECCGETLAYOUT: + case ECCGETSTATS: + case MTDFILEMODE: + case BLKPG: + case BLKRRPART: + break; + + /* "dangerous" commands */ + case MEMERASE: + case MEMERASE64: + case MEMWRITEOOB: + case MEMWRITEOOB64: + case MEMWRITE: + if (!(file->f_mode & FMODE_WRITE)) + return -EPERM; + break; + + default: + return -ENOTTY; + } + switch (cmd) { case MEMGETREGIONCOUNT: if (copy_to_user(argp, &(mtd->numeraseregions), sizeof(int))) @@ -688,9 +727,6 @@ static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg) { struct erase_info *erase; - if(!(file->f_mode & FMODE_WRITE)) - return -EPERM; - erase=kzalloc(sizeof(struct erase_info),GFP_KERNEL); if (!erase) ret = -ENOMEM; @@ -983,9 +1019,6 @@ static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg) ret = 0; break; } - - default: - ret = -ENOTTY; } return ret; @@ -1029,6 +1062,11 @@ static long mtdchar_compat_ioctl(struct file *file, unsigned int cmd, struct mtd_oob_buf32 buf; struct mtd_oob_buf32 __user *buf_user = argp; + if (!(file->f_mode & FMODE_WRITE)) { + ret = -EPERM; + break; + } + if (copy_from_user(&buf, argp, sizeof(buf))) ret = -EFAULT; else diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c index 15ef30b368a5..e9ad8bb82f44 100644 --- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c +++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c @@ -537,8 +537,9 @@ static int brcmnand_revision_init(struct brcmnand_controller *ctrl) } else { ctrl->cs_offsets = brcmnand_cs_offsets; - /* v5.0 and earlier has a different CS0 offset layout */ - if (ctrl->nand_version <= 0x0500) + /* v3.3-5.0 have a different CS0 offset layout */ + if (ctrl->nand_version >= 0x0303 && + ctrl->nand_version <= 0x0500) ctrl->cs0_offsets = brcmnand_cs_offsets_cs0; } @@ -1019,11 +1020,14 @@ static int brcmnand_hamming_ooblayout_free(struct mtd_info *mtd, int section, if (!section) { /* * Small-page NAND use byte 6 for BBI while large-page - * NAND use byte 0. + * NAND use bytes 0 and 1. 
*/ - if (cfg->page_size > 512) - oobregion->offset++; - oobregion->length--; + if (cfg->page_size > 512) { + oobregion->offset += 2; + oobregion->length -= 2; + } else { + oobregion->length--; + } } } @@ -1787,28 +1791,31 @@ static int brcmnand_read_by_pio(struct mtd_info *mtd, struct nand_chip *chip, static int brcmstb_nand_verify_erased_page(struct mtd_info *mtd, struct nand_chip *chip, void *buf, u64 addr) { - int i, sas; - void *oob = chip->oob_poi; + struct mtd_oob_region ecc; + int i; int bitflips = 0; int page = addr >> chip->page_shift; int ret; + void *ecc_bytes; void *ecc_chunk; if (!buf) buf = nand_get_data_buf(chip); - sas = mtd->oobsize / chip->ecc.steps; - /* read without ecc for verification */ ret = chip->ecc.read_page_raw(chip, buf, true, page); if (ret) return ret; - for (i = 0; i < chip->ecc.steps; i++, oob += sas) { + for (i = 0; i < chip->ecc.steps; i++) { ecc_chunk = buf + chip->ecc.size * i; - ret = nand_check_erased_ecc_chunk(ecc_chunk, - chip->ecc.size, - oob, sas, NULL, 0, + + mtd_ooblayout_ecc(mtd, i, &ecc); + ecc_bytes = chip->oob_poi + ecc.offset; + + ret = nand_check_erased_ecc_chunk(ecc_chunk, chip->ecc.size, + ecc_bytes, ecc.length, + NULL, 0, chip->ecc.strength); if (ret < 0) return ret; diff --git a/drivers/mtd/nand/raw/diskonchip.c b/drivers/mtd/nand/raw/diskonchip.c index c0e1a8ebe820..522390b99d3c 100644 --- a/drivers/mtd/nand/raw/diskonchip.c +++ b/drivers/mtd/nand/raw/diskonchip.c @@ -1609,13 +1609,10 @@ static int __init doc_probe(unsigned long physadr) numchips = doc2001_init(mtd); if ((ret = nand_scan(nand, numchips)) || (ret = doc->late_init(mtd))) { - /* DBB note: i believe nand_release is necessary here, as + /* DBB note: i believe nand_cleanup is necessary here, as buffers may have been allocated in nand_base. Check with Thomas. FIX ME! */ - /* nand_release will call mtd_device_unregister, but we - haven't yet added it. This is handled without incident by - mtd_device_unregister, as far as I can tell. */ - nand_release(nand); + nand_cleanup(nand); goto fail; } diff --git a/drivers/mtd/nand/raw/fsl_upm.c b/drivers/mtd/nand/raw/fsl_upm.c index 1054cc070747..20b0ee174dc6 100644 --- a/drivers/mtd/nand/raw/fsl_upm.c +++ b/drivers/mtd/nand/raw/fsl_upm.c @@ -62,7 +62,6 @@ static int fun_chip_ready(struct nand_chip *chip) static void fun_wait_rnb(struct fsl_upm_nand *fun) { if (fun->rnb_gpio[fun->mchip_number] >= 0) { - struct mtd_info *mtd = nand_to_mtd(&fun->chip); int cnt = 1000000; while (--cnt && !fun_chip_ready(&fun->chip)) diff --git a/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c b/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c index 49afebee50db..4b7c399d4f4b 100644 --- a/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c +++ b/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c @@ -376,7 +376,7 @@ static int ingenic_nand_init_chip(struct platform_device *pdev, ret = mtd_device_register(mtd, NULL, 0); if (ret) { - nand_release(chip); + nand_cleanup(chip); return ret; } diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c index fc49e13d81ec..ee4afa17d8a3 100644 --- a/drivers/mtd/nand/raw/marvell_nand.c +++ b/drivers/mtd/nand/raw/marvell_nand.c @@ -707,7 +707,7 @@ static int marvell_nfc_wait_op(struct nand_chip *chip, unsigned int timeout_ms) * In case the interrupt was not served in the required time frame, * check if the ISR was not served or if something went actually wrong. 
*/ - if (ret && !pending) { + if (!ret && !pending) { dev_err(nfc->dev, "Timeout waiting for RB signal\n"); return -ETIMEDOUT; } @@ -2664,7 +2664,7 @@ static int marvell_nand_chip_init(struct device *dev, struct marvell_nfc *nfc, ret = mtd_device_register(mtd, NULL, 0); if (ret) { dev_err(dev, "failed to register mtd device: %d\n", ret); - nand_release(chip); + nand_cleanup(chip); return ret; } @@ -2673,6 +2673,16 @@ static int marvell_nand_chip_init(struct device *dev, struct marvell_nfc *nfc, return 0; } +static void marvell_nand_chips_cleanup(struct marvell_nfc *nfc) +{ + struct marvell_nand_chip *entry, *temp; + + list_for_each_entry_safe(entry, temp, &nfc->chips, node) { + nand_release(&entry->chip); + list_del(&entry->node); + } +} + static int marvell_nand_chips_init(struct device *dev, struct marvell_nfc *nfc) { struct device_node *np = dev->of_node; @@ -2707,21 +2717,16 @@ static int marvell_nand_chips_init(struct device *dev, struct marvell_nfc *nfc) ret = marvell_nand_chip_init(dev, nfc, nand_np); if (ret) { of_node_put(nand_np); - return ret; + goto cleanup_chips; } } return 0; -} -static void marvell_nand_chips_cleanup(struct marvell_nfc *nfc) -{ - struct marvell_nand_chip *entry, *temp; +cleanup_chips: + marvell_nand_chips_cleanup(nfc); - list_for_each_entry_safe(entry, temp, &nfc->chips, node) { - nand_release(&entry->chip); - list_del(&entry->node); - } + return ret; } static int marvell_nfc_init_dma(struct marvell_nfc *nfc) diff --git a/drivers/mtd/nand/raw/mtk_nand.c b/drivers/mtd/nand/raw/mtk_nand.c index 373d47d1ba4c..08008c844a47 100644 --- a/drivers/mtd/nand/raw/mtk_nand.c +++ b/drivers/mtd/nand/raw/mtk_nand.c @@ -1419,7 +1419,7 @@ static int mtk_nfc_nand_chip_init(struct device *dev, struct mtk_nfc *nfc, ret = mtd_device_register(mtd, NULL, 0); if (ret) { dev_err(dev, "mtd parse partition error\n"); - nand_release(nand); + nand_cleanup(nand); return ret; } diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c index a12001f5660c..f3a601501e3a 100644 --- a/drivers/mtd/nand/raw/nand_base.c +++ b/drivers/mtd/nand/raw/nand_base.c @@ -731,8 +731,14 @@ EXPORT_SYMBOL_GPL(nand_soft_waitrdy); int nand_gpio_waitrdy(struct nand_chip *chip, struct gpio_desc *gpiod, unsigned long timeout_ms) { - /* Wait until R/B pin indicates chip is ready or timeout occurs */ - timeout_ms = jiffies + msecs_to_jiffies(timeout_ms); + + /* + * Wait until R/B pin indicates chip is ready or timeout occurs. + * +1 below is necessary because if we are now in the last fraction + * of jiffy and msecs_to_jiffies is 1 then we will wait only that + * small jiffy fraction - possibly leading to false timeout. 
+ */ + timeout_ms = jiffies + msecs_to_jiffies(timeout_ms) + 1; do { if (gpiod_get_value_cansleep(gpiod)) return 0; diff --git a/drivers/mtd/nand/raw/nand_onfi.c b/drivers/mtd/nand/raw/nand_onfi.c index 0b879bd0a68c..8fe8d7bdd203 100644 --- a/drivers/mtd/nand/raw/nand_onfi.c +++ b/drivers/mtd/nand/raw/nand_onfi.c @@ -173,7 +173,7 @@ int nand_onfi_detect(struct nand_chip *chip) } if (onfi_crc16(ONFI_CRC_BASE, (u8 *)&p[i], 254) == - le16_to_cpu(p->crc)) { + le16_to_cpu(p[i].crc)) { if (i) memcpy(p, &p[i], sizeof(*p)); break; diff --git a/drivers/mtd/nand/raw/nand_timings.c b/drivers/mtd/nand/raw/nand_timings.c index f64b06a71dfa..f12b7a7844c9 100644 --- a/drivers/mtd/nand/raw/nand_timings.c +++ b/drivers/mtd/nand/raw/nand_timings.c @@ -314,10 +314,9 @@ int onfi_fill_data_interface(struct nand_chip *chip, /* microseconds -> picoseconds */ timings->tPROG_max = 1000000ULL * ONFI_DYN_TIMING_MAX; timings->tBERS_max = 1000000ULL * ONFI_DYN_TIMING_MAX; - timings->tR_max = 1000000ULL * 200000000ULL; - /* nanoseconds -> picoseconds */ - timings->tCCS_min = 1000UL * 500000; + timings->tR_max = 200000000; + timings->tCCS_min = 500000; } return 0; diff --git a/drivers/mtd/nand/raw/orion_nand.c b/drivers/mtd/nand/raw/orion_nand.c index d27b39a7223c..a3dcdf25f5f2 100644 --- a/drivers/mtd/nand/raw/orion_nand.c +++ b/drivers/mtd/nand/raw/orion_nand.c @@ -180,7 +180,7 @@ static int __init orion_nand_probe(struct platform_device *pdev) mtd->name = "orion_nand"; ret = mtd_device_register(mtd, board->parts, board->nr_parts); if (ret) { - nand_release(nc); + nand_cleanup(nc); goto no_dev; } diff --git a/drivers/mtd/nand/raw/oxnas_nand.c b/drivers/mtd/nand/raw/oxnas_nand.c index c43cb4d92d3d..23c222b6c40e 100644 --- a/drivers/mtd/nand/raw/oxnas_nand.c +++ b/drivers/mtd/nand/raw/oxnas_nand.c @@ -32,6 +32,7 @@ struct oxnas_nand_ctrl { void __iomem *io_base; struct clk *clk; struct nand_chip *chips[OXNAS_NAND_MAX_CHIPS]; + unsigned int nchips; }; static uint8_t oxnas_nand_read_byte(struct nand_chip *chip) @@ -79,9 +80,9 @@ static int oxnas_nand_probe(struct platform_device *pdev) struct nand_chip *chip; struct mtd_info *mtd; struct resource *res; - int nchips = 0; int count = 0; int err = 0; + int i; /* Allocate memory for the device structure (and zero it) */ oxnas = devm_kzalloc(&pdev->dev, sizeof(*oxnas), @@ -140,17 +141,15 @@ static int oxnas_nand_probe(struct platform_device *pdev) goto err_release_child; err = mtd_device_register(mtd, NULL, 0); - if (err) { - nand_release(chip); - goto err_release_child; - } + if (err) + goto err_cleanup_nand; - oxnas->chips[nchips] = chip; - ++nchips; + oxnas->chips[oxnas->nchips] = chip; + ++oxnas->nchips; } /* Exit if no chips found */ - if (!nchips) { + if (!oxnas->nchips) { err = -ENODEV; goto err_clk_unprepare; } @@ -159,8 +158,17 @@ static int oxnas_nand_probe(struct platform_device *pdev) return 0; +err_cleanup_nand: + nand_cleanup(chip); err_release_child: of_node_put(nand_np); + + for (i = 0; i < oxnas->nchips; i++) { + chip = oxnas->chips[i]; + WARN_ON(mtd_device_unregister(nand_to_mtd(chip))); + nand_cleanup(chip); + } + err_clk_unprepare: clk_disable_unprepare(oxnas->clk); return err; @@ -169,9 +177,13 @@ err_clk_unprepare: static int oxnas_nand_remove(struct platform_device *pdev) { struct oxnas_nand_ctrl *oxnas = platform_get_drvdata(pdev); + struct nand_chip *chip; + int i; - if (oxnas->chips[0]) - nand_release(oxnas->chips[0]); + for (i = 0; i < oxnas->nchips; i++) { + chip = oxnas->chips[i]; + nand_release(chip); + } clk_disable_unprepare(oxnas->clk); diff 
--git a/drivers/mtd/nand/raw/pasemi_nand.c b/drivers/mtd/nand/raw/pasemi_nand.c index 9cfe7395172a..066ff6dc9a23 100644 --- a/drivers/mtd/nand/raw/pasemi_nand.c +++ b/drivers/mtd/nand/raw/pasemi_nand.c @@ -146,7 +146,7 @@ static int pasemi_nand_probe(struct platform_device *ofdev) if (mtd_device_register(pasemi_nand_mtd, NULL, 0)) { dev_err(dev, "Unable to register MTD device\n"); err = -ENODEV; - goto out_lpc; + goto out_cleanup_nand; } dev_info(dev, "PA Semi NAND flash at %pR, control at I/O %x\n", &res, @@ -154,6 +154,8 @@ static int pasemi_nand_probe(struct platform_device *ofdev) return 0; + out_cleanup_nand: + nand_cleanup(chip); out_lpc: release_region(lpcctl, 4); out_ior: diff --git a/drivers/mtd/nand/raw/plat_nand.c b/drivers/mtd/nand/raw/plat_nand.c index dc0f3074ddbf..3a495b233443 100644 --- a/drivers/mtd/nand/raw/plat_nand.c +++ b/drivers/mtd/nand/raw/plat_nand.c @@ -92,7 +92,7 @@ static int plat_nand_probe(struct platform_device *pdev) if (!err) return err; - nand_release(&data->chip); + nand_cleanup(&data->chip); out: if (pdata->ctrl.remove) pdata->ctrl.remove(pdev); diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c index 7bb9a7e8e1e7..c1c53b02b35f 100644 --- a/drivers/mtd/nand/raw/qcom_nandc.c +++ b/drivers/mtd/nand/raw/qcom_nandc.c @@ -459,11 +459,13 @@ struct qcom_nand_host { * among different NAND controllers. * @ecc_modes - ecc mode for NAND * @is_bam - whether NAND controller is using BAM + * @is_qpic - whether NAND CTRL is part of qpic IP * @dev_cmd_reg_start - NAND_DEV_CMD_* registers starting offset */ struct qcom_nandc_props { u32 ecc_modes; bool is_bam; + bool is_qpic; u32 dev_cmd_reg_start; }; @@ -2751,7 +2753,8 @@ static int qcom_nandc_setup(struct qcom_nand_controller *nandc) u32 nand_ctrl; /* kill onenand */ - nandc_write(nandc, SFLASHC_BURST_CFG, 0); + if (!nandc->props->is_qpic) + nandc_write(nandc, SFLASHC_BURST_CFG, 0); nandc_write(nandc, dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD), NAND_DEV_CMD_VLD_VAL); @@ -3007,12 +3010,14 @@ static const struct qcom_nandc_props ipq806x_nandc_props = { static const struct qcom_nandc_props ipq4019_nandc_props = { .ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT), .is_bam = true, + .is_qpic = true, .dev_cmd_reg_start = 0x0, }; static const struct qcom_nandc_props ipq8074_nandc_props = { .ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT), .is_bam = true, + .is_qpic = true, .dev_cmd_reg_start = 0x7000, }; diff --git a/drivers/mtd/nand/raw/sharpsl.c b/drivers/mtd/nand/raw/sharpsl.c index b47a9eaff89b..d8c52a016080 100644 --- a/drivers/mtd/nand/raw/sharpsl.c +++ b/drivers/mtd/nand/raw/sharpsl.c @@ -183,7 +183,7 @@ static int sharpsl_nand_probe(struct platform_device *pdev) return 0; err_add: - nand_release(this); + nand_cleanup(this); err_scan: iounmap(sharpsl->io); diff --git a/drivers/mtd/nand/raw/socrates_nand.c b/drivers/mtd/nand/raw/socrates_nand.c index 20f40c0e812c..7c94fc51a611 100644 --- a/drivers/mtd/nand/raw/socrates_nand.c +++ b/drivers/mtd/nand/raw/socrates_nand.c @@ -169,7 +169,7 @@ static int socrates_nand_probe(struct platform_device *ofdev) if (!res) return res; - nand_release(nand_chip); + nand_cleanup(nand_chip); out: iounmap(host->io_base); diff --git a/drivers/mtd/nand/raw/sunxi_nand.c b/drivers/mtd/nand/raw/sunxi_nand.c index 89773293c64d..45c376fc571a 100644 --- a/drivers/mtd/nand/raw/sunxi_nand.c +++ b/drivers/mtd/nand/raw/sunxi_nand.c @@ -2003,7 +2003,7 @@ static int sunxi_nand_chip_init(struct device *dev, struct sunxi_nfc *nfc, ret = mtd_device_register(mtd, NULL, 0); if (ret) { 
dev_err(dev, "failed to register mtd device: %d\n", ret); - nand_release(nand); + nand_cleanup(nand); return ret; } diff --git a/drivers/mtd/nand/raw/tmio_nand.c b/drivers/mtd/nand/raw/tmio_nand.c index db030f1701ee..4e9a6d94f6e8 100644 --- a/drivers/mtd/nand/raw/tmio_nand.c +++ b/drivers/mtd/nand/raw/tmio_nand.c @@ -448,7 +448,7 @@ static int tmio_probe(struct platform_device *dev) if (!retval) return retval; - nand_release(nand_chip); + nand_cleanup(nand_chip); err_irq: tmio_hw_stop(dev, tmio); diff --git a/drivers/mtd/nand/raw/xway_nand.c b/drivers/mtd/nand/raw/xway_nand.c index 834f794816a9..018311dc8fe1 100644 --- a/drivers/mtd/nand/raw/xway_nand.c +++ b/drivers/mtd/nand/raw/xway_nand.c @@ -210,7 +210,7 @@ static int xway_nand_probe(struct platform_device *pdev) err = mtd_device_register(mtd, NULL, 0); if (err) - nand_release(&data->chip); + nand_cleanup(&data->chip); return err; } diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 703c5c2c80e5..0d7a173f8e61 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -2037,7 +2037,8 @@ static int bond_release_and_destroy(struct net_device *bond_dev, int ret; ret = __bond_release_one(bond_dev, slave_dev, false, true); - if (ret == 0 && !bond_has_slaves(bond)) { + if (ret == 0 && !bond_has_slaves(bond) && + bond_dev->reg_state != NETREG_UNREGISTERING) { bond_dev->priv_flags |= IFF_DISABLE_NETPOLL; netdev_info(bond_dev, "Destroying bond\n"); bond_remove_proc_entry(bond); @@ -2777,6 +2778,9 @@ static int bond_ab_arp_inspect(struct bonding *bond) if (bond_time_in_interval(bond, last_rx, 1)) { bond_propose_link_state(slave, BOND_LINK_UP); commit++; + } else if (slave->link == BOND_LINK_BACK) { + bond_propose_link_state(slave, BOND_LINK_FAIL); + commit++; } continue; } @@ -2885,6 +2889,19 @@ static void bond_ab_arp_commit(struct bonding *bond) continue; + case BOND_LINK_FAIL: + bond_set_slave_link_state(slave, BOND_LINK_FAIL, + BOND_SLAVE_NOTIFY_NOW); + bond_set_slave_inactive_flags(slave, + BOND_SLAVE_NOTIFY_NOW); + + /* A slave has just been enslaved and has become + * the current active slave. 
+ */ + if (rtnl_dereference(bond->curr_active_slave)) + RCU_INIT_POINTER(bond->current_arp_slave, NULL); + continue; + default: slave_err(bond->dev, slave->dev, "impossible: link_new_state %d on slave\n", @@ -2935,8 +2952,6 @@ static bool bond_ab_arp_probe(struct bonding *bond) return should_notify_rtnl; } - bond_set_slave_inactive_flags(curr_arp_slave, BOND_SLAVE_NOTIFY_LATER); - bond_for_each_slave_rcu(bond, slave, iter) { if (!found && !before && bond_slave_is_up(slave)) before = slave; @@ -4246,13 +4261,23 @@ static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev) return ret; } +static u32 bond_mode_bcast_speed(struct slave *slave, u32 speed) +{ + if (speed == 0 || speed == SPEED_UNKNOWN) + speed = slave->speed; + else + speed = min(speed, slave->speed); + + return speed; +} + static int bond_ethtool_get_link_ksettings(struct net_device *bond_dev, struct ethtool_link_ksettings *cmd) { struct bonding *bond = netdev_priv(bond_dev); - unsigned long speed = 0; struct list_head *iter; struct slave *slave; + u32 speed = 0; cmd->base.duplex = DUPLEX_UNKNOWN; cmd->base.port = PORT_OTHER; @@ -4264,8 +4289,13 @@ static int bond_ethtool_get_link_ksettings(struct net_device *bond_dev, */ bond_for_each_slave(bond, slave, iter) { if (bond_slave_can_tx(slave)) { - if (slave->speed != SPEED_UNKNOWN) - speed += slave->speed; + if (slave->speed != SPEED_UNKNOWN) { + if (BOND_MODE(bond) == BOND_MODE_BROADCAST) + speed = bond_mode_bcast_speed(slave, + speed); + else + speed += slave->speed; + } if (cmd->base.duplex == DUPLEX_UNKNOWN && slave->duplex != DUPLEX_UNKNOWN) cmd->base.duplex = slave->duplex; @@ -4864,15 +4894,19 @@ int bond_create(struct net *net, const char *name) bond_dev->rtnl_link_ops = &bond_link_ops; res = register_netdevice(bond_dev); + if (res < 0) { + free_netdev(bond_dev); + rtnl_unlock(); + + return res; + } netif_carrier_off(bond_dev); bond_work_init_all(bond); rtnl_unlock(); - if (res < 0) - free_netdev(bond_dev); - return res; + return 0; } static int __net_init bond_net_init(struct net *net) diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c index b43b51646b11..f0f9138e967f 100644 --- a/drivers/net/bonding/bond_netlink.c +++ b/drivers/net/bonding/bond_netlink.c @@ -456,11 +456,10 @@ static int bond_newlink(struct net *src_net, struct net_device *bond_dev, return err; err = register_netdevice(bond_dev); - - netif_carrier_off(bond_dev); if (!err) { struct bonding *bond = netdev_priv(bond_dev); + netif_carrier_off(bond_dev); bond_work_init_all(bond); } diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c index 14850b7fe6d7..4bd66ba72c03 100644 --- a/drivers/net/dsa/b53/b53_common.c +++ b/drivers/net/dsa/b53/b53_common.c @@ -1523,6 +1523,8 @@ static int b53_arl_op(struct b53_device *dev, int op, int port, return ret; switch (ret) { + case -ETIMEDOUT: + return ret; case -ENOSPC: dev_dbg(dev->dev, "{%pM,%.4d} no space left in ARL\n", addr, vid); diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index 9502db66092e..b16aea0e3999 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -1116,6 +1116,8 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev) set_bit(0, priv->cfp.used); set_bit(0, priv->cfp.unique); + /* Balance of_node_put() done by of_find_node_by_name() */ + of_node_get(dn); ports = of_find_node_by_name(dn, "ports"); if (ports) { bcm_sf2_identify_ports(priv, ports); diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c index 
a69c9b9878b7..636966e93517 100644 --- a/drivers/net/dsa/lantiq_gswip.c +++ b/drivers/net/dsa/lantiq_gswip.c @@ -1451,7 +1451,8 @@ static void gswip_phylink_validate(struct dsa_switch *ds, int port, unsupported: bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); - dev_err(ds->dev, "Unsupported interface: %d\n", state->interface); + dev_err(ds->dev, "Unsupported interface '%s' for port %d\n", + phy_modes(state->interface), port); return; } diff --git a/drivers/net/dsa/microchip/ksz8795.c b/drivers/net/dsa/microchip/ksz8795.c index 24a5e99f7fd5..84c4319e3b31 100644 --- a/drivers/net/dsa/microchip/ksz8795.c +++ b/drivers/net/dsa/microchip/ksz8795.c @@ -1267,6 +1267,9 @@ static int ksz8795_switch_init(struct ksz_device *dev) return -ENOMEM; } + /* set the real number of ports */ + dev->ds->num_ports = dev->port_cnt; + return 0; } diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c index 50ffc63d6231..49ab1346dc3f 100644 --- a/drivers/net/dsa/microchip/ksz9477.c +++ b/drivers/net/dsa/microchip/ksz9477.c @@ -973,23 +973,6 @@ static void ksz9477_port_mirror_del(struct dsa_switch *ds, int port, PORT_MIRROR_SNIFFER, false); } -static void ksz9477_phy_setup(struct ksz_device *dev, int port, - struct phy_device *phy) -{ - /* Only apply to port with PHY. */ - if (port >= dev->phy_port_cnt) - return; - - /* The MAC actually cannot run in 1000 half-duplex mode. */ - phy_remove_link_mode(phy, - ETHTOOL_LINK_MODE_1000baseT_Half_BIT); - - /* PHY does not support gigabit. */ - if (!(dev->features & GBIT_SUPPORT)) - phy_remove_link_mode(phy, - ETHTOOL_LINK_MODE_1000baseT_Full_BIT); -} - static bool ksz9477_get_gbit(struct ksz_device *dev, u8 data) { bool gbit; @@ -1587,6 +1570,9 @@ static int ksz9477_switch_init(struct ksz_device *dev) return -ENOMEM; } + /* set the real number of ports */ + dev->ds->num_ports = dev->port_cnt; + return 0; } @@ -1599,7 +1585,6 @@ static const struct ksz_dev_ops ksz9477_dev_ops = { .get_port_addr = ksz9477_get_port_addr, .cfg_port_member = ksz9477_cfg_port_member, .flush_dyn_mac_table = ksz9477_flush_dyn_mac_table, - .phy_setup = ksz9477_phy_setup, .port_setup = ksz9477_port_setup, .r_mib_cnt = ksz9477_r_mib_cnt, .r_mib_pkt = ksz9477_r_mib_pkt, @@ -1613,7 +1598,29 @@ static const struct ksz_dev_ops ksz9477_dev_ops = { int ksz9477_switch_register(struct ksz_device *dev) { - return ksz_switch_register(dev, &ksz9477_dev_ops); + int ret, i; + struct phy_device *phydev; + + ret = ksz_switch_register(dev, &ksz9477_dev_ops); + if (ret) + return ret; + + for (i = 0; i < dev->phy_port_cnt; ++i) { + if (!dsa_is_user_port(dev->ds, i)) + continue; + + phydev = dsa_to_port(dev->ds, i)->slave->phydev; + + /* The MAC actually cannot run in 1000 half-duplex mode. */ + phy_remove_link_mode(phydev, + ETHTOOL_LINK_MODE_1000baseT_Half_BIT); + + /* PHY does not support gigabit. 
*/ + if (!(dev->features & GBIT_SUPPORT)) + phy_remove_link_mode(phydev, + ETHTOOL_LINK_MODE_1000baseT_Full_BIT); + } + return ret; } EXPORT_SYMBOL(ksz9477_switch_register); diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c index fe47180c908b..7fabc0e3d807 100644 --- a/drivers/net/dsa/microchip/ksz_common.c +++ b/drivers/net/dsa/microchip/ksz_common.c @@ -366,8 +366,6 @@ int ksz_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy) /* setup slave port */ dev->dev_ops->port_setup(dev, port, false); - if (dev->dev_ops->phy_setup) - dev->dev_ops->phy_setup(dev, port, phy); /* port_stp_state_set() will be called after to enable the port so * there is no need to do anything. diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h index a20ebb749377..061142b183cb 100644 --- a/drivers/net/dsa/microchip/ksz_common.h +++ b/drivers/net/dsa/microchip/ksz_common.h @@ -120,8 +120,6 @@ struct ksz_dev_ops { u32 (*get_port_addr)(int port, int offset); void (*cfg_port_member)(struct ksz_device *dev, int port, u8 member); void (*flush_dyn_mac_table)(struct ksz_device *dev, int port); - void (*phy_setup)(struct ksz_device *dev, int port, - struct phy_device *phy); void (*port_cleanup)(struct ksz_device *dev, int port); void (*port_setup)(struct ksz_device *dev, int port, bool cpu_port); void (*r_phy)(struct ksz_device *dev, u16 phy, u16 reg, u16 *val); diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c index dc9a3bb24114..00d680cb4441 100644 --- a/drivers/net/dsa/mt7530.c +++ b/drivers/net/dsa/mt7530.c @@ -1456,7 +1456,7 @@ unsupported: phylink_set(mask, 100baseT_Full); if (state->interface != PHY_INTERFACE_MODE_MII) { - phylink_set(mask, 1000baseT_Half); + /* This switch only supports 1G full-duplex. 
*/ phylink_set(mask, 1000baseT_Full); if (port == 5) phylink_set(mask, 1000baseX_Full); diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 6787d560e9e3..92e4d140df6f 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -3063,7 +3063,6 @@ static const struct mv88e6xxx_ops mv88e6097_ops = { .port_set_frame_mode = mv88e6351_port_set_frame_mode, .port_set_egress_floods = mv88e6352_port_set_egress_floods, .port_set_ether_type = mv88e6351_port_set_ether_type, - .port_set_jumbo_size = mv88e6165_port_set_jumbo_size, .port_egress_rate_limiting = mv88e6095_port_egress_rate_limiting, .port_pause_limit = mv88e6097_port_pause_limit, .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, diff --git a/drivers/net/dsa/rtl8366.c b/drivers/net/dsa/rtl8366.c index ac88caca5ad4..1368816abaed 100644 --- a/drivers/net/dsa/rtl8366.c +++ b/drivers/net/dsa/rtl8366.c @@ -43,18 +43,26 @@ int rtl8366_set_vlan(struct realtek_smi *smi, int vid, u32 member, int ret; int i; + dev_dbg(smi->dev, + "setting VLAN%d 4k members: 0x%02x, untagged: 0x%02x\n", + vid, member, untag); + /* Update the 4K table */ ret = smi->ops->get_vlan_4k(smi, vid, &vlan4k); if (ret) return ret; - vlan4k.member = member; - vlan4k.untag = untag; + vlan4k.member |= member; + vlan4k.untag |= untag; vlan4k.fid = fid; ret = smi->ops->set_vlan_4k(smi, &vlan4k); if (ret) return ret; + dev_dbg(smi->dev, + "resulting VLAN%d 4k members: 0x%02x, untagged: 0x%02x\n", + vid, vlan4k.member, vlan4k.untag); + /* Try to find an existing MC entry for this VID */ for (i = 0; i < smi->num_vlan_mc; i++) { struct rtl8366_vlan_mc vlanmc; @@ -65,11 +73,16 @@ int rtl8366_set_vlan(struct realtek_smi *smi, int vid, u32 member, if (vid == vlanmc.vid) { /* update the MC entry */ - vlanmc.member = member; - vlanmc.untag = untag; + vlanmc.member |= member; + vlanmc.untag |= untag; vlanmc.fid = fid; ret = smi->ops->set_vlan_mc(smi, i, &vlanmc); + + dev_dbg(smi->dev, + "resulting VLAN%d MC members: 0x%02x, untagged: 0x%02x\n", + vid, vlanmc.member, vlanmc.untag); + break; } } @@ -384,7 +397,7 @@ void rtl8366_vlan_add(struct dsa_switch *ds, int port, if (dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port)) dev_err(smi->dev, "port is DSA or CPU port\n"); - for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) { + for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) { int pvid_val = 0; dev_info(smi->dev, "add VLAN %04x\n", vid); @@ -407,13 +420,13 @@ void rtl8366_vlan_add(struct dsa_switch *ds, int port, if (ret < 0) return; } - } - ret = rtl8366_set_vlan(smi, port, member, untag, 0); - if (ret) - dev_err(smi->dev, - "failed to set up VLAN %04x", - vid); + ret = rtl8366_set_vlan(smi, vid, member, untag, 0); + if (ret) + dev_err(smi->dev, + "failed to set up VLAN %04x", + vid); + } } EXPORT_SYMBOL_GPL(rtl8366_vlan_add); diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c index 0537df06a9b5..ff318472a3ee 100644 --- a/drivers/net/ethernet/allwinner/sun4i-emac.c +++ b/drivers/net/ethernet/allwinner/sun4i-emac.c @@ -432,7 +432,7 @@ static void emac_timeout(struct net_device *dev) /* Hardware start transmission. * Send a packet to media from the upper layer. 
*/ -static int emac_start_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t emac_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct emac_board_info *db = netdev_priv(dev); unsigned long channel; @@ -440,7 +440,7 @@ static int emac_start_xmit(struct sk_buff *skb, struct net_device *dev) channel = db->tx_fifo_stat & 3; if (channel == 3) - return 1; + return NETDEV_TX_BUSY; channel = (channel == 1 ? 1 : 0); diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c index 48de4bee209e..9225733f4fec 100644 --- a/drivers/net/ethernet/amazon/ena/ena_com.c +++ b/drivers/net/ethernet/amazon/ena/ena_com.c @@ -2349,6 +2349,9 @@ int ena_com_get_hash_function(struct ena_com_dev *ena_dev, rss->hash_key; int rc; + if (unlikely(!func)) + return -EINVAL; + rc = ena_com_get_feature_ex(ena_dev, &get_resp, ENA_ADMIN_RSS_HASH_FUNCTION, rss->hash_key_dma_addr, @@ -2361,8 +2364,7 @@ int ena_com_get_hash_function(struct ena_com_dev *ena_dev, if (rss->hash_func) rss->hash_func--; - if (func) - *func = rss->hash_func; + *func = rss->hash_func; if (key) memcpy(key, hash_key->key, (size_t)(hash_key->keys_num) << 2); diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index 26325f7b3c1f..635345bced31 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -2835,16 +2835,14 @@ static void ena_fw_reset_device(struct work_struct *work) { struct ena_adapter *adapter = container_of(work, struct ena_adapter, reset_task); - struct pci_dev *pdev = adapter->pdev; - if (unlikely(!test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) { - dev_err(&pdev->dev, - "device reset schedule while reset bit is off\n"); - return; - } rtnl_lock(); - ena_destroy_device(adapter, false); - ena_restore_device(adapter); + + if (likely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) { + ena_destroy_device(adapter, false); + ena_restore_device(adapter); + } + rtnl_unlock(); } @@ -2926,7 +2924,7 @@ static int check_missing_comp_in_tx_queue(struct ena_adapter *adapter, } u64_stats_update_begin(&tx_ring->syncp); - tx_ring->tx_stats.missed_tx = missed_tx; + tx_ring->tx_stats.missed_tx += missed_tx; u64_stats_update_end(&tx_ring->syncp); return rc; @@ -3675,8 +3673,11 @@ static void __ena_shutoff(struct pci_dev *pdev, bool shutdown) netdev->rx_cpu_rmap = NULL; } #endif /* CONFIG_RFS_ACCEL */ - del_timer_sync(&adapter->timer_service); + /* Make sure timer and reset routine won't be called after + * freeing device resources. + */ + del_timer_sync(&adapter->timer_service); cancel_work_sync(&adapter->reset_task); rtnl_lock(); /* lock released inside the below if-else block */ @@ -3847,6 +3848,9 @@ static void ena_keep_alive_wd(void *adapter_data, rx_drops = ((u64)desc->rx_drops_high << 32) | desc->rx_drops_low; u64_stats_update_begin(&adapter->syncp); + /* These stats are accumulated by the device, so the counters indicate + * all drops since last reset. 
+ */ adapter->dev_stats.rx_drops = rx_drops; u64_stats_update_end(&adapter->syncp); } diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c index 12949f1ec1ea..145334fb18f4 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c @@ -690,6 +690,9 @@ int aq_nic_get_regs(struct aq_nic_s *self, struct ethtool_regs *regs, void *p) u32 *regs_buff = p; int err = 0; + if (unlikely(!self->aq_hw_ops->hw_get_regs)) + return -EOPNOTSUPP; + regs->version = 1; err = self->aq_hw_ops->hw_get_regs(self->aq_hw, @@ -704,6 +707,9 @@ err_exit: int aq_nic_get_regs_count(struct aq_nic_s *self) { + if (unlikely(!self->aq_hw_ops->hw_get_regs)) + return 0; + return self->aq_nic_cfg.aq_hw_caps->mac_regs_count; } diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c index 359a4d387185..9a0db70c1143 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c @@ -776,7 +776,7 @@ static int hw_atl_a0_hw_multicast_list_set(struct aq_hw_s *self, int err = 0; if (count > (HW_ATL_A0_MAC_MAX - HW_ATL_A0_MAC_MIN)) { - err = EBADRQC; + err = -EBADRQC; goto err_exit; } for (self->aq_nic_cfg->mc_list_count = 0U; diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c index 6f340695e6bd..774e48b3f904 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c @@ -1597,7 +1597,7 @@ void hw_atl_rpfl3l4_ipv6_src_addr_set(struct aq_hw_s *aq_hw, u8 location, for (i = 0; i < 4; ++i) aq_hw_write_reg(aq_hw, HW_ATL_RPF_L3_SRCA_ADR(location + i), - ipv6_src[i]); + ipv6_src[3 - i]); } void hw_atl_rpfl3l4_ipv6_dest_addr_set(struct aq_hw_s *aq_hw, u8 location, @@ -1608,7 +1608,7 @@ void hw_atl_rpfl3l4_ipv6_dest_addr_set(struct aq_hw_s *aq_hw, u8 location, for (i = 0; i < 4; ++i) aq_hw_write_reg(aq_hw, HW_ATL_RPF_L3_DSTA_ADR(location + i), - ipv6_dest[i]); + ipv6_dest[3 - i]); } u32 hw_atl_sem_ram_get(struct aq_hw_s *self) diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h index 35887ad89025..dd8d591404be 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h @@ -2564,7 +2564,7 @@ */ /* Register address for bitfield pif_rpf_l3_da0_i[31:0] */ -#define HW_ATL_RPF_L3_DSTA_ADR(location) (0x000053B0 + (location) * 0x4) +#define HW_ATL_RPF_L3_DSTA_ADR(filter) (0x000053D0 + (filter) * 0x4) /* Bitmask for bitfield l3_da0[1F:0] */ #define HW_ATL_RPF_L3_DSTA_MSK 0xFFFFFFFFu /* Inverted bitmask for bitfield l3_da0[1F:0] */ diff --git a/drivers/net/ethernet/arc/emac_mdio.c b/drivers/net/ethernet/arc/emac_mdio.c index 0187dbf3b87d..54cdafdd067d 100644 --- a/drivers/net/ethernet/arc/emac_mdio.c +++ b/drivers/net/ethernet/arc/emac_mdio.c @@ -153,6 +153,7 @@ int arc_mdio_probe(struct arc_emac_priv *priv) if (IS_ERR(data->reset_gpio)) { error = PTR_ERR(data->reset_gpio); dev_err(priv->dev, "Failed to request gpio: %d\n", error); + mdiobus_free(bus); return error; } diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c index 1b1a09095c0d..78e20f53677d 100644 --- a/drivers/net/ethernet/atheros/ag71xx.c +++ 
b/drivers/net/ethernet/atheros/ag71xx.c @@ -553,7 +553,8 @@ static int ag71xx_mdio_probe(struct ag71xx *ag) ag->mdio_reset = of_reset_control_get_exclusive(np, "mdio"); if (IS_ERR(ag->mdio_reset)) { netif_err(ag, probe, ndev, "Failed to get reset mdio.\n"); - return PTR_ERR(ag->mdio_reset); + err = PTR_ERR(ag->mdio_reset); + goto mdio_err_put_clk; } mii_bus->name = "ag71xx_mdio"; diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c index d4bbcdfd691a..aa693c8e285a 100644 --- a/drivers/net/ethernet/atheros/alx/main.c +++ b/drivers/net/ethernet/atheros/alx/main.c @@ -1249,8 +1249,12 @@ out_disable_adv_intr: static void __alx_stop(struct alx_priv *alx) { - alx_halt(alx); alx_free_irq(alx); + + cancel_work_sync(&alx->link_check_wk); + cancel_work_sync(&alx->reset_wk); + + alx_halt(alx); alx_free_rings(alx); alx_free_napis(alx); } @@ -1858,9 +1862,6 @@ static void alx_remove(struct pci_dev *pdev) struct alx_priv *alx = pci_get_drvdata(pdev); struct alx_hw *hw = &alx->hw; - cancel_work_sync(&alx->link_check_wk); - cancel_work_sync(&alx->reset_wk); - /* restore permanent mac address */ alx_set_macaddr(hw, hw->perm_addr); diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index 4dfdb5a58025..71eb8914e620 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -2453,8 +2453,10 @@ static int bcm_sysport_probe(struct platform_device *pdev) priv->tx_rings = devm_kcalloc(&pdev->dev, txq, sizeof(struct bcm_sysport_tx_ring), GFP_KERNEL); - if (!priv->tx_rings) - return -ENOMEM; + if (!priv->tx_rings) { + ret = -ENOMEM; + goto err_free_netdev; + } priv->is_lite = params->is_lite; priv->num_rx_desc_words = params->num_rx_desc_words; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 63ee0c49be7c..4030020f92be 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -1143,6 +1143,9 @@ static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, static void bnxt_queue_fw_reset_work(struct bnxt *bp, unsigned long delay) { + if (!(test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))) + return; + if (BNXT_PF(bp)) queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay); else @@ -1159,10 +1162,12 @@ static void bnxt_queue_sp_work(struct bnxt *bp) static void bnxt_cancel_sp_work(struct bnxt *bp) { - if (BNXT_PF(bp)) + if (BNXT_PF(bp)) { flush_workqueue(bnxt_pf_wq); - else + } else { cancel_work_sync(&bp->sp_task); + cancel_delayed_work_sync(&bp->fw_reset_task); + } } static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr) @@ -3423,7 +3428,7 @@ void bnxt_set_tpa_flags(struct bnxt *bp) */ void bnxt_set_ring_params(struct bnxt *bp) { - u32 ring_size, rx_size, rx_space; + u32 ring_size, rx_size, rx_space, max_rx_cmpl; u32 agg_factor = 0, agg_ring_size = 0; /* 8 for CRC and VLAN */ @@ -3479,7 +3484,15 @@ void bnxt_set_ring_params(struct bnxt *bp) bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT); bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1; - ring_size = bp->rx_ring_size * (2 + agg_factor) + bp->tx_ring_size; + max_rx_cmpl = bp->rx_ring_size; + /* MAX TPA needs to be added because TPA_START completions are + * immediately recycled, so the TPA completions are not bound by + * the RX ring size. 
+ */ + if (bp->flags & BNXT_FLAG_TPA) + max_rx_cmpl += bp->max_tpa; + /* RX and TPA completions are 32-byte, all others are 16-byte */ + ring_size = max_rx_cmpl * 2 + agg_ring_size + bp->tx_ring_size; bp->cp_ring_size = ring_size; bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT); @@ -8925,16 +8938,19 @@ static ssize_t bnxt_show_temp(struct device *dev, struct hwrm_temp_monitor_query_input req = {0}; struct hwrm_temp_monitor_query_output *resp; struct bnxt *bp = dev_get_drvdata(dev); - u32 temp = 0; + u32 len = 0; resp = bp->hwrm_cmd_resp_addr; bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TEMP_MONITOR_QUERY, -1, -1); mutex_lock(&bp->hwrm_cmd_lock); - if (!_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT)) - temp = resp->temp * 1000; /* display millidegree */ + if (!_hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT)) + len = sprintf(buf, "%u\n", resp->temp * 1000); /* display millidegree */ mutex_unlock(&bp->hwrm_cmd_lock); - return sprintf(buf, "%u\n", temp); + if (len) + return len; + + return sprintf(buf, "unknown\n"); } static SENSOR_DEVICE_ATTR(temp1_input, 0444, bnxt_show_temp, NULL, 0); @@ -9116,15 +9132,15 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) } } - bnxt_enable_napi(bp); - bnxt_debug_dev_init(bp); - rc = bnxt_init_nic(bp, irq_re_init); if (rc) { netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc); - goto open_err; + goto open_err_irq; } + bnxt_enable_napi(bp); + bnxt_debug_dev_init(bp); + if (link_re_init) { mutex_lock(&bp->link_lock); rc = bnxt_update_phy_setting(bp); @@ -9155,10 +9171,6 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) bnxt_vf_reps_open(bp); return 0; -open_err: - bnxt_debug_dev_exit(bp); - bnxt_disable_napi(bp); - open_err_irq: bnxt_del_napi(bp); @@ -9992,7 +10004,7 @@ static void bnxt_timer(struct timer_list *t) struct bnxt *bp = from_timer(bp, t, timer); struct net_device *dev = bp->dev; - if (!netif_running(dev)) + if (!netif_running(dev) || !test_bit(BNXT_STATE_OPEN, &bp->state)) return; if (atomic_read(&bp->intr_sem) != 0) @@ -11378,6 +11390,7 @@ static void bnxt_remove_one(struct pci_dev *pdev) unregister_netdev(dev); bnxt_dl_unregister(bp); bnxt_shutdown_tc(bp); + clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); bnxt_cancel_sp_work(bp); bp->sp_event = 0; @@ -11892,6 +11905,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) (long)pci_resource_start(pdev, 0), dev->dev_addr); pcie_print_link_status(pdev); + pci_save_state(pdev); return 0; init_err_cleanup: @@ -12058,6 +12072,8 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev) "Cannot re-enable PCI device after reset.\n"); } else { pci_set_master(pdev); + pci_restore_state(pdev); + pci_save_state(pdev); err = bnxt_hwrm_func_reset(bp); if (!err && netif_running(netdev)) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index fb1ab58da9fa..fd01bcc8e28d 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -769,7 +769,7 @@ static void bnxt_get_channels(struct net_device *dev, int max_tx_sch_inputs; /* Get the most up-to-date max_tx_sch_inputs. 
*/ - if (BNXT_NEW_RM(bp)) + if (netif_running(dev) && BNXT_NEW_RM(bp)) bnxt_hwrm_func_resc_qcaps(bp, false); max_tx_sch_inputs = hw_resc->max_tx_sch_inputs; @@ -1688,8 +1688,11 @@ static int bnxt_set_pauseparam(struct net_device *dev, if (epause->tx_pause) link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_TX; - if (netif_running(dev)) + if (netif_running(dev)) { + mutex_lock(&bp->link_lock); rc = bnxt_hwrm_set_pause(bp); + mutex_unlock(&bp->link_lock); + } return rc; } @@ -2158,6 +2161,9 @@ static int bnxt_get_nvram_directory(struct net_device *dev, u32 len, u8 *data) if (rc != 0) return rc; + if (!dir_entries || !entry_length) + return -EIO; + /* Insert 2 bytes of directory info (count and size of entries) */ if (len < 2) return -EINVAL; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c index 1046b22220a3..452be9749827 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c @@ -398,6 +398,7 @@ static void bnxt_free_vf_resources(struct bnxt *bp) } } + bp->pf.active_vfs = 0; kfree(bp->pf.vf); bp->pf.vf = NULL; } @@ -833,7 +834,6 @@ void bnxt_sriov_disable(struct bnxt *bp) bnxt_free_vf_resources(bp); - bp->pf.active_vfs = 0; /* Reclaim all resources for the PF. */ rtnl_lock(); bnxt_restore_pf_fw_resources(bp); diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index 6f01f4e03cef..03f82786c0b9 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -69,6 +69,9 @@ #define GENET_RDMA_REG_OFF (priv->hw_params->rdma_offset + \ TOTAL_DESC * DMA_DESC_SIZE) +/* Forward declarations */ +static void bcmgenet_set_rx_mode(struct net_device *dev); + static inline void bcmgenet_writel(u32 value, void __iomem *offset) { /* MIPS chips strapped for BE will automagically configure the @@ -1588,11 +1591,6 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev) goto out; } - if (skb_padto(skb, ETH_ZLEN)) { - ret = NETDEV_TX_OK; - goto out; - } - /* Retain how many bytes will be sent on the wire, without TSB inserted * by transmit checksum offload */ @@ -1641,6 +1639,9 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev) len_stat = (size << DMA_BUFLENGTH_SHIFT) | (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT); + /* Note: if we ever change from DMA_TX_APPEND_CRC below we + * will need to restore software padding of "runt" packets + */ if (!i) { len_stat |= DMA_TX_APPEND_CRC | DMA_SOP; if (skb->ip_summed == CHECKSUM_PARTIAL) @@ -2852,6 +2853,7 @@ static void bcmgenet_netif_start(struct net_device *dev) struct bcmgenet_priv *priv = netdev_priv(dev); /* Start the network engine */ + bcmgenet_set_rx_mode(dev); bcmgenet_enable_rx_napi(priv); umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, true); diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h index dbc69d8fa05f..5b7c2f9241d0 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h @@ -14,6 +14,7 @@ #include #include #include +#include /* total number of Buffer Descriptors, same for Rx/Tx */ #define TOTAL_DESC 256 @@ -674,6 +675,7 @@ struct bcmgenet_priv { /* WOL */ struct clk *clk_wol; u32 wolopts; + u8 sopass[SOPASS_MAX]; struct bcmgenet_mib_counters mib; diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c 
index ea20d94bd050..a41f82379369 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c @@ -41,18 +41,13 @@ void bcmgenet_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct bcmgenet_priv *priv = netdev_priv(dev); - u32 reg; wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE; wol->wolopts = priv->wolopts; memset(wol->sopass, 0, sizeof(wol->sopass)); - if (wol->wolopts & WAKE_MAGICSECURE) { - reg = bcmgenet_umac_readl(priv, UMAC_MPD_PW_MS); - put_unaligned_be16(reg, &wol->sopass[0]); - reg = bcmgenet_umac_readl(priv, UMAC_MPD_PW_LS); - put_unaligned_be32(reg, &wol->sopass[2]); - } + if (wol->wolopts & WAKE_MAGICSECURE) + memcpy(wol->sopass, priv->sopass, sizeof(priv->sopass)); } /* ethtool function - set WOL (Wake on LAN) settings. @@ -62,7 +57,6 @@ int bcmgenet_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct bcmgenet_priv *priv = netdev_priv(dev); struct device *kdev = &priv->pdev->dev; - u32 reg; if (!device_can_wakeup(kdev)) return -ENOTSUPP; @@ -70,17 +64,8 @@ int bcmgenet_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) if (wol->wolopts & ~(WAKE_MAGIC | WAKE_MAGICSECURE)) return -EINVAL; - reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL); - if (wol->wolopts & WAKE_MAGICSECURE) { - bcmgenet_umac_writel(priv, get_unaligned_be16(&wol->sopass[0]), - UMAC_MPD_PW_MS); - bcmgenet_umac_writel(priv, get_unaligned_be32(&wol->sopass[2]), - UMAC_MPD_PW_LS); - reg |= MPD_PW_EN; - } else { - reg &= ~MPD_PW_EN; - } - bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL); + if (wol->wolopts & WAKE_MAGICSECURE) + memcpy(priv->sopass, wol->sopass, sizeof(priv->sopass)); /* Flag the device and relevant IRQ as wakeup capable */ if (wol->wolopts) { @@ -120,6 +105,14 @@ static int bcmgenet_poll_wol_status(struct bcmgenet_priv *priv) return retries; } +static void bcmgenet_set_mpd_password(struct bcmgenet_priv *priv) +{ + bcmgenet_umac_writel(priv, get_unaligned_be16(&priv->sopass[0]), + UMAC_MPD_PW_MS); + bcmgenet_umac_writel(priv, get_unaligned_be32(&priv->sopass[2]), + UMAC_MPD_PW_LS); +} + int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv, enum bcmgenet_power_mode mode) { @@ -140,13 +133,17 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv, reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL); reg |= MPD_EN; + if (priv->wolopts & WAKE_MAGICSECURE) { + bcmgenet_set_mpd_password(priv); + reg |= MPD_PW_EN; + } bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL); /* Do not leave UniMAC in MPD mode only */ retries = bcmgenet_poll_wol_status(priv); if (retries < 0) { reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL); - reg &= ~MPD_EN; + reg &= ~(MPD_EN | MPD_PW_EN); bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL); return retries; } @@ -185,7 +182,7 @@ void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv, reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL); if (!(reg & MPD_EN)) return; /* already powered up so skip the rest */ - reg &= ~MPD_EN; + reg &= ~(MPD_EN | MPD_PW_EN); bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL); /* Disable CRC Forward */ diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index ca3aa1250dd1..70bd79dc43f2 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -7227,8 +7227,8 @@ static inline void tg3_reset_task_schedule(struct tg3 *tp) static inline void tg3_reset_task_cancel(struct tg3 *tp) { - cancel_work_sync(&tp->reset_task); - tg3_flag_clear(tp, RESET_TASK_PENDING); + if 
(test_and_clear_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags)) + cancel_work_sync(&tp->reset_task); tg3_flag_clear(tp, TX_RECOVERY_PENDING); } @@ -11219,18 +11219,27 @@ static void tg3_reset_task(struct work_struct *work) tg3_halt(tp, RESET_KIND_SHUTDOWN, 0); err = tg3_init_hw(tp, true); - if (err) + if (err) { + tg3_full_unlock(tp); + tp->irq_sync = 0; + tg3_napi_enable(tp); + /* Clear this flag so that tg3_reset_task_cancel() will not + * call cancel_work_sync() and wait forever. + */ + tg3_flag_clear(tp, RESET_TASK_PENDING); + dev_close(tp->dev); goto out; + } tg3_netif_start(tp); -out: tg3_full_unlock(tp); if (!err) tg3_phy_start(tp); tg3_flag_clear(tp, RESET_TASK_PENDING); +out: rtnl_unlock(); } @@ -18176,8 +18185,8 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev, rtnl_lock(); - /* We probably don't have netdev yet */ - if (!netdev || !netif_running(netdev)) + /* Could be second call or maybe we don't have netdev yet */ + if (!netdev || tp->pcierr_recovery || !netif_running(netdev)) goto done; /* We needn't recover from permanent error */ diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index 1ec19d9fab00..a5c4d4d66df3 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -3708,7 +3708,7 @@ static int at91ether_open(struct net_device *dev) ret = at91ether_start(dev); if (ret) - return ret; + goto pm_exit; /* Enable MAC interrupts */ macb_writel(lp, IER, MACB_BIT(RCOMP) | @@ -3725,6 +3725,10 @@ static int at91ether_open(struct net_device *dev) netif_start_queue(dev); return 0; + +pm_exit: + pm_runtime_put_sync(&lp->pdev->dev); + return ret; } /* Close the interface */ @@ -4260,7 +4264,7 @@ static int macb_probe(struct platform_device *pdev) bp->wol = 0; if (of_get_property(np, "magic-packet", NULL)) bp->wol |= MACB_WOL_HAS_MAGIC_PACKET; - device_init_wakeup(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET); + device_set_wakeup_capable(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET); spin_lock_init(&bp->lock); @@ -4453,7 +4457,8 @@ static int __maybe_unused macb_suspend(struct device *dev) netif_carrier_off(netdev); if (bp->ptp_info) bp->ptp_info->ptp_remove(netdev); - pm_runtime_force_suspend(dev); + if (!device_may_wakeup(dev)) + pm_runtime_force_suspend(dev); return 0; } @@ -4468,7 +4473,8 @@ static int __maybe_unused macb_resume(struct device *dev) if (!netif_running(netdev)) return 0; - pm_runtime_force_resume(dev); + if (!device_may_wakeup(dev)) + pm_runtime_force_resume(dev); if (bp->wol & MACB_WOL_ENABLED) { macb_writel(bp, IDR, MACB_BIT(WOL)); @@ -4507,7 +4513,7 @@ static int __maybe_unused macb_runtime_suspend(struct device *dev) struct net_device *netdev = dev_get_drvdata(dev); struct macb *bp = netdev_priv(netdev); - if (!(device_may_wakeup(&bp->dev->dev))) { + if (!(device_may_wakeup(dev))) { clk_disable_unprepare(bp->tx_clk); clk_disable_unprepare(bp->hclk); clk_disable_unprepare(bp->pclk); @@ -4523,7 +4529,7 @@ static int __maybe_unused macb_runtime_resume(struct device *dev) struct net_device *netdev = dev_get_drvdata(dev); struct macb *bp = netdev_priv(netdev); - if (!(device_may_wakeup(&bp->dev->dev))) { + if (!(device_may_wakeup(dev))) { clk_prepare_enable(bp->pclk); clk_prepare_enable(bp->hclk); clk_prepare_enable(bp->tx_clk); diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c index 43d11c38b38a..4cddd628d41b 100644 --- 
a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c +++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c @@ -1167,7 +1167,7 @@ static int cn23xx_get_pf_num(struct octeon_device *oct) oct->pf_num = ((fdl_bit >> CN23XX_PCIE_SRIOV_FDL_BIT_POS) & CN23XX_PCIE_SRIOV_FDL_MASK); } else { - ret = EINVAL; + ret = -EINVAL; /* Under some virtual environments, extended PCI regs are * inaccessible, in which case the above read will have failed. diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c index cdd7e5da4a74..d375e438d805 100644 --- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c +++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c @@ -235,6 +235,11 @@ static void octeon_mgmt_rx_fill_ring(struct net_device *netdev) /* Put it in the ring. */ p->rx_ring[p->rx_next_fill] = re.d64; + /* Make sure there is no reorder of filling the ring and ringing + * the bell + */ + wmb(); + dma_sync_single_for_device(p->dev, p->rx_ring_handle, ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE), DMA_BIDIRECTIONAL); diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c index 40a44dcb3d9b..5c45c0c6dd23 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c @@ -2047,11 +2047,11 @@ static void nicvf_set_rx_mode_task(struct work_struct *work_arg) /* Save message data locally to prevent them from * being overwritten by next ndo_set_rx_mode call(). */ - spin_lock(&nic->rx_mode_wq_lock); + spin_lock_bh(&nic->rx_mode_wq_lock); mode = vf_work->mode; mc = vf_work->mc; vf_work->mc = NULL; - spin_unlock(&nic->rx_mode_wq_lock); + spin_unlock_bh(&nic->rx_mode_wq_lock); __nicvf_set_rx_mode_task(mode, mc, nic); } @@ -2185,6 +2185,9 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) nic->max_queues *= 2; nic->ptp_clock = ptp_clock; + /* Initialize mutex that serializes usage of VF's mailbox */ + mutex_init(&nic->rx_mode_mtx); + /* MAP VF's configuration registers */ nic->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0); if (!nic->reg_base) { @@ -2261,7 +2264,6 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) INIT_WORK(&nic->rx_mode_work.work, nicvf_set_rx_mode_task); spin_lock_init(&nic->rx_mode_wq_lock); - mutex_init(&nic->rx_mode_mtx); err = register_netdev(netdev); if (err) { diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c index 7bcdce182ee5..e26ae298a080 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c @@ -1980,7 +1980,6 @@ int cudbg_collect_dump_context(struct cudbg_init *pdbg_init, u8 mem_type[CTXT_INGRESS + 1] = { 0 }; struct cudbg_buffer temp_buff = { 0 }; struct cudbg_ch_cntxt *buff; - u64 *dst_off, *src_off; u8 *ctx_buf; u8 i, k; int rc; @@ -2049,8 +2048,11 @@ int cudbg_collect_dump_context(struct cudbg_init *pdbg_init, } for (j = 0; j < max_ctx_qid; j++) { + __be64 *dst_off; + u64 *src_off; + src_off = (u64 *)(ctx_buf + j * SGE_CTXT_SIZE); - dst_off = (u64 *)buff->data; + dst_off = (__be64 *)buff->data; /* The data is stored in 64-bit cpu order. Convert it * to big endian before parsing. 
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c index 43b0f8c57da7..f459313357c7 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c @@ -165,6 +165,9 @@ static void set_nat_params(struct adapter *adap, struct filter_entry *f, unsigned int tid, bool dip, bool sip, bool dp, bool sp) { + u8 *nat_lp = (u8 *)&f->fs.nat_lport; + u8 *nat_fp = (u8 *)&f->fs.nat_fport; + if (dip) { if (f->fs.type) { set_tcb_field(adap, f, tid, TCB_SND_UNA_RAW_W, @@ -236,8 +239,9 @@ static void set_nat_params(struct adapter *adap, struct filter_entry *f, } set_tcb_field(adap, f, tid, TCB_PDU_HDR_LEN_W, WORD_MASK, - (dp ? f->fs.nat_lport : 0) | - (sp ? f->fs.nat_fport << 16 : 0), 1); + (dp ? (nat_lp[1] | nat_lp[0] << 8) : 0) | + (sp ? (nat_fp[1] << 16 | nat_fp[0] << 24) : 0), + 1); } /* Validate filter spec against configuration done on the card. */ @@ -656,6 +660,9 @@ int set_filter_wr(struct adapter *adapter, int fidx) fwr->fpm = htons(f->fs.mask.fport); if (adapter->params.filter2_wr_support) { + u8 *nat_lp = (u8 *)&f->fs.nat_lport; + u8 *nat_fp = (u8 *)&f->fs.nat_fport; + fwr->natmode_to_ulp_type = FW_FILTER2_WR_ULP_TYPE_V(f->fs.nat_mode ? ULP_MODE_TCPDDP : @@ -663,8 +670,8 @@ int set_filter_wr(struct adapter *adapter, int fidx) FW_FILTER2_WR_NATMODE_V(f->fs.nat_mode); memcpy(fwr->newlip, f->fs.nat_lip, sizeof(fwr->newlip)); memcpy(fwr->newfip, f->fs.nat_fip, sizeof(fwr->newfip)); - fwr->newlport = htons(f->fs.nat_lport); - fwr->newfport = htons(f->fs.nat_fport); + fwr->newlport = htons(nat_lp[1] | nat_lp[0] << 8); + fwr->newfport = htons(nat_fp[1] | nat_fp[0] << 8); } /* Mark the filter as "pending" and ship off the Filter Work Request. @@ -832,16 +839,16 @@ static bool is_addr_all_mask(u8 *ipmask, int family) struct in_addr *addr; addr = (struct in_addr *)ipmask; - if (addr->s_addr == 0xffffffff) + if (addr->s_addr == htonl(0xffffffff)) return true; } else if (family == AF_INET6) { struct in6_addr *addr6; addr6 = (struct in6_addr *)ipmask; - if (addr6->s6_addr32[0] == 0xffffffff && - addr6->s6_addr32[1] == 0xffffffff && - addr6->s6_addr32[2] == 0xffffffff && - addr6->s6_addr32[3] == 0xffffffff) + if (addr6->s6_addr32[0] == htonl(0xffffffff) && + addr6->s6_addr32[1] == htonl(0xffffffff) && + addr6->s6_addr32[2] == htonl(0xffffffff) && + addr6->s6_addr32[3] == htonl(0xffffffff)) return true; } return false; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 069a51847885..deb1c1f30107 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -2504,7 +2504,7 @@ int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid, /* Clear out filter specifications */ memset(&f->fs, 0, sizeof(struct ch_filter_specification)); - f->fs.val.lport = cpu_to_be16(sport); + f->fs.val.lport = be16_to_cpu(sport); f->fs.mask.lport = ~0; val = (u8 *)&sip; if ((val[0] | val[1] | val[2] | val[3]) != 0) { diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c index e447976bdd3e..16a939f9b04d 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c @@ -58,10 +58,6 @@ static struct ch_tc_pedit_fields pedits[] = { PEDIT_FIELDS(IP6_, DST_63_32, 4, nat_lip, 4), PEDIT_FIELDS(IP6_, DST_95_64, 4, nat_lip, 8), PEDIT_FIELDS(IP6_, DST_127_96, 4, 
nat_lip, 12), - PEDIT_FIELDS(TCP_, SPORT, 2, nat_fport, 0), - PEDIT_FIELDS(TCP_, DPORT, 2, nat_lport, 0), - PEDIT_FIELDS(UDP_, SPORT, 2, nat_fport, 0), - PEDIT_FIELDS(UDP_, DPORT, 2, nat_lport, 0), }; static struct ch_tc_flower_entry *allocate_flower_entry(void) @@ -156,14 +152,14 @@ static void cxgb4_process_flow_match(struct net_device *dev, struct flow_match_ports match; flow_rule_match_ports(rule, &match); - fs->val.lport = cpu_to_be16(match.key->dst); - fs->mask.lport = cpu_to_be16(match.mask->dst); - fs->val.fport = cpu_to_be16(match.key->src); - fs->mask.fport = cpu_to_be16(match.mask->src); + fs->val.lport = be16_to_cpu(match.key->dst); + fs->mask.lport = be16_to_cpu(match.mask->dst); + fs->val.fport = be16_to_cpu(match.key->src); + fs->mask.fport = be16_to_cpu(match.mask->src); /* also initialize nat_lport/fport to same values */ - fs->nat_lport = cpu_to_be16(match.key->dst); - fs->nat_fport = cpu_to_be16(match.key->src); + fs->nat_lport = fs->val.lport; + fs->nat_fport = fs->val.fport; } if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) { @@ -354,12 +350,9 @@ static void process_pedit_field(struct ch_filter_specification *fs, u32 val, switch (offset) { case PEDIT_TCP_SPORT_DPORT: if (~mask & PEDIT_TCP_UDP_SPORT_MASK) - offload_pedit(fs, cpu_to_be32(val) >> 16, - cpu_to_be32(mask) >> 16, - TCP_SPORT); + fs->nat_fport = val; else - offload_pedit(fs, cpu_to_be32(val), - cpu_to_be32(mask), TCP_DPORT); + fs->nat_lport = val >> 16; } fs->nat_mode = NAT_MODE_ALL; break; @@ -367,12 +360,9 @@ static void process_pedit_field(struct ch_filter_specification *fs, u32 val, switch (offset) { case PEDIT_UDP_SPORT_DPORT: if (~mask & PEDIT_TCP_UDP_SPORT_MASK) - offload_pedit(fs, cpu_to_be32(val) >> 16, - cpu_to_be32(mask) >> 16, - UDP_SPORT); + fs->nat_fport = val; else - offload_pedit(fs, cpu_to_be32(val), - cpu_to_be32(mask), UDP_DPORT); + fs->nat_lport = val >> 16; } fs->nat_mode = NAT_MODE_ALL; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c index 02fc63fa7f25..b3a342561a96 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c @@ -47,7 +47,7 @@ static int fill_match_fields(struct adapter *adap, bool next_header) { unsigned int i, j; - u32 val, mask; + __be32 val, mask; int off, err; bool found; @@ -216,7 +216,7 @@ int cxgb4_config_knode(struct net_device *dev, struct tc_cls_u32_offload *cls) const struct cxgb4_next_header *next; bool found = false; unsigned int i, j; - u32 val, mask; + __be32 val, mask; int off; if (t->table[link_uhtid - 1].link_handle) { @@ -230,10 +230,10 @@ int cxgb4_config_knode(struct net_device *dev, struct tc_cls_u32_offload *cls) /* Try to find matches that allow jumps to next header. */ for (i = 0; next[i].jump; i++) { - if (next[i].offoff != cls->knode.sel->offoff || - next[i].shift != cls->knode.sel->offshift || - next[i].mask != cls->knode.sel->offmask || - next[i].offset != cls->knode.sel->off) + if (next[i].sel.offoff != cls->knode.sel->offoff || + next[i].sel.offshift != cls->knode.sel->offshift || + next[i].sel.offmask != cls->knode.sel->offmask || + next[i].sel.off != cls->knode.sel->off) continue; /* Found a possible candidate. 
Find a key that @@ -245,9 +245,9 @@ int cxgb4_config_knode(struct net_device *dev, struct tc_cls_u32_offload *cls) val = cls->knode.sel->keys[j].val; mask = cls->knode.sel->keys[j].mask; - if (next[i].match_off == off && - next[i].match_val == val && - next[i].match_mask == mask) { + if (next[i].key.off == off && + next[i].key.val == val && + next[i].key.mask == mask) { found = true; break; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h index a4b99edcc339..141085e159e5 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h @@ -38,12 +38,12 @@ struct cxgb4_match_field { int off; /* Offset from the beginning of the header to match */ /* Fill the value/mask pair in the spec if matched */ - int (*val)(struct ch_filter_specification *f, u32 val, u32 mask); + int (*val)(struct ch_filter_specification *f, __be32 val, __be32 mask); }; /* IPv4 match fields */ static inline int cxgb4_fill_ipv4_tos(struct ch_filter_specification *f, - u32 val, u32 mask) + __be32 val, __be32 mask) { f->val.tos = (ntohl(val) >> 16) & 0x000000FF; f->mask.tos = (ntohl(mask) >> 16) & 0x000000FF; @@ -52,7 +52,7 @@ static inline int cxgb4_fill_ipv4_tos(struct ch_filter_specification *f, } static inline int cxgb4_fill_ipv4_frag(struct ch_filter_specification *f, - u32 val, u32 mask) + __be32 val, __be32 mask) { u32 mask_val; u8 frag_val; @@ -74,7 +74,7 @@ static inline int cxgb4_fill_ipv4_frag(struct ch_filter_specification *f, } static inline int cxgb4_fill_ipv4_proto(struct ch_filter_specification *f, - u32 val, u32 mask) + __be32 val, __be32 mask) { f->val.proto = (ntohl(val) >> 16) & 0x000000FF; f->mask.proto = (ntohl(mask) >> 16) & 0x000000FF; @@ -83,7 +83,7 @@ static inline int cxgb4_fill_ipv4_proto(struct ch_filter_specification *f, } static inline int cxgb4_fill_ipv4_src_ip(struct ch_filter_specification *f, - u32 val, u32 mask) + __be32 val, __be32 mask) { memcpy(&f->val.fip[0], &val, sizeof(u32)); memcpy(&f->mask.fip[0], &mask, sizeof(u32)); @@ -92,7 +92,7 @@ static inline int cxgb4_fill_ipv4_src_ip(struct ch_filter_specification *f, } static inline int cxgb4_fill_ipv4_dst_ip(struct ch_filter_specification *f, - u32 val, u32 mask) + __be32 val, __be32 mask) { memcpy(&f->val.lip[0], &val, sizeof(u32)); memcpy(&f->mask.lip[0], &mask, sizeof(u32)); @@ -111,7 +111,7 @@ static const struct cxgb4_match_field cxgb4_ipv4_fields[] = { /* IPv6 match fields */ static inline int cxgb4_fill_ipv6_tos(struct ch_filter_specification *f, - u32 val, u32 mask) + __be32 val, __be32 mask) { f->val.tos = (ntohl(val) >> 20) & 0x000000FF; f->mask.tos = (ntohl(mask) >> 20) & 0x000000FF; @@ -120,7 +120,7 @@ static inline int cxgb4_fill_ipv6_tos(struct ch_filter_specification *f, } static inline int cxgb4_fill_ipv6_proto(struct ch_filter_specification *f, - u32 val, u32 mask) + __be32 val, __be32 mask) { f->val.proto = (ntohl(val) >> 8) & 0x000000FF; f->mask.proto = (ntohl(mask) >> 8) & 0x000000FF; @@ -129,7 +129,7 @@ static inline int cxgb4_fill_ipv6_proto(struct ch_filter_specification *f, } static inline int cxgb4_fill_ipv6_src_ip0(struct ch_filter_specification *f, - u32 val, u32 mask) + __be32 val, __be32 mask) { memcpy(&f->val.fip[0], &val, sizeof(u32)); memcpy(&f->mask.fip[0], &mask, sizeof(u32)); @@ -138,7 +138,7 @@ static inline int cxgb4_fill_ipv6_src_ip0(struct ch_filter_specification *f, } static inline int cxgb4_fill_ipv6_src_ip1(struct ch_filter_specification *f, - u32 
val, u32 mask) + __be32 val, __be32 mask) { memcpy(&f->val.fip[4], &val, sizeof(u32)); memcpy(&f->mask.fip[4], &mask, sizeof(u32)); @@ -147,7 +147,7 @@ static inline int cxgb4_fill_ipv6_src_ip1(struct ch_filter_specification *f, } static inline int cxgb4_fill_ipv6_src_ip2(struct ch_filter_specification *f, - u32 val, u32 mask) + __be32 val, __be32 mask) { memcpy(&f->val.fip[8], &val, sizeof(u32)); memcpy(&f->mask.fip[8], &mask, sizeof(u32)); @@ -156,7 +156,7 @@ static inline int cxgb4_fill_ipv6_src_ip2(struct ch_filter_specification *f, } static inline int cxgb4_fill_ipv6_src_ip3(struct ch_filter_specification *f, - u32 val, u32 mask) + __be32 val, __be32 mask) { memcpy(&f->val.fip[12], &val, sizeof(u32)); memcpy(&f->mask.fip[12], &mask, sizeof(u32)); @@ -165,7 +165,7 @@ static inline int cxgb4_fill_ipv6_src_ip3(struct ch_filter_specification *f, } static inline int cxgb4_fill_ipv6_dst_ip0(struct ch_filter_specification *f, - u32 val, u32 mask) + __be32 val, __be32 mask) { memcpy(&f->val.lip[0], &val, sizeof(u32)); memcpy(&f->mask.lip[0], &mask, sizeof(u32)); @@ -174,7 +174,7 @@ static inline int cxgb4_fill_ipv6_dst_ip0(struct ch_filter_specification *f, } static inline int cxgb4_fill_ipv6_dst_ip1(struct ch_filter_specification *f, - u32 val, u32 mask) + __be32 val, __be32 mask) { memcpy(&f->val.lip[4], &val, sizeof(u32)); memcpy(&f->mask.lip[4], &mask, sizeof(u32)); @@ -183,7 +183,7 @@ static inline int cxgb4_fill_ipv6_dst_ip1(struct ch_filter_specification *f, } static inline int cxgb4_fill_ipv6_dst_ip2(struct ch_filter_specification *f, - u32 val, u32 mask) + __be32 val, __be32 mask) { memcpy(&f->val.lip[8], &val, sizeof(u32)); memcpy(&f->mask.lip[8], &mask, sizeof(u32)); @@ -192,7 +192,7 @@ static inline int cxgb4_fill_ipv6_dst_ip2(struct ch_filter_specification *f, } static inline int cxgb4_fill_ipv6_dst_ip3(struct ch_filter_specification *f, - u32 val, u32 mask) + __be32 val, __be32 mask) { memcpy(&f->val.lip[12], &val, sizeof(u32)); memcpy(&f->mask.lip[12], &mask, sizeof(u32)); @@ -216,7 +216,7 @@ static const struct cxgb4_match_field cxgb4_ipv6_fields[] = { /* TCP/UDP match */ static inline int cxgb4_fill_l4_ports(struct ch_filter_specification *f, - u32 val, u32 mask) + __be32 val, __be32 mask) { f->val.fport = ntohl(val) >> 16; f->mask.fport = ntohl(mask) >> 16; @@ -237,19 +237,13 @@ static const struct cxgb4_match_field cxgb4_udp_fields[] = { }; struct cxgb4_next_header { - unsigned int offset; /* Offset to next header */ - /* offset, shift, and mask added to offset above + /* Offset, shift, and mask added to beginning of the header * to get to next header. Useful when using a header * field's value to jump to next header such as IHL field * in IPv4 header. */ - unsigned int offoff; - u32 shift; - u32 mask; - /* match criteria to make this jump */ - unsigned int match_off; - u32 match_val; - u32 match_mask; + struct tc_u32_sel sel; + struct tc_u32_key key; /* location of jump to make */ const struct cxgb4_match_field *jump; }; @@ -258,26 +252,74 @@ struct cxgb4_next_header { * IPv4 header. 
*/ static const struct cxgb4_next_header cxgb4_ipv4_jumps[] = { - { .offset = 0, .offoff = 0, .shift = 6, .mask = 0xF, - .match_off = 8, .match_val = 0x600, .match_mask = 0xFF00, - .jump = cxgb4_tcp_fields }, - { .offset = 0, .offoff = 0, .shift = 6, .mask = 0xF, - .match_off = 8, .match_val = 0x1100, .match_mask = 0xFF00, - .jump = cxgb4_udp_fields }, - { .jump = NULL } + { + /* TCP Jump */ + .sel = { + .off = 0, + .offoff = 0, + .offshift = 6, + .offmask = cpu_to_be16(0x0f00), + }, + .key = { + .off = 8, + .val = cpu_to_be32(0x00060000), + .mask = cpu_to_be32(0x00ff0000), + }, + .jump = cxgb4_tcp_fields, + }, + { + /* UDP Jump */ + .sel = { + .off = 0, + .offoff = 0, + .offshift = 6, + .offmask = cpu_to_be16(0x0f00), + }, + .key = { + .off = 8, + .val = cpu_to_be32(0x00110000), + .mask = cpu_to_be32(0x00ff0000), + }, + .jump = cxgb4_udp_fields, + }, + { .jump = NULL }, }; /* Accept a rule with a jump directly past the 40 Bytes of IPv6 fixed header * to get to transport layer header. */ static const struct cxgb4_next_header cxgb4_ipv6_jumps[] = { - { .offset = 0x28, .offoff = 0, .shift = 0, .mask = 0, - .match_off = 4, .match_val = 0x60000, .match_mask = 0xFF0000, - .jump = cxgb4_tcp_fields }, - { .offset = 0x28, .offoff = 0, .shift = 0, .mask = 0, - .match_off = 4, .match_val = 0x110000, .match_mask = 0xFF0000, - .jump = cxgb4_udp_fields }, - { .jump = NULL } + { + /* TCP Jump */ + .sel = { + .off = 40, + .offoff = 0, + .offshift = 0, + .offmask = 0, + }, + .key = { + .off = 4, + .val = cpu_to_be32(0x00000600), + .mask = cpu_to_be32(0x0000ff00), + }, + .jump = cxgb4_tcp_fields, + }, + { + /* UDP Jump */ + .sel = { + .off = 40, + .offoff = 0, + .offshift = 0, + .offmask = 0, + }, + .key = { + .off = 4, + .val = cpu_to_be32(0x00001100), + .mask = cpu_to_be32(0x0000ff00), + }, + .jump = cxgb4_udp_fields, + }, + { .jump = NULL }, }; struct cxgb4_link { diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_thermal.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_thermal.c index 3de8a5e83b6c..d7fefdbf3e57 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_thermal.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_thermal.c @@ -62,6 +62,7 @@ static struct thermal_zone_device_ops cxgb4_thermal_ops = { int cxgb4_thermal_init(struct adapter *adap) { struct ch_thermal *ch_thermal = &adap->ch_thermal; + char ch_tz_name[THERMAL_NAME_LENGTH]; int num_trip = CXGB4_NUM_TRIPS; u32 param, val; int ret; @@ -82,7 +83,8 @@ int cxgb4_thermal_init(struct adapter *adap) ch_thermal->trip_type = THERMAL_TRIP_CRITICAL; } - ch_thermal->tzdev = thermal_zone_device_register("cxgb4", num_trip, + snprintf(ch_tz_name, sizeof(ch_tz_name), "cxgb4_%s", adap->name); + ch_thermal->tzdev = thermal_zone_device_register(ch_tz_name, num_trip, 0, adap, &cxgb4_thermal_ops, NULL, 0, 0); @@ -97,7 +99,9 @@ int cxgb4_thermal_init(struct adapter *adap) int cxgb4_thermal_remove(struct adapter *adap) { - if (adap->ch_thermal.tzdev) + if (adap->ch_thermal.tzdev) { thermal_zone_device_unregister(adap->ch_thermal.tzdev); + adap->ch_thermal.tzdev = NULL; + } return 0; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c index e6fe2870137b..a440c1cf0b61 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c +++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c @@ -506,41 +506,20 @@ u64 cxgb4_select_ntuple(struct net_device *dev, } EXPORT_SYMBOL(cxgb4_select_ntuple); -/* - * Called when address resolution fails for an L2T entry to handle packets - * on the arpq head. 
If a packet specifies a failure handler it is invoked, - * otherwise the packet is sent to the device. - */ -static void handle_failed_resolution(struct adapter *adap, struct l2t_entry *e) -{ - struct sk_buff *skb; - - while ((skb = __skb_dequeue(&e->arpq)) != NULL) { - const struct l2t_skb_cb *cb = L2T_SKB_CB(skb); - - spin_unlock(&e->lock); - if (cb->arp_err_handler) - cb->arp_err_handler(cb->handle, skb); - else - t4_ofld_send(adap, skb); - spin_lock(&e->lock); - } -} - /* * Called when the host's neighbor layer makes a change to some entry that is * loaded into the HW L2 table. */ void t4_l2t_update(struct adapter *adap, struct neighbour *neigh) { - struct l2t_entry *e; - struct sk_buff_head *arpq = NULL; - struct l2t_data *d = adap->l2t; unsigned int addr_len = neigh->tbl->key_len; u32 *addr = (u32 *) neigh->primary_key; - int ifidx = neigh->dev->ifindex; - int hash = addr_hash(d, addr, addr_len, ifidx); + int hash, ifidx = neigh->dev->ifindex; + struct sk_buff_head *arpq = NULL; + struct l2t_data *d = adap->l2t; + struct l2t_entry *e; + hash = addr_hash(d, addr, addr_len, ifidx); read_lock_bh(&d->lock); for (e = d->l2tab[hash].first; e; e = e->next) if (!addreq(e, addr) && e->ifindex == ifidx) { @@ -573,8 +552,25 @@ void t4_l2t_update(struct adapter *adap, struct neighbour *neigh) write_l2e(adap, e, 0); } - if (arpq) - handle_failed_resolution(adap, e); + if (arpq) { + struct sk_buff *skb; + + /* Called when address resolution fails for an L2T + * entry to handle packets on the arpq head. If a + * packet specifies a failure handler it is invoked, + * otherwise the packet is sent to the device. + */ + while ((skb = __skb_dequeue(&e->arpq)) != NULL) { + const struct l2t_skb_cb *cb = L2T_SKB_CB(skb); + + spin_unlock(&e->lock); + if (cb->arp_err_handler) + cb->arp_err_handler(cb->handle, skb); + else + t4_ofld_send(adap, skb); + spin_lock(&e->lock); + } + } spin_unlock_bh(&e->lock); } diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index 3a45ac8f0e01..049f1bbe27ab 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -2441,6 +2441,7 @@ static inline int uld_send(struct adapter *adap, struct sk_buff *skb, txq_info = adap->sge.uld_txq_info[tx_uld_type]; if (unlikely(!txq_info)) { WARN_ON(true); + kfree_skb(skb); return NET_XMIT_DROP; } @@ -2816,7 +2817,7 @@ static noinline int t4_systim_to_hwstamp(struct adapter *adapter, hwtstamps = skb_hwtstamps(skb); memset(hwtstamps, 0, sizeof(*hwtstamps)); - hwtstamps->hwtstamp = ns_to_ktime(be64_to_cpu(*((u64 *)data))); + hwtstamps->hwtstamp = ns_to_ktime(get_unaligned_be64(data)); return RX_PTP_PKT_SUC; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 31fcfc58e337..588b63473c47 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -3499,7 +3499,7 @@ int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info, drv_fw = &fw_info->fw_hdr; /* Read the header of the firmware on the card */ - ret = -t4_read_flash(adap, FLASH_FW_START, + ret = t4_read_flash(adap, FLASH_FW_START, sizeof(*card_fw) / sizeof(uint32_t), (uint32_t *)card_fw, 1); if (ret == 0) { @@ -3528,8 +3528,8 @@ int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info, should_install_fs_fw(adap, card_fw_usable, be32_to_cpu(fs_fw->fw_ver), be32_to_cpu(card_fw->fw_ver))) { - ret = -t4_fw_upgrade(adap, adap->mbox, fw_data, - fw_size, 0); + ret = t4_fw_upgrade(adap, adap->mbox, 
fw_data, + fw_size, 0); if (ret != 0) { dev_err(adap->pdev_dev, "failed to install firmware: %d\n", ret); @@ -3560,7 +3560,7 @@ int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info, FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c), FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k), FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k)); - ret = EINVAL; + ret = -EINVAL; goto bye; } diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c index 2814b96751b4..c9fb1ec625d8 100644 --- a/drivers/net/ethernet/cortina/gemini.c +++ b/drivers/net/ethernet/cortina/gemini.c @@ -2388,7 +2388,7 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev) dev_info(dev, "probe %s ID %d\n", dev_name(dev), id); - netdev = alloc_etherdev_mq(sizeof(*port), TX_QUEUE_NUM); + netdev = devm_alloc_etherdev_mqs(dev, sizeof(*port), TX_QUEUE_NUM, TX_QUEUE_NUM); if (!netdev) { dev_err(dev, "Can't allocate ethernet device #%d\n", id); return -ENOMEM; @@ -2445,7 +2445,8 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev) port->reset = devm_reset_control_get_exclusive(dev, NULL); if (IS_ERR(port->reset)) { dev_err(dev, "no reset\n"); - return PTR_ERR(port->reset); + ret = PTR_ERR(port->reset); + goto unprepare; } reset_control_reset(port->reset); usleep_range(100, 500); @@ -2501,23 +2502,24 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev) port_names[port->id], port); if (ret) - return ret; + goto unprepare; ret = register_netdev(netdev); - if (!ret) { - netdev_info(netdev, - "irq %d, DMA @ 0x%pap, GMAC @ 0x%pap\n", - port->irq, &dmares->start, - &gmacres->start); - ret = gmac_setup_phy(netdev); - if (ret) - netdev_info(netdev, - "PHY init failed, deferring to ifup time\n"); - return 0; - } + if (ret) + goto unprepare; - port->netdev = NULL; - free_netdev(netdev); + netdev_info(netdev, + "irq %d, DMA @ 0x%pap, GMAC @ 0x%pap\n", + port->irq, &dmares->start, + &gmacres->start); + ret = gmac_setup_phy(netdev); + if (ret) + netdev_info(netdev, + "PHY init failed, deferring to ifup time\n"); + return 0; + +unprepare: + clk_disable_unprepare(port->pclk); return ret; } @@ -2526,7 +2528,6 @@ static int gemini_ethernet_port_remove(struct platform_device *pdev) struct gemini_ethernet_port *port = platform_get_drvdata(pdev); gemini_port_remove(port); - free_netdev(port->netdev); return 0; } diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c index 92fd895889fa..a0b556cc4e06 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c @@ -3024,7 +3024,7 @@ static int dpaa_eth_probe(struct platform_device *pdev) } /* Do this here, so we can be verbose early */ - SET_NETDEV_DEV(net_dev, dev->parent); + SET_NETDEV_DEV(net_dev, dev); dev_set_drvdata(dev, net_dev); priv = netdev_priv(net_dev); diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c index db22a4150201..11ede221ef36 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c @@ -2121,7 +2121,7 @@ static int dpaa2_eth_setup_mqprio(struct net_device *net_dev, if (num_tc > dpaa2_eth_tc_count(priv)) { netdev_err(net_dev, "Max %d traffic classes supported\n", dpaa2_eth_tc_count(priv)); - return -EINVAL; + return -EOPNOTSUPP; } if (!num_tc) { @@ -2225,7 +2225,7 @@ close: free: fsl_mc_object_free(dpcon); - return NULL; + return ERR_PTR(err); } static void 
free_dpcon(struct dpaa2_eth_priv *priv, @@ -2249,8 +2249,8 @@ alloc_channel(struct dpaa2_eth_priv *priv) return NULL; channel->dpcon = setup_dpcon(priv); - if (IS_ERR_OR_NULL(channel->dpcon)) { - err = PTR_ERR_OR_ZERO(channel->dpcon); + if (IS_ERR(channel->dpcon)) { + err = PTR_ERR(channel->dpcon); goto err_setup; } diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c index 42746a9a5a76..7db1cc1c3d3e 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc.c +++ b/drivers/net/ethernet/freescale/enetc/enetc.c @@ -260,7 +260,7 @@ static irqreturn_t enetc_msix(int irq, void *data) /* disable interrupts */ enetc_wr_reg_hot(v->rbier, 0); - for_each_set_bit(i, &v->tx_rings_map, v->count_tx_rings) + for_each_set_bit(i, &v->tx_rings_map, ENETC_MAX_NUM_TXQS) enetc_wr_reg_hot(v->tbier_base + ENETC_BDR_OFF(i), 0); read_unlock(&enetc_mdio_lock); @@ -300,7 +300,7 @@ static int enetc_poll(struct napi_struct *napi, int budget) /* enable interrupts */ enetc_wr_reg_hot(v->rbier, ENETC_RBIER_RXTIE); - for_each_set_bit(i, &v->tx_rings_map, v->count_tx_rings) + for_each_set_bit(i, &v->tx_rings_map, ENETC_MAX_NUM_TXQS) enetc_wr_reg_hot(v->tbier_base + ENETC_BDR_OFF(i), ENETC_TBIER_TXTIE); diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 941c816d3ea8..09df434b2f87 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -3968,11 +3968,11 @@ failed_mii_init: failed_irq: failed_init: fec_ptp_stop(pdev); - if (fep->reg_phy) - regulator_disable(fep->reg_phy); failed_reset: pm_runtime_put_noidle(&pdev->dev); pm_runtime_disable(&pdev->dev); + if (fep->reg_phy) + regulator_disable(fep->reg_phy); failed_regulator: clk_disable_unprepare(fep->clk_ahb); failed_clk_ahb: diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c index 7b5d27a5a172..a255610e320a 100644 --- a/drivers/net/ethernet/freescale/fman/fman.c +++ b/drivers/net/ethernet/freescale/fman/fman.c @@ -1418,8 +1418,7 @@ static void enable_time_stamp(struct fman *fman) { struct fman_fpm_regs __iomem *fpm_rg = fman->fpm_regs; u16 fm_clk_freq = fman->state->fm_clk_freq; - u32 tmp, intgr, ts_freq; - u64 frac; + u32 tmp, intgr, ts_freq, frac; ts_freq = (u32)(1 << fman->state->count1_micro_bit); /* configure timestamp so that bit 8 will count 1 microsecond diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.c b/drivers/net/ethernet/freescale/fman/fman_dtsec.c index 1ca543ac8f2c..d2de9ea80c43 100644 --- a/drivers/net/ethernet/freescale/fman/fman_dtsec.c +++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.c @@ -1205,7 +1205,7 @@ int dtsec_del_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr) list_for_each(pos, &dtsec->multicast_addr_hash->lsts[bucket]) { hash_entry = ETH_HASH_ENTRY_OBJ(pos); - if (hash_entry->addr == addr) { + if (hash_entry && hash_entry->addr == addr) { list_del_init(&hash_entry->node); kfree(hash_entry); break; @@ -1218,7 +1218,7 @@ int dtsec_del_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr) list_for_each(pos, &dtsec->unicast_addr_hash->lsts[bucket]) { hash_entry = ETH_HASH_ENTRY_OBJ(pos); - if (hash_entry->addr == addr) { + if (hash_entry && hash_entry->addr == addr) { list_del_init(&hash_entry->node); kfree(hash_entry); break; diff --git a/drivers/net/ethernet/freescale/fman/fman_mac.h b/drivers/net/ethernet/freescale/fman/fman_mac.h index dd6d0526f6c1..19f327efdaff 100644 --- 
a/drivers/net/ethernet/freescale/fman/fman_mac.h +++ b/drivers/net/ethernet/freescale/fman/fman_mac.h @@ -252,7 +252,7 @@ static inline struct eth_hash_t *alloc_hash_table(u16 size) struct eth_hash_t *hash; /* Allocate address hash table */ - hash = kmalloc_array(size, sizeof(struct eth_hash_t *), GFP_KERNEL); + hash = kmalloc(sizeof(*hash), GFP_KERNEL); if (!hash) return NULL; diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c index e1901874c19f..9088b4f4b4b8 100644 --- a/drivers/net/ethernet/freescale/fman/fman_memac.c +++ b/drivers/net/ethernet/freescale/fman/fman_memac.c @@ -856,7 +856,6 @@ int memac_set_tx_pause_frames(struct fman_mac *memac, u8 priority, tmp = ioread32be(®s->command_config); tmp &= ~CMD_CFG_PFC_MODE; - priority = 0; iowrite32be(tmp, ®s->command_config); @@ -986,7 +985,7 @@ int memac_del_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr) list_for_each(pos, &memac->multicast_addr_hash->lsts[hash]) { hash_entry = ETH_HASH_ENTRY_OBJ(pos); - if (hash_entry->addr == addr) { + if (hash_entry && hash_entry->addr == addr) { list_del_init(&hash_entry->node); kfree(hash_entry); break; diff --git a/drivers/net/ethernet/freescale/fman/fman_port.c b/drivers/net/ethernet/freescale/fman/fman_port.c index bd76c9730692..5ef39a36842f 100644 --- a/drivers/net/ethernet/freescale/fman/fman_port.c +++ b/drivers/net/ethernet/freescale/fman/fman_port.c @@ -1770,6 +1770,7 @@ static int fman_port_probe(struct platform_device *of_dev) struct fman_port *port; struct fman *fman; struct device_node *fm_node, *port_node; + struct platform_device *fm_pdev; struct resource res; struct resource *dev_res; u32 val; @@ -1794,8 +1795,14 @@ static int fman_port_probe(struct platform_device *of_dev) goto return_err; } - fman = dev_get_drvdata(&of_find_device_by_node(fm_node)->dev); + fm_pdev = of_find_device_by_node(fm_node); of_node_put(fm_node); + if (!fm_pdev) { + err = -EINVAL; + goto return_err; + } + + fman = dev_get_drvdata(&fm_pdev->dev); if (!fman) { err = -EINVAL; goto return_err; diff --git a/drivers/net/ethernet/freescale/fman/fman_tgec.c b/drivers/net/ethernet/freescale/fman/fman_tgec.c index f75b9c11b2d2..ac5a281e0ec3 100644 --- a/drivers/net/ethernet/freescale/fman/fman_tgec.c +++ b/drivers/net/ethernet/freescale/fman/fman_tgec.c @@ -630,7 +630,7 @@ int tgec_del_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr) list_for_each(pos, &tgec->multicast_addr_hash->lsts[hash]) { hash_entry = ETH_HASH_ENTRY_OBJ(pos); - if (hash_entry->addr == addr) { + if (hash_entry && hash_entry->addr == addr) { list_del_init(&hash_entry->node); kfree(hash_entry); break; diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index 2580bcd85025..3978d82c9598 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -751,8 +751,10 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) continue; err = gfar_parse_group(child, priv, model); - if (err) + if (err) { + of_node_put(child); goto err_grp_init; + } } } else { /* SQ_SG_MODE */ err = gfar_parse_group(np, priv, model); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c index eb69e5c81a4d..6d5d53cfc7ab 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c @@ -2296,8 +2296,10 @@ static int hns_nic_dev_probe(struct platform_device *pdev) priv->enet_ver = 
AE_VERSION_1; else if (acpi_dev_found(hns_enet_acpi_match[1].id)) priv->enet_ver = AE_VERSION_2; - else - return -ENXIO; + else { + ret = -ENXIO; + goto out_read_prop_fail; + } /* try to find port-idx-in-ae first */ ret = acpi_node_get_property_reference(dev->fwnode, @@ -2313,7 +2315,8 @@ static int hns_nic_dev_probe(struct platform_device *pdev) priv->fwnode = args.fwnode; } else { dev_err(dev, "cannot read cfg data from OF or acpi\n"); - return -ENXIO; + ret = -ENXIO; + goto out_read_prop_fail; } ret = device_property_read_u32(dev, "port-idx-in-ae", &port_id); diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h index a0998937727d..0db835d87d09 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h @@ -77,6 +77,7 @@ ((ring)->p = ((ring)->p - 1 + (ring)->desc_num) % (ring)->desc_num) enum hns_desc_type { + DESC_TYPE_UNKNOWN, DESC_TYPE_SKB, DESC_TYPE_PAGE, }; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 403e0f089f2a..92af7204711c 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -21,6 +21,7 @@ #include #include #include +#include #include "hnae3.h" #include "hns3_enet.h" @@ -795,7 +796,7 @@ static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto, * and it is udp packet, which has a dest port as the IANA assigned. * the hardware is expected to do the checksum offload, but the * hardware will not do the checksum offload when udp dest port is - * 4789. + * 4789 or 6081. */ static bool hns3_tunnel_csum_bug(struct sk_buff *skb) { @@ -804,7 +805,8 @@ static bool hns3_tunnel_csum_bug(struct sk_buff *skb) l4.hdr = skb_transport_header(skb); if (!(!skb->encapsulation && - l4.udp->dest == htons(IANA_VXLAN_UDP_PORT))) + (l4.udp->dest == htons(IANA_VXLAN_UDP_PORT) || + l4.udp->dest == htons(GENEVE_UDP_PORT)))) return false; skb_checksum_help(skb); @@ -1292,6 +1294,10 @@ static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig) unsigned int i; for (i = 0; i < ring->desc_num; i++) { + struct hns3_desc *desc = &ring->desc[ring->next_to_use]; + + memset(desc, 0, sizeof(*desc)); + /* check if this is where we started */ if (ring->next_to_use == next_to_use_orig) break; @@ -1299,6 +1305,9 @@ static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig) /* rollback one */ ring_ptr_move_bw(ring, next_to_use); + if (!ring->desc_cb[ring->next_to_use].dma) + continue; + /* unmap the descriptor dma address */ if (ring->desc_cb[ring->next_to_use].type == DESC_TYPE_SKB) dma_unmap_single(dev, @@ -1313,6 +1322,7 @@ static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig) ring->desc_cb[ring->next_to_use].length = 0; ring->desc_cb[ring->next_to_use].dma = 0; + ring->desc_cb[ring->next_to_use].type = DESC_TYPE_UNKNOWN; } } @@ -3993,9 +4003,8 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset) hns3_put_ring_config(priv); - hns3_dbg_uninit(handle); - out_netdev_free: + hns3_dbg_uninit(handle); free_netdev(netdev); } @@ -4007,8 +4016,8 @@ static void hns3_link_status_change(struct hnae3_handle *handle, bool linkup) return; if (linkup) { - netif_carrier_on(netdev); netif_tx_wake_all_queues(netdev); + netif_carrier_on(netdev); if (netif_msg_link(handle)) netdev_info(netdev, "link up\n"); } else { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c 
b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c index 52c9d204fe3d..34e5448d59f6 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c @@ -174,18 +174,21 @@ static void hns3_lb_check_skb_data(struct hns3_enet_ring *ring, { struct hns3_enet_tqp_vector *tqp_vector = ring->tqp_vector; unsigned char *packet = skb->data; + u32 len = skb_headlen(skb); u32 i; - for (i = 0; i < skb->len; i++) + len = min_t(u32, len, HNS3_NIC_LB_TEST_PACKET_SIZE); + + for (i = 0; i < len; i++) if (packet[i] != (unsigned char)(i & 0xff)) break; /* The packet is correctly received */ - if (i == skb->len) + if (i == HNS3_NIC_LB_TEST_PACKET_SIZE) tqp_vector->rx_group.total_packets++; else print_hex_dump(KERN_ERR, "selftest:", DUMP_PREFIX_OFFSET, 16, 1, - skb->data, skb->len, true); + skb->data, len, true); dev_kfree_skb_any(skb); } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index d4652dea4569..6c3d13110993 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -5627,9 +5627,9 @@ static int hclge_add_fd_entry(struct hnae3_handle *handle, /* to avoid rule conflict, when user configure rule by ethtool, * we need to clear all arfs rules */ + spin_lock_bh(&hdev->fd_rule_lock); hclge_clear_arfs_rules(handle); - spin_lock_bh(&hdev->fd_rule_lock); ret = hclge_fd_config_rule(hdev, rule); spin_unlock_bh(&hdev->fd_rule_lock); @@ -5672,6 +5672,7 @@ static int hclge_del_fd_entry(struct hnae3_handle *handle, return ret; } +/* make sure being called after lock up with fd_rule_lock */ static void hclge_del_all_fd_entries(struct hnae3_handle *handle, bool clear_list) { @@ -5684,7 +5685,6 @@ static void hclge_del_all_fd_entries(struct hnae3_handle *handle, if (!hnae3_dev_fd_supported(hdev)) return; - spin_lock_bh(&hdev->fd_rule_lock); for_each_set_bit(location, hdev->fd_bmap, hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location, @@ -5701,8 +5701,6 @@ static void hclge_del_all_fd_entries(struct hnae3_handle *handle, bitmap_zero(hdev->fd_bmap, hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]); } - - spin_unlock_bh(&hdev->fd_rule_lock); } static int hclge_restore_fd_entries(struct hnae3_handle *handle) @@ -6069,7 +6067,7 @@ static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id, u16 flow_id, struct flow_keys *fkeys) { struct hclge_vport *vport = hclge_get_vport(handle); - struct hclge_fd_rule_tuples new_tuples; + struct hclge_fd_rule_tuples new_tuples = {}; struct hclge_dev *hdev = vport->back; struct hclge_fd_rule *rule; u16 tmp_queue_id; @@ -6079,20 +6077,18 @@ static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id, if (!hnae3_dev_fd_supported(hdev)) return -EOPNOTSUPP; - memset(&new_tuples, 0, sizeof(new_tuples)); - hclge_fd_get_flow_tuples(fkeys, &new_tuples); - - spin_lock_bh(&hdev->fd_rule_lock); - /* when there is already fd rule existed add by user, * arfs should not work */ + spin_lock_bh(&hdev->fd_rule_lock); if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) { spin_unlock_bh(&hdev->fd_rule_lock); return -EOPNOTSUPP; } + hclge_fd_get_flow_tuples(fkeys, &new_tuples); + /* check is there flow director filter existed for this flow, * if not, create a new filter for it; * if filter exist with different queue id, modify the filter; @@ -6177,6 +6173,7 @@ static void hclge_rfs_filter_expire(struct hclge_dev *hdev) #endif 
} +/* make sure being called after lock up with fd_rule_lock */ static void hclge_clear_arfs_rules(struct hnae3_handle *handle) { #ifdef CONFIG_RFS_ACCEL @@ -6221,10 +6218,14 @@ static void hclge_enable_fd(struct hnae3_handle *handle, bool enable) hdev->fd_en = enable; clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE; - if (!enable) + + if (!enable) { + spin_lock_bh(&hdev->fd_rule_lock); hclge_del_all_fd_entries(handle, clear); - else + spin_unlock_bh(&hdev->fd_rule_lock); + } else { hclge_restore_fd_entries(handle); + } } static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable) @@ -6678,8 +6679,9 @@ static void hclge_ae_stop(struct hnae3_handle *handle) int i; set_bit(HCLGE_STATE_DOWN, &hdev->state); - + spin_lock_bh(&hdev->fd_rule_lock); hclge_clear_arfs_rules(handle); + spin_unlock_bh(&hdev->fd_rule_lock); /* If it is not PF reset, the firmware will disable the MAC, * so it only need to stop phy here. diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c index c5be4ebd8437..aa32a5b04112 100644 --- a/drivers/net/ethernet/ibm/ibmveth.c +++ b/drivers/net/ethernet/ibm/ibmveth.c @@ -1682,7 +1682,7 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id) } netdev->min_mtu = IBMVETH_MIN_MTU; - netdev->max_mtu = ETH_MAX_MTU; + netdev->max_mtu = ETH_MAX_MTU - IBMVETH_BUFF_OH; memcpy(netdev->dev_addr, mac_addr_p, ETH_ALEN); diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 5a42ddeecfe5..2d20a48f0ba0 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -779,12 +779,13 @@ static int ibmvnic_login(struct net_device *netdev) struct ibmvnic_adapter *adapter = netdev_priv(netdev); unsigned long timeout = msecs_to_jiffies(30000); int retry_count = 0; + int retries = 10; bool retry; int rc; do { retry = false; - if (retry_count > IBMVNIC_MAX_QUEUES) { + if (retry_count > retries) { netdev_warn(netdev, "Login attempts exceeded\n"); return -1; } @@ -799,11 +800,23 @@ static int ibmvnic_login(struct net_device *netdev) if (!wait_for_completion_timeout(&adapter->init_done, timeout)) { - netdev_warn(netdev, "Login timed out\n"); - return -1; + netdev_warn(netdev, "Login timed out, retrying...\n"); + retry = true; + adapter->init_done_rc = 0; + retry_count++; + continue; } - if (adapter->init_done_rc == PARTIALSUCCESS) { + if (adapter->init_done_rc == ABORTED) { + netdev_warn(netdev, "Login aborted, retrying...\n"); + retry = true; + adapter->init_done_rc = 0; + retry_count++; + /* FW or device may be busy, so + * wait a bit before retrying login + */ + msleep(500); + } else if (adapter->init_done_rc == PARTIALSUCCESS) { retry_count++; release_sub_crqs(adapter, 1); @@ -1865,13 +1878,18 @@ static int do_reset(struct ibmvnic_adapter *adapter, release_sub_crqs(adapter, 1); } else { rc = ibmvnic_reset_crq(adapter); - if (!rc) + if (rc == H_CLOSED || rc == H_SUCCESS) { rc = vio_enable_interrupts(adapter->vdev); + if (rc) + netdev_err(adapter->netdev, + "Reset failed to enable interrupts. rc=%d\n", + rc); + } } if (rc) { netdev_err(adapter->netdev, - "Couldn't initialize crq. rc=%d\n", rc); + "Reset couldn't initialize crq. 
rc=%d\n", rc); goto out; } @@ -3068,7 +3086,7 @@ req_rx_irq_failed: req_tx_irq_failed: for (j = 0; j < i; j++) { free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]); - irq_dispose_mapping(adapter->rx_scrq[j]->irq); + irq_dispose_mapping(adapter->tx_scrq[j]->irq); } release_sub_crqs(adapter, 1); return rc; diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c index 86493fea56e4..f93ed70709c6 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_main.c +++ b/drivers/net/ethernet/intel/e1000/e1000_main.c @@ -3140,8 +3140,9 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); if (skb->data_len && hdr_len == len) { switch (hw->mac_type) { + case e1000_82544: { unsigned int pull_size; - case e1000_82544: + /* Make sure we have room to chop off 4 bytes, * and that the end alignment will work out to * this hardware's requirements @@ -3162,6 +3163,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, } len = skb_headlen(skb); break; + } default: /* do nothing */ break; diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h index 37a2314d3e6b..944abd5eae11 100644 --- a/drivers/net/ethernet/intel/e1000e/e1000.h +++ b/drivers/net/ethernet/intel/e1000e/e1000.h @@ -576,7 +576,6 @@ static inline u32 __er32(struct e1000_hw *hw, unsigned long reg) #define er32(reg) __er32(hw, E1000_##reg) -s32 __ew32_prepare(struct e1000_hw *hw); void __ew32(struct e1000_hw *hw, unsigned long reg, u32 val); #define ew32(reg, val) __ew32(hw, E1000_##reg, (val)) diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h index eff75bd8a8f0..11fdc27faa82 100644 --- a/drivers/net/ethernet/intel/e1000e/hw.h +++ b/drivers/net/ethernet/intel/e1000e/hw.h @@ -86,6 +86,12 @@ struct e1000_hw; #define E1000_DEV_ID_PCH_ICP_I219_V8 0x15E0 #define E1000_DEV_ID_PCH_ICP_I219_LM9 0x15E1 #define E1000_DEV_ID_PCH_ICP_I219_V9 0x15E2 +#define E1000_DEV_ID_PCH_CMP_I219_LM10 0x0D4E +#define E1000_DEV_ID_PCH_CMP_I219_V10 0x0D4F +#define E1000_DEV_ID_PCH_CMP_I219_LM11 0x0D4C +#define E1000_DEV_ID_PCH_CMP_I219_V11 0x0D4D +#define E1000_DEV_ID_PCH_CMP_I219_LM12 0x0D53 +#define E1000_DEV_ID_PCH_CMP_I219_V12 0x0D55 #define E1000_REVISION_4 4 diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 8c4507838325..4cb05a31e66d 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -119,14 +119,12 @@ static const struct e1000_reg_info e1000_reg_info_tbl[] = { * has bit 24 set while ME is accessing MAC CSR registers, wait if it is set * and try again a number of times. 
**/ -s32 __ew32_prepare(struct e1000_hw *hw) +static void __ew32_prepare(struct e1000_hw *hw) { s32 i = E1000_ICH_FWSM_PCIM2PCI_COUNT; while ((er32(FWSM) & E1000_ICH_FWSM_PCIM2PCI) && --i) udelay(50); - - return i; } void __ew32(struct e1000_hw *hw, unsigned long reg, u32 val) @@ -607,11 +605,11 @@ static void e1000e_update_rdt_wa(struct e1000_ring *rx_ring, unsigned int i) { struct e1000_adapter *adapter = rx_ring->adapter; struct e1000_hw *hw = &adapter->hw; - s32 ret_val = __ew32_prepare(hw); + __ew32_prepare(hw); writel(i, rx_ring->tail); - if (unlikely(!ret_val && (i != readl(rx_ring->tail)))) { + if (unlikely(i != readl(rx_ring->tail))) { u32 rctl = er32(RCTL); ew32(RCTL, rctl & ~E1000_RCTL_EN); @@ -624,11 +622,11 @@ static void e1000e_update_tdt_wa(struct e1000_ring *tx_ring, unsigned int i) { struct e1000_adapter *adapter = tx_ring->adapter; struct e1000_hw *hw = &adapter->hw; - s32 ret_val = __ew32_prepare(hw); + __ew32_prepare(hw); writel(i, tx_ring->tail); - if (unlikely(!ret_val && (i != readl(tx_ring->tail)))) { + if (unlikely(i != readl(tx_ring->tail))) { u32 tctl = er32(TCTL); ew32(TCTL, tctl & ~E1000_TCTL_EN); @@ -5289,6 +5287,10 @@ static void e1000_watchdog_task(struct work_struct *work) /* oops */ break; } + if (hw->mac.type == e1000_pch_spt) { + netdev->features &= ~NETIF_F_TSO; + netdev->features &= ~NETIF_F_TSO6; + } } /* enable transmits in the hardware, need to do this @@ -6343,11 +6345,17 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime) struct net_device *netdev = pci_get_drvdata(pdev); struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; - u32 ctrl, ctrl_ext, rctl, status; - /* Runtime suspend should only enable wakeup for link changes */ - u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol; + u32 ctrl, ctrl_ext, rctl, status, wufc; int retval = 0; + /* Runtime suspend should only enable wakeup for link changes */ + if (runtime) + wufc = E1000_WUFC_LNKC; + else if (device_may_wakeup(&pdev->dev)) + wufc = adapter->wol; + else + wufc = 0; + status = er32(STATUS); if (status & E1000_STATUS_LU) wufc &= ~E1000_WUFC_LNKC; @@ -6404,7 +6412,7 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime) if (adapter->hw.phy.type == e1000_phy_igp_3) { e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw); } else if (hw->mac.type >= e1000_pch_lpt) { - if (!(wufc & (E1000_WUFC_EX | E1000_WUFC_MC | E1000_WUFC_BC))) + if (wufc && !(wufc & (E1000_WUFC_EX | E1000_WUFC_MC | E1000_WUFC_BC))) /* ULP does not support wake from unicast, multicast * or broadcast. 
*/ @@ -7560,6 +7568,12 @@ static const struct pci_device_id e1000_pci_tbl[] = { { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ICP_I219_V8), board_pch_cnp }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ICP_I219_LM9), board_pch_cnp }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ICP_I219_V9), board_pch_cnp }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CMP_I219_LM10), board_pch_cnp }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CMP_I219_V10), board_pch_cnp }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CMP_I219_LM11), board_pch_cnp }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CMP_I219_V11), board_pch_cnp }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CMP_I219_LM12), board_pch_spt }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CMP_I219_V12), board_pch_spt }, { 0, 0, 0, 0, 0, 0, 0 } /* terminate list */ }; diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h index 69a2daaca5c5..d7684ac2522e 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h @@ -1211,7 +1211,7 @@ struct i40e_aqc_set_vsi_promiscuous_modes { #define I40E_AQC_SET_VSI_PROMISC_BROADCAST 0x04 #define I40E_AQC_SET_VSI_DEFAULT 0x08 #define I40E_AQC_SET_VSI_PROMISC_VLAN 0x10 -#define I40E_AQC_SET_VSI_PROMISC_TX 0x8000 +#define I40E_AQC_SET_VSI_PROMISC_RX_ONLY 0x8000 __le16 seid; #define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF __le16 vlan_tag; diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index 3160b5bbe672..66f7deaf46ae 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -1949,6 +1949,21 @@ i40e_status i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags, return status; } +/** + * i40e_is_aq_api_ver_ge + * @aq: pointer to AdminQ info containing HW API version to compare + * @maj: API major value + * @min: API minor value + * + * Assert whether current HW API version is greater/equal than provided. 
+ **/ +static bool i40e_is_aq_api_ver_ge(struct i40e_adminq_info *aq, u16 maj, + u16 min) +{ + return (aq->api_maj_ver > maj || + (aq->api_maj_ver == maj && aq->api_min_ver >= min)); +} + /** * i40e_aq_add_vsi * @hw: pointer to the hw struct @@ -2074,18 +2089,16 @@ i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw, if (set) { flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST; - if (rx_only_promisc && - (((hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver >= 5)) || - (hw->aq.api_maj_ver > 1))) - flags |= I40E_AQC_SET_VSI_PROMISC_TX; + if (rx_only_promisc && i40e_is_aq_api_ver_ge(&hw->aq, 1, 5)) + flags |= I40E_AQC_SET_VSI_PROMISC_RX_ONLY; } cmd->promiscuous_flags = cpu_to_le16(flags); cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST); - if (((hw->aq.api_maj_ver >= 1) && (hw->aq.api_min_ver >= 5)) || - (hw->aq.api_maj_ver > 1)) - cmd->valid_flags |= cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_TX); + if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5)) + cmd->valid_flags |= + cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_RX_ONLY); cmd->seid = cpu_to_le16(seid); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); @@ -2182,11 +2195,17 @@ enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw, i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_vsi_promiscuous_modes); - if (enable) + if (enable) { flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST; + if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5)) + flags |= I40E_AQC_SET_VSI_PROMISC_RX_ONLY; + } cmd->promiscuous_flags = cpu_to_le16(flags); cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST); + if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5)) + cmd->valid_flags |= + cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_RX_ONLY); cmd->seid = cpu_to_le16(seid); cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID); diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index a8dd0228b678..b3c3911adfc2 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -458,11 +458,15 @@ static void i40e_get_netdev_stats_struct(struct net_device *netdev, i40e_get_netdev_stats_struct_tx(ring, stats); if (i40e_enabled_xdp_vsi(vsi)) { - ring++; + ring = READ_ONCE(vsi->xdp_rings[i]); + if (!ring) + continue; i40e_get_netdev_stats_struct_tx(ring, stats); } - ring++; + ring = READ_ONCE(vsi->rx_rings[i]); + if (!ring) + continue; do { start = u64_stats_fetch_begin_irq(&ring->syncp); packets = ring->stats.packets; @@ -806,6 +810,8 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi) for (q = 0; q < vsi->num_queue_pairs; q++) { /* locate Tx ring */ p = READ_ONCE(vsi->tx_rings[q]); + if (!p) + continue; do { start = u64_stats_fetch_begin_irq(&p->syncp); @@ -819,8 +825,11 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi) tx_linearize += p->tx_stats.tx_linearize; tx_force_wb += p->tx_stats.tx_force_wb; - /* Rx queue is part of the same block as Tx queue */ - p = &p[1]; + /* locate Rx ring */ + p = READ_ONCE(vsi->rx_rings[q]); + if (!p) + continue; + do { start = u64_stats_fetch_begin_irq(&p->syncp); packets = p->stats.packets; @@ -10816,10 +10825,10 @@ static void i40e_vsi_clear_rings(struct i40e_vsi *vsi) if (vsi->tx_rings && vsi->tx_rings[0]) { for (i = 0; i < vsi->alloc_queue_pairs; i++) { kfree_rcu(vsi->tx_rings[i], rcu); - vsi->tx_rings[i] = NULL; - vsi->rx_rings[i] = NULL; + WRITE_ONCE(vsi->tx_rings[i], NULL); + WRITE_ONCE(vsi->rx_rings[i], NULL); if (vsi->xdp_rings) - vsi->xdp_rings[i] = NULL; + WRITE_ONCE(vsi->xdp_rings[i], NULL); } 
} } @@ -10853,7 +10862,7 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi) if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE) ring->flags = I40E_TXR_FLAGS_WB_ON_ITR; ring->itr_setting = pf->tx_itr_default; - vsi->tx_rings[i] = ring++; + WRITE_ONCE(vsi->tx_rings[i], ring++); if (!i40e_enabled_xdp_vsi(vsi)) goto setup_rx; @@ -10871,7 +10880,7 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi) ring->flags = I40E_TXR_FLAGS_WB_ON_ITR; set_ring_xdp(ring); ring->itr_setting = pf->tx_itr_default; - vsi->xdp_rings[i] = ring++; + WRITE_ONCE(vsi->xdp_rings[i], ring++); setup_rx: ring->queue_index = i; @@ -10884,7 +10893,7 @@ setup_rx: ring->size = 0; ring->dcb_tc = 0; ring->itr_setting = pf->rx_itr_default; - vsi->rx_rings[i] = ring; + WRITE_ONCE(vsi->rx_rings[i], ring); } return 0; @@ -15333,6 +15342,9 @@ static void i40e_remove(struct pci_dev *pdev) i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0); i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0); + while (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) + usleep_range(1000, 2000); + /* no more scheduling of any task */ set_bit(__I40E_SUSPENDED, pf->state); set_bit(__I40E_DOWN, pf->state); diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h index bd1b1ed323f4..6b9117a350fa 100644 --- a/drivers/net/ethernet/intel/iavf/iavf.h +++ b/drivers/net/ethernet/intel/iavf/iavf.h @@ -87,6 +87,10 @@ struct iavf_vsi { #define IAVF_HLUT_ARRAY_SIZE ((IAVF_VFQF_HLUT_MAX_INDEX + 1) * 4) #define IAVF_MBPS_DIVISOR 125000 /* divisor to convert to Mbps */ +#define IAVF_VIRTCHNL_VF_RESOURCE_SIZE (sizeof(struct virtchnl_vf_resource) + \ + (IAVF_MAX_VF_VSI * \ + sizeof(struct virtchnl_vsi_resource))) + /* MAX_MSIX_Q_VECTORS of these are allocated, * but we only use one per queue-specific vector. */ @@ -306,6 +310,14 @@ struct iavf_adapter { bool netdev_registered; bool link_up; enum virtchnl_link_speed link_speed; + /* This is only populated if the VIRTCHNL_VF_CAP_ADV_LINK_SPEED is set + * in vf_res->vf_cap_flags. Use ADV_LINK_SUPPORT macro to determine if + * this field is valid. This field should be used going forward and the + * enum virtchnl_link_speed above should be considered the legacy way of + * storing/communicating link speeds. + */ + u32 link_speed_mbps; + enum virtchnl_ops current_op; #define CLIENT_ALLOWED(_a) ((_a)->vf_res ? \ (_a)->vf_res->vf_cap_flags & \ @@ -322,6 +334,8 @@ struct iavf_adapter { VIRTCHNL_VF_OFFLOAD_RSS_PF))) #define VLAN_ALLOWED(_a) ((_a)->vf_res->vf_cap_flags & \ VIRTCHNL_VF_OFFLOAD_VLAN) +#define ADV_LINK_SUPPORT(_a) ((_a)->vf_res->vf_cap_flags & \ + VIRTCHNL_VF_CAP_ADV_LINK_SPEED) struct virtchnl_vf_resource *vf_res; /* incl. 
all VSIs */ struct virtchnl_vsi_resource *vsi_res; /* our LAN VSI */ struct virtchnl_version_info pf_version; diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c index dad3eec8ccd8..758bef02a2a8 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c +++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c @@ -278,7 +278,18 @@ static int iavf_get_link_ksettings(struct net_device *netdev, ethtool_link_ksettings_zero_link_mode(cmd, supported); cmd->base.autoneg = AUTONEG_DISABLE; cmd->base.port = PORT_NONE; - /* Set speed and duplex */ + cmd->base.duplex = DUPLEX_FULL; + + if (ADV_LINK_SUPPORT(adapter)) { + if (adapter->link_speed_mbps && + adapter->link_speed_mbps < U32_MAX) + cmd->base.speed = adapter->link_speed_mbps; + else + cmd->base.speed = SPEED_UNKNOWN; + + return 0; + } + switch (adapter->link_speed) { case IAVF_LINK_SPEED_40GB: cmd->base.speed = SPEED_40000; @@ -306,7 +317,6 @@ static int iavf_get_link_ksettings(struct net_device *netdev, default: break; } - cmd->base.duplex = DUPLEX_FULL; return 0; } diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c index 8e16be960e96..34124c213d27 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_main.c +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c @@ -1756,17 +1756,17 @@ static int iavf_init_get_resources(struct iavf_adapter *adapter) struct net_device *netdev = adapter->netdev; struct pci_dev *pdev = adapter->pdev; struct iavf_hw *hw = &adapter->hw; - int err = 0, bufsz; + int err; WARN_ON(adapter->state != __IAVF_INIT_GET_RESOURCES); /* aq msg sent, awaiting reply */ if (!adapter->vf_res) { - bufsz = sizeof(struct virtchnl_vf_resource) + - (IAVF_MAX_VF_VSI * - sizeof(struct virtchnl_vsi_resource)); - adapter->vf_res = kzalloc(bufsz, GFP_KERNEL); - if (!adapter->vf_res) + adapter->vf_res = kzalloc(IAVF_VIRTCHNL_VF_RESOURCE_SIZE, + GFP_KERNEL); + if (!adapter->vf_res) { + err = -ENOMEM; goto err; + } } err = iavf_get_vf_config(adapter); if (err == IAVF_ERR_ADMIN_QUEUE_NO_WORK) { @@ -1863,8 +1863,10 @@ static int iavf_init_get_resources(struct iavf_adapter *adapter) adapter->rss_key = kzalloc(adapter->rss_key_size, GFP_KERNEL); adapter->rss_lut = kzalloc(adapter->rss_lut_size, GFP_KERNEL); - if (!adapter->rss_key || !adapter->rss_lut) + if (!adapter->rss_key || !adapter->rss_lut) { + err = -ENOMEM; goto err_mem; + } if (RSS_AQ(adapter)) adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS; else @@ -1946,7 +1948,10 @@ static void iavf_watchdog_task(struct work_struct *work) iavf_send_api_ver(adapter); } } else { - if (!iavf_process_aq_command(adapter) && + /* An error will be returned if no commands were + * processed; use this opportunity to update stats + */ + if (iavf_process_aq_command(adapter) && adapter->state == __IAVF_RUNNING) iavf_request_stats(adapter); } @@ -2036,7 +2041,7 @@ static void iavf_disable_vf(struct iavf_adapter *adapter) iavf_reset_interrupt_capability(adapter); iavf_free_queues(adapter); iavf_free_q_vectors(adapter); - kfree(adapter->vf_res); + memset(adapter->vf_res, 0, IAVF_VIRTCHNL_VF_RESOURCE_SIZE); iavf_shutdown_adminq(&adapter->hw); adapter->netdev->flags &= ~IFF_UP; clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); @@ -2487,6 +2492,16 @@ static int iavf_validate_tx_bandwidth(struct iavf_adapter *adapter, { int speed = 0, ret = 0; + if (ADV_LINK_SUPPORT(adapter)) { + if (adapter->link_speed_mbps < U32_MAX) { + speed = adapter->link_speed_mbps; + goto validate_bw; + } else { + 
dev_err(&adapter->pdev->dev, "Unknown link speed\n"); + return -EINVAL; + } + } + switch (adapter->link_speed) { case IAVF_LINK_SPEED_40GB: speed = 40000; @@ -2510,6 +2525,7 @@ static int iavf_validate_tx_bandwidth(struct iavf_adapter *adapter, break; } +validate_bw: if (max_tx_rate > speed) { dev_err(&adapter->pdev->dev, "Invalid tx rate specified\n"); diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c index 1ab9cb339acb..9655318803b7 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c +++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c @@ -139,7 +139,8 @@ int iavf_send_vf_config_msg(struct iavf_adapter *adapter) VIRTCHNL_VF_OFFLOAD_ENCAP | VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM | VIRTCHNL_VF_OFFLOAD_REQ_QUEUES | - VIRTCHNL_VF_OFFLOAD_ADQ; + VIRTCHNL_VF_OFFLOAD_ADQ | + VIRTCHNL_VF_CAP_ADV_LINK_SPEED; adapter->current_op = VIRTCHNL_OP_GET_VF_RESOURCES; adapter->aq_required &= ~IAVF_FLAG_AQ_GET_CONFIG; @@ -918,6 +919,8 @@ void iavf_disable_vlan_stripping(struct iavf_adapter *adapter) iavf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING, NULL, 0); } +#define IAVF_MAX_SPEED_STRLEN 13 + /** * iavf_print_link_message - print link up or down * @adapter: adapter structure @@ -927,37 +930,99 @@ void iavf_disable_vlan_stripping(struct iavf_adapter *adapter) static void iavf_print_link_message(struct iavf_adapter *adapter) { struct net_device *netdev = adapter->netdev; - char *speed = "Unknown "; + int link_speed_mbps; + char *speed; if (!adapter->link_up) { netdev_info(netdev, "NIC Link is Down\n"); return; } + speed = kcalloc(1, IAVF_MAX_SPEED_STRLEN, GFP_KERNEL); + if (!speed) + return; + + if (ADV_LINK_SUPPORT(adapter)) { + link_speed_mbps = adapter->link_speed_mbps; + goto print_link_msg; + } + switch (adapter->link_speed) { case IAVF_LINK_SPEED_40GB: - speed = "40 G"; + link_speed_mbps = SPEED_40000; break; case IAVF_LINK_SPEED_25GB: - speed = "25 G"; + link_speed_mbps = SPEED_25000; break; case IAVF_LINK_SPEED_20GB: - speed = "20 G"; + link_speed_mbps = SPEED_20000; break; case IAVF_LINK_SPEED_10GB: - speed = "10 G"; + link_speed_mbps = SPEED_10000; break; case IAVF_LINK_SPEED_1GB: - speed = "1000 M"; + link_speed_mbps = SPEED_1000; break; case IAVF_LINK_SPEED_100MB: - speed = "100 M"; + link_speed_mbps = SPEED_100; break; default: + link_speed_mbps = SPEED_UNKNOWN; break; } - netdev_info(netdev, "NIC Link is Up %sbps Full Duplex\n", speed); +print_link_msg: + if (link_speed_mbps > SPEED_1000) { + if (link_speed_mbps == SPEED_2500) + snprintf(speed, IAVF_MAX_SPEED_STRLEN, "2.5 Gbps"); + else + /* convert to Gbps inline */ + snprintf(speed, IAVF_MAX_SPEED_STRLEN, "%d %s", + link_speed_mbps / 1000, "Gbps"); + } else if (link_speed_mbps == SPEED_UNKNOWN) { + snprintf(speed, IAVF_MAX_SPEED_STRLEN, "%s", "Unknown Mbps"); + } else { + snprintf(speed, IAVF_MAX_SPEED_STRLEN, "%u %s", + link_speed_mbps, "Mbps"); + } + + netdev_info(netdev, "NIC Link is Up Speed is %s Full Duplex\n", speed); + kfree(speed); +} + +/** + * iavf_get_vpe_link_status + * @adapter: adapter structure + * @vpe: virtchnl_pf_event structure + * + * Helper function for determining the link status + **/ +static bool +iavf_get_vpe_link_status(struct iavf_adapter *adapter, + struct virtchnl_pf_event *vpe) +{ + if (ADV_LINK_SUPPORT(adapter)) + return vpe->event_data.link_event_adv.link_status; + else + return vpe->event_data.link_event.link_status; +} + +/** + * iavf_set_adapter_link_speed_from_vpe + * @adapter: adapter structure for which we are setting the link 
speed + * @vpe: virtchnl_pf_event structure that contains the link speed we are setting + * + * Helper function for setting iavf_adapter link speed + **/ +static void +iavf_set_adapter_link_speed_from_vpe(struct iavf_adapter *adapter, + struct virtchnl_pf_event *vpe) +{ + if (ADV_LINK_SUPPORT(adapter)) + adapter->link_speed_mbps = + vpe->event_data.link_event_adv.link_speed; + else + adapter->link_speed = vpe->event_data.link_event.link_speed; } /** @@ -1187,12 +1252,11 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, if (v_opcode == VIRTCHNL_OP_EVENT) { struct virtchnl_pf_event *vpe = (struct virtchnl_pf_event *)msg; - bool link_up = vpe->event_data.link_event.link_status; + bool link_up = iavf_get_vpe_link_status(adapter, vpe); switch (vpe->event) { case VIRTCHNL_EVENT_LINK_CHANGE: - adapter->link_speed = - vpe->event_data.link_event.link_speed; + iavf_set_adapter_link_speed_from_vpe(adapter, vpe); /* we've already got the right link status, bail */ if (adapter->link_up == link_up) diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index 171f0b625407..d68b8aa31b19 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -436,6 +436,7 @@ static void ice_init_flex_flds(struct ice_hw *hw, enum ice_rxdid prof_id) static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw) { struct ice_switch_info *sw; + enum ice_status status; hw->switch_info = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*hw->switch_info), GFP_KERNEL); @@ -446,7 +447,12 @@ static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw) INIT_LIST_HEAD(&sw->vsi_list_map_head); - return ice_init_def_sw_recp(hw); + status = ice_init_def_sw_recp(hw); + if (status) { + devm_kfree(ice_hw_to_dev(hw), hw->switch_info); + return status; + } + return 0; } /** diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c index c68709c7ef81..2e9c97bad3c3 100644 --- a/drivers/net/ethernet/intel/ice/ice_controlq.c +++ b/drivers/net/ethernet/intel/ice/ice_controlq.c @@ -199,7 +199,9 @@ unwind_alloc_rq_bufs: cq->rq.r.rq_bi[i].pa = 0; cq->rq.r.rq_bi[i].size = 0; } + cq->rq.r.rq_bi = NULL; devm_kfree(ice_hw_to_dev(hw), cq->rq.dma_head); + cq->rq.dma_head = NULL; return ICE_ERR_NO_MEMORY; } @@ -245,7 +247,9 @@ unwind_alloc_sq_bufs: cq->sq.r.sq_bi[i].pa = 0; cq->sq.r.sq_bi[i].size = 0; } + cq->sq.r.sq_bi = NULL; devm_kfree(ice_hw_to_dev(hw), cq->sq.dma_head); + cq->sq.dma_head = NULL; return ICE_ERR_NO_MEMORY; } @@ -304,6 +308,28 @@ ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq) return 0; } +#define ICE_FREE_CQ_BUFS(hw, qi, ring) \ +do { \ + int i; \ + /* free descriptors */ \ + if ((qi)->ring.r.ring##_bi) \ + for (i = 0; i < (qi)->num_##ring##_entries; i++) \ + if ((qi)->ring.r.ring##_bi[i].pa) { \ + dmam_free_coherent(ice_hw_to_dev(hw), \ + (qi)->ring.r.ring##_bi[i].size, \ + (qi)->ring.r.ring##_bi[i].va, \ + (qi)->ring.r.ring##_bi[i].pa); \ + (qi)->ring.r.ring##_bi[i].va = NULL;\ + (qi)->ring.r.ring##_bi[i].pa = 0;\ + (qi)->ring.r.ring##_bi[i].size = 0;\ + } \ + /* free the buffer info list */ \ + if ((qi)->ring.cmd_buf) \ + devm_kfree(ice_hw_to_dev(hw), (qi)->ring.cmd_buf); \ + /* free DMA head */ \ + devm_kfree(ice_hw_to_dev(hw), (qi)->ring.dma_head); \ +} while (0) + /** * ice_init_sq - main initialization routine for Control ATQ * @hw: pointer to the hardware structure @@ -357,6 +383,7 @@ static enum ice_status ice_init_sq(struct ice_hw *hw, struct 
ice_ctl_q_info *cq) goto init_ctrlq_exit; init_ctrlq_free_rings: + ICE_FREE_CQ_BUFS(hw, cq, sq); ice_free_cq_ring(hw, &cq->sq); init_ctrlq_exit: @@ -416,33 +443,13 @@ static enum ice_status ice_init_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq) goto init_ctrlq_exit; init_ctrlq_free_rings: + ICE_FREE_CQ_BUFS(hw, cq, rq); ice_free_cq_ring(hw, &cq->rq); init_ctrlq_exit: return ret_code; } -#define ICE_FREE_CQ_BUFS(hw, qi, ring) \ -do { \ - int i; \ - /* free descriptors */ \ - for (i = 0; i < (qi)->num_##ring##_entries; i++) \ - if ((qi)->ring.r.ring##_bi[i].pa) { \ - dmam_free_coherent(ice_hw_to_dev(hw), \ - (qi)->ring.r.ring##_bi[i].size,\ - (qi)->ring.r.ring##_bi[i].va,\ - (qi)->ring.r.ring##_bi[i].pa);\ - (qi)->ring.r.ring##_bi[i].va = NULL; \ - (qi)->ring.r.ring##_bi[i].pa = 0; \ - (qi)->ring.r.ring##_bi[i].size = 0; \ - } \ - /* free the buffer info list */ \ - if ((qi)->ring.cmd_buf) \ - devm_kfree(ice_hw_to_dev(hw), (qi)->ring.cmd_buf); \ - /* free DMA head */ \ - devm_kfree(ice_hw_to_dev(hw), (qi)->ring.dma_head); \ -} while (0) - /** * ice_shutdown_sq - shutdown the Control ATQ * @hw: pointer to the hardware structure diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c index cbd53b586c36..6cfe8eb7f47d 100644 --- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c +++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c @@ -1535,10 +1535,12 @@ enum ice_status ice_init_hw_tbls(struct ice_hw *hw) es->ref_count = devm_kcalloc(ice_hw_to_dev(hw), es->count, sizeof(*es->ref_count), GFP_KERNEL); + if (!es->ref_count) + goto err; es->written = devm_kcalloc(ice_hw_to_dev(hw), es->count, sizeof(*es->written), GFP_KERNEL); - if (!es->ref_count) + if (!es->written) goto err; } return 0; diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 2408f0de95fc..d0ccb7ad447b 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -2900,7 +2900,7 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) if (err) { dev_err(dev, "ice_init_interrupt_scheme failed: %d\n", err); err = -EIO; - goto err_init_interrupt_unroll; + goto err_init_vsi_unroll; } /* Driver is mostly up */ @@ -2986,6 +2986,7 @@ err_msix_misc_unroll: ice_free_irq_msix_misc(pf); err_init_interrupt_unroll: ice_clear_interrupt_scheme(pf); +err_init_vsi_unroll: devm_kfree(dev, pf->vsi); err_init_pf_unroll: ice_deinit_pf(pf); diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index 8959418776f6..f80933320fd3 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -143,7 +143,8 @@ static int igb_get_link_ksettings(struct net_device *netdev, u32 speed; u32 supported, advertising; - status = rd32(E1000_STATUS); + status = pm_runtime_suspended(&adapter->pdev->dev) ? 
+ 0 : rd32(E1000_STATUS); if (hw->phy.media_type == e1000_media_type_copper) { supported = (SUPPORTED_10baseT_Half | diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index ed7e667d7eb2..3e41b20ed8eb 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -6194,9 +6194,18 @@ static void igb_reset_task(struct work_struct *work) struct igb_adapter *adapter; adapter = container_of(work, struct igb_adapter, reset_task); + rtnl_lock(); + /* If we're already down or resetting, just bail */ + if (test_bit(__IGB_DOWN, &adapter->state) || + test_bit(__IGB_RESETTING, &adapter->state)) { + rtnl_unlock(); + return; + } + igb_dump(adapter); netdev_err(adapter->netdev, "Reset adapter\n"); igb_reinit_locked(adapter); + rtnl_unlock(); } /** diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c index 0bd1294ba517..39c5e6fdb72c 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c @@ -2243,7 +2243,7 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw) } /* Configure pause time (2 TCs per register) */ - reg = hw->fc.pause_time * 0x00010001; + reg = hw->fc.pause_time * 0x00010001U; for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++) IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c index ccd852ad62a4..d50c5b55da18 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c @@ -192,7 +192,7 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid, } /* alloc the udl from per cpu ddp pool */ - ddp->udl = dma_pool_alloc(ddp_pool->pool, GFP_KERNEL, &ddp->udp); + ddp->udl = dma_pool_alloc(ddp_pool->pool, GFP_ATOMIC, &ddp->udp); if (!ddp->udl) { e_err(drv, "failed allocated ddp context\n"); goto out_noddp_unmap; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c index cc3196ae5aea..636e6e840afa 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c @@ -923,7 +923,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, ring->queue_index = txr_idx; /* assign ring to adapter */ - adapter->tx_ring[txr_idx] = ring; + WRITE_ONCE(adapter->tx_ring[txr_idx], ring); /* update count and index */ txr_count--; @@ -950,7 +950,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, set_ring_xdp(ring); /* assign ring to adapter */ - adapter->xdp_ring[xdp_idx] = ring; + WRITE_ONCE(adapter->xdp_ring[xdp_idx], ring); /* update count and index */ xdp_count--; @@ -993,7 +993,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, ring->queue_index = rxr_idx; /* assign ring to adapter */ - adapter->rx_ring[rxr_idx] = ring; + WRITE_ONCE(adapter->rx_ring[rxr_idx], ring); /* update count and index */ rxr_count--; @@ -1022,13 +1022,13 @@ static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx) ixgbe_for_each_ring(ring, q_vector->tx) { if (ring_is_xdp(ring)) - adapter->xdp_ring[ring->queue_index] = NULL; + WRITE_ONCE(adapter->xdp_ring[ring->queue_index], NULL); else - adapter->tx_ring[ring->queue_index] = NULL; + WRITE_ONCE(adapter->tx_ring[ring->queue_index], NULL); } ixgbe_for_each_ring(ring, q_vector->rx) - adapter->rx_ring[ring->queue_index] = NULL; + WRITE_ONCE(adapter->rx_ring[ring->queue_index], NULL); 
adapter->q_vector[v_idx] = NULL; napi_hash_del(&q_vector->napi); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index a26f9fb95ac0..5336bfcd2d70 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -2254,7 +2254,8 @@ static void ixgbe_rx_buffer_flip(struct ixgbe_ring *rx_ring, rx_buffer->page_offset ^= truesize; #else unsigned int truesize = ring_uses_build_skb(rx_ring) ? - SKB_DATA_ALIGN(IXGBE_SKB_PAD + size) : + SKB_DATA_ALIGN(IXGBE_SKB_PAD + size) + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) : SKB_DATA_ALIGN(size); rx_buffer->page_offset += truesize; @@ -7063,7 +7064,10 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter) } for (i = 0; i < adapter->num_rx_queues; i++) { - struct ixgbe_ring *rx_ring = adapter->rx_ring[i]; + struct ixgbe_ring *rx_ring = READ_ONCE(adapter->rx_ring[i]); + + if (!rx_ring) + continue; non_eop_descs += rx_ring->rx_stats.non_eop_descs; alloc_rx_page += rx_ring->rx_stats.alloc_rx_page; alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed; @@ -7084,15 +7088,20 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter) packets = 0; /* gather some stats to the adapter struct that are per queue */ for (i = 0; i < adapter->num_tx_queues; i++) { - struct ixgbe_ring *tx_ring = adapter->tx_ring[i]; + struct ixgbe_ring *tx_ring = READ_ONCE(adapter->tx_ring[i]); + + if (!tx_ring) + continue; restart_queue += tx_ring->tx_stats.restart_queue; tx_busy += tx_ring->tx_stats.tx_busy; bytes += tx_ring->stats.bytes; packets += tx_ring->stats.packets; } for (i = 0; i < adapter->num_xdp_queues; i++) { - struct ixgbe_ring *xdp_ring = adapter->xdp_ring[i]; + struct ixgbe_ring *xdp_ring = READ_ONCE(adapter->xdp_ring[i]); + if (!xdp_ring) + continue; restart_queue += xdp_ring->tx_stats.restart_queue; tx_busy += xdp_ring->tx_stats.tx_busy; bytes += xdp_ring->stats.bytes; diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index a10ae28ebc8a..ccb2abd18d6c 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -104,9 +104,11 @@ #define MVNETA_TX_IN_PRGRS BIT(1) #define MVNETA_TX_FIFO_EMPTY BIT(8) #define MVNETA_RX_MIN_FRAME_SIZE 0x247c +/* Only exists on Armada XP and Armada 370 */ #define MVNETA_SERDES_CFG 0x24A0 #define MVNETA_SGMII_SERDES_PROTO 0x0cc7 #define MVNETA_QSGMII_SERDES_PROTO 0x0667 +#define MVNETA_HSGMII_SERDES_PROTO 0x1107 #define MVNETA_TYPE_PRIO 0x24bc #define MVNETA_FORCE_UNI BIT(21) #define MVNETA_TXQ_CMD_1 0x24e4 @@ -3164,26 +3166,60 @@ static int mvneta_setup_txqs(struct mvneta_port *pp) return 0; } -static int mvneta_comphy_init(struct mvneta_port *pp) +static int mvneta_comphy_init(struct mvneta_port *pp, phy_interface_t interface) { int ret; - if (!pp->comphy) - return 0; - - ret = phy_set_mode_ext(pp->comphy, PHY_MODE_ETHERNET, - pp->phy_interface); + ret = phy_set_mode_ext(pp->comphy, PHY_MODE_ETHERNET, interface); if (ret) return ret; return phy_power_on(pp->comphy); } +static int mvneta_config_interface(struct mvneta_port *pp, + phy_interface_t interface) +{ + int ret = 0; + + if (pp->comphy) { + if (interface == PHY_INTERFACE_MODE_SGMII || + interface == PHY_INTERFACE_MODE_1000BASEX || + interface == PHY_INTERFACE_MODE_2500BASEX) { + ret = mvneta_comphy_init(pp, interface); + } + } else { + switch (interface) { + case PHY_INTERFACE_MODE_QSGMII: + mvreg_write(pp, MVNETA_SERDES_CFG, + MVNETA_QSGMII_SERDES_PROTO); + break; + + case 
PHY_INTERFACE_MODE_SGMII: + case PHY_INTERFACE_MODE_1000BASEX: + mvreg_write(pp, MVNETA_SERDES_CFG, + MVNETA_SGMII_SERDES_PROTO); + break; + + case PHY_INTERFACE_MODE_2500BASEX: + mvreg_write(pp, MVNETA_SERDES_CFG, + MVNETA_HSGMII_SERDES_PROTO); + break; + default: + break; + } + } + + pp->phy_interface = interface; + + return ret; +} + static void mvneta_start_dev(struct mvneta_port *pp) { int cpu; - WARN_ON(mvneta_comphy_init(pp)); + WARN_ON(mvneta_config_interface(pp, pp->phy_interface)); mvneta_max_rx_size_set(pp, pp->pkt_size); mvneta_txq_max_tx_size_set(pp, pp->pkt_size); @@ -3558,17 +3594,13 @@ static void mvneta_mac_config(struct phylink_config *config, unsigned int mode, /* When at 2.5G, the link partner can send frames with shortened * preambles. */ - if (state->speed == SPEED_2500) + if (state->interface == PHY_INTERFACE_MODE_2500BASEX) new_ctrl4 |= MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE; - if (pp->comphy && pp->phy_interface != state->interface && - (state->interface == PHY_INTERFACE_MODE_SGMII || - state->interface == PHY_INTERFACE_MODE_1000BASEX || - state->interface == PHY_INTERFACE_MODE_2500BASEX)) { - pp->phy_interface = state->interface; - - WARN_ON(phy_power_off(pp->comphy)); - WARN_ON(mvneta_comphy_init(pp)); + if (pp->phy_interface != state->interface) { + if (pp->comphy) + WARN_ON(phy_power_off(pp->comphy)); + WARN_ON(mvneta_config_interface(pp, state->interface)); } if (new_ctrl0 != gmac_ctrl0) @@ -4469,12 +4501,10 @@ static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode) /* MAC Cause register should be cleared */ mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0); - if (phy_mode == PHY_INTERFACE_MODE_QSGMII) - mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_QSGMII_SERDES_PROTO); - else if (phy_mode == PHY_INTERFACE_MODE_SGMII || - phy_interface_mode_is_8023z(phy_mode)) - mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO); - else if (!phy_interface_mode_is_rgmii(phy_mode)) + if (phy_mode != PHY_INTERFACE_MODE_QSGMII && + phy_mode != PHY_INTERFACE_MODE_SGMII && + !phy_interface_mode_is_8023z(phy_mode) && + !phy_interface_mode_is_rgmii(phy_mode)) return -EINVAL; return 0; @@ -4661,10 +4691,10 @@ static int mvneta_probe(struct platform_device *pdev) if (err < 0) goto err_netdev; - err = mvneta_port_power_up(pp, phy_mode); + err = mvneta_port_power_up(pp, pp->phy_interface); if (err < 0) { dev_err(&pdev->dev, "can't power up port\n"); - goto err_netdev; + return err; } /* Armada3700 network controller does not support per-cpu diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index 373b8c832850..0f136f1af5d1 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -1541,7 +1541,7 @@ static void mvpp2_read_stats(struct mvpp2_port *port) for (q = 0; q < port->ntxqs; q++) for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_txq_regs); i++) *pstats++ += mvpp2_read_index(port->priv, - MVPP22_CTRS_TX_CTR(port->id, i), + MVPP22_CTRS_TX_CTR(port->id, q), mvpp2_ethtool_txq_regs[i].offset); /* Rxqs are numbered from 0 from the user standpoint, but not from the @@ -1550,7 +1550,7 @@ static void mvpp2_read_stats(struct mvpp2_port *port) for (q = 0; q < port->nrxqs; q++) for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_rxq_regs); i++) *pstats++ += mvpp2_read_index(port->priv, - port->first_rxq + i, + port->first_rxq + q, mvpp2_ethtool_rxq_regs[i].offset); } @@ -5925,8 +5925,8 @@ static int mvpp2_remove(struct platform_device *pdev) { struct mvpp2 *priv = 
platform_get_drvdata(pdev); struct fwnode_handle *fwnode = pdev->dev.fwnode; + int i = 0, poolnum = MVPP2_BM_POOLS_NUM; struct fwnode_handle *port_fwnode; - int i = 0; mvpp2_dbgfs_cleanup(priv); @@ -5940,7 +5940,10 @@ static int mvpp2_remove(struct platform_device *pdev) destroy_workqueue(priv->stats_queue); - for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) { + if (priv->percpu_pools) + poolnum = mvpp2_get_nrxqs(priv) * 2; + + for (i = 0; i < poolnum; i++) { struct mvpp2_bm_pool *bm_pool = &priv->bm_pools[i]; mvpp2_bm_pool_destroy(&pdev->dev, priv, bm_pool); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/common.h b/drivers/net/ethernet/marvell/octeontx2/af/common.h index 413c3f254cf8..c881a573da66 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/common.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/common.h @@ -43,7 +43,7 @@ struct qmem { void *base; dma_addr_t iova; int alloc_sz; - u8 entry_sz; + u16 entry_sz; u8 align; u32 qsize; }; diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index 5f56ee83e3b1..df7c23cd3360 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c @@ -203,7 +203,7 @@ io_error: static inline u16 gm_phy_read(struct sky2_hw *hw, unsigned port, u16 reg) { - u16 v; + u16 v = 0; __gm_phy_read(hw, port, reg, &v); return v; } diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index 997dc811382a..d01b3a1b40f4 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -171,11 +171,21 @@ static int mt7621_gmac0_rgmii_adjust(struct mtk_eth *eth, return 0; } -static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth, int speed) +static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth, + phy_interface_t interface, int speed) { u32 val; int ret; + if (interface == PHY_INTERFACE_MODE_TRGMII) { + mtk_w32(eth, TRGMII_MODE, INTF_MODE); + val = 500000000; + ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val); + if (ret) + dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret); + return; + } + val = (speed == SPEED_1000) ? 
INTF_MODE_RGMII_1000 : INTF_MODE_RGMII_10_100; mtk_w32(eth, val, INTF_MODE); @@ -262,10 +272,9 @@ static void mtk_mac_config(struct phylink_config *config, unsigned int mode, state->interface)) goto err_phy; } else { - if (state->interface != - PHY_INTERFACE_MODE_TRGMII) - mtk_gmac0_rgmii_adjust(mac->hw, - state->speed); + mtk_gmac0_rgmii_adjust(mac->hw, + state->interface, + state->speed); /* mt7623_pad_clk_setup */ for (i = 0 ; i < NUM_TRGMII_CTRL; i++) @@ -2869,6 +2878,8 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np) eth->netdev[id]->irq = eth->irq[0]; eth->netdev[id]->dev.of_node = np; + eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN; + return 0; free_netdev: diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 87c2e8de6102..942646fb2256 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -4354,12 +4354,14 @@ end: static void mlx4_shutdown(struct pci_dev *pdev) { struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); + struct mlx4_dev *dev = persist->dev; mlx4_info(persist->dev, "mlx4_shutdown was called\n"); mutex_lock(&persist->interface_state_mutex); if (persist->interface_state & MLX4_INTERFACE_STATE_UP) mlx4_unload_one(pdev); mutex_unlock(&persist->interface_state_mutex); + mlx4_pci_disable_device(dev); } static const struct pci_error_handlers mlx4_err_handler = { diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c index 1a11bc0e1612..cfa0bba3940f 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mr.c +++ b/drivers/net/ethernet/mellanox/mlx4/mr.c @@ -114,7 +114,7 @@ static int mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order) goto err_out; for (i = 0; i <= buddy->max_order; ++i) { - s = BITS_TO_LONGS(1 << (buddy->max_order - i)); + s = BITS_TO_LONGS(1UL << (buddy->max_order - i)); buddy->bits[i] = kvmalloc_array(s, sizeof(long), GFP_KERNEL | __GFP_ZERO); if (!buddy->bits[i]) goto err_out_free; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c index fce6eccdcf8b..fa81a97f6ba9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c @@ -78,11 +78,26 @@ static const u32 mlx5e_ext_link_speed[MLX5E_EXT_LINK_MODES_NUMBER] = { [MLX5E_400GAUI_8] = 400000, }; +bool mlx5e_ptys_ext_supported(struct mlx5_core_dev *mdev) +{ + struct mlx5e_port_eth_proto eproto; + int err; + + if (MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet)) + return true; + + err = mlx5_port_query_eth_proto(mdev, 1, true, &eproto); + if (err) + return false; + + return !!eproto.cap; +} + static void mlx5e_port_get_speed_arr(struct mlx5_core_dev *mdev, const u32 **arr, u32 *size, bool force_legacy) { - bool ext = force_legacy ? false : MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet); + bool ext = force_legacy ? false : mlx5e_ptys_ext_supported(mdev); *size = ext ? 
ARRAY_SIZE(mlx5e_ext_link_speed) : ARRAY_SIZE(mlx5e_link_speed); @@ -177,7 +192,7 @@ int mlx5e_port_linkspeed(struct mlx5_core_dev *mdev, u32 *speed) bool ext; int err; - ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet); + ext = mlx5e_ptys_ext_supported(mdev); err = mlx5_port_query_eth_proto(mdev, 1, ext, &eproto); if (err) goto out; @@ -205,7 +220,7 @@ int mlx5e_port_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed) int err; int i; - ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet); + ext = mlx5e_ptys_ext_supported(mdev); err = mlx5_port_query_eth_proto(mdev, 1, ext, &eproto); if (err) return err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.h b/drivers/net/ethernet/mellanox/mlx5/core/en/port.h index 4a7f4497692b..e196888f7056 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.h @@ -54,7 +54,7 @@ int mlx5e_port_linkspeed(struct mlx5_core_dev *mdev, u32 *speed); int mlx5e_port_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed); u32 mlx5e_port_speed2linkmodes(struct mlx5_core_dev *mdev, u32 speed, bool force_legacy); - +bool mlx5e_ptys_ext_supported(struct mlx5_core_dev *mdev); int mlx5e_port_query_pbmc(struct mlx5_core_dev *mdev, void *out); int mlx5e_port_set_pbmc(struct mlx5_core_dev *mdev, void *in); int mlx5e_port_query_priority2buffer(struct mlx5_core_dev *mdev, u8 *buffer); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 39ee32518b10..8cd529556b21 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -200,7 +200,7 @@ static void mlx5e_ethtool_get_speed_arr(struct mlx5_core_dev *mdev, struct ptys2ethtool_config **arr, u32 *size) { - bool ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet); + bool ext = mlx5e_ptys_ext_supported(mdev); *arr = ext ? ptys2ext_ethtool_table : ptys2legacy_ethtool_table; *size = ext ? 
ARRAY_SIZE(ptys2ext_ethtool_table) : @@ -871,7 +871,7 @@ static void get_lp_advertising(struct mlx5_core_dev *mdev, u32 eth_proto_lp, struct ethtool_link_ksettings *link_ksettings) { unsigned long *lp_advertising = link_ksettings->link_modes.lp_advertising; - bool ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet); + bool ext = mlx5e_ptys_ext_supported(mdev); ptys2ethtool_adver_link(lp_advertising, eth_proto_lp, ext); } @@ -900,7 +900,7 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv, __func__, err); goto err_query_regs; } - ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet); + ext = !!MLX5_GET_ETH_PROTO(ptys_reg, out, true, eth_proto_capability); eth_proto_cap = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, eth_proto_capability); eth_proto_admin = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, @@ -1052,7 +1052,7 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv, autoneg = link_ksettings->base.autoneg; speed = link_ksettings->base.speed; - ext_supported = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet); + ext_supported = mlx5e_ptys_ext_supported(mdev); ext = ext_requested(autoneg, adver, ext_supported); if (!ext_supported && ext) return -EOPNOTSUPP; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index c133beb6a7a5..ee0d78f801af 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -432,7 +432,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, err = mlx5_wq_ll_create(mdev, &rqp->wq, rqc_wq, &rq->mpwqe.wq, &rq->wq_ctrl); if (err) - return err; + goto err_rq_wq_destroy; rq->mpwqe.wq.db = &rq->mpwqe.wq.db[MLX5_RCV_DBR]; @@ -485,7 +485,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, err = mlx5_wq_cyc_create(mdev, &rqp->wq, rqc_wq, &rq->wqe.wq, &rq->wq_ctrl); if (err) - return err; + goto err_rq_wq_destroy; rq->wqe.wq.db = &rq->wqe.wq.db[MLX5_RCV_DBR]; @@ -3038,6 +3038,25 @@ void mlx5e_timestamp_init(struct mlx5e_priv *priv) priv->tstamp.rx_filter = HWTSTAMP_FILTER_NONE; } +static void mlx5e_modify_admin_state(struct mlx5_core_dev *mdev, + enum mlx5_port_status state) +{ + struct mlx5_eswitch *esw = mdev->priv.eswitch; + int vport_admin_state; + + mlx5_set_port_admin_status(mdev, state); + + if (!MLX5_ESWITCH_MANAGER(mdev) || mlx5_eswitch_mode(esw) == MLX5_ESWITCH_OFFLOADS) + return; + + if (state == MLX5_PORT_UP) + vport_admin_state = MLX5_VPORT_ADMIN_STATE_AUTO; + else + vport_admin_state = MLX5_VPORT_ADMIN_STATE_DOWN; + + mlx5_eswitch_set_vport_state(esw, MLX5_VPORT_UPLINK, vport_admin_state); +} + int mlx5e_open_locked(struct net_device *netdev) { struct mlx5e_priv *priv = netdev_priv(netdev); @@ -3070,7 +3089,7 @@ int mlx5e_open(struct net_device *netdev) mutex_lock(&priv->state_lock); err = mlx5e_open_locked(netdev); if (!err) - mlx5_set_port_admin_status(priv->mdev, MLX5_PORT_UP); + mlx5e_modify_admin_state(priv->mdev, MLX5_PORT_UP); mutex_unlock(&priv->state_lock); if (mlx5_vxlan_allowed(priv->mdev->vxlan)) @@ -3107,7 +3126,7 @@ int mlx5e_close(struct net_device *netdev) return -ENODEV; mutex_lock(&priv->state_lock); - mlx5_set_port_admin_status(priv->mdev, MLX5_PORT_DOWN); + mlx5e_modify_admin_state(priv->mdev, MLX5_PORT_DOWN); err = mlx5e_close_locked(netdev); mutex_unlock(&priv->state_lock); @@ -5172,7 +5191,7 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv) /* Marking the link as currently not needed by the Driver */ if (!netif_running(netdev)) - mlx5_set_port_admin_status(mdev, MLX5_PORT_DOWN); + 
mlx5e_modify_admin_state(mdev, MLX5_PORT_DOWN); mlx5e_set_netdev_mtu_boundaries(priv); mlx5e_set_dev_port_mtu(priv); @@ -5356,6 +5375,8 @@ err_cleanup_tx: profile->cleanup_tx(priv); out: + set_bit(MLX5E_STATE_DESTROYING, &priv->state); + cancel_work_sync(&priv->update_stats_work); return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 9b232ef36d53..88b51f64a64e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -1736,6 +1736,8 @@ static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv) INIT_WORK(&rpriv->uplink_priv.reoffload_flows_work, mlx5e_tc_reoffload_flows_work); + mlx5_modify_vport_admin_state(mdev, MLX5_VPORT_STATE_OP_MOD_UPLINK, + 0, 0, MLX5_VPORT_ADMIN_STATE_AUTO); mlx5_lag_add(mdev, netdev); priv->events_nb.notifier_call = uplink_rep_async_event; mlx5_notifier_register(mdev, &priv->events_nb); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index c4eed5bbcd45..066bada4ccd1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -1428,6 +1428,7 @@ out: #ifdef CONFIG_MLX5_CORE_IPOIB +#define MLX5_IB_GRH_SGID_OFFSET 8 #define MLX5_IB_GRH_DGID_OFFSET 24 #define MLX5_GID_SIZE 16 @@ -1441,6 +1442,7 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq, struct net_device *netdev; struct mlx5e_priv *priv; char *pseudo_header; + u32 flags_rqpn; u32 qpn; u8 *dgid; u8 g; @@ -1462,7 +1464,8 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq, tstamp = &priv->tstamp; stats = &priv->channel_stats[rq->ix].rq; - g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3; + flags_rqpn = be32_to_cpu(cqe->flags_rqpn); + g = (flags_rqpn >> 28) & 3; dgid = skb->data + MLX5_IB_GRH_DGID_OFFSET; if ((!g) || dgid[0] != 0xff) skb->pkt_type = PACKET_HOST; @@ -1471,9 +1474,15 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq, else skb->pkt_type = PACKET_MULTICAST; - /* TODO: IB/ipoib: Allow mcast packets from other VFs - * 68996a6e760e5c74654723eeb57bf65628ae87f4 + /* Drop packets that this interface sent, ie multicast packets + * that the HCA has replicated. 
*/ + if (g && (qpn == (flags_rqpn & 0xffffff)) && + (memcmp(netdev->dev_addr + 4, skb->data + MLX5_IB_GRH_SGID_OFFSET, + MLX5_GID_SIZE) == 0)) { + skb->dev = NULL; + return; + } skb_pull(skb, MLX5_IB_GRH_BYTES); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index c6ed4b7f4f97..8e6ab8201939 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -1919,7 +1919,7 @@ abort: mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB); mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH); } - + esw_destroy_tsar(esw); return err; } @@ -2094,6 +2094,8 @@ int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, u16 vport, int link_state) { struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport); + int opmod = MLX5_VPORT_STATE_OP_MOD_ESW_VPORT; + int other_vport = 1; int err = 0; if (!ESW_ALLOWED(esw)) @@ -2101,15 +2103,17 @@ int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, if (IS_ERR(evport)) return PTR_ERR(evport); + if (vport == MLX5_VPORT_UPLINK) { + opmod = MLX5_VPORT_STATE_OP_MOD_UPLINK; + other_vport = 0; + vport = 0; + } mutex_lock(&esw->state_lock); - err = mlx5_modify_vport_admin_state(esw->dev, - MLX5_VPORT_STATE_OP_MOD_ESW_VPORT, - vport, 1, link_state); + err = mlx5_modify_vport_admin_state(esw->dev, opmod, vport, other_vport, link_state); if (err) { - mlx5_core_warn(esw->dev, - "Failed to set vport %d link state, err = %d", - vport, err); + mlx5_core_warn(esw->dev, "Failed to set vport %d link state, opmod = %d, err = %d", + vport, opmod, err); goto unlock; } @@ -2151,8 +2155,6 @@ int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw, struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport); int err = 0; - if (!ESW_ALLOWED(esw)) - return -EPERM; if (IS_ERR(evport)) return PTR_ERR(evport); if (vlan > 4095 || qos > 7) @@ -2180,6 +2182,9 @@ int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw, u8 set_flags = 0; int err; + if (!ESW_ALLOWED(esw)) + return -EPERM; + if (vlan || qos) set_flags = SET_VLAN_STRIP | SET_VLAN_INSERT; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index 6bd6f5895244..0ddbae1e64fa 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -606,6 +606,8 @@ static inline int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode) { ret static inline void mlx5_eswitch_disable(struct mlx5_eswitch *esw) {} static inline bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0, struct mlx5_core_dev *dev1) { return true; } static inline bool mlx5_eswitch_is_funcs_handler(struct mlx5_core_dev *dev) { return false; } +static inline +int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, u16 vport, int link_state) { return 0; } static inline const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev) { return ERR_PTR(-EOPNOTSUPP); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 86e6bbb57482..b66e5b6eecd9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -809,18 +809,15 @@ static int connect_fts_in_prio(struct mlx5_core_dev *dev, { struct mlx5_flow_root_namespace *root = find_root(&prio->node); struct mlx5_flow_table *iter; - int i = 0; int err; fs_for_each_ft(iter, prio) { - i++; err = root->cmds->modify_flow_table(root, iter, 
ft); if (err) { - mlx5_core_warn(dev, "Failed to modify flow table %d\n", - iter->id); + mlx5_core_err(dev, + "Failed to modify flow table id %d, type %d, err %d\n", + iter->id, iter->type, err); /* The driver is out of sync with the FW */ - if (i > 1) - WARN_ON(true); return err; } } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c index 43f97601b500..75fc283cacc3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c @@ -388,10 +388,31 @@ static int mlx5_ptp_enable(struct ptp_clock_info *ptp, return 0; } +enum { + MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_IN = BIT(0), + MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_OUT = BIT(1), +}; + static int mlx5_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin, enum ptp_pin_function func, unsigned int chan) { - return (func == PTP_PF_PHYSYNC) ? -EOPNOTSUPP : 0; + struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, + ptp_info); + + switch (func) { + case PTP_PF_NONE: + return 0; + case PTP_PF_EXTTS: + return !(clock->pps_info.pin_caps[pin] & + MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_IN); + case PTP_PF_PEROUT: + return !(clock->pps_info.pin_caps[pin] & + MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_OUT); + default: + return -EOPNOTSUPP; + } + + return -EOPNOTSUPP; } static const struct ptp_clock_info mlx5_ptp_clock_info = { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c index cc262b30aed5..dc589322940c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c @@ -293,7 +293,40 @@ static int mlx5_query_module_num(struct mlx5_core_dev *dev, int *module_num) return 0; } -static int mlx5_eeprom_page(int offset) +static int mlx5_query_module_id(struct mlx5_core_dev *dev, int module_num, + u8 *module_id) +{ + u32 in[MLX5_ST_SZ_DW(mcia_reg)] = {}; + u32 out[MLX5_ST_SZ_DW(mcia_reg)]; + int err, status; + u8 *ptr; + + MLX5_SET(mcia_reg, in, i2c_device_address, MLX5_I2C_ADDR_LOW); + MLX5_SET(mcia_reg, in, module, module_num); + MLX5_SET(mcia_reg, in, device_address, 0); + MLX5_SET(mcia_reg, in, page_number, 0); + MLX5_SET(mcia_reg, in, size, 1); + MLX5_SET(mcia_reg, in, l, 0); + + err = mlx5_core_access_reg(dev, in, sizeof(in), out, + sizeof(out), MLX5_REG_MCIA, 0, 0); + if (err) + return err; + + status = MLX5_GET(mcia_reg, out, status); + if (status) { + mlx5_core_err(dev, "query_mcia_reg failed: status: 0x%x\n", + status); + return -EIO; + } + ptr = MLX5_ADDR_OF(mcia_reg, out, dword_0); + + *module_id = ptr[0]; + + return 0; +} + +static int mlx5_qsfp_eeprom_page(u16 offset) { if (offset < MLX5_EEPROM_PAGE_LENGTH) /* Addresses between 0-255 - page 00 */ @@ -307,7 +340,7 @@ static int mlx5_eeprom_page(int offset) MLX5_EEPROM_HIGH_PAGE_LENGTH); } -static int mlx5_eeprom_high_page_offset(int page_num) +static int mlx5_qsfp_eeprom_high_page_offset(int page_num) { if (!page_num) /* Page 0 always start from low page */ return 0; @@ -316,35 +349,62 @@ static int mlx5_eeprom_high_page_offset(int page_num) return page_num * MLX5_EEPROM_HIGH_PAGE_LENGTH; } +static void mlx5_qsfp_eeprom_params_set(u16 *i2c_addr, int *page_num, u16 *offset) +{ + *i2c_addr = MLX5_I2C_ADDR_LOW; + *page_num = mlx5_qsfp_eeprom_page(*offset); + *offset -= mlx5_qsfp_eeprom_high_page_offset(*page_num); +} + +static void mlx5_sfp_eeprom_params_set(u16 *i2c_addr, int *page_num, u16 *offset) +{ + *i2c_addr = MLX5_I2C_ADDR_LOW; + 
*page_num = 0; + + if (*offset < MLX5_EEPROM_PAGE_LENGTH) + return; + + *i2c_addr = MLX5_I2C_ADDR_HIGH; + *offset -= MLX5_EEPROM_PAGE_LENGTH; +} + int mlx5_query_module_eeprom(struct mlx5_core_dev *dev, u16 offset, u16 size, u8 *data) { - int module_num, page_num, status, err; + int module_num, status, err, page_num = 0; + u32 in[MLX5_ST_SZ_DW(mcia_reg)] = {}; u32 out[MLX5_ST_SZ_DW(mcia_reg)]; - u32 in[MLX5_ST_SZ_DW(mcia_reg)]; - u16 i2c_addr; - void *ptr = MLX5_ADDR_OF(mcia_reg, out, dword_0); + u16 i2c_addr = 0; + u8 module_id; + void *ptr; err = mlx5_query_module_num(dev, &module_num); if (err) return err; - memset(in, 0, sizeof(in)); - size = min_t(int, size, MLX5_EEPROM_MAX_BYTES); + err = mlx5_query_module_id(dev, module_num, &module_id); + if (err) + return err; - /* Get the page number related to the given offset */ - page_num = mlx5_eeprom_page(offset); - - /* Set the right offset according to the page number, - * For page_num > 0, relative offset is always >= 128 (high page). - */ - offset -= mlx5_eeprom_high_page_offset(page_num); + switch (module_id) { + case MLX5_MODULE_ID_SFP: + mlx5_sfp_eeprom_params_set(&i2c_addr, &page_num, &offset); + break; + case MLX5_MODULE_ID_QSFP: + case MLX5_MODULE_ID_QSFP_PLUS: + case MLX5_MODULE_ID_QSFP28: + mlx5_qsfp_eeprom_params_set(&i2c_addr, &page_num, &offset); + break; + default: + mlx5_core_err(dev, "Module ID not recognized: 0x%x\n", module_id); + return -EINVAL; + } if (offset + size > MLX5_EEPROM_PAGE_LENGTH) /* Cross pages read, read until offset 256 in low page */ size -= offset + size - MLX5_EEPROM_PAGE_LENGTH; - i2c_addr = MLX5_I2C_ADDR_LOW; + size = min_t(int, size, MLX5_EEPROM_MAX_BYTES); MLX5_SET(mcia_reg, in, l, 0); MLX5_SET(mcia_reg, in, module, module_num); @@ -365,6 +425,7 @@ int mlx5_query_module_eeprom(struct mlx5_core_dev *dev, return -EIO; } + ptr = MLX5_ADDR_OF(mcia_reg, out, dword_0); memcpy(data, ptr, size); return size; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c index 7c77378accf0..f012aac83b10 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c @@ -181,7 +181,7 @@ static struct mlx5dr_qp *dr_create_rc_qp(struct mlx5_core_dev *mdev, in, pas)); err = mlx5_core_create_qp(mdev, &dr_qp->mqp, in, inlen); - kfree(in); + kvfree(in); if (err) { mlx5_core_warn(mdev, " Can't create QP\n"); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c index 1e32e2443f73..348f02e336f6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c @@ -247,29 +247,9 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns, /* The order of the actions are must to be keep, only the following * order is supported by SW steering: - * TX: push vlan -> modify header -> encap + * TX: modify header -> push vlan -> encap * RX: decap -> pop vlan -> modify header */ - if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) { - tmp_action = create_action_push_vlan(domain, &fte->action.vlan[0]); - if (!tmp_action) { - err = -ENOMEM; - goto free_actions; - } - fs_dr_actions[fs_dr_num_actions++] = tmp_action; - actions[num_actions++] = tmp_action; - } - - if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) { - tmp_action = create_action_push_vlan(domain, &fte->action.vlan[1]); - if (!tmp_action) { - err = 
-ENOMEM; - goto free_actions; - } - fs_dr_actions[fs_dr_num_actions++] = tmp_action; - actions[num_actions++] = tmp_action; - } - if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_DECAP) { enum mlx5dr_action_reformat_type decap_type = DR_ACTION_REFORMAT_TYP_TNL_L2_TO_L2; @@ -322,6 +302,26 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns, actions[num_actions++] = fte->action.modify_hdr->action.dr_action; + if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) { + tmp_action = create_action_push_vlan(domain, &fte->action.vlan[0]); + if (!tmp_action) { + err = -ENOMEM; + goto free_actions; + } + fs_dr_actions[fs_dr_num_actions++] = tmp_action; + actions[num_actions++] = tmp_action; + } + + if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) { + tmp_action = create_action_push_vlan(domain, &fte->action.vlan[1]); + if (!tmp_action) { + err = -ENOMEM; + goto free_actions; + } + fs_dr_actions[fs_dr_num_actions++] = tmp_action; + actions[num_actions++] = tmp_action; + } + if (delay_encap_set) actions[num_actions++] = fte->action.pkt_reformat->action.dr_action; diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c index 0a0884d86d44..c7c3fc7d1126 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c @@ -592,7 +592,7 @@ static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core) err = mlxsw_core_trap_register(mlxsw_core, &mlxsw_emad_rx_listener, mlxsw_core); if (err) - return err; + goto err_trap_register; err = mlxsw_core->driver->basic_trap_groups_set(mlxsw_core); if (err) @@ -604,6 +604,7 @@ static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core) err_emad_trap_set: mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_emad_rx_listener, mlxsw_core); +err_trap_register: destroy_workqueue(mlxsw_core->emad_wq); return err; } @@ -1576,7 +1577,7 @@ static int mlxsw_core_reg_access_emad(struct mlxsw_core *mlxsw_core, err = mlxsw_emad_reg_access(mlxsw_core, reg, payload, type, trans, bulk_list, cb, cb_priv, tid); if (err) { - kfree(trans); + kfree_rcu(trans, rcu); return err; } return 0; @@ -1801,11 +1802,13 @@ void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb, break; } } - rcu_read_unlock(); - if (!found) + if (!found) { + rcu_read_unlock(); goto drop; + } rxl->func(skb, local_port, rxl_item->priv); + rcu_read_unlock(); return; drop: diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c index f3d1f9411d10..aa4fef789084 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci.c +++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c @@ -1401,23 +1401,12 @@ static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core, u16 num_pages; int err; - mutex_init(&mlxsw_pci->cmd.lock); - init_waitqueue_head(&mlxsw_pci->cmd.wait); - mlxsw_pci->core = mlxsw_core; mbox = mlxsw_cmd_mbox_alloc(); if (!mbox) return -ENOMEM; - err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.in_mbox); - if (err) - goto mbox_put; - - err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.out_mbox); - if (err) - goto err_out_mbox_alloc; - err = mlxsw_pci_sw_reset(mlxsw_pci, mlxsw_pci->id); if (err) goto err_sw_reset; @@ -1524,9 +1513,6 @@ err_query_fw: mlxsw_pci_free_irq_vectors(mlxsw_pci); err_alloc_irq: err_sw_reset: - mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.out_mbox); -err_out_mbox_alloc: - mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox); mbox_put: mlxsw_cmd_mbox_free(mbox); return err; @@ -1540,8 +1526,6 @@ static 
void mlxsw_pci_fini(void *bus_priv) mlxsw_pci_aqs_fini(mlxsw_pci); mlxsw_pci_fw_area_fini(mlxsw_pci); mlxsw_pci_free_irq_vectors(mlxsw_pci); - mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.out_mbox); - mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox); } static struct mlxsw_pci_queue * @@ -1755,6 +1739,37 @@ static const struct mlxsw_bus mlxsw_pci_bus = { .features = MLXSW_BUS_F_TXRX | MLXSW_BUS_F_RESET, }; +static int mlxsw_pci_cmd_init(struct mlxsw_pci *mlxsw_pci) +{ + int err; + + mutex_init(&mlxsw_pci->cmd.lock); + init_waitqueue_head(&mlxsw_pci->cmd.wait); + + err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.in_mbox); + if (err) + goto err_in_mbox_alloc; + + err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.out_mbox); + if (err) + goto err_out_mbox_alloc; + + return 0; + +err_out_mbox_alloc: + mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox); +err_in_mbox_alloc: + mutex_destroy(&mlxsw_pci->cmd.lock); + return err; +} + +static void mlxsw_pci_cmd_fini(struct mlxsw_pci *mlxsw_pci) +{ + mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.out_mbox); + mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox); + mutex_destroy(&mlxsw_pci->cmd.lock); +} + static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) { const char *driver_name = pdev->driver->name; @@ -1810,6 +1825,10 @@ static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) mlxsw_pci->pdev = pdev; pci_set_drvdata(pdev, mlxsw_pci); + err = mlxsw_pci_cmd_init(mlxsw_pci); + if (err) + goto err_pci_cmd_init; + mlxsw_pci->bus_info.device_kind = driver_name; mlxsw_pci->bus_info.device_name = pci_name(mlxsw_pci->pdev); mlxsw_pci->bus_info.dev = &pdev->dev; @@ -1827,6 +1846,8 @@ static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) return 0; err_bus_device_register: + mlxsw_pci_cmd_fini(mlxsw_pci); +err_pci_cmd_init: iounmap(mlxsw_pci->hw_addr); err_ioremap: err_pci_resource_len_check: @@ -1844,6 +1865,7 @@ static void mlxsw_pci_remove(struct pci_dev *pdev) struct mlxsw_pci *mlxsw_pci = pci_get_drvdata(pdev); mlxsw_core_bus_device_unregister(mlxsw_pci->core, false); + mlxsw_pci_cmd_fini(mlxsw_pci); iounmap(mlxsw_pci->hw_addr); pci_release_regions(mlxsw_pci->pdev); pci_disable_device(mlxsw_pci->pdev); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index efdf8cb5114c..2f013fc71698 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -6287,7 +6287,7 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb, } fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC); - if (WARN_ON(!fib_work)) + if (!fib_work) return NOTIFY_BAD; fib_work->mlxsw_sp = router->mlxsw_sp; diff --git a/drivers/net/ethernet/ni/nixge.c b/drivers/net/ethernet/ni/nixge.c index 2761f3a3ae50..56f285985b43 100644 --- a/drivers/net/ethernet/ni/nixge.c +++ b/drivers/net/ethernet/ni/nixge.c @@ -1318,19 +1318,21 @@ static int nixge_probe(struct platform_device *pdev) netif_napi_add(ndev, &priv->napi, nixge_poll, NAPI_POLL_WEIGHT); err = nixge_of_get_resources(pdev); if (err) - return err; + goto free_netdev; __nixge_hw_set_mac_address(ndev); priv->tx_irq = platform_get_irq_byname(pdev, "tx"); if (priv->tx_irq < 0) { netdev_err(ndev, "could not find 'tx' irq"); - return priv->tx_irq; + err = priv->tx_irq; + goto free_netdev; } priv->rx_irq = platform_get_irq_byname(pdev, "rx"); if (priv->rx_irq < 0) { netdev_err(ndev, "could not find 
'rx' irq"); - return priv->rx_irq; + err = priv->rx_irq; + goto free_netdev; } priv->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD; diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c index 544012a67221..1d59ef367a85 100644 --- a/drivers/net/ethernet/nxp/lpc_eth.c +++ b/drivers/net/ethernet/nxp/lpc_eth.c @@ -815,7 +815,8 @@ static int lpc_mii_init(struct netdata_local *pldat) if (mdiobus_register(pldat->mii_bus)) goto err_out_unregister_bus; - if (lpc_mii_probe(pldat->ndev) != 0) + err = lpc_mii_probe(pldat->ndev); + if (err) goto err_out_unregister_bus; return 0; diff --git a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c index 7d10265f782a..5aacc00962df 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c @@ -102,15 +102,18 @@ static void ionic_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p) { struct ionic_lif *lif = netdev_priv(netdev); + unsigned int offset; unsigned int size; regs->version = IONIC_DEV_CMD_REG_VERSION; + offset = 0; size = IONIC_DEV_INFO_REG_COUNT * sizeof(u32); - memcpy_fromio(p, lif->ionic->idev.dev_info_regs->words, size); + memcpy_fromio(p + offset, lif->ionic->idev.dev_info_regs->words, size); + offset += size; size = IONIC_DEV_CMD_REG_COUNT * sizeof(u32); - memcpy_fromio(p, lif->ionic->idev.dev_cmd_regs->words, size); + memcpy_fromio(p + offset, lif->ionic->idev.dev_cmd_regs->words, size); } static int ionic_get_link_ksettings(struct net_device *netdev, diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c index 134640412d7b..e66002251596 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c @@ -666,7 +666,7 @@ static bool ionic_notifyq_service(struct ionic_cq *cq, eid = le64_to_cpu(comp->event.eid); /* Have we run out of new completions to process? 
*/ - if (eid <= lif->last_eid) + if ((s64)(eid - lif->last_eid) <= 0) return false; lif->last_eid = eid; @@ -809,8 +809,7 @@ static int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr) if (f) return 0; - netdev_dbg(lif->netdev, "rx_filter add ADDR %pM (id %d)\n", addr, - ctx.comp.rx_filter_add.filter_id); + netdev_dbg(lif->netdev, "rx_filter add ADDR %pM\n", addr); memcpy(ctx.cmd.rx_filter_add.mac.addr, addr, ETH_ALEN); err = ionic_adminq_post_wait(lif, &ctx); @@ -839,6 +838,9 @@ static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr) return -ENOENT; } + netdev_dbg(lif->netdev, "rx_filter del ADDR %pM (id %d)\n", + addr, f->filter_id); + ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id); ionic_rx_filter_free(lif, f); spin_unlock_bh(&lif->rx_filters.lock); @@ -847,9 +849,6 @@ static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr) if (err) return err; - netdev_dbg(lif->netdev, "rx_filter del ADDR %pM (id %d)\n", addr, - ctx.cmd.rx_filter_del.filter_id); - return 0; } @@ -1187,6 +1186,7 @@ static int ionic_init_nic_features(struct ionic_lif *lif) netdev->hw_features |= netdev->hw_enc_features; netdev->features |= netdev->hw_features; + netdev->vlan_features |= netdev->features & ~NETIF_F_VLAN_FEATURES; netdev->priv_flags |= IFF_UNICAST_FLT; @@ -1290,13 +1290,11 @@ static int ionic_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, }; int err; + netdev_dbg(netdev, "rx_filter add VLAN %d\n", vid); err = ionic_adminq_post_wait(lif, &ctx); if (err) return err; - netdev_dbg(netdev, "rx_filter add VLAN %d (id %d)\n", vid, - ctx.comp.rx_filter_add.filter_id); - return ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx); } @@ -1321,8 +1319,8 @@ static int ionic_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, return -ENOENT; } - netdev_dbg(netdev, "rx_filter del VLAN %d (id %d)\n", vid, - le32_to_cpu(ctx.cmd.rx_filter_del.filter_id)); + netdev_dbg(netdev, "rx_filter del VLAN %d (id %d)\n", + vid, f->filter_id); ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id); ionic_rx_filter_free(lif, f); diff --git a/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c b/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c index 7a093f148ee5..60cb77e2bab4 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c @@ -36,10 +36,12 @@ int ionic_rx_filters_init(struct ionic_lif *lif) spin_lock_init(&lif->rx_filters.lock); + spin_lock_bh(&lif->rx_filters.lock); for (i = 0; i < IONIC_RX_FILTER_HLISTS; i++) { INIT_HLIST_HEAD(&lif->rx_filters.by_hash[i]); INIT_HLIST_HEAD(&lif->rx_filters.by_id[i]); } + spin_unlock_bh(&lif->rx_filters.lock); return 0; } @@ -51,11 +53,13 @@ void ionic_rx_filters_deinit(struct ionic_lif *lif) struct hlist_node *tmp; unsigned int i; + spin_lock_bh(&lif->rx_filters.lock); for (i = 0; i < IONIC_RX_FILTER_HLISTS; i++) { head = &lif->rx_filters.by_id[i]; hlist_for_each_entry_safe(f, tmp, head, by_id) ionic_rx_filter_free(lif, f); } + spin_unlock_bh(&lif->rx_filters.lock); } int ionic_rx_filter_save(struct ionic_lif *lif, u32 flow_id, u16 rxq_index, @@ -91,6 +95,7 @@ int ionic_rx_filter_save(struct ionic_lif *lif, u32 flow_id, u16 rxq_index, f->filter_id = le32_to_cpu(ctx->comp.rx_filter_add.filter_id); f->rxq_index = rxq_index; memcpy(&f->cmd, ac, sizeof(f->cmd)); + netdev_dbg(lif->netdev, "rx_filter add filter_id %d\n", f->filter_id); INIT_HLIST_NODE(&f->by_hash); INIT_HLIST_NODE(&f->by_id); diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c 
b/drivers/net/ethernet/qlogic/qed/qed_cxt.c index 8e1bdf58b9e7..8ea46b81b739 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c @@ -396,7 +396,7 @@ static void qed_cxt_qm_iids(struct qed_hwfn *p_hwfn, vf_tids += segs[NUM_TASK_PF_SEGMENTS].count; } - iids->vf_cids += vf_cids * p_mngr->vf_count; + iids->vf_cids = vf_cids; iids->tids += vf_tids * p_mngr->vf_count; DP_VERBOSE(p_hwfn, QED_MSG_ILT, @@ -2073,8 +2073,8 @@ static void qed_rdma_set_pf_params(struct qed_hwfn *p_hwfn, num_srqs = min_t(u32, QED_RDMA_MAX_SRQS, p_params->num_srqs); if (p_hwfn->mcp_info->func_info.protocol == QED_PCI_ETH_RDMA) { - DP_NOTICE(p_hwfn, - "Current day drivers don't support RoCE & iWARP simultaneously on the same PF. Default to RoCE-only\n"); + DP_VERBOSE(p_hwfn, QED_MSG_SP, + "Current day drivers don't support RoCE & iWARP simultaneously on the same PF. Default to RoCE-only\n"); p_hwfn->hw_info.personality = QED_PCI_ETH_ROCE; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c index 859caa6c1a1f..8e7be214f959 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_debug.c +++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c @@ -8197,6 +8197,10 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer) DP_ERR(cdev, "qed_dbg_mcp_trace failed. rc = %d\n", rc); } + /* Re-populate nvm attribute info */ + qed_mcp_nvm_info_free(p_hwfn); + qed_mcp_nvm_info_populate(p_hwfn); + /* nvm cfg1 */ rc = qed_dbg_nvm_image(cdev, (u8 *)buffer + offset + REGDUMP_HEADER_SIZE, diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index 0bf91df80d47..4456ce5325a7 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -1368,6 +1368,8 @@ static void qed_dbg_user_data_free(struct qed_hwfn *p_hwfn) void qed_resc_free(struct qed_dev *cdev) { + struct qed_rdma_info *rdma_info; + struct qed_hwfn *p_hwfn; int i; if (IS_VF(cdev)) { @@ -1385,7 +1387,8 @@ void qed_resc_free(struct qed_dev *cdev) qed_llh_free(cdev); for_each_hwfn(cdev, i) { - struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + p_hwfn = cdev->hwfns + i; + rdma_info = p_hwfn->p_rdma_info; qed_cxt_mngr_free(p_hwfn); qed_qm_info_free(p_hwfn); @@ -1404,8 +1407,10 @@ void qed_resc_free(struct qed_dev *cdev) qed_ooo_free(p_hwfn); } - if (QED_IS_RDMA_PERSONALITY(p_hwfn)) + if (QED_IS_RDMA_PERSONALITY(p_hwfn) && rdma_info) { + qed_spq_unregister_async_cb(p_hwfn, rdma_info->proto); qed_rdma_info_free(p_hwfn); + } qed_iov_free(p_hwfn); qed_l2_free(p_hwfn); @@ -3087,7 +3092,7 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params) } /* Log and clear previous pglue_b errors if such exist */ - qed_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_main_ptt); + qed_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_main_ptt, true); /* Enable the PF's internal FID_enable in the PXP */ rc = qed_pglueb_set_pfid_enable(p_hwfn, p_hwfn->p_main_ptt, @@ -4418,12 +4423,6 @@ static int qed_get_dev_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) return 0; } -static void qed_nvm_info_free(struct qed_hwfn *p_hwfn) -{ - kfree(p_hwfn->nvm_info.image_att); - p_hwfn->nvm_info.image_att = NULL; -} - static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn, void __iomem *p_regview, void __iomem *p_doorbells, @@ -4508,7 +4507,7 @@ static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn, return rc; err3: if (IS_LEAD_HWFN(p_hwfn)) - qed_nvm_info_free(p_hwfn); + qed_mcp_nvm_info_free(p_hwfn); err2: if (IS_LEAD_HWFN(p_hwfn)) 
qed_iov_free_hw_info(p_hwfn->cdev); @@ -4569,7 +4568,7 @@ int qed_hw_prepare(struct qed_dev *cdev, if (rc) { if (IS_PF(cdev)) { qed_init_free(p_hwfn); - qed_nvm_info_free(p_hwfn); + qed_mcp_nvm_info_free(p_hwfn); qed_mcp_free(p_hwfn); qed_hw_hwfn_free(p_hwfn); } @@ -4603,7 +4602,7 @@ void qed_hw_remove(struct qed_dev *cdev) qed_iov_free_hw_info(cdev); - qed_nvm_info_free(p_hwfn); + qed_mcp_nvm_info_free(p_hwfn); } static void qed_chain_free_next_ptr(struct qed_dev *cdev, diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c index 9f5113639eaf..666e43748a5f 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.c +++ b/drivers/net/ethernet/qlogic/qed/qed_int.c @@ -256,9 +256,10 @@ out: #define PGLUE_ATTENTION_ZLR_VALID (1 << 25) #define PGLUE_ATTENTION_ILT_VALID (1 << 23) -int qed_pglueb_rbc_attn_handler(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt) +int qed_pglueb_rbc_attn_handler(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + bool hw_init) { + char msg[256]; u32 tmp; tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS2); @@ -272,22 +273,23 @@ int qed_pglueb_rbc_attn_handler(struct qed_hwfn *p_hwfn, details = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS); - DP_NOTICE(p_hwfn, - "Illegal write by chip to [%08x:%08x] blocked.\n" - "Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]\n" - "Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n", - addr_hi, addr_lo, details, - (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_PFID), - (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VFID), - GET_FIELD(details, - PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0, - tmp, - GET_FIELD(tmp, - PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1 : 0, - GET_FIELD(tmp, - PGLUE_ATTENTION_DETAILS2_BME) ? 1 : 0, - GET_FIELD(tmp, - PGLUE_ATTENTION_DETAILS2_FID_EN) ? 
1 : 0); + snprintf(msg, sizeof(msg), + "Illegal write by chip to [%08x:%08x] blocked.\n" + "Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]\n" + "Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]", + addr_hi, addr_lo, details, + (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_PFID), + (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VFID), + !!GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VF_VALID), + tmp, + !!GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_WAS_ERR), + !!GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_BME), + !!GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_FID_EN)); + + if (hw_init) + DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "%s\n", msg); + else + DP_NOTICE(p_hwfn, "%s\n", msg); } tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_RD_DETAILS2); @@ -320,8 +322,14 @@ int qed_pglueb_rbc_attn_handler(struct qed_hwfn *p_hwfn, } tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS_ICPL); - if (tmp & PGLUE_ATTENTION_ICPL_VALID) - DP_NOTICE(p_hwfn, "ICPL error - %08x\n", tmp); + if (tmp & PGLUE_ATTENTION_ICPL_VALID) { + snprintf(msg, sizeof(msg), "ICPL error - %08x", tmp); + + if (hw_init) + DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "%s\n", msg); + else + DP_NOTICE(p_hwfn, "%s\n", msg); + } tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_MASTER_ZLR_ERR_DETAILS); if (tmp & PGLUE_ATTENTION_ZLR_VALID) { @@ -360,7 +368,7 @@ int qed_pglueb_rbc_attn_handler(struct qed_hwfn *p_hwfn, static int qed_pglueb_rbc_attn_cb(struct qed_hwfn *p_hwfn) { - return qed_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_dpc_ptt); + return qed_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_dpc_ptt, false); } #define QED_DORQ_ATTENTION_REASON_MASK (0xfffff) @@ -1172,7 +1180,8 @@ static int qed_int_attentions(struct qed_hwfn *p_hwfn) index, attn_bits, attn_acks, asserted_bits, deasserted_bits, p_sb_attn_sw->known_attn); } else if (asserted_bits == 0x100) { - DP_INFO(p_hwfn, "MFW indication via attention\n"); + DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, + "MFW indication via attention\n"); } else { DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "MFW indication [deassertion]\n"); diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.h b/drivers/net/ethernet/qlogic/qed/qed_int.h index d473b522afc5..ba5cfebf2d0d 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.h +++ b/drivers/net/ethernet/qlogic/qed/qed_int.h @@ -431,7 +431,7 @@ int qed_int_set_timer_res(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, #define QED_MAPPING_MEMORY_SIZE(dev) (NUM_OF_SBS(dev)) -int qed_pglueb_rbc_attn_handler(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt); +int qed_pglueb_rbc_attn_handler(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + bool hw_init); #endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c index 65ec16a31658..2b3102a2fe5c 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c @@ -2832,8 +2832,6 @@ int qed_iwarp_stop(struct qed_hwfn *p_hwfn) if (rc) return rc; - qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_IWARP); - return qed_iwarp_ll2_stop(p_hwfn); } diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index 36ddb89856a8..9401b49275f0 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -3149,6 +3149,13 @@ err0: return rc; } +void qed_mcp_nvm_info_free(struct qed_hwfn *p_hwfn) +{ + kfree(p_hwfn->nvm_info.image_att); + p_hwfn->nvm_info.image_att = NULL; + p_hwfn->nvm_info.valid = false; +} + int qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn, enum 
qed_nvm_images image_id, diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h index 9c4c2763de8d..e38297383b00 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h @@ -1192,6 +1192,13 @@ void qed_mcp_read_ufp_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); */ int qed_mcp_nvm_info_populate(struct qed_hwfn *p_hwfn); +/** + * @brief Delete nvm info shadow in the given hardware function + * + * @param p_hwfn + */ +void qed_mcp_nvm_info_free(struct qed_hwfn *p_hwfn); + /** * @brief Get the engine affinity configuration. * diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c index e49fada85410..83817bb50e9f 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_roce.c +++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c @@ -113,7 +113,6 @@ void qed_roce_stop(struct qed_hwfn *p_hwfn) break; } } - qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_ROCE); } static void qed_rdma_copy_gids(struct qed_rdma_qp *qp, __le32 *src_gid, diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c index 856051f50eb7..adc2c8f3d48e 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.c +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c @@ -81,12 +81,17 @@ static void qed_vf_pf_req_end(struct qed_hwfn *p_hwfn, int req_status) mutex_unlock(&(p_hwfn->vf_iov_info->mutex)); } +#define QED_VF_CHANNEL_USLEEP_ITERATIONS 90 +#define QED_VF_CHANNEL_USLEEP_DELAY 100 +#define QED_VF_CHANNEL_MSLEEP_ITERATIONS 10 +#define QED_VF_CHANNEL_MSLEEP_DELAY 25 + static int qed_send_msg2pf(struct qed_hwfn *p_hwfn, u8 *done, u32 resp_size) { union vfpf_tlvs *p_req = p_hwfn->vf_iov_info->vf2pf_request; struct ustorm_trigger_vf_zone trigger; struct ustorm_vf_zone *zone_data; - int rc = 0, time = 100; + int iter, rc = 0; zone_data = (struct ustorm_vf_zone *)PXP_VF_BAR0_START_USDM_ZONE_B; @@ -126,11 +131,19 @@ static int qed_send_msg2pf(struct qed_hwfn *p_hwfn, u8 *done, u32 resp_size) REG_WR(p_hwfn, (uintptr_t)&zone_data->trigger, *((u32 *)&trigger)); /* When PF would be done with the response, it would write back to the - * `done' address. Poll until then. + * `done' address from a coherent DMA zone. Poll until then. 
*/ - while ((!*done) && time) { - msleep(25); - time--; + + iter = QED_VF_CHANNEL_USLEEP_ITERATIONS; + while (!*done && iter--) { + udelay(QED_VF_CHANNEL_USLEEP_DELAY); + dma_rmb(); + } + + iter = QED_VF_CHANNEL_MSLEEP_ITERATIONS; + while (!*done && iter--) { + msleep(QED_VF_CHANNEL_MSLEEP_DELAY); + dma_rmb(); } if (!*done) { diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h index 1f27f9866b80..61b5aa3e5b98 100644 --- a/drivers/net/ethernet/qlogic/qede/qede.h +++ b/drivers/net/ethernet/qlogic/qede/qede.h @@ -574,12 +574,14 @@ int qede_add_tc_flower_fltr(struct qede_dev *edev, __be16 proto, #define RX_RING_SIZE ((u16)BIT(RX_RING_SIZE_POW)) #define NUM_RX_BDS_MAX (RX_RING_SIZE - 1) #define NUM_RX_BDS_MIN 128 +#define NUM_RX_BDS_KDUMP_MIN 63 #define NUM_RX_BDS_DEF ((u16)BIT(10) - 1) #define TX_RING_SIZE_POW 13 #define TX_RING_SIZE ((u16)BIT(TX_RING_SIZE_POW)) #define NUM_TX_BDS_MAX (TX_RING_SIZE - 1) #define NUM_TX_BDS_MIN 128 +#define NUM_TX_BDS_KDUMP_MIN 63 #define NUM_TX_BDS_DEF NUM_TX_BDS_MAX #define QEDE_MIN_PKT_LEN 64 diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index ba53612ae0df..2c3d654c8454 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -29,6 +29,7 @@ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ +#include #include #include #include @@ -707,8 +708,14 @@ static struct qede_dev *qede_alloc_etherdev(struct qed_dev *cdev, edev->dp_module = dp_module; edev->dp_level = dp_level; edev->ops = qed_ops; - edev->q_num_rx_buffers = NUM_RX_BDS_DEF; - edev->q_num_tx_buffers = NUM_TX_BDS_DEF; + + if (is_kdump_kernel()) { + edev->q_num_rx_buffers = NUM_RX_BDS_KDUMP_MIN; + edev->q_num_tx_buffers = NUM_TX_BDS_KDUMP_MIN; + } else { + edev->q_num_rx_buffers = NUM_RX_BDS_DEF; + edev->q_num_tx_buffers = NUM_TX_BDS_DEF; + } DP_INFO(edev, "Allocated netdev with %d tx queues and %d rx queues\n", info->num_queues, info->num_queues); @@ -1151,7 +1158,7 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level, /* PTP not supported on VFs */ if (!is_vf) - qede_ptp_enable(edev, (mode == QEDE_PROBE_NORMAL)); + qede_ptp_enable(edev); edev->ops->register_ops(cdev, &qede_ll_ops, edev); @@ -1240,6 +1247,7 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode) if (system_state == SYSTEM_POWER_OFF) return; qed_ops->common->remove(cdev); + edev->cdev = NULL; /* Since this can happen out-of-sync with other flows, * don't release the netdevice until after slowpath stop diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.c b/drivers/net/ethernet/qlogic/qede/qede_ptp.c index f815435cf106..2d3b2fa92df5 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ptp.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.c @@ -411,6 +411,7 @@ void qede_ptp_disable(struct qede_dev *edev) if (ptp->tx_skb) { dev_kfree_skb_any(ptp->tx_skb); ptp->tx_skb = NULL; + clear_bit_unlock(QEDE_FLAGS_PTP_TX_IN_PRORGESS, &edev->flags); } /* Disable PTP in HW */ @@ -422,7 +423,7 @@ void qede_ptp_disable(struct qede_dev *edev) edev->ptp = NULL; } -static int qede_ptp_init(struct qede_dev *edev, bool init_tc) +static int qede_ptp_init(struct qede_dev *edev) { struct qede_ptp *ptp; int rc; @@ -443,25 +444,19 @@ static int qede_ptp_init(struct qede_dev *edev, bool init_tc) /* Init work queue for Tx timestamping */ INIT_WORK(&ptp->work, qede_ptp_task); - /* Init cyclecounter and timecounter. 
This is done only in the first - * load. If done in every load, PTP application will fail when doing - * unload / load (e.g. MTU change) while it is running. - */ - if (init_tc) { - memset(&ptp->cc, 0, sizeof(ptp->cc)); - ptp->cc.read = qede_ptp_read_cc; - ptp->cc.mask = CYCLECOUNTER_MASK(64); - ptp->cc.shift = 0; - ptp->cc.mult = 1; + /* Init cyclecounter and timecounter */ + memset(&ptp->cc, 0, sizeof(ptp->cc)); + ptp->cc.read = qede_ptp_read_cc; + ptp->cc.mask = CYCLECOUNTER_MASK(64); + ptp->cc.shift = 0; + ptp->cc.mult = 1; - timecounter_init(&ptp->tc, &ptp->cc, - ktime_to_ns(ktime_get_real())); - } + timecounter_init(&ptp->tc, &ptp->cc, ktime_to_ns(ktime_get_real())); - return rc; + return 0; } -int qede_ptp_enable(struct qede_dev *edev, bool init_tc) +int qede_ptp_enable(struct qede_dev *edev) { struct qede_ptp *ptp; int rc; @@ -482,7 +477,7 @@ int qede_ptp_enable(struct qede_dev *edev, bool init_tc) edev->ptp = ptp; - rc = qede_ptp_init(edev, init_tc); + rc = qede_ptp_init(edev); if (rc) goto err1; diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.h b/drivers/net/ethernet/qlogic/qede/qede_ptp.h index 691a14c4b2c5..89c7f3cf3ee2 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ptp.h +++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.h @@ -41,7 +41,7 @@ void qede_ptp_rx_ts(struct qede_dev *edev, struct sk_buff *skb); void qede_ptp_tx_ts(struct qede_dev *edev, struct sk_buff *skb); int qede_ptp_hw_ts(struct qede_dev *edev, struct ifreq *req); void qede_ptp_disable(struct qede_dev *edev); -int qede_ptp_enable(struct qede_dev *edev, bool init_tc); +int qede_ptp_enable(struct qede_dev *edev); int qede_ptp_get_ts_info(struct qede_dev *edev, struct ethtool_ts_info *ts); static inline void qede_ptp_record_rx_ts(struct qede_dev *edev, diff --git a/drivers/net/ethernet/qlogic/qede/qede_rdma.c b/drivers/net/ethernet/qlogic/qede/qede_rdma.c index 2d873ae8a234..668ccc9d49f8 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_rdma.c +++ b/drivers/net/ethernet/qlogic/qede/qede_rdma.c @@ -105,6 +105,7 @@ static void qede_rdma_destroy_wq(struct qede_dev *edev) qede_rdma_cleanup_event(edev); destroy_workqueue(edev->rdma_info.rdma_wq); + edev->rdma_info.rdma_wq = NULL; } int qede_rdma_dev_add(struct qede_dev *edev, bool recovery) @@ -325,7 +326,7 @@ static void qede_rdma_add_event(struct qede_dev *edev, if (edev->rdma_info.exp_recovery) return; - if (!edev->rdma_info.qedr_dev) + if (!edev->rdma_info.qedr_dev || !edev->rdma_info.rdma_wq) return; /* We don't want the cleanup flow to start while we're allocating and diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c index c84ab052ef26..3eee8df359a1 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac.c +++ b/drivers/net/ethernet/qualcomm/emac/emac.c @@ -485,13 +485,24 @@ static int emac_clks_phase1_init(struct platform_device *pdev, ret = clk_prepare_enable(adpt->clk[EMAC_CLK_CFG_AHB]); if (ret) - return ret; + goto disable_clk_axi; ret = clk_set_rate(adpt->clk[EMAC_CLK_HIGH_SPEED], 19200000); if (ret) - return ret; + goto disable_clk_cfg_ahb; - return clk_prepare_enable(adpt->clk[EMAC_CLK_HIGH_SPEED]); + ret = clk_prepare_enable(adpt->clk[EMAC_CLK_HIGH_SPEED]); + if (ret) + goto disable_clk_cfg_ahb; + + return 0; + +disable_clk_cfg_ahb: + clk_disable_unprepare(adpt->clk[EMAC_CLK_CFG_AHB]); +disable_clk_axi: + clk_disable_unprepare(adpt->clk[EMAC_CLK_AXI]); + + return ret; } /* Enable clocks; needs emac_clks_phase1_init to be called before */ diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c 
b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c index 02cdbb22d335..18d88b424828 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c @@ -47,15 +47,23 @@ static int rmnet_unregister_real_device(struct net_device *real_dev) return 0; } -static int rmnet_register_real_device(struct net_device *real_dev) +static int rmnet_register_real_device(struct net_device *real_dev, + struct netlink_ext_ack *extack) { struct rmnet_port *port; int rc, entry; ASSERT_RTNL(); - if (rmnet_is_real_dev_registered(real_dev)) + if (rmnet_is_real_dev_registered(real_dev)) { + port = rmnet_get_port_rtnl(real_dev); + if (port->rmnet_mode != RMNET_EPMODE_VND) { + NL_SET_ERR_MSG_MOD(extack, "bridge device already exists"); + return -EINVAL; + } + return 0; + } port = kzalloc(sizeof(*port), GFP_ATOMIC); if (!port) @@ -134,7 +142,7 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev, mux_id = nla_get_u16(data[IFLA_RMNET_MUX_ID]); - err = rmnet_register_real_device(real_dev); + err = rmnet_register_real_device(real_dev, extack); if (err) goto err0; @@ -416,13 +424,10 @@ int rmnet_add_bridge(struct net_device *rmnet_dev, if (port->nr_rmnet_devs > 1) return -EINVAL; - if (port->rmnet_mode != RMNET_EPMODE_VND) - return -EINVAL; - if (rmnet_is_real_dev_registered(slave_dev)) return -EBUSY; - err = rmnet_register_real_device(slave_dev); + err = rmnet_register_real_device(slave_dev, extack); if (err) return -EBUSY; diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index 3f165c137236..907ae1359a7c 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -1336,6 +1336,51 @@ static inline int ravb_hook_irq(unsigned int irq, irq_handler_t handler, return error; } +/* MDIO bus init function */ +static int ravb_mdio_init(struct ravb_private *priv) +{ + struct platform_device *pdev = priv->pdev; + struct device *dev = &pdev->dev; + int error; + + /* Bitbang init */ + priv->mdiobb.ops = &bb_ops; + + /* MII controller setting */ + priv->mii_bus = alloc_mdio_bitbang(&priv->mdiobb); + if (!priv->mii_bus) + return -ENOMEM; + + /* Hook up MII support for ethtool */ + priv->mii_bus->name = "ravb_mii"; + priv->mii_bus->parent = dev; + snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", + pdev->name, pdev->id); + + /* Register MDIO bus */ + error = of_mdiobus_register(priv->mii_bus, dev->of_node); + if (error) + goto out_free_bus; + + return 0; + +out_free_bus: + free_mdio_bitbang(priv->mii_bus); + return error; +} + +/* MDIO bus release function */ +static int ravb_mdio_release(struct ravb_private *priv) +{ + /* Unregister mdio bus */ + mdiobus_unregister(priv->mii_bus); + + /* Free bitbang info */ + free_mdio_bitbang(priv->mii_bus); + + return 0; +} + /* Network device open function for Ethernet AVB */ static int ravb_open(struct net_device *ndev) { @@ -1344,6 +1389,13 @@ static int ravb_open(struct net_device *ndev) struct device *dev = &pdev->dev; int error; + /* MDIO bus init */ + error = ravb_mdio_init(priv); + if (error) { + netdev_err(ndev, "failed to initialize MDIO\n"); + return error; + } + napi_enable(&priv->napi[RAVB_BE]); napi_enable(&priv->napi[RAVB_NC]); @@ -1421,6 +1473,7 @@ out_free_irq: out_napi_off: napi_disable(&priv->napi[RAVB_NC]); napi_disable(&priv->napi[RAVB_BE]); + ravb_mdio_release(priv); return error; } @@ -1444,6 +1497,7 @@ static void ravb_tx_timeout_work(struct work_struct *work) struct ravb_private *priv = container_of(work, 
struct ravb_private, work); struct net_device *ndev = priv->ndev; + int error; netif_tx_stop_all_queues(ndev); @@ -1452,15 +1506,36 @@ static void ravb_tx_timeout_work(struct work_struct *work) ravb_ptp_stop(ndev); /* Wait for DMA stopping */ - ravb_stop_dma(ndev); + if (ravb_stop_dma(ndev)) { + /* If ravb_stop_dma() fails, the hardware is still operating + * for TX and/or RX. So, this should not call the following + * functions because ravb_dmac_init() is possible to fail too. + * Also, this should not retry ravb_stop_dma() again and again + * here because it's possible to wait forever. So, this just + * re-enables the TX and RX and skip the following + * re-initialization procedure. + */ + ravb_rcv_snd_enable(ndev); + goto out; + } ravb_ring_free(ndev, RAVB_BE); ravb_ring_free(ndev, RAVB_NC); /* Device init */ - ravb_dmac_init(ndev); + error = ravb_dmac_init(ndev); + if (error) { + /* If ravb_dmac_init() fails, descriptors are freed. So, this + * should return here to avoid re-enabling the TX and RX in + * ravb_emac_init(). + */ + netdev_err(ndev, "%s: ravb_dmac_init() failed, error %d\n", + __func__, error); + return; + } ravb_emac_init(ndev); +out: /* Initialise PTP Clock driver */ if (priv->chip_id == RCAR_GEN2) ravb_ptp_init(ndev, priv->pdev); @@ -1708,6 +1783,8 @@ static int ravb_close(struct net_device *ndev) ravb_ring_free(ndev, RAVB_BE); ravb_ring_free(ndev, RAVB_NC); + ravb_mdio_release(priv); + return 0; } @@ -1859,51 +1936,6 @@ static const struct net_device_ops ravb_netdev_ops = { .ndo_set_features = ravb_set_features, }; -/* MDIO bus init function */ -static int ravb_mdio_init(struct ravb_private *priv) -{ - struct platform_device *pdev = priv->pdev; - struct device *dev = &pdev->dev; - int error; - - /* Bitbang init */ - priv->mdiobb.ops = &bb_ops; - - /* MII controller setting */ - priv->mii_bus = alloc_mdio_bitbang(&priv->mdiobb); - if (!priv->mii_bus) - return -ENOMEM; - - /* Hook up MII support for ethtool */ - priv->mii_bus->name = "ravb_mii"; - priv->mii_bus->parent = dev; - snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", - pdev->name, pdev->id); - - /* Register MDIO bus */ - error = of_mdiobus_register(priv->mii_bus, dev->of_node); - if (error) - goto out_free_bus; - - return 0; - -out_free_bus: - free_mdio_bitbang(priv->mii_bus); - return error; -} - -/* MDIO bus release function */ -static int ravb_mdio_release(struct ravb_private *priv) -{ - /* Unregister mdio bus */ - mdiobus_unregister(priv->mii_bus); - - /* Free bitbang info */ - free_mdio_bitbang(priv->mii_bus); - - return 0; -} - static const struct of_device_id ravb_match_table[] = { { .compatible = "renesas,etheravb-r8a7790", .data = (void *)RCAR_GEN2 }, { .compatible = "renesas,etheravb-r8a7794", .data = (void *)RCAR_GEN2 }, @@ -2144,13 +2176,6 @@ static int ravb_probe(struct platform_device *pdev) eth_hw_addr_random(ndev); } - /* MDIO bus init */ - error = ravb_mdio_init(priv); - if (error) { - dev_err(&pdev->dev, "failed to initialize MDIO\n"); - goto out_dma_free; - } - netif_napi_add(ndev, &priv->napi[RAVB_BE], ravb_poll, 64); netif_napi_add(ndev, &priv->napi[RAVB_NC], ravb_poll, 64); @@ -2172,8 +2197,6 @@ static int ravb_probe(struct platform_device *pdev) out_napi_del: netif_napi_del(&priv->napi[RAVB_NC]); netif_napi_del(&priv->napi[RAVB_BE]); - ravb_mdio_release(priv); -out_dma_free: dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat, priv->desc_bat_dma); @@ -2205,7 +2228,6 @@ static int ravb_remove(struct platform_device *pdev) unregister_netdev(ndev); 
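/*
 * The rmnet_register_real_device() change above threads a netlink extended
 * ack through so user space is told why attaching a real device failed.
 * Minimal sketch of that reporting pattern with a hypothetical check (not
 * part of the rmnet code):
 */
#include <linux/netdevice.h>
#include <linux/netlink.h>

static int example_attach_lower(struct net_device *lower,
                                struct netlink_ext_ack *extack)
{
        if (netif_is_bridge_port(lower)) {
                /* Returned with the netlink error, prefixed by the module name */
                NL_SET_ERR_MSG_MOD(extack, "lower device is already a bridge port");
                return -EBUSY;
        }

        return 0;
}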
netif_napi_del(&priv->napi[RAVB_NC]); netif_napi_del(&priv->napi[RAVB_BE]); - ravb_mdio_release(priv); pm_runtime_disable(&pdev->dev); free_netdev(ndev); platform_set_drvdata(pdev, NULL); diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c index 786b158bd305..5abb3f9684ff 100644 --- a/drivers/net/ethernet/rocker/rocker_main.c +++ b/drivers/net/ethernet/rocker/rocker_main.c @@ -647,10 +647,10 @@ static int rocker_dma_rings_init(struct rocker *rocker) err_dma_event_ring_bufs_alloc: rocker_dma_ring_destroy(rocker, &rocker->event_ring); err_dma_event_ring_create: + rocker_dma_cmd_ring_waits_free(rocker); +err_dma_cmd_ring_waits_alloc: rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring, PCI_DMA_BIDIRECTIONAL); -err_dma_cmd_ring_waits_alloc: - rocker_dma_cmd_ring_waits_free(rocker); err_dma_cmd_ring_bufs_alloc: rocker_dma_ring_destroy(rocker, &rocker->cmd_ring); return err; diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c index 3a6761131f4c..2248d2674612 100644 --- a/drivers/net/ethernet/smsc/smc91x.c +++ b/drivers/net/ethernet/smsc/smc91x.c @@ -2274,7 +2274,7 @@ static int smc_drv_probe(struct platform_device *pdev) ret = try_toggle_control_gpio(&pdev->dev, &lp->power_gpio, "power", 0, 0, 100); if (ret) - return ret; + goto out_free_netdev; /* * Optional reset GPIO configured? Minimum 100 ns reset needed @@ -2283,7 +2283,7 @@ static int smc_drv_probe(struct platform_device *pdev) ret = try_toggle_control_gpio(&pdev->dev, &lp->reset_gpio, "reset", 0, 0, 100); if (ret) - return ret; + goto out_free_netdev; /* * Need to wait for optional EEPROM to load, max 750 us according diff --git a/drivers/net/ethernet/socionext/sni_ave.c b/drivers/net/ethernet/socionext/sni_ave.c index 38d39c4b5ac8..603d54f83399 100644 --- a/drivers/net/ethernet/socionext/sni_ave.c +++ b/drivers/net/ethernet/socionext/sni_ave.c @@ -1191,7 +1191,7 @@ static int ave_init(struct net_device *ndev) ret = regmap_update_bits(priv->regmap, SG_ETPINMODE, priv->pinmode_mask, priv->pinmode_val); if (ret) - return ret; + goto out_reset_assert; ave_global_reset(ndev); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c index 4d75158c64b2..826626e870d5 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c @@ -350,6 +350,7 @@ static int ipq806x_gmac_probe(struct platform_device *pdev) plat_dat->has_gmac = true; plat_dat->bsp_priv = gmac; plat_dat->fix_mac_speed = ipq806x_gmac_fix_mac_speed; + plat_dat->multicast_filter_bins = 0; err = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); if (err) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c index bc9b01376e80..1d0b64bd1e1a 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c @@ -166,6 +166,9 @@ static void dwmac1000_set_filter(struct mac_device_info *hw, value = GMAC_FRAME_FILTER_PR | GMAC_FRAME_FILTER_PCF; } else if (dev->flags & IFF_ALLMULTI) { value = GMAC_FRAME_FILTER_PM; /* pass all multi */ + } else if (!netdev_mc_empty(dev) && (mcbitslog2 == 0)) { + /* Fall back to all multicast if we've no filter */ + value = GMAC_FRAME_FILTER_PM; } else if (!netdev_mc_empty(dev)) { struct netdev_hw_addr *ha; diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 39df8c8feb6c..e7b4d93e3f28 100644 --- 
a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -2209,7 +2209,7 @@ static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev, HOST_PORT_NUM, ALE_VLAN, vid); ret |= cpsw_ale_del_mcast(cpsw->ale, priv->ndev->broadcast, 0, ALE_VLAN, vid); - ret |= cpsw_ale_flush_multicast(cpsw->ale, 0, vid); + ret |= cpsw_ale_flush_multicast(cpsw->ale, ALE_PORT_HOST, vid); err: pm_runtime_put(cpsw->dev); return ret; diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c index 38b7f6d35759..702fdc393da0 100644 --- a/drivers/net/ethernet/ti/davinci_mdio.c +++ b/drivers/net/ethernet/ti/davinci_mdio.c @@ -397,6 +397,8 @@ static int davinci_mdio_probe(struct platform_device *pdev) data->dev = dev; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return -EINVAL; data->regs = devm_ioremap(dev, res->start, resource_size(res)); if (!data->regs) return -ENOMEM; diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c index 538e70810d3d..a99c7c95de5c 100644 --- a/drivers/net/ethernet/toshiba/spider_net.c +++ b/drivers/net/ethernet/toshiba/spider_net.c @@ -283,8 +283,8 @@ spider_net_free_chain(struct spider_net_card *card, descr = descr->next; } while (descr != chain->ring); - dma_free_coherent(&card->pdev->dev, chain->num_desc, - chain->hwring, chain->dma_addr); + dma_free_coherent(&card->pdev->dev, chain->num_desc * sizeof(struct spider_net_hw_descr), + chain->hwring, chain->dma_addr); } /** diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index aa101f72d405..adfdf6260b26 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -987,9 +987,10 @@ static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev) if (geneve->collect_md) { info = skb_tunnel_info(skb); if (unlikely(!info || !(info->mode & IP_TUNNEL_INFO_TX))) { - err = -EINVAL; netdev_dbg(dev, "no tunnel metadata\n"); - goto tx_error; + dev_kfree_skb(skb); + dev->stats.tx_dropped++; + return NETDEV_TX_OK; } } else { info = &geneve->info; @@ -1006,7 +1007,7 @@ static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev) if (likely(!err)) return NETDEV_TX_OK; -tx_error: + dev_kfree_skb(skb); if (err == -ELOOP) @@ -1614,11 +1615,11 @@ static int geneve_changelink(struct net_device *dev, struct nlattr *tb[], struct netlink_ext_ack *extack) { struct geneve_dev *geneve = netdev_priv(dev); + enum ifla_geneve_df df = geneve->df; struct geneve_sock *gs4, *gs6; struct ip_tunnel_info info; bool metadata; bool use_udp6_rx_checksums; - enum ifla_geneve_df df; bool ttl_inherit; int err; @@ -1648,6 +1649,7 @@ static int geneve_changelink(struct net_device *dev, struct nlattr *tb[], geneve->collect_md = metadata; geneve->use_udp6_rx_checksums = use_udp6_rx_checksums; geneve->ttl_inherit = ttl_inherit; + geneve->df = df; geneve_unquiesce(geneve, gs4, gs6); return 0; diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c index d89ec99abcd6..634bdea38ecb 100644 --- a/drivers/net/gtp.c +++ b/drivers/net/gtp.c @@ -1182,6 +1182,7 @@ static int gtp_genl_fill_info(struct sk_buff *skb, u32 snd_portid, u32 snd_seq, goto nlmsg_failure; if (nla_put_u32(skb, GTPA_VERSION, pctx->gtp_version) || + nla_put_u32(skb, GTPA_LINK, pctx->dev->ifindex) || nla_put_be32(skb, GTPA_PEER_ADDRESS, pctx->peer_addr_ip4.s_addr) || nla_put_be32(skb, GTPA_MS_ADDRESS, pctx->ms_addr_ip4.s_addr)) goto nla_put_failure; diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c index 71cdef9fb56b..5ab53e9942f3 100644 --- 
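/*
 * The geneve_xmit() change above frees a metadata-less skb itself, counts it
 * as a drop rather than a transmit error, and still returns NETDEV_TX_OK
 * because the skb has been consumed.  Sketch of that .ndo_start_xmit drop
 * pattern (illustrative only, not the geneve code):
 */
#include <linux/netdevice.h>
#include <net/dst_metadata.h>
#include <net/ip_tunnels.h>

static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct ip_tunnel_info *info = skb_tunnel_info(skb);

        if (unlikely(!info || !(info->mode & IP_TUNNEL_INFO_TX))) {
                dev_kfree_skb(skb);
                dev->stats.tx_dropped++;
                return NETDEV_TX_OK;    /* consumed: do not ask the stack to retry */
        }

        /* ... build the tunnel headers and hand the skb to the underlay ... */
        return NETDEV_TX_OK;
}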
a/drivers/net/hamradio/yam.c +++ b/drivers/net/hamradio/yam.c @@ -1133,6 +1133,7 @@ static int __init yam_init_driver(void) err = register_netdev(dev); if (err) { printk(KERN_WARNING "yam: cannot register net device %s\n", dev->name); + free_netdev(dev); goto error; } yam_devs[i] = dev; diff --git a/drivers/net/hippi/rrunner.c b/drivers/net/hippi/rrunner.c index 2a6ec5394966..a4b3fce69ecd 100644 --- a/drivers/net/hippi/rrunner.c +++ b/drivers/net/hippi/rrunner.c @@ -1242,7 +1242,7 @@ static int rr_open(struct net_device *dev) rrpriv->info = NULL; } if (rrpriv->rx_ctrl) { - pci_free_consistent(pdev, sizeof(struct ring_ctrl), + pci_free_consistent(pdev, 256 * sizeof(struct ring_ctrl), rrpriv->rx_ctrl, rrpriv->rx_ctrl_dma); rrpriv->rx_ctrl = NULL; } diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index ca16ae8c8332..362b7ca6f3b2 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -366,7 +366,7 @@ static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb, } rcu_read_unlock(); - while (unlikely(txq >= ndev->real_num_tx_queues)) + while (txq >= ndev->real_num_tx_queues) txq -= ndev->real_num_tx_queues; return txq; @@ -501,7 +501,7 @@ static int netvsc_vf_xmit(struct net_device *net, struct net_device *vf_netdev, int rc; skb->dev = vf_netdev; - skb->queue_mapping = qdisc_skb_cb(skb)->slave_dev_queue_mapping; + skb_record_rx_queue(skb, qdisc_skb_cb(skb)->slave_dev_queue_mapping); rc = dev_queue_xmit(skb); if (likely(rc == NET_XMIT_SUCCESS || rc == NET_XMIT_CN)) { @@ -531,12 +531,13 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net) u32 hash; struct hv_page_buffer pb[MAX_PAGE_BUFFER_COUNT]; - /* if VF is present and up then redirect packets - * already called with rcu_read_lock_bh + /* If VF is present and up then redirect packets to it. + * Skip the VF if it is marked down or has no carrier. + * If netpoll is in uses, then VF can not be used either. 
*/ vf_netdev = rcu_dereference_bh(net_device_ctx->vf_netdev); if (vf_netdev && netif_running(vf_netdev) && - !netpoll_tx_running(net)) + netif_carrier_ok(vf_netdev) && !netpoll_tx_running(net)) return netvsc_vf_xmit(net, vf_netdev, skb); /* We will atmost need two pages to describe the rndis diff --git a/drivers/net/ieee802154/adf7242.c b/drivers/net/ieee802154/adf7242.c index 5a37514e4234..8dbccec6ac86 100644 --- a/drivers/net/ieee802154/adf7242.c +++ b/drivers/net/ieee802154/adf7242.c @@ -1262,7 +1262,7 @@ static int adf7242_probe(struct spi_device *spi) WQ_MEM_RECLAIM); if (unlikely(!lp->wqueue)) { ret = -ENOMEM; - goto err_hw_init; + goto err_alloc_wq; } ret = adf7242_hw_init(lp); @@ -1294,6 +1294,8 @@ static int adf7242_probe(struct spi_device *spi) return ret; err_hw_init: + destroy_workqueue(lp->wqueue); +err_alloc_wq: mutex_destroy(&lp->bmux); ieee802154_free_hw(lp->hw); diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c index b805abc9ec3b..5fbabae2909e 100644 --- a/drivers/net/ipvlan/ipvlan_main.c +++ b/drivers/net/ipvlan/ipvlan_main.c @@ -106,12 +106,21 @@ static void ipvlan_port_destroy(struct net_device *dev) kfree(port); } +#define IPVLAN_ALWAYS_ON_OFLOADS \ + (NETIF_F_SG | NETIF_F_HW_CSUM | \ + NETIF_F_GSO_ROBUST | NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ENCAP_ALL) + +#define IPVLAN_ALWAYS_ON \ + (IPVLAN_ALWAYS_ON_OFLOADS | NETIF_F_LLTX | NETIF_F_VLAN_CHALLENGED) + #define IPVLAN_FEATURES \ - (NETIF_F_SG | NETIF_F_CSUM_MASK | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \ + (NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \ NETIF_F_GSO | NETIF_F_TSO | NETIF_F_GSO_ROBUST | \ NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_GRO | NETIF_F_RXCSUM | \ NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER) + /* NETIF_F_GSO_ENCAP_ALL NETIF_F_GSO_SOFTWARE Newly added */ + #define IPVLAN_STATE_MASK \ ((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT)) @@ -125,7 +134,9 @@ static int ipvlan_init(struct net_device *dev) dev->state = (dev->state & ~IPVLAN_STATE_MASK) | (phy_dev->state & IPVLAN_STATE_MASK); dev->features = phy_dev->features & IPVLAN_FEATURES; - dev->features |= NETIF_F_LLTX | NETIF_F_VLAN_CHALLENGED; + dev->features |= IPVLAN_ALWAYS_ON; + dev->vlan_features = phy_dev->vlan_features & IPVLAN_FEATURES; + dev->vlan_features |= IPVLAN_ALWAYS_ON_OFLOADS; dev->hw_enc_features |= dev->features; dev->gso_max_size = phy_dev->gso_max_size; dev->gso_max_segs = phy_dev->gso_max_segs; @@ -225,7 +236,14 @@ static netdev_features_t ipvlan_fix_features(struct net_device *dev, { struct ipvl_dev *ipvlan = netdev_priv(dev); - return features & (ipvlan->sfeatures | ~IPVLAN_FEATURES); + features |= NETIF_F_ALL_FOR_ALL; + features &= (ipvlan->sfeatures | ~IPVLAN_FEATURES); + features = netdev_increment_features(ipvlan->phy_dev->features, + features, features); + features |= IPVLAN_ALWAYS_ON; + features &= (IPVLAN_FEATURES | IPVLAN_ALWAYS_ON); + + return features; } static void ipvlan_change_rx_flags(struct net_device *dev, int change) @@ -732,10 +750,9 @@ static int ipvlan_device_event(struct notifier_block *unused, case NETDEV_FEAT_CHANGE: list_for_each_entry(ipvlan, &port->ipvlans, pnode) { - ipvlan->dev->features = dev->features & IPVLAN_FEATURES; ipvlan->dev->gso_max_size = dev->gso_max_size; ipvlan->dev->gso_max_segs = dev->gso_max_segs; - netdev_features_change(ipvlan->dev); + netdev_update_features(ipvlan->dev); } break; diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 0ce1004a8d0d..07622cf8765a 100644 --- a/drivers/net/macvlan.c +++ 
b/drivers/net/macvlan.c @@ -447,6 +447,10 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb) int ret; rx_handler_result_t handle_res; + /* Packets from dev_loopback_xmit() do not have L2 header, bail out */ + if (unlikely(skb->pkt_type == PACKET_LOOPBACK)) + return RX_HANDLER_PASS; + port = macvlan_port_get_rcu(skb->dev); if (is_multicast_ether_addr(eth->h_dest)) { unsigned int hash; @@ -1255,6 +1259,9 @@ static void macvlan_port_destroy(struct net_device *dev) static int macvlan_validate(struct nlattr *tb[], struct nlattr *data[], struct netlink_ext_ack *extack) { + struct nlattr *nla, *head; + int rem, len; + if (tb[IFLA_ADDRESS]) { if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) return -EINVAL; @@ -1302,6 +1309,20 @@ static int macvlan_validate(struct nlattr *tb[], struct nlattr *data[], return -EADDRNOTAVAIL; } + if (data[IFLA_MACVLAN_MACADDR_DATA]) { + head = nla_data(data[IFLA_MACVLAN_MACADDR_DATA]); + len = nla_len(data[IFLA_MACVLAN_MACADDR_DATA]); + + nla_for_each_attr(nla, head, len, rem) { + if (nla_type(nla) != IFLA_MACVLAN_MACADDR || + nla_len(nla) != ETH_ALEN) + return -EINVAL; + + if (!is_valid_ether_addr(nla_data(nla))) + return -EADDRNOTAVAIL; + } + } + if (data[IFLA_MACVLAN_MACADDR_COUNT]) return -EINVAL; @@ -1358,10 +1379,6 @@ static int macvlan_changelink_sources(struct macvlan_dev *vlan, u32 mode, len = nla_len(data[IFLA_MACVLAN_MACADDR_DATA]); nla_for_each_attr(nla, head, len, rem) { - if (nla_type(nla) != IFLA_MACVLAN_MACADDR || - nla_len(nla) != ETH_ALEN) - continue; - addr = nla_data(nla); ret = macvlan_hash_add_source(vlan, addr); if (ret) diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c index 55f57f76d01b..a6bbe93f29ef 100644 --- a/drivers/net/netdevsim/netdev.c +++ b/drivers/net/netdevsim/netdev.c @@ -301,7 +301,7 @@ nsim_create(struct nsim_dev *nsim_dev, struct nsim_dev_port *nsim_dev_port) rtnl_lock(); err = nsim_bpf_init(ns); if (err) - goto err_free_netdev; + goto err_rtnl_unlock; nsim_ipsec_init(ns); @@ -315,8 +315,8 @@ nsim_create(struct nsim_dev *nsim_dev, struct nsim_dev_port *nsim_dev_port) err_ipsec_teardown: nsim_ipsec_teardown(ns); nsim_bpf_uninit(ns); +err_rtnl_unlock: rtnl_unlock(); -err_free_netdev: free_netdev(dev); return ERR_PTR(err); } diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c index 1c75b2627ca8..7d845117abb0 100644 --- a/drivers/net/phy/dp83640.c +++ b/drivers/net/phy/dp83640.c @@ -1348,6 +1348,7 @@ static int dp83640_hwtstamp(struct phy_device *phydev, struct ifreq *ifr) dp83640->hwts_rx_en = 1; dp83640->layer = PTP_CLASS_L4; dp83640->version = PTP_CLASS_V1; + cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; break; case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: @@ -1355,6 +1356,7 @@ static int dp83640_hwtstamp(struct phy_device *phydev, struct ifreq *ifr) dp83640->hwts_rx_en = 1; dp83640->layer = PTP_CLASS_L4; dp83640->version = PTP_CLASS_V2; + cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT; break; case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: @@ -1362,6 +1364,7 @@ static int dp83640_hwtstamp(struct phy_device *phydev, struct ifreq *ifr) dp83640->hwts_rx_en = 1; dp83640->layer = PTP_CLASS_L2; dp83640->version = PTP_CLASS_V2; + cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT; break; case HWTSTAMP_FILTER_PTP_V2_EVENT: case HWTSTAMP_FILTER_PTP_V2_SYNC: @@ -1369,6 +1372,7 @@ static int dp83640_hwtstamp(struct phy_device *phydev, struct ifreq *ifr) dp83640->hwts_rx_en = 1; dp83640->layer = PTP_CLASS_L4 | PTP_CLASS_L2; 
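/*
 * The dp83640 hunks here report back, in cfg.rx_filter, the coarser filter
 * the PHY actually programs instead of echoing the exact filter requested.
 * Sketch of that SIOCSHWTSTAMP contract in a hypothetical handler (not the
 * dp83640 code):
 */
#include <linux/net_tstamp.h>
#include <linux/netdevice.h>
#include <linux/uaccess.h>

static int example_hwtstamp(struct net_device *dev, struct ifreq *ifr)
{
        struct hwtstamp_config cfg;

        if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
                return -EFAULT;

        switch (cfg.rx_filter) {
        case HWTSTAMP_FILTER_NONE:
                break;
        case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
        case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
        case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
                /* Hardware timestamps every PTPv2-over-UDP event frame */
                cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
                break;
        default:
                return -ERANGE;
        }

        /* ... program the hardware from cfg here ... */

        /* Hand the possibly widened configuration back to user space */
        return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}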
dp83640->version = PTP_CLASS_V2; + cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; break; default: return -ERANGE; diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index a7796134e3be..91cf1d167263 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -358,7 +358,7 @@ static int m88e1101_config_aneg(struct phy_device *phydev) return marvell_config_aneg(phydev); } -#ifdef CONFIG_OF_MDIO +#if IS_ENABLED(CONFIG_OF_MDIO) /* Set and/or override some configuration registers based on the * marvell,reg-init property stored in the of_node for the phydev. * diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index f720c519e8e8..92ffd5e84bc7 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -624,7 +624,9 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, u32 phy_id, if (c45_ids) dev->c45_ids = *c45_ids; dev->irq = bus->irq[addr]; + dev_set_name(&mdiodev->dev, PHY_ID_FMT, bus->id, addr); + device_initialize(&mdiodev->dev); dev->state = PHY_DOWN; @@ -658,10 +660,8 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, u32 phy_id, ret = phy_request_driver_module(dev, phy_id); } - if (!ret) { - device_initialize(&mdiodev->dev); - } else { - kfree(dev); + if (ret) { + put_device(&mdiodev->dev); dev = ERR_PTR(ret); } @@ -806,8 +806,10 @@ static int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id, /* Grab the bits from PHYIR2, and put them in the lower half */ phy_reg = mdiobus_read(bus, addr, MII_PHYSID2); - if (phy_reg < 0) - return -EIO; + if (phy_reg < 0) { + /* returning -ENODEV doesn't stop bus scanning */ + return (phy_reg == -EIO || phy_reg == -ENODEV) ? -ENODEV : -EIO; + } *phy_id |= phy_reg; diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c index b23fc41896ef..816e59fe68f5 100644 --- a/drivers/net/phy/sfp-bus.c +++ b/drivers/net/phy/sfp-bus.c @@ -9,6 +9,12 @@ #include "sfp.h" +struct sfp_quirk { + const char *vendor; + const char *part; + void (*modes)(const struct sfp_eeprom_id *id, unsigned long *modes); +}; + /** * struct sfp_bus - internal representation of a sfp bus */ @@ -21,6 +27,7 @@ struct sfp_bus { const struct sfp_socket_ops *socket_ops; struct device *sfp_dev; struct sfp *sfp; + const struct sfp_quirk *sfp_quirk; const struct sfp_upstream_ops *upstream_ops; void *upstream; @@ -30,6 +37,71 @@ struct sfp_bus { bool started; }; +static void sfp_quirk_2500basex(const struct sfp_eeprom_id *id, + unsigned long *modes) +{ + phylink_set(modes, 2500baseX_Full); +} + +static const struct sfp_quirk sfp_quirks[] = { + { + // Alcatel Lucent G-010S-P can operate at 2500base-X, but + // incorrectly report 2500MBd NRZ in their EEPROM + .vendor = "ALCATELLUCENT", + .part = "G010SP", + .modes = sfp_quirk_2500basex, + }, { + // Alcatel Lucent G-010S-A can operate at 2500base-X, but + // report 3.2GBd NRZ in their EEPROM + .vendor = "ALCATELLUCENT", + .part = "3FE46541AA", + .modes = sfp_quirk_2500basex, + }, { + // Huawei MA5671A can operate at 2500base-X, but report 1.2GBd + // NRZ in their EEPROM + .vendor = "HUAWEI", + .part = "MA5671A", + .modes = sfp_quirk_2500basex, + }, +}; + +static size_t sfp_strlen(const char *str, size_t maxlen) +{ + size_t size, i; + + /* Trailing characters should be filled with space chars */ + for (i = 0, size = 0; i < maxlen; i++) + if (str[i] != ' ') + size = i + 1; + + return size; +} + +static bool sfp_match(const char *qs, const char *str, size_t len) +{ + if (!qs) + return true; + if (strlen(qs) != len) + return false; + 
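/*
 * The sfp-bus quirk lookup being added here compares fixed-width,
 * space-padded EEPROM fields against ordinary NUL-terminated strings, with
 * a NULL quirk string acting as a wildcard.  Self-contained sketch of that
 * comparison (illustrative helpers, not the sfp-bus ones):
 */
#include <stdbool.h>
#include <stddef.h>
#include <string.h>

/* Length of a fixed-width field, ignoring the trailing space padding */
static size_t padded_strlen(const char *str, size_t maxlen)
{
        size_t i, size = 0;

        for (i = 0; i < maxlen; i++)
                if (str[i] != ' ')
                        size = i + 1;

        return size;
}

static bool padded_match(const char *quirk, const char *field, size_t maxlen)
{
        size_t len = padded_strlen(field, maxlen);

        if (!quirk)
                return true;

        return strlen(quirk) == len && !strncmp(quirk, field, len);
}

/* padded_match("HUAWEI", "HUAWEI          ", 16) -> true  */
/* padded_match("HUAWEI", "HUAWEI-X        ", 16) -> false */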
return !strncmp(qs, str, len); +} + +static const struct sfp_quirk *sfp_lookup_quirk(const struct sfp_eeprom_id *id) +{ + const struct sfp_quirk *q; + unsigned int i; + size_t vs, ps; + + vs = sfp_strlen(id->base.vendor_name, ARRAY_SIZE(id->base.vendor_name)); + ps = sfp_strlen(id->base.vendor_pn, ARRAY_SIZE(id->base.vendor_pn)); + + for (i = 0, q = sfp_quirks; i < ARRAY_SIZE(sfp_quirks); i++, q++) + if (sfp_match(q->vendor, id->base.vendor_name, vs) && + sfp_match(q->part, id->base.vendor_pn, ps)) + return q; + + return NULL; +} /** * sfp_parse_port() - Parse the EEPROM base ID, setting the port type * @bus: a pointer to the &struct sfp_bus structure for the sfp module @@ -233,6 +305,9 @@ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id, phylink_set(modes, 1000baseX_Full); } + if (bus->sfp_quirk) + bus->sfp_quirk->modes(id, modes); + bitmap_or(support, support, modes, __ETHTOOL_LINK_MODE_MASK_NBITS); phylink_set(support, Autoneg); @@ -553,6 +628,8 @@ int sfp_module_insert(struct sfp_bus *bus, const struct sfp_eeprom_id *id) const struct sfp_upstream_ops *ops = sfp_get_upstream_ops(bus); int ret = 0; + bus->sfp_quirk = sfp_lookup_quirk(id); + if (ops && ops->module_insert) ret = ops->module_insert(bus->upstream, id); @@ -566,6 +643,8 @@ void sfp_module_remove(struct sfp_bus *bus) if (ops && ops->module_remove) ops->module_remove(bus->upstream); + + bus->sfp_quirk = NULL; } EXPORT_SYMBOL_GPL(sfp_module_remove); diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c index e39f41efda3e..7bc6e8f856fe 100644 --- a/drivers/net/usb/asix_common.c +++ b/drivers/net/usb/asix_common.c @@ -296,7 +296,7 @@ int asix_read_phy_addr(struct usbnet *dev, int internal) netdev_dbg(dev->net, "asix_get_phy_addr()\n"); - if (ret < 0) { + if (ret < 2) { netdev_err(dev->net, "Error reading PHYID register: %02x\n", ret); goto out; } diff --git a/drivers/net/usb/ax88172a.c b/drivers/net/usb/ax88172a.c index af3994e0853b..6101d82102e7 100644 --- a/drivers/net/usb/ax88172a.c +++ b/drivers/net/usb/ax88172a.c @@ -198,6 +198,7 @@ static int ax88172a_bind(struct usbnet *dev, struct usb_interface *intf) ret = asix_read_cmd(dev, AX_CMD_READ_NODE_ID, 0, 0, ETH_ALEN, buf, 0); if (ret < ETH_ALEN) { netdev_err(dev->net, "Failed to read MAC address: %d\n", ret); + ret = -EIO; goto free; } memcpy(dev->net->dev_addr, buf, ETH_ALEN); diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c index daa54486ab09..df2f7cc6dc03 100644 --- a/drivers/net/usb/ax88179_178a.c +++ b/drivers/net/usb/ax88179_178a.c @@ -1387,10 +1387,10 @@ static int ax88179_rx_fixup(struct usbnet *dev, struct sk_buff *skb) } if (pkt_cnt == 0) { - /* Skip IP alignment psudo header */ - skb_pull(skb, 2); skb->len = pkt_len; - skb_set_tail_pointer(skb, pkt_len); + /* Skip IP alignment pseudo header */ + skb_pull(skb, 2); + skb_set_tail_pointer(skb, skb->len); skb->truesize = pkt_len + sizeof(struct sk_buff); ax88179_rx_checksum(skb, pkt_hdr); return 1; @@ -1399,8 +1399,9 @@ static int ax88179_rx_fixup(struct usbnet *dev, struct sk_buff *skb) ax_skb = skb_clone(skb, GFP_ATOMIC); if (ax_skb) { ax_skb->len = pkt_len; - ax_skb->data = skb->data + 2; - skb_set_tail_pointer(ax_skb, pkt_len); + /* Skip IP alignment pseudo header */ + skb_pull(ax_skb, 2); + skb_set_tail_pointer(ax_skb, ax_skb->len); ax_skb->truesize = pkt_len + sizeof(struct sk_buff); ax88179_rx_checksum(ax_skb, pkt_hdr); usbnet_skb_return(dev, ax_skb); diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c index 
b91f92e4e5f2..915ac75b55fc 100644 --- a/drivers/net/usb/dm9601.c +++ b/drivers/net/usb/dm9601.c @@ -625,6 +625,10 @@ static const struct usb_device_id products[] = { USB_DEVICE(0x0a46, 0x1269), /* DM9621A USB to Fast Ethernet Adapter */ .driver_info = (unsigned long)&dm9601_info, }, + { + USB_DEVICE(0x0586, 0x3427), /* ZyXEL Keenetic Plus DSL xDSL modem */ + .driver_info = (unsigned long)&dm9601_info, + }, {}, // END }; diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c index 74849da031fa..7449b97a3c89 100644 --- a/drivers/net/usb/hso.c +++ b/drivers/net/usb/hso.c @@ -1389,8 +1389,9 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old) unsigned long flags; if (old) - hso_dbg(0x16, "Termios called with: cflags new[%d] - old[%d]\n", - tty->termios.c_cflag, old->c_cflag); + hso_dbg(0x16, "Termios called with: cflags new[%u] - old[%u]\n", + (unsigned int)tty->termios.c_cflag, + (unsigned int)old->c_cflag); /* the actual setup */ spin_lock_irqsave(&serial->serial_lock, flags); @@ -2259,12 +2260,14 @@ static int hso_serial_common_create(struct hso_serial *serial, int num_urbs, minor = get_free_serial_index(); if (minor < 0) - goto exit; + goto exit2; /* register our minor number */ serial->parent->dev = tty_port_register_device_attr(&serial->port, tty_drv, minor, &serial->parent->interface->dev, serial->parent, hso_serial_dev_groups); + if (IS_ERR(serial->parent->dev)) + goto exit2; /* fill in specific data for later use */ serial->minor = minor; @@ -2309,6 +2312,7 @@ static int hso_serial_common_create(struct hso_serial *serial, int num_urbs, return 0; exit: hso_serial_tty_unregister(serial); +exit2: hso_serial_common_free(serial); return -1; } diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index 0170a441208a..71cc5b63d8ce 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@ -377,10 +377,6 @@ struct lan78xx_net { struct tasklet_struct bh; struct delayed_work wq; - struct usb_host_endpoint *ep_blkin; - struct usb_host_endpoint *ep_blkout; - struct usb_host_endpoint *ep_intr; - int msg_enable; struct urb *urb_intr; @@ -2868,78 +2864,12 @@ lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net) return NETDEV_TX_OK; } -static int -lan78xx_get_endpoints(struct lan78xx_net *dev, struct usb_interface *intf) -{ - int tmp; - struct usb_host_interface *alt = NULL; - struct usb_host_endpoint *in = NULL, *out = NULL; - struct usb_host_endpoint *status = NULL; - - for (tmp = 0; tmp < intf->num_altsetting; tmp++) { - unsigned ep; - - in = NULL; - out = NULL; - status = NULL; - alt = intf->altsetting + tmp; - - for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) { - struct usb_host_endpoint *e; - int intr = 0; - - e = alt->endpoint + ep; - switch (e->desc.bmAttributes) { - case USB_ENDPOINT_XFER_INT: - if (!usb_endpoint_dir_in(&e->desc)) - continue; - intr = 1; - /* FALLTHROUGH */ - case USB_ENDPOINT_XFER_BULK: - break; - default: - continue; - } - if (usb_endpoint_dir_in(&e->desc)) { - if (!intr && !in) - in = e; - else if (intr && !status) - status = e; - } else { - if (!out) - out = e; - } - } - if (in && out) - break; - } - if (!alt || !in || !out) - return -EINVAL; - - dev->pipe_in = usb_rcvbulkpipe(dev->udev, - in->desc.bEndpointAddress & - USB_ENDPOINT_NUMBER_MASK); - dev->pipe_out = usb_sndbulkpipe(dev->udev, - out->desc.bEndpointAddress & - USB_ENDPOINT_NUMBER_MASK); - dev->ep_intr = status; - - return 0; -} - static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf) { struct lan78xx_priv *pdata = 
NULL; int ret; int i; - ret = lan78xx_get_endpoints(dev, intf); - if (ret) { - netdev_warn(dev->net, "lan78xx_get_endpoints failed: %d\n", - ret); - return ret; - } - dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL); pdata = (struct lan78xx_priv *)(dev->data[0]); @@ -3708,6 +3638,7 @@ static void lan78xx_stat_monitor(struct timer_list *t) static int lan78xx_probe(struct usb_interface *intf, const struct usb_device_id *id) { + struct usb_host_endpoint *ep_blkin, *ep_blkout, *ep_intr; struct lan78xx_net *dev; struct net_device *netdev; struct usb_device *udev; @@ -3756,6 +3687,34 @@ static int lan78xx_probe(struct usb_interface *intf, mutex_init(&dev->stats.access_lock); + if (intf->cur_altsetting->desc.bNumEndpoints < 3) { + ret = -ENODEV; + goto out2; + } + + dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE); + ep_blkin = usb_pipe_endpoint(udev, dev->pipe_in); + if (!ep_blkin || !usb_endpoint_is_bulk_in(&ep_blkin->desc)) { + ret = -ENODEV; + goto out2; + } + + dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE); + ep_blkout = usb_pipe_endpoint(udev, dev->pipe_out); + if (!ep_blkout || !usb_endpoint_is_bulk_out(&ep_blkout->desc)) { + ret = -ENODEV; + goto out2; + } + + ep_intr = &intf->cur_altsetting->endpoint[2]; + if (!usb_endpoint_is_int_in(&ep_intr->desc)) { + ret = -ENODEV; + goto out2; + } + + dev->pipe_intr = usb_rcvintpipe(dev->udev, + usb_endpoint_num(&ep_intr->desc)); + ret = lan78xx_bind(dev, intf); if (ret < 0) goto out2; @@ -3767,18 +3726,7 @@ static int lan78xx_probe(struct usb_interface *intf, netdev->max_mtu = MAX_SINGLE_PACKET_SIZE; netif_set_gso_max_size(netdev, MAX_SINGLE_PACKET_SIZE - MAX_HEADER); - dev->ep_blkin = (intf->cur_altsetting)->endpoint + 0; - dev->ep_blkout = (intf->cur_altsetting)->endpoint + 1; - dev->ep_intr = (intf->cur_altsetting)->endpoint + 2; - - dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE); - dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE); - - dev->pipe_intr = usb_rcvintpipe(dev->udev, - dev->ep_intr->desc.bEndpointAddress & - USB_ENDPOINT_NUMBER_MASK); - period = dev->ep_intr->desc.bInterval; - + period = ep_intr->desc.bInterval; maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0); buf = kmalloc(maxp, GFP_KERNEL); if (buf) { @@ -3791,6 +3739,7 @@ static int lan78xx_probe(struct usb_interface *intf, usb_fill_int_urb(dev->urb_intr, dev->udev, dev->pipe_intr, buf, maxp, intr_complete, dev, period); + dev->urb_intr->transfer_flags |= URB_FREE_BUFFER; } } diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 4a2c7355be63..e57d59b0a7ae 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -1370,6 +1370,7 @@ static const struct usb_device_id products[] = { {QMI_QUIRK_SET_DTR(0x1e0e, 0x9001, 5)}, /* SIMCom 7100E, 7230E, 7600E ++ */ {QMI_QUIRK_SET_DTR(0x2c7c, 0x0121, 4)}, /* Quectel EC21 Mini PCIe */ {QMI_QUIRK_SET_DTR(0x2c7c, 0x0191, 4)}, /* Quectel EG91 */ + {QMI_QUIRK_SET_DTR(0x2c7c, 0x0195, 4)}, /* Quectel EG95 */ {QMI_FIXED_INTF(0x2c7c, 0x0296, 4)}, /* Quectel BG96 */ {QMI_QUIRK_SET_DTR(0x2cb7, 0x0104, 4)}, /* Fibocom NL678 series */ {QMI_FIXED_INTF(0x0489, 0xe0b4, 0)}, /* Foxconn T77W968 LTE */ diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c index 355be77f4241..bb4ccbda031a 100644 --- a/drivers/net/usb/smsc95xx.c +++ b/drivers/net/usb/smsc95xx.c @@ -1287,11 +1287,14 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf) /* Init all registers */ ret = smsc95xx_reset(dev); + if (ret) + goto free_pdata; /* detect device revision as different 
features may be available */ ret = smsc95xx_read_reg(dev, ID_REV, &val); if (ret < 0) - return ret; + goto free_pdata; + val >>= 16; pdata->chip_id = val; pdata->mdix_ctrl = get_mdix_status(dev->net); @@ -1317,6 +1320,10 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf) schedule_delayed_work(&pdata->carrier_check, CARRIER_CHECK_DELAY); return 0; + +free_pdata: + kfree(pdata); + return ret; } static void smsc95xx_unbind(struct usbnet *dev, struct usb_interface *intf) @@ -1324,7 +1331,7 @@ static void smsc95xx_unbind(struct usbnet *dev, struct usb_interface *intf) struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]); if (pdata) { - cancel_delayed_work(&pdata->carrier_check); + cancel_delayed_work_sync(&pdata->carrier_check); netif_dbg(dev, ifdown, dev->net, "free pdata\n"); kfree(pdata); pdata = NULL; diff --git a/drivers/net/veth.c b/drivers/net/veth.c index 9f3c839f9e5f..88cfd63f08a6 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -510,13 +510,15 @@ static struct sk_buff *veth_xdp_rcv_one(struct veth_rq *rq, struct veth_xdp_tx_bq *bq) { void *hard_start = frame->data - frame->headroom; - void *head = hard_start - sizeof(struct xdp_frame); int len = frame->len, delta = 0; struct xdp_frame orig_frame; struct bpf_prog *xdp_prog; unsigned int headroom; struct sk_buff *skb; + /* bpf_xdp_adjust_head() assures BPF cannot access xdp_frame area */ + hard_start -= sizeof(struct xdp_frame); + rcu_read_lock(); xdp_prog = rcu_dereference(rq->xdp_prog); if (likely(xdp_prog)) { @@ -538,7 +540,6 @@ static struct sk_buff *veth_xdp_rcv_one(struct veth_rq *rq, break; case XDP_TX: orig_frame = *frame; - xdp.data_hard_start = head; xdp.rxq->mem = frame->mem; if (unlikely(veth_xdp_tx(rq->dev, &xdp, bq) < 0)) { trace_xdp_exception(rq->dev, xdp_prog, act); @@ -550,7 +551,6 @@ static struct sk_buff *veth_xdp_rcv_one(struct veth_rq *rq, goto xdp_xmit; case XDP_REDIRECT: orig_frame = *frame; - xdp.data_hard_start = head; xdp.rxq->mem = frame->mem; if (xdp_do_redirect(rq->dev, &xdp, xdp_prog)) { frame = &orig_frame; @@ -572,7 +572,7 @@ static struct sk_buff *veth_xdp_rcv_one(struct veth_rq *rq, rcu_read_unlock(); headroom = sizeof(struct xdp_frame) + frame->headroom - delta; - skb = veth_build_skb(head, headroom, len, 0); + skb = veth_build_skb(hard_start, headroom, len, 0); if (!skb) { xdp_return_frame(frame); goto err; diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index 216acf37ca7c..a06e6ab453f5 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c @@ -861,7 +861,8 @@ vmxnet3_parse_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq, switch (protocol) { case IPPROTO_TCP: - ctx->l4_hdr_size = tcp_hdrlen(skb); + ctx->l4_hdr_size = skb->encapsulation ? 
inner_tcp_hdrlen(skb) : + tcp_hdrlen(skb); break; case IPPROTO_UDP: ctx->l4_hdr_size = sizeof(struct udphdr); diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c index 0a38c76688ab..5e2571d23ab9 100644 --- a/drivers/net/vmxnet3/vmxnet3_ethtool.c +++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c @@ -702,6 +702,8 @@ vmxnet3_get_rss(struct net_device *netdev, u32 *p, u8 *key, u8 *hfunc) *hfunc = ETH_RSS_HASH_TOP; if (!p) return 0; + if (n > UPT1_RSS_MAX_IND_TABLE_SIZE) + return 0; while (n--) p[n] = rssConf->indTable[n]; return 0; diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 03434db36b5c..f9edc76580d9 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -1225,6 +1225,7 @@ static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb, for (h = 0; h < FDB_HASH_SIZE; ++h) { struct vxlan_fdb *f; + rcu_read_lock(); hlist_for_each_entry_rcu(f, &vxlan->fdb_head[h], hlist) { struct vxlan_rdst *rd; @@ -1237,12 +1238,15 @@ static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb, cb->nlh->nlmsg_seq, RTM_NEWNEIGH, NLM_F_MULTI, rd); - if (err < 0) + if (err < 0) { + rcu_read_unlock(); goto out; + } skip: *idx += 1; } } + rcu_read_unlock(); } out: return err; @@ -2546,7 +2550,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, ndst = &rt->dst; skb_tunnel_check_pmtu(skb, ndst, VXLAN_HEADROOM); - tos = ip_tunnel_ecn_encap(RT_TOS(tos), old_iph, skb); + tos = ip_tunnel_ecn_encap(tos, old_iph, skb); ttl = ttl ? : ip4_dst_hoplimit(&rt->dst); err = vxlan_build_skb(skb, ndst, sizeof(struct iphdr), vni, md, flags, udp_sum); @@ -2586,7 +2590,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, skb_tunnel_check_pmtu(skb, ndst, VXLAN6_HEADROOM); - tos = ip_tunnel_ecn_encap(RT_TOS(tos), old_iph, skb); + tos = ip_tunnel_ecn_encap(tos, old_iph, skb); ttl = ttl ? : ip6_dst_hoplimit(ndst); skb_scrub_packet(skb, xnet); err = vxlan_build_skb(skb, ndst, sizeof(struct ipv6hdr), @@ -2863,8 +2867,10 @@ static void vxlan_flush(struct vxlan_dev *vxlan, bool do_all) if (!do_all && (f->state & (NUD_PERMANENT | NUD_NOARP))) continue; /* the all_zeros_mac entry is deleted at vxlan_uninit */ - if (!is_zero_ether_addr(f->eth_addr)) - vxlan_fdb_destroy(vxlan, f, true, true); + if (is_zero_ether_addr(f->eth_addr) && + f->vni == vxlan->cfg.vni) + continue; + vxlan_fdb_destroy(vxlan, f, true, true); } spin_unlock_bh(&vxlan->hash_lock[h]); } diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig index dd1a147f2971..058d77d2e693 100644 --- a/drivers/net/wan/Kconfig +++ b/drivers/net/wan/Kconfig @@ -200,7 +200,7 @@ config WANXL_BUILD_FIRMWARE depends on WANXL && !PREVENT_FIRMWARE_BUILD help Allows you to rebuild firmware run by the QUICC processor. - It requires as68k, ld68k and hexdump programs. + It requires m68k toolchains and hexdump programs. You should never need this option, say N. 
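/*
 * Buffer layout behind the veth XDP change above: the struct xdp_frame
 * metadata sits directly in front of the headroom, so the skb has to be
 * built from the start of that metadata, with the headroom grown by its
 * size (offsets not to scale):
 *
 *   hard_start
 *   |<- sizeof(struct xdp_frame) ->|<- frame->headroom ->|<- frame->len ->|
 *                                                        ^
 *                                                        frame->data
 *
 *   headroom = sizeof(struct xdp_frame) + frame->headroom - delta;
 *   skb = veth_build_skb(hard_start, headroom, len, 0);
 *
 * where delta accounts for any head adjustment made by the XDP program.
 */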
diff --git a/drivers/net/wan/Makefile b/drivers/net/wan/Makefile index 701f5d2fe3b6..cf7a0a65aae8 100644 --- a/drivers/net/wan/Makefile +++ b/drivers/net/wan/Makefile @@ -40,17 +40,17 @@ $(obj)/wanxl.o: $(obj)/wanxlfw.inc ifeq ($(CONFIG_WANXL_BUILD_FIRMWARE),y) ifeq ($(ARCH),m68k) - AS68K = $(AS) - LD68K = $(LD) + M68KCC = $(CC) + M68KLD = $(LD) else - AS68K = as68k - LD68K = ld68k + M68KCC = $(CROSS_COMPILE_M68K)gcc + M68KLD = $(CROSS_COMPILE_M68K)ld endif quiet_cmd_build_wanxlfw = BLD FW $@ cmd_build_wanxlfw = \ - $(CPP) -D__ASSEMBLY__ -Wp,-MD,$(depfile) -I$(srctree)/include/uapi $< | $(AS68K) -m68360 -o $(obj)/wanxlfw.o; \ - $(LD68K) --oformat binary -Ttext 0x1000 $(obj)/wanxlfw.o -o $(obj)/wanxlfw.bin; \ + $(M68KCC) -D__ASSEMBLY__ -Wp,-MD,$(depfile) -I$(srctree)/include/uapi -c -o $(obj)/wanxlfw.o $<; \ + $(M68KLD) --oformat binary -Ttext 0x1000 $(obj)/wanxlfw.o -o $(obj)/wanxlfw.bin; \ hexdump -ve '"\n" 16/1 "0x%02X,"' $(obj)/wanxlfw.bin | sed 's/0x ,//g;1s/^/static const u8 firmware[]={/;$$s/,$$/\n};\n/' >$(obj)/wanxlfw.inc; \ rm -f $(obj)/wanxlfw.bin $(obj)/wanxlfw.o diff --git a/drivers/net/wan/hdlc_cisco.c b/drivers/net/wan/hdlc_cisco.c index a030f5aa6b95..cc33441af469 100644 --- a/drivers/net/wan/hdlc_cisco.c +++ b/drivers/net/wan/hdlc_cisco.c @@ -370,6 +370,7 @@ static int cisco_ioctl(struct net_device *dev, struct ifreq *ifr) memcpy(&state(hdlc)->settings, &new_settings, size); spin_lock_init(&state(hdlc)->lock); dev->header_ops = &cisco_header_ops; + dev->hard_header_len = sizeof(struct hdlc_header); dev->type = ARPHRD_CISCO; call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev); netif_dormant_on(dev); diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c index bf78073ee7fd..e2a83f4cd3bb 100644 --- a/drivers/net/wan/hdlc_x25.c +++ b/drivers/net/wan/hdlc_x25.c @@ -62,8 +62,10 @@ static int x25_data_indication(struct net_device *dev, struct sk_buff *skb) { unsigned char *ptr; - if (skb_cow(skb, 1)) + if (skb_cow(skb, 1)) { + kfree_skb(skb); return NET_RX_DROP; + } skb_push(skb, 1); skb_reset_network_header(skb); diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c index 0f1217b506ad..2cff914aada5 100644 --- a/drivers/net/wan/lapbether.c +++ b/drivers/net/wan/lapbether.c @@ -128,10 +128,12 @@ static int lapbeth_data_indication(struct net_device *dev, struct sk_buff *skb) { unsigned char *ptr; - skb_push(skb, 1); - - if (skb_cow(skb, 1)) + if (skb_cow(skb, 1)) { + kfree_skb(skb); return NET_RX_DROP; + } + + skb_push(skb, 1); ptr = skb->data; *ptr = X25_IFACE_DATA; @@ -155,6 +157,12 @@ static netdev_tx_t lapbeth_xmit(struct sk_buff *skb, if (!netif_running(dev)) goto drop; + /* There should be a pseudo header of 1 byte added by upper layers. + * Check to make sure it is there before reading it. 
+ */ + if (skb->len < 1) + goto drop; + switch (skb->data[0]) { case X25_IFACE_DATA: break; @@ -202,6 +210,8 @@ static void lapbeth_data_transmit(struct net_device *ndev, struct sk_buff *skb) skb->dev = dev = lapbeth->ethdev; + skb_reset_network_header(skb); + dev_hard_header(skb, dev, ETH_P_DEC, bcast_addr, NULL, 0); dev_queue_xmit(skb); @@ -303,7 +313,7 @@ static void lapbeth_setup(struct net_device *dev) dev->netdev_ops = &lapbeth_netdev_ops; dev->needs_free_netdev = true; dev->type = ARPHRD_X25; - dev->hard_header_len = 3; + dev->hard_header_len = 0; dev->mtu = 1000; dev->addr_len = 0; } @@ -324,6 +334,16 @@ static int lapbeth_new_device(struct net_device *dev) if (!ndev) goto out; + /* When transmitting data: + * first this driver removes a pseudo header of 1 byte, + * then the lapb module prepends an LAPB header of at most 3 bytes, + * then this driver prepends a length field of 2 bytes, + * then the underlying Ethernet device prepends its own header. + */ + ndev->needed_headroom = -1 + 3 + 2 + dev->hard_header_len + + dev->needed_headroom; + ndev->needed_tailroom = dev->needed_tailroom; + lapbeth = netdev_priv(ndev); lapbeth->axdev = ndev; diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c index 914be5847386..cdcc380b4c26 100644 --- a/drivers/net/wan/x25_asy.c +++ b/drivers/net/wan/x25_asy.c @@ -183,7 +183,7 @@ static inline void x25_asy_unlock(struct x25_asy *sl) netif_wake_queue(sl->dev); } -/* Send one completely decapsulated IP datagram to the IP layer. */ +/* Send an LAPB frame to the LAPB module to process. */ static void x25_asy_bump(struct x25_asy *sl) { @@ -195,13 +195,12 @@ static void x25_asy_bump(struct x25_asy *sl) count = sl->rcount; dev->stats.rx_bytes += count; - skb = dev_alloc_skb(count+1); + skb = dev_alloc_skb(count); if (skb == NULL) { netdev_warn(sl->dev, "memory squeeze, dropping packet\n"); dev->stats.rx_dropped++; return; } - skb_push(skb, 1); /* LAPB internal control */ skb_put_data(skb, sl->rbuff, count); skb->protocol = x25_type_trans(skb, sl->dev); err = lapb_data_received(skb->dev, skb); @@ -209,7 +208,6 @@ static void x25_asy_bump(struct x25_asy *sl) kfree_skb(skb); printk(KERN_DEBUG "x25_asy: data received err - %d\n", err); } else { - netif_rx(skb); dev->stats.rx_packets++; } } @@ -356,12 +354,21 @@ static netdev_tx_t x25_asy_xmit(struct sk_buff *skb, */ /* - * Called when I frame data arrives. We did the work above - throw it - * at the net layer. + * Called when I frame data arrive. We add a pseudo header for upper + * layers and pass it to upper layers. 
*/ static int x25_asy_data_indication(struct net_device *dev, struct sk_buff *skb) { + if (skb_cow(skb, 1)) { + kfree_skb(skb); + return NET_RX_DROP; + } + skb_push(skb, 1); + skb->data[0] = X25_IFACE_DATA; + + skb->protocol = x25_type_trans(skb, dev); + return netif_rx(skb); } @@ -657,7 +664,7 @@ static void x25_asy_unesc(struct x25_asy *sl, unsigned char s) switch (s) { case X25_END: if (!test_and_clear_bit(SLF_ERROR, &sl->flags) && - sl->rcount > 2) + sl->rcount >= 2) x25_asy_bump(sl); clear_bit(SLF_ESCAPE, &sl->flags); sl->rcount = 0; diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h index 30c080094af1..bd5fa4dbab9c 100644 --- a/drivers/net/wireless/ath/ath10k/htt.h +++ b/drivers/net/wireless/ath/ath10k/htt.h @@ -2033,6 +2033,7 @@ struct ath10k_htt_tx_ops { int (*htt_h2t_aggr_cfg_msg)(struct ath10k_htt *htt, u8 max_subfrms_ampdu, u8 max_subfrms_amsdu); + void (*htt_flush_tx)(struct ath10k_htt *htt); }; static inline int ath10k_htt_send_rx_ring_cfg(struct ath10k_htt *htt) @@ -2072,6 +2073,12 @@ static inline int ath10k_htt_tx(struct ath10k_htt *htt, return htt->tx_ops->htt_tx(htt, txmode, msdu); } +static inline void ath10k_htt_flush_tx(struct ath10k_htt *htt) +{ + if (htt->tx_ops->htt_flush_tx) + htt->tx_ops->htt_flush_tx(htt); +} + static inline int ath10k_htt_alloc_txbuff(struct ath10k_htt *htt) { if (!htt->tx_ops->htt_alloc_txbuff) diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c index a182c0944cc7..c38e1963ebc0 100644 --- a/drivers/net/wireless/ath/ath10k/htt_tx.c +++ b/drivers/net/wireless/ath/ath10k/htt_tx.c @@ -529,9 +529,14 @@ void ath10k_htt_tx_destroy(struct ath10k_htt *htt) htt->tx_mem_allocated = false; } -void ath10k_htt_tx_stop(struct ath10k_htt *htt) +static void ath10k_htt_flush_tx_queue(struct ath10k_htt *htt) { idr_for_each(&htt->pending_tx, ath10k_htt_tx_clean_up_pending, htt->ar); +} + +void ath10k_htt_tx_stop(struct ath10k_htt *htt) +{ + ath10k_htt_flush_tx_queue(htt); idr_destroy(&htt->pending_tx); } @@ -1535,7 +1540,9 @@ static int ath10k_htt_tx_32(struct ath10k_htt *htt, err_unmap_msdu: dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE); err_free_msdu_id: + spin_lock_bh(&htt->tx_lock); ath10k_htt_tx_free_msdu_id(htt, msdu_id); + spin_unlock_bh(&htt->tx_lock); err: return res; } @@ -1742,7 +1749,9 @@ static int ath10k_htt_tx_64(struct ath10k_htt *htt, err_unmap_msdu: dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE); err_free_msdu_id: + spin_lock_bh(&htt->tx_lock); ath10k_htt_tx_free_msdu_id(htt, msdu_id); + spin_unlock_bh(&htt->tx_lock); err: return res; } @@ -1774,6 +1783,7 @@ static const struct ath10k_htt_tx_ops htt_tx_ops_hl = { .htt_send_frag_desc_bank_cfg = ath10k_htt_send_frag_desc_bank_cfg_32, .htt_tx = ath10k_htt_tx_hl, .htt_h2t_aggr_cfg_msg = ath10k_htt_h2t_aggr_cfg_msg_32, + .htt_flush_tx = ath10k_htt_flush_tx_queue, }; void ath10k_htt_set_tx_ops(struct ath10k_htt *htt) diff --git a/drivers/net/wireless/ath/ath10k/hw.c b/drivers/net/wireless/ath/ath10k/hw.c index c415e971735b..004af89a02b8 100644 --- a/drivers/net/wireless/ath/ath10k/hw.c +++ b/drivers/net/wireless/ath/ath10k/hw.c @@ -1145,6 +1145,7 @@ static bool ath10k_qca99x0_rx_desc_msdu_limit_error(struct htt_rx_desc *rxd) const struct ath10k_hw_ops qca99x0_ops = { .rx_desc_get_l3_pad_bytes = ath10k_qca99x0_rx_desc_get_l3_pad_bytes, .rx_desc_get_msdu_limit_error = ath10k_qca99x0_rx_desc_msdu_limit_error, + .is_rssi_enable = ath10k_htt_tx_rssi_enable, }; const struct ath10k_hw_ops 
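/*
 * Worked example of the lapbether needed_headroom computation above, for a
 * plain Ethernet lower device (hard_header_len = 14, needed_headroom = 0):
 *
 *    -1  the one-byte X.25 pseudo header this driver strips first
 *    +3  worst-case LAPB header prepended by the lapb module
 *    +2  length field prepended by this driver
 *   +14  Ethernet header prepended by the lower device
 *   ----
 *    18  bytes of headroom the lapbeth device asks upper layers to reserve
 */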
qca6174_ops = { diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h index 2ae57c1de7b5..ae4c9edc445c 100644 --- a/drivers/net/wireless/ath/ath10k/hw.h +++ b/drivers/net/wireless/ath/ath10k/hw.h @@ -810,7 +810,7 @@ ath10k_is_rssi_enable(struct ath10k_hw_params *hw, #define TARGET_10_4_TX_DBG_LOG_SIZE 1024 #define TARGET_10_4_NUM_WDS_ENTRIES 32 -#define TARGET_10_4_DMA_BURST_SIZE 0 +#define TARGET_10_4_DMA_BURST_SIZE 1 #define TARGET_10_4_MAC_AGGR_DELIM 0 #define TARGET_10_4_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK 1 #define TARGET_10_4_VOW_CONFIG 0 diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index 36d24ea126a2..d373602a8014 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -3911,6 +3911,9 @@ void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work) if (ret) { ath10k_warn(ar, "failed to transmit management frame by ref via WMI: %d\n", ret); + /* remove this msdu from idr tracking */ + ath10k_wmi_cleanup_mgmt_tx_send(ar, skb); + dma_unmap_single(ar->dev, paddr, skb->len, DMA_TO_DEVICE); ieee80211_free_txskb(ar->hw, skb); @@ -7082,6 +7085,7 @@ static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, ath10k_wmi_peer_flush(ar, arvif->vdev_id, arvif->bssid, bitmap); } + ath10k_htt_flush_tx(&ar->htt); } return; } @@ -8811,7 +8815,6 @@ int ath10k_mac_register(struct ath10k *ar) ar->hw->wiphy->max_scan_ie_len = WLAN_SCAN_PARAMS_MAX_IE_LEN; if (test_bit(WMI_SERVICE_NLO, ar->wmi.svc_map)) { - ar->hw->wiphy->max_sched_scan_reqs = 1; ar->hw->wiphy->max_sched_scan_ssids = WMI_PNO_MAX_SUPP_NETWORKS; ar->hw->wiphy->max_match_sets = WMI_PNO_MAX_SUPP_NETWORKS; ar->hw->wiphy->max_sched_scan_ie_len = WMI_PNO_MAX_IE_LENGTH; diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c index 0a727502d14c..fd49d3419e79 100644 --- a/drivers/net/wireless/ath/ath10k/pci.c +++ b/drivers/net/wireless/ath/ath10k/pci.c @@ -2074,6 +2074,7 @@ static void ath10k_pci_hif_stop(struct ath10k *ar) ath10k_pci_irq_sync(ar); napi_synchronize(&ar->napi); napi_disable(&ar->napi); + cancel_work_sync(&ar_pci->dump_work); /* Most likely the device has HTT Rx ring configured. 
The only way to * prevent the device from accessing (and possible corrupting) host diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c index 39abf8b12903..f46b9083bbf1 100644 --- a/drivers/net/wireless/ath/ath10k/txrx.c +++ b/drivers/net/wireless/ath/ath10k/txrx.c @@ -84,9 +84,11 @@ int ath10k_txrx_tx_unref(struct ath10k_htt *htt, wake_up(&htt->empty_tx_wq); spin_unlock_bh(&htt->tx_lock); + rcu_read_lock(); if (txq && txq->sta && skb_cb->airtime_est) ieee80211_sta_register_airtime(txq->sta, txq->tid, skb_cb->airtime_est, 0); + rcu_read_unlock(); if (ar->bus_param.dev_type != ATH10K_DEV_TYPE_HL) dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE); diff --git a/drivers/net/wireless/ath/ath10k/wmi-ops.h b/drivers/net/wireless/ath/ath10k/wmi-ops.h index 1491c25518bb..edccabc667e8 100644 --- a/drivers/net/wireless/ath/ath10k/wmi-ops.h +++ b/drivers/net/wireless/ath/ath10k/wmi-ops.h @@ -133,6 +133,7 @@ struct wmi_ops { struct sk_buff *(*gen_mgmt_tx_send)(struct ath10k *ar, struct sk_buff *skb, dma_addr_t paddr); + int (*cleanup_mgmt_tx_send)(struct ath10k *ar, struct sk_buff *msdu); struct sk_buff *(*gen_dbglog_cfg)(struct ath10k *ar, u64 module_enable, u32 log_level); struct sk_buff *(*gen_pktlog_enable)(struct ath10k *ar, u32 filter); @@ -441,6 +442,15 @@ ath10k_wmi_get_txbf_conf_scheme(struct ath10k *ar) return ar->wmi.ops->get_txbf_conf_scheme(ar); } +static inline int +ath10k_wmi_cleanup_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu) +{ + if (!ar->wmi.ops->cleanup_mgmt_tx_send) + return -EOPNOTSUPP; + + return ar->wmi.ops->cleanup_mgmt_tx_send(ar, msdu); +} + static inline int ath10k_wmi_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu, dma_addr_t paddr) diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c index eb0c963d9fd5..9d5b9df29c35 100644 --- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c +++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c @@ -2837,6 +2837,18 @@ ath10k_wmi_tlv_op_gen_request_stats(struct ath10k *ar, u32 stats_mask) return skb; } +static int +ath10k_wmi_tlv_op_cleanup_mgmt_tx_send(struct ath10k *ar, + struct sk_buff *msdu) +{ + struct ath10k_skb_cb *cb = ATH10K_SKB_CB(msdu); + struct ath10k_wmi *wmi = &ar->wmi; + + idr_remove(&wmi->mgmt_pending_tx, cb->msdu_id); + + return 0; +} + static int ath10k_wmi_mgmt_tx_alloc_msdu_id(struct ath10k *ar, struct sk_buff *skb, dma_addr_t paddr) @@ -2911,6 +2923,8 @@ ath10k_wmi_tlv_op_gen_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu, if (desc_id < 0) goto err_free_skb; + cb->msdu_id = desc_id; + ptr = (void *)skb->data; tlv = ptr; tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_MGMT_TX_CMD); @@ -4339,6 +4353,7 @@ static const struct wmi_ops wmi_tlv_ops = { .gen_force_fw_hang = ath10k_wmi_tlv_op_gen_force_fw_hang, /* .gen_mgmt_tx = not implemented; HTT is used */ .gen_mgmt_tx_send = ath10k_wmi_tlv_op_gen_mgmt_tx_send, + .cleanup_mgmt_tx_send = ath10k_wmi_tlv_op_cleanup_mgmt_tx_send, .gen_dbglog_cfg = ath10k_wmi_tlv_op_gen_dbglog_cfg, .gen_pktlog_enable = ath10k_wmi_tlv_op_gen_pktlog_enable, .gen_pktlog_disable = ath10k_wmi_tlv_op_gen_pktlog_disable, diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c index 4ed21dad6a8e..3f563e02d17d 100644 --- a/drivers/net/wireless/ath/ath9k/hif_usb.c +++ b/drivers/net/wireless/ath/ath9k/hif_usb.c @@ -733,11 +733,13 @@ static void ath9k_hif_usb_reg_in_cb(struct urb *urb) return; } + rx_buf->skb = nskb; + usb_fill_int_urb(urb, hif_dev->udev, 
usb_rcvintpipe(hif_dev->udev, USB_REG_IN_PIPE), nskb->data, MAX_REG_IN_BUF_SIZE, - ath9k_hif_usb_reg_in_cb, nskb, 1); + ath9k_hif_usb_reg_in_cb, rx_buf, 1); } resubmit: diff --git a/drivers/net/wireless/ath/carl9170/fw.c b/drivers/net/wireless/ath/carl9170/fw.c index 51934d191f33..1ab09e1c9ec5 100644 --- a/drivers/net/wireless/ath/carl9170/fw.c +++ b/drivers/net/wireless/ath/carl9170/fw.c @@ -338,9 +338,7 @@ static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len) ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC); if (SUPP(CARL9170FW_WLANTX_CAB)) { - if_comb_types |= - BIT(NL80211_IFTYPE_AP) | - BIT(NL80211_IFTYPE_P2P_GO); + if_comb_types |= BIT(NL80211_IFTYPE_AP); #ifdef CONFIG_MAC80211_MESH if_comb_types |= diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c index 40a8054f8aa6..21ca62b06214 100644 --- a/drivers/net/wireless/ath/carl9170/main.c +++ b/drivers/net/wireless/ath/carl9170/main.c @@ -582,11 +582,10 @@ static int carl9170_init_interface(struct ar9170 *ar, ar->disable_offload |= ((vif->type != NL80211_IFTYPE_STATION) && (vif->type != NL80211_IFTYPE_AP)); - /* While the driver supports HW offload in a single - * P2P client configuration, it doesn't support HW - * offload in the favourit, concurrent P2P GO+CLIENT - * configuration. Hence, HW offload will always be - * disabled for P2P. + /* The driver used to have P2P GO+CLIENT support, + * but since this was dropped and we don't know if + * there are any gremlins lurking in the shadows, + * so best we keep HW offload disabled for P2P. */ ar->disable_offload |= vif->p2p; @@ -639,18 +638,6 @@ static int carl9170_op_add_interface(struct ieee80211_hw *hw, if (vif->type == NL80211_IFTYPE_STATION) break; - /* P2P GO [master] use-case - * Because the P2P GO station is selected dynamically - * by all participating peers of a WIFI Direct network, - * the driver has be able to change the main interface - * operating mode on the fly. 
- */ - if (main_vif->p2p && vif->p2p && - vif->type == NL80211_IFTYPE_AP) { - old_main = main_vif; - break; - } - err = -EBUSY; rcu_read_unlock(); diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c index 79998a3ddb7a..ad051f34e65b 100644 --- a/drivers/net/wireless/ath/wcn36xx/main.c +++ b/drivers/net/wireless/ath/wcn36xx/main.c @@ -1341,7 +1341,7 @@ static int wcn36xx_probe(struct platform_device *pdev) if (addr && ret != ETH_ALEN) { wcn36xx_err("invalid local-mac-address\n"); ret = -EINVAL; - goto out_wq; + goto out_destroy_ept; } else if (addr) { wcn36xx_info("mac address: %pM\n", addr); SET_IEEE80211_PERM_ADDR(wcn->hw, addr); @@ -1349,7 +1349,7 @@ static int wcn36xx_probe(struct platform_device *pdev) ret = wcn36xx_platform_get_resources(wcn, pdev); if (ret) - goto out_wq; + goto out_destroy_ept; wcn36xx_init_ieee80211(wcn); ret = ieee80211_register_hw(wcn->hw); @@ -1361,6 +1361,8 @@ static int wcn36xx_probe(struct platform_device *pdev) out_unmap: iounmap(wcn->ccu_base); iounmap(wcn->dxe_base); +out_destroy_ept: + rpmsg_destroy_ept(wcn->smd_channel); out_wq: ieee80211_free_hw(hw); out_err: diff --git a/drivers/net/wireless/broadcom/b43/main.c b/drivers/net/wireless/broadcom/b43/main.c index b85603e91c7a..3432dfe1ddb4 100644 --- a/drivers/net/wireless/broadcom/b43/main.c +++ b/drivers/net/wireless/broadcom/b43/main.c @@ -5569,7 +5569,7 @@ static struct b43_wl *b43_wireless_init(struct b43_bus_dev *dev) /* fill hw info */ ieee80211_hw_set(hw, RX_INCLUDES_FCS); ieee80211_hw_set(hw, SIGNAL_DBM); - + ieee80211_hw_set(hw, MFP_CAPABLE); hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_AP) | BIT(NL80211_IFTYPE_MESH_POINT) | diff --git a/drivers/net/wireless/broadcom/b43legacy/main.c b/drivers/net/wireless/broadcom/b43legacy/main.c index 8b6b657c4b85..5208a39fd6f7 100644 --- a/drivers/net/wireless/broadcom/b43legacy/main.c +++ b/drivers/net/wireless/broadcom/b43legacy/main.c @@ -3801,6 +3801,7 @@ static int b43legacy_wireless_init(struct ssb_device *dev) /* fill hw info */ ieee80211_hw_set(hw, RX_INCLUDES_FCS); ieee80211_hw_set(hw, SIGNAL_DBM); + ieee80211_hw_set(hw, MFP_CAPABLE); /* Allow WPA3 in software */ hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_AP) | diff --git a/drivers/net/wireless/broadcom/b43legacy/xmit.c b/drivers/net/wireless/broadcom/b43legacy/xmit.c index e9b23c2e5bd4..efd63f4ce74f 100644 --- a/drivers/net/wireless/broadcom/b43legacy/xmit.c +++ b/drivers/net/wireless/broadcom/b43legacy/xmit.c @@ -558,6 +558,7 @@ void b43legacy_rx(struct b43legacy_wldev *dev, default: b43legacywarn(dev->wl, "Unexpected value for chanstat (0x%X)\n", chanstat); + goto drop; } memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status)); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index 04a58aa2a571..4266668b5bce 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c @@ -82,6 +82,8 @@ #define BRCMF_ND_INFO_TIMEOUT msecs_to_jiffies(2000) +#define BRCMF_PS_MAX_TIMEOUT_MS 2000 + #define BRCMF_ASSOC_PARAMS_FIXED_SIZE \ (sizeof(struct brcmf_assoc_params_le) - sizeof(u16)) @@ -2789,6 +2791,12 @@ brcmf_cfg80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *ndev, else bphy_err(drvr, "error (%d)\n", err); } + + err = brcmf_fil_iovar_int_set(ifp, "pm2_sleep_ret", + min_t(u32, timeout, BRCMF_PS_MAX_TIMEOUT_MS)); + if (err) + bphy_err(drvr, "Unable to set pm timeout, (%d)\n", err); + 
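/*
 * The wcn36xx_probe() fix above adds an unwind label so the rpmsg endpoint
 * opened earlier in probe is destroyed on every later failure, not only on
 * remove.  Generic shape of that pattern, with hypothetical step helpers:
 */
static int example_open_channel(void)     { return 0; }  /* stand-ins for the */
static void example_close_channel(void)   { }            /* real probe steps  */
static int example_map_resources(void)    { return 0; }
static void example_unmap_resources(void) { }
static int example_register(void)         { return 0; }

static int example_probe(void)
{
        int err;

        err = example_open_channel();
        if (err)
                return err;

        err = example_map_resources();
        if (err)
                goto err_close_channel;         /* undo only what succeeded */

        err = example_register();
        if (err)
                goto err_unmap;

        return 0;

err_unmap:
        example_unmap_resources();
err_close_channel:
        example_close_channel();
        return err;
}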
done: brcmf_dbg(TRACE, "Exit\n"); return err; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c index 2c3526aeca6f..545015610cf8 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c @@ -283,13 +283,14 @@ void brcmf_feat_attach(struct brcmf_pub *drvr) if (!err) ifp->drvr->feat_flags |= BIT(BRCMF_FEAT_SCAN_RANDOM_MAC); + brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_FWSUP, "sup_wpa"); + if (drvr->settings->feature_disable) { brcmf_dbg(INFO, "Features: 0x%02x, disable: 0x%02x\n", ifp->drvr->feat_flags, drvr->settings->feature_disable); ifp->drvr->feat_flags &= ~drvr->settings->feature_disable; } - brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_FWSUP, "sup_wpa"); brcmf_feat_firmware_overrides(drvr); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h index f3f7b1514167..ee728d6af2a3 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h @@ -19,7 +19,7 @@ #define BRCMF_ARP_OL_PEER_AUTO_REPLY 0x00000008 #define BRCMF_BSS_INFO_VERSION 109 /* curr ver of brcmf_bss_info_le struct */ -#define BRCMF_BSS_RSSI_ON_CHANNEL 0x0002 +#define BRCMF_BSS_RSSI_ON_CHANNEL 0x0004 #define BRCMF_STA_BRCM 0x00000001 /* Running a Broadcom driver */ #define BRCMF_STA_WME 0x00000002 /* WMM association */ diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c index 06aab9001f78..06478733b2ef 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c @@ -643,6 +643,7 @@ static inline int brcmf_fws_hanger_poppkt(struct brcmf_fws_hanger *h, static void brcmf_fws_psq_flush(struct brcmf_fws_info *fws, struct pktq *q, int ifidx) { + struct brcmf_fws_hanger_item *hi; bool (*matchfn)(struct sk_buff *, void *) = NULL; struct sk_buff *skb; int prec; @@ -654,6 +655,9 @@ static void brcmf_fws_psq_flush(struct brcmf_fws_info *fws, struct pktq *q, skb = brcmu_pktq_pdeq_match(q, prec, matchfn, &ifidx); while (skb) { hslot = brcmf_skb_htod_tag_get_field(skb, HSLOT); + hi = &fws->hanger.items[hslot]; + WARN_ON(skb != hi->pkt); + hi->state = BRCMF_FWS_HANGER_ITEM_STATE_FREE; brcmf_fws_hanger_poppkt(&fws->hanger, hslot, &skb, true); brcmu_pkt_buf_free_skb(skb); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c index 016256c0af6a..0f9ecadc0d5e 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c @@ -3862,7 +3862,11 @@ static void brcmf_sdio_bus_watchdog(struct brcmf_sdio *bus) if (bus->idlecount > bus->idletime) { brcmf_dbg(SDIO, "idle\n"); sdio_claim_host(bus->sdiodev->func1); - brcmf_sdio_wd_timer(bus, false); +#ifdef DEBUG + if (!BRCMF_FWCON_ON() || + bus->console_interval == 0) +#endif + brcmf_sdio_wd_timer(bus, false); bus->idlecount = 0; brcmf_sdio_bus_sleep(bus, true, false); sdio_release_host(bus->sdiodev->func1); diff --git a/drivers/net/wireless/intel/iwlegacy/common.c b/drivers/net/wireless/intel/iwlegacy/common.c index 746749f37996..1107b96a8a88 100644 --- a/drivers/net/wireless/intel/iwlegacy/common.c +++ b/drivers/net/wireless/intel/iwlegacy/common.c @@ -4286,8 +4286,8 @@ il_apm_init(struct 
il_priv *il) * power savings, even without L1. */ if (il->cfg->set_l0s) { - pcie_capability_read_word(il->pci_dev, PCI_EXP_LNKCTL, &lctl); - if (lctl & PCI_EXP_LNKCTL_ASPM_L1) { + ret = pcie_capability_read_word(il->pci_dev, PCI_EXP_LNKCTL, &lctl); + if (!ret && (lctl & PCI_EXP_LNKCTL_ASPM_L1)) { /* L1-ASPM enabled; disable(!) L0S */ il_set_bit(il, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c index ad18c2f1a806..524f9dd2323d 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c @@ -5,10 +5,9 @@ * * GPL LICENSE SUMMARY * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019 Intel Corporation + * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -28,10 +27,9 @@ * * BSD LICENSE * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019 Intel Corporation + * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -478,6 +476,11 @@ static ssize_t iwl_dbgfs_amsdu_len_write(struct ieee80211_sta *sta, if (kstrtou16(buf, 0, &amsdu_len)) return -EINVAL; + /* only change from debug set <-> debug unset */ + if ((amsdu_len && mvmsta->orig_amsdu_len) || + (!!amsdu_len && mvmsta->orig_amsdu_len)) + return -EBUSY; + if (amsdu_len) { mvmsta->orig_amsdu_len = sta->max_amsdu_len; sta->max_amsdu_len = amsdu_len; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index 6ca087ffd163..ed92a8e8cd51 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -1193,14 +1193,13 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm) */ flush_work(&mvm->roc_done_wk); + iwl_mvm_rm_aux_sta(mvm); + iwl_mvm_stop_device(mvm); iwl_mvm_async_handlers_purge(mvm); /* async_handlers_list is empty and will stay empty: HW is stopped */ - /* the fw is stopped, the aux sta is dead: clean up driver state */ - iwl_mvm_del_aux_sta(mvm); - /* * Clear IN_HW_RESTART and HW_RESTART_REQUESTED flag when stopping the * hw (as restart_complete() won't be called in this case) and mac80211 diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c index 5b2bd603febf..be8bc0601d7b 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c @@ -367,14 +367,15 @@ void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm, u16 size = le32_to_cpu(notif->amsdu_size); int i; - /* - * In debug sta->max_amsdu_len < size - * so also check with orig_amsdu_len which holds the original - * data before debugfs changed the value - */ - if (WARN_ON(sta->max_amsdu_len < size && - mvmsta->orig_amsdu_len < size)) + if (sta->max_amsdu_len < size) { + /* + * In debug sta->max_amsdu_len < size + * so also check with orig_amsdu_len which holds the + * original data before debugfs changed the value + */ + WARN_ON(mvmsta->orig_amsdu_len < size); 
goto out; + } mvmsta->amsdu_enabled = le32_to_cpu(notif->amsdu_enabled); mvmsta->max_amsdu_len = size; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index 71d339e90a9e..a36aa9e85e0b 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c @@ -1184,17 +1184,15 @@ static int iwl_mvm_inactivity_check(struct iwl_mvm *mvm, u8 alloc_for_sta) for_each_set_bit(i, &changetid_queues, IWL_MAX_HW_QUEUES) iwl_mvm_change_queue_tid(mvm, i); + rcu_read_unlock(); + if (free_queue >= 0 && alloc_for_sta != IWL_MVM_INVALID_STA) { ret = iwl_mvm_free_inactive_queue(mvm, free_queue, queue_owner, alloc_for_sta); - if (ret) { - rcu_read_unlock(); + if (ret) return ret; - } } - rcu_read_unlock(); - return free_queue; } @@ -2080,18 +2078,26 @@ int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) return ret; } +int iwl_mvm_rm_aux_sta(struct iwl_mvm *mvm) +{ + int ret; + + lockdep_assert_held(&mvm->mutex); + + iwl_mvm_disable_txq(mvm, NULL, mvm->aux_queue, IWL_MAX_TID_COUNT, 0); + ret = iwl_mvm_rm_sta_common(mvm, mvm->aux_sta.sta_id); + if (ret) + IWL_WARN(mvm, "Failed sending remove station\n"); + iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta); + + return ret; +} + void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm) { iwl_mvm_dealloc_int_sta(mvm, &mvm->snif_sta); } -void iwl_mvm_del_aux_sta(struct iwl_mvm *mvm) -{ - lockdep_assert_held(&mvm->mutex); - - iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta); -} - /* * Send the add station command for the vif's broadcast station. * Assumes that the station was already allocated. diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h index 8d70093847cb..da2d1ac01229 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h @@ -8,7 +8,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright(c) 2015 - 2016 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019 Intel Corporation + * Copyright(c) 2018 - 2020 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,7 +31,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright(c) 2015 - 2016 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019 Intel Corporation + * Copyright(c) 2018 - 2020 Intel Corporation * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -541,7 +541,7 @@ int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, int tid, u8 queue, bool start); int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm); -void iwl_mvm_del_aux_sta(struct iwl_mvm *mvm); +int iwl_mvm_rm_aux_sta(struct iwl_mvm *mvm); int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); diff --git a/drivers/net/wireless/marvell/libertas_tf/if_usb.c b/drivers/net/wireless/marvell/libertas_tf/if_usb.c index 25ac9db35dbf..bedc09215088 100644 --- a/drivers/net/wireless/marvell/libertas_tf/if_usb.c +++ b/drivers/net/wireless/marvell/libertas_tf/if_usb.c @@ -247,10 +247,10 @@ static void if_usb_disconnect(struct usb_interface *intf) lbtf_deb_enter(LBTF_DEB_MAIN); - if_usb_reset_device(priv); - - if (priv) + if (priv) { + if_usb_reset_device(priv); lbtf_remove_card(priv); + } /* Unlink and free urb */ if_usb_free(cardp); diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c index d89684168500..9e6dc289ec3e 100644 --- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c +++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c @@ -1496,7 +1496,8 @@ mwifiex_cfg80211_dump_station(struct wiphy *wiphy, struct net_device *dev, int idx, u8 *mac, struct station_info *sinfo) { struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); - static struct mwifiex_sta_node *node; + struct mwifiex_sta_node *node; + int i; if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) && priv->media_connected && idx == 0) { @@ -1506,13 +1507,10 @@ mwifiex_cfg80211_dump_station(struct wiphy *wiphy, struct net_device *dev, mwifiex_send_cmd(priv, HOST_CMD_APCMD_STA_LIST, HostCmd_ACT_GEN_GET, 0, NULL, true); - if (node && (&node->list == &priv->sta_list)) { - node = NULL; - return -ENOENT; - } - - node = list_prepare_entry(node, &priv->sta_list, list); - list_for_each_entry_continue(node, &priv->sta_list, list) { + i = 0; + list_for_each_entry(node, &priv->sta_list, list) { + if (i++ != idx) + continue; ether_addr_copy(mac, node->mac_addr); return mwifiex_dump_station_info(priv, node, sinfo); } diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.h b/drivers/net/wireless/marvell/mwifiex/sdio.h index f672bdf52cc1..2d9ec225aead 100644 --- a/drivers/net/wireless/marvell/mwifiex/sdio.h +++ b/drivers/net/wireless/marvell/mwifiex/sdio.h @@ -36,9 +36,9 @@ #define SD8897_DEFAULT_FW_NAME "mrvl/sd8897_uapsta.bin" #define SD8887_DEFAULT_FW_NAME "mrvl/sd8887_uapsta.bin" #define SD8801_DEFAULT_FW_NAME "mrvl/sd8801_uapsta.bin" -#define SD8977_DEFAULT_FW_NAME "mrvl/sd8977_uapsta.bin" +#define SD8977_DEFAULT_FW_NAME "mrvl/sdsd8977_combo_v2.bin" #define SD8987_DEFAULT_FW_NAME "mrvl/sd8987_uapsta.bin" -#define SD8997_DEFAULT_FW_NAME "mrvl/sd8997_uapsta.bin" +#define SD8997_DEFAULT_FW_NAME "mrvl/sdsd8997_combo_v4.bin" #define BLOCK_MODE 1 #define BYTE_MODE 0 diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c index 20c206da0631..7ae2c34f65db 100644 --- a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c +++ b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c @@ -580,6 +580,11 @@ static int mwifiex_ret_802_11_key_material_v1(struct mwifiex_private *priv, { struct host_cmd_ds_802_11_key_material *key = &resp->params.key_material; + int len; + + len = le16_to_cpu(key->key_param_set.key_len); + if (len > sizeof(key->key_param_set.key)) 
+ return -EINVAL; if (le16_to_cpu(key->action) == HostCmd_ACT_GEN_SET) { if ((le16_to_cpu(key->key_param_set.key_info) & KEY_MCAST)) { @@ -593,9 +598,8 @@ static int mwifiex_ret_802_11_key_material_v1(struct mwifiex_private *priv, memset(priv->aes_key.key_param_set.key, 0, sizeof(key->key_param_set.key)); - priv->aes_key.key_param_set.key_len = key->key_param_set.key_len; - memcpy(priv->aes_key.key_param_set.key, key->key_param_set.key, - le16_to_cpu(priv->aes_key.key_param_set.key_len)); + priv->aes_key.key_param_set.key_len = cpu_to_le16(len); + memcpy(priv->aes_key.key_param_set.key, key->key_param_set.key, len); return 0; } @@ -610,9 +614,14 @@ static int mwifiex_ret_802_11_key_material_v2(struct mwifiex_private *priv, struct host_cmd_ds_command *resp) { struct host_cmd_ds_802_11_key_material_v2 *key_v2; - __le16 len; + int len; key_v2 = &resp->params.key_material_v2; + + len = le16_to_cpu(key_v2->key_param_set.key_params.aes.key_len); + if (len > WLAN_KEY_LEN_CCMP) + return -EINVAL; + if (le16_to_cpu(key_v2->action) == HostCmd_ACT_GEN_SET) { if ((le16_to_cpu(key_v2->key_param_set.key_info) & KEY_MCAST)) { mwifiex_dbg(priv->adapter, INFO, "info: key: GTK is set\n"); @@ -628,10 +637,9 @@ static int mwifiex_ret_802_11_key_material_v2(struct mwifiex_private *priv, memset(priv->aes_key_v2.key_param_set.key_params.aes.key, 0, WLAN_KEY_LEN_CCMP); priv->aes_key_v2.key_param_set.key_params.aes.key_len = - key_v2->key_param_set.key_params.aes.key_len; - len = priv->aes_key_v2.key_param_set.key_params.aes.key_len; + cpu_to_le16(len); memcpy(priv->aes_key_v2.key_param_set.key_params.aes.key, - key_v2->key_param_set.key_params.aes.key, le16_to_cpu(len)); + key_v2->key_param_set.key_params.aes.key, len); return 0; } diff --git a/drivers/net/wireless/mediatek/mt76/agg-rx.c b/drivers/net/wireless/mediatek/mt76/agg-rx.c index 8f3d36a15e17..cbff0dfc9631 100644 --- a/drivers/net/wireless/mediatek/mt76/agg-rx.c +++ b/drivers/net/wireless/mediatek/mt76/agg-rx.c @@ -143,8 +143,8 @@ void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames) struct ieee80211_sta *sta; struct mt76_rx_tid *tid; bool sn_less; - u16 seqno, head, size; - u8 ackp, idx; + u16 seqno, head, size, idx; + u8 ackp; __skb_queue_tail(frames, skb); @@ -230,7 +230,7 @@ out: } int mt76_rx_aggr_start(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tidno, - u16 ssn, u8 size) + u16 ssn, u16 size) { struct mt76_rx_tid *tid; @@ -254,7 +254,7 @@ EXPORT_SYMBOL_GPL(mt76_rx_aggr_start); static void mt76_rx_aggr_shutdown(struct mt76_dev *dev, struct mt76_rx_tid *tid) { - u8 size = tid->size; + u16 size = tid->size; int i; cancel_delayed_work(&tid->reorder_work); diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h index 502814c26b33..52a16b42dfd7 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76.h +++ b/drivers/net/wireless/mediatek/mt76/mt76.h @@ -240,8 +240,8 @@ struct mt76_rx_tid { struct delayed_work reorder_work; u16 head; - u8 size; - u8 nframes; + u16 size; + u16 nframes; u8 started:1, stopped:1, timer_pending:1; @@ -723,7 +723,7 @@ int mt76_get_survey(struct ieee80211_hw *hw, int idx, void mt76_set_stream_caps(struct mt76_dev *dev, bool vht); int mt76_rx_aggr_start(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid, - u16 ssn, u8 size); + u16 ssn, u16 size); void mt76_rx_aggr_stop(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid); void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid, diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 
b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c index 842cd81704db..b6867d93c0e3 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c @@ -119,8 +119,10 @@ mt7615_mcu_parse_response(struct mt7615_dev *dev, int cmd, struct mt7615_mcu_rxd *rxd = (struct mt7615_mcu_rxd *)skb->data; int ret = 0; - if (seq != rxd->seq) - return -EAGAIN; + if (seq != rxd->seq) { + ret = -EAGAIN; + goto out; + } switch (cmd) { case -MCU_CMD_PATCH_SEM_CONTROL: @@ -134,6 +136,7 @@ mt7615_mcu_parse_response(struct mt7615_dev *dev, int cmd, default: break; } +out: dev_kfree_skb(skb); return ret; diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c index 348b0072cdd6..bad06939a247 100644 --- a/drivers/net/wireless/realtek/rtlwifi/usb.c +++ b/drivers/net/wireless/realtek/rtlwifi/usb.c @@ -718,8 +718,11 @@ static int _rtl_usb_receive(struct ieee80211_hw *hw) usb_anchor_urb(urb, &rtlusb->rx_submitted); err = usb_submit_urb(urb, GFP_KERNEL); - if (err) + if (err) { + usb_unanchor_urb(urb); + usb_free_urb(urb); goto err_out; + } usb_free_urb(urb); } return 0; @@ -881,10 +884,8 @@ static struct urb *_rtl_usb_tx_urb_setup(struct ieee80211_hw *hw, WARN_ON(NULL == skb); _urb = usb_alloc_urb(0, GFP_ATOMIC); - if (!_urb) { - kfree_skb(skb); + if (!_urb) return NULL; - } _rtl_install_trx_info(rtlusb, skb, ep_num); usb_fill_bulk_urb(_urb, rtlusb->udev, usb_sndbulkpipe(rtlusb->udev, ep_num), skb->data, skb->len, _rtl_tx_complete, skb); @@ -898,7 +899,6 @@ static void _rtl_usb_transmit(struct ieee80211_hw *hw, struct sk_buff *skb, struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw)); u32 ep_num; struct urb *_urb = NULL; - struct sk_buff *_skb = NULL; WARN_ON(NULL == rtlusb->usb_tx_aggregate_hdl); if (unlikely(IS_USB_STOP(rtlusb))) { @@ -907,8 +907,7 @@ static void _rtl_usb_transmit(struct ieee80211_hw *hw, struct sk_buff *skb, return; } ep_num = rtlusb->ep_map.ep_mapping[qnum]; - _skb = skb; - _urb = _rtl_usb_tx_urb_setup(hw, _skb, ep_num); + _urb = _rtl_usb_tx_urb_setup(hw, skb, ep_num); if (unlikely(!_urb)) { pr_err("Can't allocate urb. 
Drop skb!\n"); kfree_skb(skb); diff --git a/drivers/net/wireless/realtek/rtw88/coex.c b/drivers/net/wireless/realtek/rtw88/coex.c index 3e95ad198912..853ac1c2ed73 100644 --- a/drivers/net/wireless/realtek/rtw88/coex.c +++ b/drivers/net/wireless/realtek/rtw88/coex.c @@ -1923,7 +1923,8 @@ static void rtw_coex_run_coex(struct rtw_dev *rtwdev, u8 reason) if (coex_stat->wl_under_ips) return; - if (coex->freeze && !coex_stat->bt_setup_link) + if (coex->freeze && coex_dm->reason == COEX_RSN_BTINFO && + !coex_stat->bt_setup_link) return; coex_stat->cnt_wl[COEX_CNT_WL_COEXRUN]++; diff --git a/drivers/net/wireless/realtek/rtw88/fw.c b/drivers/net/wireless/realtek/rtw88/fw.c index 35dbdb3c4f1e..8efaee7571f3 100644 --- a/drivers/net/wireless/realtek/rtw88/fw.c +++ b/drivers/net/wireless/realtek/rtw88/fw.c @@ -340,7 +340,7 @@ void rtw_fw_send_ra_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si) SET_RA_INFO_INIT_RA_LVL(h2c_pkt, si->init_ra_lv); SET_RA_INFO_SGI_EN(h2c_pkt, si->sgi_enable); SET_RA_INFO_BW_MODE(h2c_pkt, si->bw_mode); - SET_RA_INFO_LDPC(h2c_pkt, si->ldpc_en); + SET_RA_INFO_LDPC(h2c_pkt, !!si->ldpc_en); SET_RA_INFO_NO_UPDATE(h2c_pkt, no_update); SET_RA_INFO_VHT_EN(h2c_pkt, si->vht_enable); SET_RA_INFO_DIS_PT(h2c_pkt, disable_pt); diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c index 88e2252bf8a2..15c7a6fc37b9 100644 --- a/drivers/net/wireless/realtek/rtw88/main.c +++ b/drivers/net/wireless/realtek/rtw88/main.c @@ -553,8 +553,6 @@ void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si) stbc_en = VHT_STBC_EN; if (sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC) ldpc_en = VHT_LDPC_EN; - if (sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80) - is_support_sgi = true; } else if (sta->ht_cap.ht_supported) { ra_mask |= (sta->ht_cap.mcs.rx_mask[1] << 20) | (sta->ht_cap.mcs.rx_mask[0] << 12); @@ -562,9 +560,6 @@ void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si) stbc_en = HT_STBC_EN; if (sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING) ldpc_en = HT_LDPC_EN; - if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20 || - sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) - is_support_sgi = true; } if (efuse->hw_cap.nss == 1) @@ -606,12 +601,18 @@ void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si) switch (sta->bandwidth) { case IEEE80211_STA_RX_BW_80: bw_mode = RTW_CHANNEL_WIDTH_80; + is_support_sgi = sta->vht_cap.vht_supported && + (sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80); break; case IEEE80211_STA_RX_BW_40: bw_mode = RTW_CHANNEL_WIDTH_40; + is_support_sgi = sta->ht_cap.ht_supported && + (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40); break; default: bw_mode = RTW_CHANNEL_WIDTH_20; + is_support_sgi = sta->ht_cap.ht_supported && + (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20); break; } diff --git a/drivers/net/wireless/realtek/rtw88/pci.c b/drivers/net/wireless/realtek/rtw88/pci.c index 77a2bdee50fa..4a43c4fa716d 100644 --- a/drivers/net/wireless/realtek/rtw88/pci.c +++ b/drivers/net/wireless/realtek/rtw88/pci.c @@ -974,6 +974,7 @@ static int rtw_pci_io_mapping(struct rtw_dev *rtwdev, len = pci_resource_len(pdev, bar_id); rtwpci->mmap = pci_iomap(pdev, bar_id, len); if (!rtwpci->mmap) { + pci_release_regions(pdev); rtw_err(rtwdev, "failed to map pci memory\n"); return -ENOMEM; } diff --git a/drivers/net/wireless/ti/wl1251/event.c b/drivers/net/wireless/ti/wl1251/event.c index 850864dbafa1..e6d426edab56 100644 --- a/drivers/net/wireless/ti/wl1251/event.c +++ b/drivers/net/wireless/ti/wl1251/event.c @@ -70,7 
+70,7 @@ static int wl1251_event_ps_report(struct wl1251 *wl, break; } - return 0; + return ret; } static void wl1251_event_mbox_dump(struct event_mailbox *mbox) diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index 482c6c8b0fb7..88280057e032 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -63,6 +63,8 @@ module_param_named(max_queues, xennet_max_queues, uint, 0644); MODULE_PARM_DESC(max_queues, "Maximum number of queues per virtual interface"); +#define XENNET_TIMEOUT (5 * HZ) + static const struct ethtool_ops xennet_ethtool_ops; struct netfront_cb { @@ -1334,12 +1336,15 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev) netif_carrier_off(netdev); - xenbus_switch_state(dev, XenbusStateInitialising); - wait_event(module_wq, - xenbus_read_driver_state(dev->otherend) != - XenbusStateClosed && - xenbus_read_driver_state(dev->otherend) != - XenbusStateUnknown); + do { + xenbus_switch_state(dev, XenbusStateInitialising); + err = wait_event_timeout(module_wq, + xenbus_read_driver_state(dev->otherend) != + XenbusStateClosed && + xenbus_read_driver_state(dev->otherend) != + XenbusStateUnknown, XENNET_TIMEOUT); + } while (!err); + return netdev; exit: @@ -2139,28 +2144,43 @@ static const struct attribute_group xennet_dev_group = { }; #endif /* CONFIG_SYSFS */ +static void xennet_bus_close(struct xenbus_device *dev) +{ + int ret; + + if (xenbus_read_driver_state(dev->otherend) == XenbusStateClosed) + return; + do { + xenbus_switch_state(dev, XenbusStateClosing); + ret = wait_event_timeout(module_wq, + xenbus_read_driver_state(dev->otherend) == + XenbusStateClosing || + xenbus_read_driver_state(dev->otherend) == + XenbusStateClosed || + xenbus_read_driver_state(dev->otherend) == + XenbusStateUnknown, + XENNET_TIMEOUT); + } while (!ret); + + if (xenbus_read_driver_state(dev->otherend) == XenbusStateClosed) + return; + + do { + xenbus_switch_state(dev, XenbusStateClosed); + ret = wait_event_timeout(module_wq, + xenbus_read_driver_state(dev->otherend) == + XenbusStateClosed || + xenbus_read_driver_state(dev->otherend) == + XenbusStateUnknown, + XENNET_TIMEOUT); + } while (!ret); +} + static int xennet_remove(struct xenbus_device *dev) { struct netfront_info *info = dev_get_drvdata(&dev->dev); - dev_dbg(&dev->dev, "%s\n", dev->nodename); - - if (xenbus_read_driver_state(dev->otherend) != XenbusStateClosed) { - xenbus_switch_state(dev, XenbusStateClosing); - wait_event(module_wq, - xenbus_read_driver_state(dev->otherend) == - XenbusStateClosing || - xenbus_read_driver_state(dev->otherend) == - XenbusStateUnknown); - - xenbus_switch_state(dev, XenbusStateClosed); - wait_event(module_wq, - xenbus_read_driver_state(dev->otherend) == - XenbusStateClosed || - xenbus_read_driver_state(dev->otherend) == - XenbusStateUnknown); - } - + xennet_bus_close(dev); xennet_disconnect_backend(info); if (info->netdev->reg_state == NETREG_REGISTERED) diff --git a/drivers/nfc/s3fwrn5/core.c b/drivers/nfc/s3fwrn5/core.c index 91d4d5b28a7d..ba6c486d6465 100644 --- a/drivers/nfc/s3fwrn5/core.c +++ b/drivers/nfc/s3fwrn5/core.c @@ -198,6 +198,7 @@ int s3fwrn5_recv_frame(struct nci_dev *ndev, struct sk_buff *skb, case S3FWRN5_MODE_FW: return s3fwrn5_fw_recv_frame(ndev, skb); default: + kfree_skb(skb); return -ENODEV; } } diff --git a/drivers/nfc/st95hf/core.c b/drivers/nfc/st95hf/core.c index 9642971e89ce..457854765983 100644 --- a/drivers/nfc/st95hf/core.c +++ b/drivers/nfc/st95hf/core.c @@ -966,7 +966,7 @@ static int st95hf_in_send_cmd(struct nfc_digital_dev 
*ddev, rc = down_killable(&stcontext->exchange_lock); if (rc) { WARN(1, "Semaphore is not found up in st95hf_in_send_cmd\n"); - return rc; + goto free_skb_resp; } rc = st95hf_spi_send(&stcontext->spicontext, skb->data, diff --git a/drivers/ntb/core.c b/drivers/ntb/core.c index 2581ab724c34..f8f75a504a58 100644 --- a/drivers/ntb/core.c +++ b/drivers/ntb/core.c @@ -214,10 +214,8 @@ int ntb_default_port_number(struct ntb_dev *ntb) case NTB_TOPO_B2B_DSD: return NTB_PORT_SEC_DSD; default: - break; + return 0; } - - return -EINVAL; } EXPORT_SYMBOL(ntb_default_port_number); @@ -240,10 +238,8 @@ int ntb_default_peer_port_number(struct ntb_dev *ntb, int pidx) case NTB_TOPO_B2B_DSD: return NTB_PORT_PRI_USD; default: - break; + return 0; } - - return -EINVAL; } EXPORT_SYMBOL(ntb_default_peer_port_number); @@ -315,4 +311,3 @@ static void __exit ntb_driver_exit(void) bus_unregister(&ntb_bus); } module_exit(ntb_driver_exit); - diff --git a/drivers/ntb/test/ntb_perf.c b/drivers/ntb/test/ntb_perf.c index e9b7c2dfc730..5ce4766a6c9e 100644 --- a/drivers/ntb/test/ntb_perf.c +++ b/drivers/ntb/test/ntb_perf.c @@ -158,6 +158,8 @@ struct perf_peer { /* NTB connection setup service */ struct work_struct service; unsigned long sts; + + struct completion init_comp; }; #define to_peer_service(__work) \ container_of(__work, struct perf_peer, service) @@ -546,6 +548,7 @@ static int perf_setup_outbuf(struct perf_peer *peer) /* Initialization is finally done */ set_bit(PERF_STS_DONE, &peer->sts); + complete_all(&peer->init_comp); return 0; } @@ -556,7 +559,7 @@ static void perf_free_inbuf(struct perf_peer *peer) return; (void)ntb_mw_clear_trans(peer->perf->ntb, peer->pidx, peer->gidx); - dma_free_coherent(&peer->perf->ntb->dev, peer->inbuf_size, + dma_free_coherent(&peer->perf->ntb->pdev->dev, peer->inbuf_size, peer->inbuf, peer->inbuf_xlat); peer->inbuf = NULL; } @@ -585,8 +588,9 @@ static int perf_setup_inbuf(struct perf_peer *peer) perf_free_inbuf(peer); - peer->inbuf = dma_alloc_coherent(&perf->ntb->dev, peer->inbuf_size, - &peer->inbuf_xlat, GFP_KERNEL); + peer->inbuf = dma_alloc_coherent(&perf->ntb->pdev->dev, + peer->inbuf_size, &peer->inbuf_xlat, + GFP_KERNEL); if (!peer->inbuf) { dev_err(&perf->ntb->dev, "Failed to alloc inbuf of %pa\n", &peer->inbuf_size); @@ -636,6 +640,7 @@ static void perf_service_work(struct work_struct *work) perf_setup_outbuf(peer); if (test_and_clear_bit(PERF_CMD_CLEAR, &peer->sts)) { + init_completion(&peer->init_comp); clear_bit(PERF_STS_DONE, &peer->sts); if (test_bit(0, &peer->perf->busy_flag) && peer == peer->perf->test_peer) { @@ -652,7 +657,7 @@ static int perf_init_service(struct perf_ctx *perf) { u64 mask; - if (ntb_peer_mw_count(perf->ntb) < perf->pcnt + 1) { + if (ntb_peer_mw_count(perf->ntb) < perf->pcnt) { dev_err(&perf->ntb->dev, "Not enough memory windows\n"); return -EINVAL; } @@ -1051,8 +1056,9 @@ static int perf_submit_test(struct perf_peer *peer) struct perf_thread *pthr; int tidx, ret; - if (!test_bit(PERF_STS_DONE, &peer->sts)) - return -ENOLINK; + ret = wait_for_completion_interruptible(&peer->init_comp); + if (ret < 0) + return ret; if (test_and_set_bit_lock(0, &perf->busy_flag)) return -EBUSY; @@ -1418,10 +1424,21 @@ static int perf_init_peers(struct perf_ctx *perf) peer->gidx = pidx; } INIT_WORK(&peer->service, perf_service_work); + init_completion(&peer->init_comp); } if (perf->gidx == -1) perf->gidx = pidx; + /* + * Hardware with only two ports may not have unique port + * numbers. In this case, the gidxs should all be zero. 
+ */ + if (perf->pcnt == 1 && ntb_port_number(perf->ntb) == 0 && + ntb_peer_port_number(perf->ntb, 0) == 0) { + perf->gidx = 0; + perf->peers[0].gidx = 0; + } + for (pidx = 0; pidx < perf->pcnt; pidx++) { ret = perf_setup_peer_mw(&perf->peers[pidx]); if (ret) @@ -1517,4 +1534,3 @@ static void __exit perf_exit(void) destroy_workqueue(perf_wq); } module_exit(perf_exit); - diff --git a/drivers/ntb/test/ntb_pingpong.c b/drivers/ntb/test/ntb_pingpong.c index 65865e460ab8..18d00eec7b02 100644 --- a/drivers/ntb/test/ntb_pingpong.c +++ b/drivers/ntb/test/ntb_pingpong.c @@ -121,15 +121,14 @@ static int pp_find_next_peer(struct pp_ctx *pp) link = ntb_link_is_up(pp->ntb, NULL, NULL); /* Find next available peer */ - if (link & pp->nmask) { + if (link & pp->nmask) pidx = __ffs64(link & pp->nmask); - out_db = BIT_ULL(pidx + 1); - } else if (link & pp->pmask) { + else if (link & pp->pmask) pidx = __ffs64(link & pp->pmask); - out_db = BIT_ULL(pidx); - } else { + else return -ENODEV; - } + + out_db = BIT_ULL(ntb_peer_port_number(pp->ntb, pidx)); spin_lock(&pp->lock); pp->out_pidx = pidx; @@ -303,7 +302,7 @@ static void pp_init_flds(struct pp_ctx *pp) break; } - pp->in_db = BIT_ULL(pidx); + pp->in_db = BIT_ULL(lport); pp->pmask = GENMASK_ULL(pidx, 0) >> 1; pp->nmask = GENMASK_ULL(pcnt - 1, pidx); @@ -435,4 +434,3 @@ static void __exit pp_exit(void) debugfs_remove_recursive(pp_dbgfs_topdir); } module_exit(pp_exit); - diff --git a/drivers/ntb/test/ntb_tool.c b/drivers/ntb/test/ntb_tool.c index d592c0ffbd19..311d6ab8d016 100644 --- a/drivers/ntb/test/ntb_tool.c +++ b/drivers/ntb/test/ntb_tool.c @@ -504,7 +504,7 @@ static ssize_t tool_peer_link_read(struct file *filep, char __user *ubuf, buf[1] = '\n'; buf[2] = '\0'; - return simple_read_from_buffer(ubuf, size, offp, buf, 3); + return simple_read_from_buffer(ubuf, size, offp, buf, 2); } static TOOL_FOPS_RDWR(tool_peer_link_fops, @@ -590,7 +590,7 @@ static int tool_setup_mw(struct tool_ctx *tc, int pidx, int widx, inmw->size = min_t(resource_size_t, req_size, size); inmw->size = round_up(inmw->size, addr_align); inmw->size = round_up(inmw->size, size_align); - inmw->mm_base = dma_alloc_coherent(&tc->ntb->dev, inmw->size, + inmw->mm_base = dma_alloc_coherent(&tc->ntb->pdev->dev, inmw->size, &inmw->dma_base, GFP_KERNEL); if (!inmw->mm_base) return -ENOMEM; @@ -612,7 +612,7 @@ static int tool_setup_mw(struct tool_ctx *tc, int pidx, int widx, return 0; err_free_dma: - dma_free_coherent(&tc->ntb->dev, inmw->size, inmw->mm_base, + dma_free_coherent(&tc->ntb->pdev->dev, inmw->size, inmw->mm_base, inmw->dma_base); inmw->mm_base = NULL; inmw->dma_base = 0; @@ -629,7 +629,7 @@ static void tool_free_mw(struct tool_ctx *tc, int pidx, int widx) if (inmw->mm_base != NULL) { ntb_mw_clear_trans(tc->ntb, pidx, widx); - dma_free_coherent(&tc->ntb->dev, inmw->size, + dma_free_coherent(&tc->ntb->pdev->dev, inmw->size, inmw->mm_base, inmw->dma_base); } @@ -1690,4 +1690,3 @@ static void __exit tool_exit(void) debugfs_remove_recursive(tool_dbgfs_topdir); } module_exit(tool_exit); - diff --git a/drivers/nvdimm/security.c b/drivers/nvdimm/security.c index 89b85970912d..35d265014e1e 100644 --- a/drivers/nvdimm/security.c +++ b/drivers/nvdimm/security.c @@ -450,14 +450,19 @@ void __nvdimm_security_overwrite_query(struct nvdimm *nvdimm) else dev_dbg(&nvdimm->dev, "overwrite completed\n"); - if (nvdimm->sec.overwrite_state) - sysfs_notify_dirent(nvdimm->sec.overwrite_state); + /* + * Mark the overwrite work done and update dimm security flags, + * then send a sysfs event notification to 
wake up userspace + * poll threads to pick up the changed state. + */ nvdimm->sec.overwrite_tmo = 0; clear_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags); clear_bit(NDD_WORK_PENDING, &nvdimm->flags); - put_device(&nvdimm->dev); nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER); - nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_MASTER); + nvdimm->sec.ext_flags = nvdimm_security_flags(nvdimm, NVDIMM_MASTER); + if (nvdimm->sec.overwrite_state) + sysfs_notify_dirent(nvdimm->sec.overwrite_state); + put_device(&nvdimm->dev); } void nvdimm_security_overwrite_query(struct work_struct *work) diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index f0e0af3aa714..2d2673d360ff 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -1032,6 +1032,19 @@ void nvme_stop_keep_alive(struct nvme_ctrl *ctrl) } EXPORT_SYMBOL_GPL(nvme_stop_keep_alive); +/* + * In NVMe 1.0 the CNS field was just a binary controller or namespace + * flag, thus sending any new CNS opcodes has a big chance of not working. + * Qemu unfortunately had that bug after reporting a 1.1 version compliance + * (but not for any later version). + */ +static bool nvme_ctrl_limited_cns(struct nvme_ctrl *ctrl) +{ + if (ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS) + return ctrl->vs < NVME_VS(1, 2, 0); + return ctrl->vs < NVME_VS(1, 1, 0); +} + static int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id) { struct nvme_command c = { }; @@ -1061,6 +1074,9 @@ static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid, int pos; int len; + if (ctrl->quirks & NVME_QUIRK_NO_NS_DESC_LIST) + return 0; + c.identify.opcode = nvme_admin_identify; c.identify.nsid = cpu_to_le32(nsid); c.identify.cns = NVME_ID_CNS_NS_DESC_LIST; @@ -1074,12 +1090,6 @@ static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid, if (status) { dev_warn(ctrl->device, "Identify Descriptors failed (%d)\n", status); - /* - * Don't treat an error as fatal, as we potentially already - * have a NGUID or EUI-64.
- */ - if (status > 0 && !(status & NVME_SC_DNR)) - status = 0; goto free_data; } @@ -3740,8 +3750,7 @@ static void nvme_scan_work(struct work_struct *work) mutex_lock(&ctrl->scan_lock); nn = le32_to_cpu(id->nn); - if (ctrl->vs >= NVME_VS(1, 1, 0) && - !(ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)) { + if (!nvme_ctrl_limited_cns(ctrl)) { if (!nvme_scan_ns_list(ctrl, nn)) goto out_free_id; } @@ -4003,7 +4012,7 @@ static void nvme_free_ctrl(struct device *dev) container_of(dev, struct nvme_ctrl, ctrl_device); struct nvme_subsystem *subsys = ctrl->subsys; - if (subsys && ctrl->instance != subsys->instance) + if (!subsys || ctrl->instance != subsys->instance) ida_simple_remove(&nvme_instance_ida, ctrl->instance); kfree(ctrl->effects); @@ -4139,7 +4148,7 @@ void nvme_unfreeze(struct nvme_ctrl *ctrl) } EXPORT_SYMBOL_GPL(nvme_unfreeze); -void nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout) +int nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout) { struct nvme_ns *ns; @@ -4150,6 +4159,7 @@ void nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout) break; } up_read(&ctrl->namespaces_rwsem); + return timeout; } EXPORT_SYMBOL_GPL(nvme_wait_freeze_timeout); diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c index 74b8818ac9a1..3bb71f177dfd 100644 --- a/drivers/nvme/host/fabrics.c +++ b/drivers/nvme/host/fabrics.c @@ -565,10 +565,14 @@ bool __nvmf_check_ready(struct nvme_ctrl *ctrl, struct request *rq, struct nvme_request *req = nvme_req(rq); /* - * If we are in some state of setup or teardown only allow - * internally generated commands. + * Currently we have a problem sending passthru commands + * on the admin_q if the controller is not LIVE because we can't + * make sure that they are going out after the admin connect, + * controller enable and/or other commands in the initialization + * sequence. Until the controller is LIVE, fail with + * BLK_STS_RESOURCE so that they will be rescheduled. */ - if (!blk_rq_is_passthrough(rq) || (req->flags & NVME_REQ_USERCMD)) + if (rq->q == ctrl->admin_q && (req->flags & NVME_REQ_USERCMD)) return false; /* @@ -576,9 +580,8 @@ bool __nvmf_check_ready(struct nvme_ctrl *ctrl, struct request *rq, * which is require to set the queue live in the appropinquate states.
*/ switch (ctrl->state) { - case NVME_CTRL_NEW: case NVME_CTRL_CONNECTING: - if (nvme_is_fabrics(req->cmd) && + if (blk_rq_is_passthrough(rq) && nvme_is_fabrics(req->cmd) && req->cmd->fabrics.fctype == nvme_fabrics_type_connect) return true; break; diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c index 83ac88924f25..dae050d1f814 100644 --- a/drivers/nvme/host/fc.c +++ b/drivers/nvme/host/fc.c @@ -1740,7 +1740,7 @@ __nvme_fc_init_request(struct nvme_fc_ctrl *ctrl, if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.cmddma)) { dev_err(ctrl->dev, "FCP Op failed - cmdiu dma mapping failed.\n"); - ret = EFAULT; + ret = -EFAULT; goto out_on_error; } @@ -1750,7 +1750,7 @@ __nvme_fc_init_request(struct nvme_fc_ctrl *ctrl, if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.rspdma)) { dev_err(ctrl->dev, "FCP Op failed - rspiu dma mapping failed.\n"); - ret = EFAULT; + ret = -EFAULT; } atomic_set(&op->state, FCPOP_STATE_IDLE); @@ -1820,6 +1820,7 @@ nvme_fc_term_aen_ops(struct nvme_fc_ctrl *ctrl) struct nvme_fc_fcp_op *aen_op; int i; + cancel_work_sync(&ctrl->ctrl.async_event_work); aen_op = ctrl->aen_ops; for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) { if (!aen_op->fcp_req.private) diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c index 56caddeabb5e..0a458f788088 100644 --- a/drivers/nvme/host/multipath.c +++ b/drivers/nvme/host/multipath.c @@ -3,6 +3,7 @@ * Copyright (c) 2017-2018 Christoph Hellwig. */ +#include #include #include #include "nvme.h" @@ -248,6 +249,17 @@ static struct nvme_ns *nvme_round_robin_path(struct nvme_ns_head *head, fallback = ns; } + /* + * The loop above skips the current path for round-robin semantics. + * Fall back to the current path if either: + * - no other optimized path found and current is optimized, + * - no other usable path found and current is usable. 
+ */ + if (!nvme_path_is_disabled(old) && + (old->ana_state == NVME_ANA_OPTIMIZED || + (!fallback && old->ana_state == NVME_ANA_NONOPTIMIZED))) + return old; + if (!fallback) return NULL; found = fallback; @@ -268,10 +280,13 @@ inline struct nvme_ns *nvme_find_path(struct nvme_ns_head *head) struct nvme_ns *ns; ns = srcu_dereference(head->current_path[node], &head->srcu); - if (READ_ONCE(head->subsys->iopolicy) == NVME_IOPOLICY_RR && ns) - ns = nvme_round_robin_path(head, node, ns); - if (unlikely(!ns || !nvme_path_is_optimized(ns))) - ns = __nvme_find_path(head, node); + if (unlikely(!ns)) + return __nvme_find_path(head, node); + + if (READ_ONCE(head->subsys->iopolicy) == NVME_IOPOLICY_RR) + return nvme_round_robin_path(head, node, ns); + if (unlikely(!nvme_path_is_optimized(ns))) + return __nvme_find_path(head, node); return ns; } @@ -413,15 +428,14 @@ static void nvme_mpath_set_live(struct nvme_ns *ns) { struct nvme_ns_head *head = ns->head; - lockdep_assert_held(&ns->head->lock); - if (!head->disk) return; - if (!(head->disk->flags & GENHD_FL_UP)) + if (!test_and_set_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) device_add_disk(&head->subsys->dev, head->disk, nvme_ns_id_attr_groups); + mutex_lock(&head->lock); if (nvme_path_is_optimized(ns)) { int node, srcu_idx; @@ -430,9 +444,10 @@ static void nvme_mpath_set_live(struct nvme_ns *ns) __nvme_find_path(head, node); srcu_read_unlock(&head->srcu, srcu_idx); } + mutex_unlock(&head->lock); - synchronize_srcu(&ns->head->srcu); - kblockd_schedule_work(&ns->head->requeue_work); + synchronize_srcu(&head->srcu); + kblockd_schedule_work(&head->requeue_work); } static int nvme_parse_ana_log(struct nvme_ctrl *ctrl, void *data, @@ -483,14 +498,12 @@ static inline bool nvme_state_is_live(enum nvme_ana_state state) static void nvme_update_ns_ana_state(struct nvme_ana_group_desc *desc, struct nvme_ns *ns) { - mutex_lock(&ns->head->lock); ns->ana_grpid = le32_to_cpu(desc->grpid); ns->ana_state = desc->state; clear_bit(NVME_NS_ANA_PENDING, &ns->flags); if (nvme_state_is_live(ns->ana_state)) nvme_mpath_set_live(ns); - mutex_unlock(&ns->head->lock); } static int nvme_update_ana_state(struct nvme_ctrl *ctrl, @@ -640,31 +653,45 @@ static ssize_t ana_state_show(struct device *dev, struct device_attribute *attr, } DEVICE_ATTR_RO(ana_state); -static int nvme_set_ns_ana_state(struct nvme_ctrl *ctrl, +static int nvme_lookup_ana_group_desc(struct nvme_ctrl *ctrl, struct nvme_ana_group_desc *desc, void *data) { - struct nvme_ns *ns = data; + struct nvme_ana_group_desc *dst = data; - if (ns->ana_grpid == le32_to_cpu(desc->grpid)) { - nvme_update_ns_ana_state(desc, ns); - return -ENXIO; /* just break out of the loop */ - } + if (desc->grpid != dst->grpid) + return 0; - return 0; + *dst = *desc; + return -ENXIO; /* just break out of the loop */ } void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id) { if (nvme_ctrl_use_ana(ns->ctrl)) { + struct nvme_ana_group_desc desc = { + .grpid = id->anagrpid, + .state = 0, + }; + mutex_lock(&ns->ctrl->ana_lock); ns->ana_grpid = le32_to_cpu(id->anagrpid); - nvme_parse_ana_log(ns->ctrl, ns, nvme_set_ns_ana_state); + nvme_parse_ana_log(ns->ctrl, &desc, nvme_lookup_ana_group_desc); mutex_unlock(&ns->ctrl->ana_lock); + if (desc.state) { + /* found the group desc: update */ + nvme_update_ns_ana_state(&desc, ns); + } } else { - mutex_lock(&ns->head->lock); ns->ana_state = NVME_ANA_OPTIMIZED; nvme_mpath_set_live(ns); - mutex_unlock(&ns->head->lock); + } + + if (bdi_cap_stable_pages_required(ns->queue->backing_dev_info)) { + 
struct gendisk *disk = ns->head->disk; + + if (disk) + disk->queue->backing_dev_info->capabilities |= + BDI_CAP_STABLE_WRITES; } } @@ -679,6 +706,14 @@ void nvme_mpath_remove_disk(struct nvme_ns_head *head) kblockd_schedule_work(&head->requeue_work); flush_work(&head->requeue_work); blk_cleanup_queue(head->disk->queue); + if (!test_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) { + /* + * if device_add_disk wasn't called, prevent + * disk release to put a bogus reference on the + * request queue + */ + head->disk->queue = NULL; + } put_disk(head->disk); } diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index 22e8401352c2..2bd9f7c3084f 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -115,6 +115,13 @@ enum nvme_quirks { * Prevent tag overlap between queues */ NVME_QUIRK_SHARED_TAGS = (1 << 13), + + /* + * The controller doesn't handle the Identify Namespace + * Identification Descriptor list subcommand despite claiming + * NVMe 1.3 compliance. + */ + NVME_QUIRK_NO_NS_DESC_LIST = (1 << 15), }; /* @@ -345,6 +352,8 @@ struct nvme_ns_head { spinlock_t requeue_lock; struct work_struct requeue_work; struct mutex lock; + unsigned long flags; +#define NVME_NSHEAD_DISK_LIVE 0 struct nvme_ns __rcu *current_path[]; #endif }; @@ -476,7 +485,7 @@ void nvme_kill_queues(struct nvme_ctrl *ctrl); void nvme_sync_queues(struct nvme_ctrl *ctrl); void nvme_unfreeze(struct nvme_ctrl *ctrl); void nvme_wait_freeze(struct nvme_ctrl *ctrl); -void nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout); +int nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout); void nvme_start_freeze(struct nvme_ctrl *ctrl); #define NVME_QID_ANY -1 diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index cd64ddb129e5..a91433bdf5de 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -128,6 +128,9 @@ struct nvme_dev { dma_addr_t host_mem_descs_dma; struct nvme_host_mem_buf_desc *host_mem_descs; void **host_mem_desc_bufs; + unsigned int nr_allocated_queues; + unsigned int nr_write_queues; + unsigned int nr_poll_queues; }; static int io_queue_depth_set(const char *val, const struct kernel_param *kp) @@ -210,25 +213,14 @@ struct nvme_iod { struct scatterlist *sg; }; -static unsigned int max_io_queues(void) +static inline unsigned int nvme_dbbuf_size(struct nvme_dev *dev) { - return num_possible_cpus() + write_queues + poll_queues; -} - -static unsigned int max_queue_count(void) -{ - /* IO queues + admin queue */ - return 1 + max_io_queues(); -} - -static inline unsigned int nvme_dbbuf_size(u32 stride) -{ - return (max_queue_count() * 8 * stride); + return dev->nr_allocated_queues * 8 * dev->db_stride; } static int nvme_dbbuf_dma_alloc(struct nvme_dev *dev) { - unsigned int mem_size = nvme_dbbuf_size(dev->db_stride); + unsigned int mem_size = nvme_dbbuf_size(dev); if (dev->dbbuf_dbs) return 0; @@ -253,7 +245,7 @@ static int nvme_dbbuf_dma_alloc(struct nvme_dev *dev) static void nvme_dbbuf_dma_free(struct nvme_dev *dev) { - unsigned int mem_size = nvme_dbbuf_size(dev->db_stride); + unsigned int mem_size = nvme_dbbuf_size(dev); if (dev->dbbuf_dbs) { dma_free_coherent(dev->dev, mem_size, @@ -1282,8 +1274,8 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved) dev_warn_ratelimited(dev->ctrl.device, "I/O %d QID %d timeout, disable controller\n", req->tag, nvmeq->qid); - nvme_dev_disable(dev, true); nvme_req(req)->flags |= NVME_REQ_CANCELLED; + nvme_dev_disable(dev, true); return BLK_EH_DONE; case NVME_CTRL_RESETTING: return 
BLK_EH_RESET_TIMER; @@ -1300,10 +1292,10 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved) dev_warn(dev->ctrl.device, "I/O %d QID %d timeout, reset controller\n", req->tag, nvmeq->qid); + nvme_req(req)->flags |= NVME_REQ_CANCELLED; nvme_dev_disable(dev, false); nvme_reset_ctrl(&dev->ctrl); - nvme_req(req)->flags |= NVME_REQ_CANCELLED; return BLK_EH_DONE; } @@ -2030,7 +2022,7 @@ static int nvme_setup_host_mem(struct nvme_dev *dev) static void nvme_calc_irq_sets(struct irq_affinity *affd, unsigned int nrirqs) { struct nvme_dev *dev = affd->priv; - unsigned int nr_read_queues; + unsigned int nr_read_queues, nr_write_queues = dev->nr_write_queues; /* * If there is no interupt available for queues, ensure that @@ -2046,12 +2038,12 @@ static void nvme_calc_irq_sets(struct irq_affinity *affd, unsigned int nrirqs) if (!nrirqs) { nrirqs = 1; nr_read_queues = 0; - } else if (nrirqs == 1 || !write_queues) { + } else if (nrirqs == 1 || !nr_write_queues) { nr_read_queues = 0; - } else if (write_queues >= nrirqs) { + } else if (nr_write_queues >= nrirqs) { nr_read_queues = 1; } else { - nr_read_queues = nrirqs - write_queues; + nr_read_queues = nrirqs - nr_write_queues; } dev->io_queues[HCTX_TYPE_DEFAULT] = nrirqs - nr_read_queues; @@ -2075,7 +2067,7 @@ static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues) * Poll queues don't need interrupts, but we need at least one IO * queue left over for non-polled IO. */ - this_p_queues = poll_queues; + this_p_queues = dev->nr_poll_queues; if (this_p_queues >= nr_io_queues) { this_p_queues = nr_io_queues - 1; irq_queues = 1; @@ -2105,14 +2097,25 @@ static void nvme_disable_io_queues(struct nvme_dev *dev) __nvme_disable_io_queues(dev, nvme_admin_delete_cq); } +static unsigned int nvme_max_io_queues(struct nvme_dev *dev) +{ + return num_possible_cpus() + dev->nr_write_queues + dev->nr_poll_queues; +} + static int nvme_setup_io_queues(struct nvme_dev *dev) { struct nvme_queue *adminq = &dev->queues[0]; struct pci_dev *pdev = to_pci_dev(dev->dev); - int result, nr_io_queues; + unsigned int nr_io_queues; unsigned long size; + int result; - nr_io_queues = max_io_queues(); + /* + * Sample the module parameters once at reset time so that we have + * stable values to work with. + */ + dev->nr_write_queues = write_queues; + dev->nr_poll_queues = poll_queues; /* * If tags are shared with admin queue (Apple bug), then @@ -2120,6 +2123,9 @@ static int nvme_setup_io_queues(struct nvme_dev *dev) */ if (dev->ctrl.quirks & NVME_QUIRK_SHARED_TAGS) nr_io_queues = 1; + else + nr_io_queues = min(nvme_max_io_queues(dev), + dev->nr_allocated_queues - 1); result = nvme_set_queue_count(&dev->ctrl, &nr_io_queues); if (result < 0) @@ -2794,8 +2800,11 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (!dev) return -ENOMEM; - dev->queues = kcalloc_node(max_queue_count(), sizeof(struct nvme_queue), - GFP_KERNEL, node); + dev->nr_write_queues = write_queues; + dev->nr_poll_queues = poll_queues; + dev->nr_allocated_queues = nvme_max_io_queues(dev) + 1; + dev->queues = kcalloc_node(dev->nr_allocated_queues, + sizeof(struct nvme_queue), GFP_KERNEL, node); if (!dev->queues) goto free; @@ -2962,9 +2971,15 @@ static int nvme_suspend(struct device *dev) * the PCI bus layer to put it into D3 in order to take the PCIe link * down, so as to allow the platform to achieve its minimum low-power * state (which may not be possible if the link is up). 
+ * + * If a host memory buffer is enabled, shut down the device as the NVMe + * specification allows the device to access the host memory buffer in + * host DRAM from all power states, but hosts will fail access to DRAM + * during S3. */ if (pm_suspend_via_firmware() || !ctrl->npss || !pcie_aspm_enabled(pdev) || + ndev->nr_host_mem_descs || (ndev->ctrl.quirks & NVME_QUIRK_SIMPLE_SUSPEND)) return nvme_disable_prepare_reset(ndev, true); @@ -3102,6 +3117,8 @@ static const struct pci_device_id nvme_id_table[] = { { PCI_VDEVICE(INTEL, 0x5845), /* Qemu emulated controller */ .driver_data = NVME_QUIRK_IDENTIFY_CNS | NVME_QUIRK_DISABLE_WRITE_ZEROES, }, + { PCI_DEVICE(0x126f, 0x2263), /* Silicon Motion unidentified */ + .driver_data = NVME_QUIRK_NO_NS_DESC_LIST, }, { PCI_DEVICE(0x1bb1, 0x0100), /* Seagate Nytro Flash Storage */ .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, }, { PCI_DEVICE(0x1c58, 0x0003), /* HGST adapter */ @@ -3125,6 +3142,8 @@ static const struct pci_device_id nvme_id_table[] = { { PCI_DEVICE(0x1cc1, 0x8201), /* ADATA SX8200PNP 512GB */ .driver_data = NVME_QUIRK_NO_DEEPEST_PS | NVME_QUIRK_IGNORE_DEV_SUBNQN, }, + { PCI_DEVICE(0x1c5c, 0x1504), /* SK Hynix PC400 */ + .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) }, { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001), .driver_data = NVME_QUIRK_SINGLE_VECTOR }, diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index 73e8475ddc8a..f9444272f861 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c @@ -110,6 +110,7 @@ struct nvme_rdma_ctrl { struct sockaddr_storage src_addr; struct nvme_ctrl ctrl; + struct mutex teardown_lock; bool use_inline_data; u32 io_queues[HCTX_MAX_TYPES]; }; @@ -451,7 +452,7 @@ static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue) * Spread I/O queues completion vectors according their queue index. * Admin queues can always go on completion vector 0. */ - comp_vector = idx == 0 ? idx : idx - 1; + comp_vector = (idx == 0 ? idx : idx - 1) % ibdev->num_comp_vectors; /* Polling queues need direct cq polling context */ if (nvme_rdma_poll_queue(queue)) @@ -768,6 +769,7 @@ static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl, blk_mq_free_tag_set(ctrl->ctrl.admin_tagset); } if (ctrl->async_event_sqe.data) { + cancel_work_sync(&ctrl->ctrl.async_event_work); nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe, sizeof(struct nvme_command), DMA_TO_DEVICE); ctrl->async_event_sqe.data = NULL; @@ -890,17 +892,33 @@ static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new) ret = PTR_ERR(ctrl->ctrl.connect_q); goto out_free_tag_set; } - } else { - blk_mq_update_nr_hw_queues(&ctrl->tag_set, - ctrl->ctrl.queue_count - 1); } ret = nvme_rdma_start_io_queues(ctrl); if (ret) goto out_cleanup_connect_q; + if (!new) { + nvme_start_queues(&ctrl->ctrl); + if (!nvme_wait_freeze_timeout(&ctrl->ctrl, NVME_IO_TIMEOUT)) { + /* + * If we timed out waiting for freeze we are likely to + * be stuck. Fail the controller initialization just + * to be safe. 
+ */ + ret = -ENODEV; + goto out_wait_freeze_timed_out; + } + blk_mq_update_nr_hw_queues(ctrl->ctrl.tagset, + ctrl->ctrl.queue_count - 1); + nvme_unfreeze(&ctrl->ctrl); + } + return 0; +out_wait_freeze_timed_out: + nvme_stop_queues(&ctrl->ctrl); + nvme_rdma_stop_io_queues(ctrl); out_cleanup_connect_q: if (new) blk_cleanup_queue(ctrl->ctrl.connect_q); @@ -915,6 +933,7 @@ out_free_io_queues: static void nvme_rdma_teardown_admin_queue(struct nvme_rdma_ctrl *ctrl, bool remove) { + mutex_lock(&ctrl->teardown_lock); blk_mq_quiesce_queue(ctrl->ctrl.admin_q); nvme_rdma_stop_queue(&ctrl->queues[0]); if (ctrl->ctrl.admin_tagset) { @@ -925,12 +944,15 @@ static void nvme_rdma_teardown_admin_queue(struct nvme_rdma_ctrl *ctrl, if (remove) blk_mq_unquiesce_queue(ctrl->ctrl.admin_q); nvme_rdma_destroy_admin_queue(ctrl, remove); + mutex_unlock(&ctrl->teardown_lock); } static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl, bool remove) { + mutex_lock(&ctrl->teardown_lock); if (ctrl->ctrl.queue_count > 1) { + nvme_start_freeze(&ctrl->ctrl); nvme_stop_queues(&ctrl->ctrl); nvme_rdma_stop_io_queues(ctrl); if (ctrl->ctrl.tagset) { @@ -942,6 +964,7 @@ static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl, nvme_start_queues(&ctrl->ctrl); nvme_rdma_destroy_io_queues(ctrl, remove); } + mutex_unlock(&ctrl->teardown_lock); } static void nvme_rdma_free_ctrl(struct nvme_ctrl *nctrl) @@ -1090,6 +1113,7 @@ static void nvme_rdma_error_recovery(struct nvme_rdma_ctrl *ctrl) if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING)) return; + dev_warn(ctrl->ctrl.device, "starting error recovery\n"); queue_work(nvme_reset_wq, &ctrl->err_work); } @@ -1693,6 +1717,22 @@ static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id, return 0; } +static void nvme_rdma_complete_timed_out(struct request *rq) +{ + struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq); + struct nvme_rdma_queue *queue = req->queue; + struct nvme_rdma_ctrl *ctrl = queue->ctrl; + + /* fence other contexts that may complete the command */ + mutex_lock(&ctrl->teardown_lock); + nvme_rdma_stop_queue(queue); + if (!blk_mq_request_completed(rq)) { + nvme_req(rq)->status = NVME_SC_HOST_ABORTED_CMD; + blk_mq_complete_request(rq); + } + mutex_unlock(&ctrl->teardown_lock); +} + static enum blk_eh_timer_return nvme_rdma_timeout(struct request *rq, bool reserved) { @@ -1703,29 +1743,29 @@ nvme_rdma_timeout(struct request *rq, bool reserved) dev_warn(ctrl->ctrl.device, "I/O %d QID %d timeout\n", rq->tag, nvme_rdma_queue_idx(queue)); - /* - * Restart the timer if a controller reset is already scheduled. Any - * timed out commands would be handled before entering the connecting - * state. - */ - if (ctrl->ctrl.state == NVME_CTRL_RESETTING) - return BLK_EH_RESET_TIMER; - if (ctrl->ctrl.state != NVME_CTRL_LIVE) { /* - * Teardown immediately if controller times out while starting - * or we are already started error recovery. all outstanding - * requests are completed on shutdown, so we return BLK_EH_DONE. + * If we are resetting, connecting or deleting we should + * complete immediately because we may block controller + * teardown or setup sequence + * - ctrl disable/shutdown fabrics requests + * - connect requests + * - initialization admin requests + * - I/O requests that entered after unquiescing and + * the controller stopped responding + * + * All other requests should be cancelled by the error + * recovery work, so it's fine that we fail it here. 
*/ - flush_work(&ctrl->err_work); - nvme_rdma_teardown_io_queues(ctrl, false); - nvme_rdma_teardown_admin_queue(ctrl, false); + nvme_rdma_complete_timed_out(rq); return BLK_EH_DONE; } - dev_warn(ctrl->ctrl.device, "starting error recovery\n"); + /* + * LIVE state should trigger the normal error recovery which will + * handle completing this request. + */ nvme_rdma_error_recovery(ctrl); - return BLK_EH_RESET_TIMER; } @@ -1982,6 +2022,7 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev, return ERR_PTR(-ENOMEM); ctrl->ctrl.opts = opts; INIT_LIST_HEAD(&ctrl->list); + mutex_init(&ctrl->teardown_lock); if (!(opts->mask & NVMF_OPT_TRSVCID)) { opts->trsvcid = diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c index 11e84ed4de36..c782005ee99f 100644 --- a/drivers/nvme/host/tcp.c +++ b/drivers/nvme/host/tcp.c @@ -110,6 +110,7 @@ struct nvme_tcp_ctrl { struct sockaddr_storage src_addr; struct nvme_ctrl ctrl; + struct mutex teardown_lock; struct work_struct err_work; struct delayed_work connect_work; struct nvme_tcp_request async_req; @@ -420,6 +421,7 @@ static void nvme_tcp_error_recovery(struct nvme_ctrl *ctrl) if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING)) return; + dev_warn(ctrl->device, "starting error recovery\n"); queue_work(nvme_reset_wq, &to_tcp_ctrl(ctrl)->err_work); } @@ -784,11 +786,11 @@ static void nvme_tcp_data_ready(struct sock *sk) { struct nvme_tcp_queue *queue; - read_lock(&sk->sk_callback_lock); + read_lock_bh(&sk->sk_callback_lock); queue = sk->sk_user_data; if (likely(queue && queue->rd_enabled)) queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work); - read_unlock(&sk->sk_callback_lock); + read_unlock_bh(&sk->sk_callback_lock); } static void nvme_tcp_write_space(struct sock *sk) @@ -1319,6 +1321,9 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, } } + /* Set 10 seconds timeout for icresp recvmsg */ + queue->sock->sk->sk_rcvtimeo = 10 * HZ; + queue->sock->sk->sk_allocation = GFP_ATOMIC; if (!qid) n = 0; @@ -1435,7 +1440,6 @@ static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid) if (!test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags)) return; - __nvme_tcp_stop_queue(queue); } @@ -1503,6 +1507,7 @@ static struct blk_mq_tag_set *nvme_tcp_alloc_tagset(struct nvme_ctrl *nctrl, static void nvme_tcp_free_admin_queue(struct nvme_ctrl *ctrl) { if (to_tcp_ctrl(ctrl)->async_req.pdu) { + cancel_work_sync(&ctrl->async_event_work); nvme_tcp_free_async_req(to_tcp_ctrl(ctrl)); to_tcp_ctrl(ctrl)->async_req.pdu = NULL; } @@ -1681,17 +1686,33 @@ static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new) ret = PTR_ERR(ctrl->connect_q); goto out_free_tag_set; } - } else { - blk_mq_update_nr_hw_queues(ctrl->tagset, - ctrl->queue_count - 1); } ret = nvme_tcp_start_io_queues(ctrl); if (ret) goto out_cleanup_connect_q; + if (!new) { + nvme_start_queues(ctrl); + if (!nvme_wait_freeze_timeout(ctrl, NVME_IO_TIMEOUT)) { + /* + * If we timed out waiting for freeze we are likely to + * be stuck. Fail the controller initialization just + * to be safe. 
+ */ + ret = -ENODEV; + goto out_wait_freeze_timed_out; + } + blk_mq_update_nr_hw_queues(ctrl->tagset, + ctrl->queue_count - 1); + nvme_unfreeze(ctrl); + } + return 0; +out_wait_freeze_timed_out: + nvme_stop_queues(ctrl); + nvme_tcp_stop_io_queues(ctrl); out_cleanup_connect_q: if (new) blk_cleanup_queue(ctrl->connect_q); @@ -1777,6 +1798,7 @@ out_free_queue: static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl, bool remove) { + mutex_lock(&to_tcp_ctrl(ctrl)->teardown_lock); blk_mq_quiesce_queue(ctrl->admin_q); nvme_tcp_stop_queue(ctrl, 0); if (ctrl->admin_tagset) { @@ -1787,13 +1809,17 @@ static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl, if (remove) blk_mq_unquiesce_queue(ctrl->admin_q); nvme_tcp_destroy_admin_queue(ctrl, remove); + mutex_unlock(&to_tcp_ctrl(ctrl)->teardown_lock); } static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl, bool remove) { + mutex_lock(&to_tcp_ctrl(ctrl)->teardown_lock); if (ctrl->queue_count <= 1) - return; + goto out; + blk_mq_quiesce_queue(ctrl->admin_q); + nvme_start_freeze(ctrl); nvme_stop_queues(ctrl); nvme_tcp_stop_io_queues(ctrl); if (ctrl->tagset) { @@ -1804,6 +1830,8 @@ static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl, if (remove) nvme_start_queues(ctrl); nvme_tcp_destroy_io_queues(ctrl, remove); +out: + mutex_unlock(&to_tcp_ctrl(ctrl)->teardown_lock); } static void nvme_tcp_reconnect_or_remove(struct nvme_ctrl *ctrl) @@ -2042,40 +2070,55 @@ static void nvme_tcp_submit_async_event(struct nvme_ctrl *arg) nvme_tcp_queue_request(&ctrl->async_req); } +static void nvme_tcp_complete_timed_out(struct request *rq) +{ + struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq); + struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl; + + /* fence other contexts that may complete the command */ + mutex_lock(&to_tcp_ctrl(ctrl)->teardown_lock); + nvme_tcp_stop_queue(ctrl, nvme_tcp_queue_id(req->queue)); + if (!blk_mq_request_completed(rq)) { + nvme_req(rq)->status = NVME_SC_HOST_ABORTED_CMD; + blk_mq_complete_request(rq); + } + mutex_unlock(&to_tcp_ctrl(ctrl)->teardown_lock); +} + static enum blk_eh_timer_return nvme_tcp_timeout(struct request *rq, bool reserved) { struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq); - struct nvme_tcp_ctrl *ctrl = req->queue->ctrl; + struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl; struct nvme_tcp_cmd_pdu *pdu = req->pdu; - /* - * Restart the timer if a controller reset is already scheduled. Any - * timed out commands would be handled before entering the connecting - * state. - */ - if (ctrl->ctrl.state == NVME_CTRL_RESETTING) - return BLK_EH_RESET_TIMER; - - dev_warn(ctrl->ctrl.device, + dev_warn(ctrl->device, "queue %d: timeout request %#x type %d\n", nvme_tcp_queue_id(req->queue), rq->tag, pdu->hdr.type); - if (ctrl->ctrl.state != NVME_CTRL_LIVE) { + if (ctrl->state != NVME_CTRL_LIVE) { /* - * Teardown immediately if controller times out while starting - * or we are already started error recovery. all outstanding - * requests are completed on shutdown, so we return BLK_EH_DONE. + * If we are resetting, connecting or deleting we should + * complete immediately because we may block controller + * teardown or setup sequence + * - ctrl disable/shutdown fabrics requests + * - connect requests + * - initialization admin requests + * - I/O requests that entered after unquiescing and + * the controller stopped responding + * + * All other requests should be cancelled by the error + * recovery work, so it's fine that we fail it here. 
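/*
 * The !new path above restarts the queues, waits a bounded time for the
 * outstanding freeze to complete, and fails controller setup with -ENODEV
 * if it never does instead of blocking forever. A standalone sketch of that
 * "wait for a condition, but only so long" shape; the helper below is an
 * illustrative polling loop, not the kernel's nvme_wait_freeze_timeout().
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* Poll *cond_met for up to timeout_ms; return true if it became true. */
static bool wait_condition_timeout(volatile bool *cond_met, int timeout_ms)
{
	struct timespec delay = { .tv_sec = 0, .tv_nsec = 10 * 1000 * 1000 };
	int waited = 0;

	while (!*cond_met && waited < timeout_ms) {
		nanosleep(&delay, NULL);
		waited += 10;
	}
	return *cond_met;
}

int main(void)
{
	volatile bool frozen = false;	/* never set: simulates a stuck freeze */

	if (!wait_condition_timeout(&frozen, 100)) {
		fprintf(stderr, "freeze timed out, failing setup: %d\n", -ENODEV);
		return 1;	/* caller would unwind: stop queues, clean up */
	}
	printf("frozen, continue setup\n");
	return 0;
}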
*/ - flush_work(&ctrl->err_work); - nvme_tcp_teardown_io_queues(&ctrl->ctrl, false); - nvme_tcp_teardown_admin_queue(&ctrl->ctrl, false); + nvme_tcp_complete_timed_out(rq); return BLK_EH_DONE; } - dev_warn(ctrl->ctrl.device, "starting error recovery\n"); - nvme_tcp_error_recovery(&ctrl->ctrl); - + /* + * LIVE state should trigger the normal error recovery which will + * handle completing this request. + */ + nvme_tcp_error_recovery(ctrl); return BLK_EH_RESET_TIMER; } @@ -2302,6 +2345,7 @@ static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev, nvme_tcp_reconnect_ctrl_work); INIT_WORK(&ctrl->err_work, nvme_tcp_error_recovery_work); INIT_WORK(&ctrl->ctrl.reset_work, nvme_reset_ctrl_work); + mutex_init(&ctrl->teardown_lock); if (!(opts->mask & NVMF_OPT_TRSVCID)) { opts->trsvcid = diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c index 57a4062cbb59..7d7176369edf 100644 --- a/drivers/nvme/target/core.c +++ b/drivers/nvme/target/core.c @@ -369,6 +369,9 @@ static void nvmet_keep_alive_timer(struct work_struct *work) static void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl) { + if (unlikely(ctrl->kato == 0)) + return; + pr_debug("ctrl %d start keep-alive timer for %d secs\n", ctrl->cntlid, ctrl->kato); @@ -378,6 +381,9 @@ static void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl) static void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl) { + if (unlikely(ctrl->kato == 0)) + return; + pr_debug("ctrl %d stop keep-alive\n", ctrl->cntlid); cancel_delayed_work_sync(&ctrl->ka_work); diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c index ce8d819f86cc..fc35f7ae67b0 100644 --- a/drivers/nvme/target/fc.c +++ b/drivers/nvme/target/fc.c @@ -1994,9 +1994,9 @@ nvmet_fc_fod_op_done(struct nvmet_fc_fcp_iod *fod) return; if (fcpreq->fcp_error || fcpreq->transferred_length != fcpreq->transfer_length) { - spin_lock(&fod->flock); + spin_lock_irqsave(&fod->flock, flags); fod->abort = true; - spin_unlock(&fod->flock); + spin_unlock_irqrestore(&fod->flock, flags); nvmet_req_complete(&fod->req, NVME_SC_INTERNAL); return; diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c index 22014e76d771..e31823f19a0f 100644 --- a/drivers/nvme/target/tcp.c +++ b/drivers/nvme/target/tcp.c @@ -150,6 +150,11 @@ static void nvmet_tcp_finish_cmd(struct nvmet_tcp_cmd *cmd); static inline u16 nvmet_tcp_cmd_tag(struct nvmet_tcp_queue *queue, struct nvmet_tcp_cmd *cmd) { + if (unlikely(!queue->nr_cmds)) { + /* We didn't allocate cmds yet, send 0xffff */ + return USHRT_MAX; + } + return cmd - queue->cmds; } @@ -847,7 +852,10 @@ static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue) struct nvme_tcp_data_pdu *data = &queue->pdu.data; struct nvmet_tcp_cmd *cmd; - cmd = &queue->cmds[data->ttag]; + if (likely(queue->nr_cmds)) + cmd = &queue->cmds[data->ttag]; + else + cmd = &queue->connect; if (le32_to_cpu(data->data_offset) != cmd->rbytes_done) { pr_err("ttag %u unexpected data offset %u (expected %u)\n", diff --git a/drivers/of/kobj.c b/drivers/of/kobj.c index c72eef988041..a32e60b024b8 100644 --- a/drivers/of/kobj.c +++ b/drivers/of/kobj.c @@ -134,8 +134,6 @@ int __of_attach_node_sysfs(struct device_node *np) if (!name) return -ENOMEM; - of_node_get(np); - rc = kobject_add(&np->kobj, parent, "%s", name); kfree(name); if (rc) @@ -144,6 +142,7 @@ int __of_attach_node_sysfs(struct device_node *np) for_each_property_of_node(np, pp) __of_add_property_sysfs(np, pp); + of_node_get(np); return 0; } diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c 
index c34a6df712ad..26ddb4cc675a 100644 --- a/drivers/of/of_mdio.c +++ b/drivers/of/of_mdio.c @@ -265,10 +265,15 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np) child, addr); if (of_mdiobus_child_is_phy(child)) { + /* -ENODEV is the return code that PHYLIB has + * standardized on to indicate that bus + * scanning should continue. + */ rc = of_mdiobus_register_phy(mdio, child, addr); - if (rc && rc != -ENODEV) + if (!rc) + break; + if (rc != -ENODEV) goto unregister; - break; } } } diff --git a/drivers/opp/core.c b/drivers/opp/core.c index 9ff0538ee83a..7b057c32e11b 100644 --- a/drivers/opp/core.c +++ b/drivers/opp/core.c @@ -843,10 +843,12 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq) /* Return early if nothing to do */ if (old_freq == freq) { - dev_dbg(dev, "%s: old/new frequencies (%lu Hz) are same, nothing to do\n", - __func__, freq); - ret = 0; - goto put_opp_table; + if (!opp_table->required_opp_tables && !opp_table->regulators) { + dev_dbg(dev, "%s: old/new frequencies (%lu Hz) are same, nothing to do\n", + __func__, freq); + ret = 0; + goto put_opp_table; + } } temp_freq = old_freq; diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c index de8e4e347249..e410033b6df0 100644 --- a/drivers/parisc/sba_iommu.c +++ b/drivers/parisc/sba_iommu.c @@ -1270,7 +1270,7 @@ sba_ioc_init_pluto(struct parisc_device *sba, struct ioc *ioc, int ioc_num) ** (one that doesn't overlap memory or LMMIO space) in the ** IBASE and IMASK registers. */ - ioc->ibase = READ_REG(ioc->ioc_hpa + IOC_IBASE); + ioc->ibase = READ_REG(ioc->ioc_hpa + IOC_IBASE) & ~0x1fffffULL; iova_space_size = ~(READ_REG(ioc->ioc_hpa + IOC_IMASK) & 0xFFFFFFFFUL) + 1; if ((ioc->ibase < 0xfed00000UL) && ((ioc->ibase + iova_space_size) > 0xfee00000UL)) { diff --git a/drivers/pci/access.c b/drivers/pci/access.c index 2fccb5762c76..0914ddeae17f 100644 --- a/drivers/pci/access.c +++ b/drivers/pci/access.c @@ -204,17 +204,13 @@ EXPORT_SYMBOL(pci_bus_set_ops); static DECLARE_WAIT_QUEUE_HEAD(pci_cfg_wait); static noinline void pci_wait_cfg(struct pci_dev *dev) + __must_hold(&pci_lock) { - DECLARE_WAITQUEUE(wait, current); - - __add_wait_queue(&pci_cfg_wait, &wait); do { - set_current_state(TASK_UNINTERRUPTIBLE); raw_spin_unlock_irq(&pci_lock); - schedule(); + wait_event(pci_cfg_wait, !dev->block_cfg_access); raw_spin_lock_irq(&pci_lock); } while (dev->block_cfg_access); - __remove_wait_queue(&pci_cfg_wait, &wait); } /* Returns 0 on success, negative values indicate error. 
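/*
 * The pci_wait_cfg() rewrite above replaces an open-coded waitqueue loop
 * with wait_event(pci_cfg_wait, !dev->block_cfg_access), dropping and
 * re-taking pci_lock around each wait. A userspace analogue of the same
 * "sleep until a flag clears" pattern using a condition variable; all names
 * here are illustrative, not the kernel's. Build with -pthread.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cfg_wait = PTHREAD_COND_INITIALIZER;
static bool block_cfg_access;		/* starts false: access allowed */

/* Block the caller until block_cfg_access is false. */
static void wait_cfg(void)
{
	pthread_mutex_lock(&lock);
	while (block_cfg_access)
		pthread_cond_wait(&cfg_wait, &lock);	/* drops lock while asleep */
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	wait_cfg();		/* returns immediately: nothing blocks access */
	printf("config access allowed\n");
	return 0;
}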
*/ diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c index 8e40b3e6da77..3cef835b375f 100644 --- a/drivers/pci/bus.c +++ b/drivers/pci/bus.c @@ -322,12 +322,8 @@ void pci_bus_add_device(struct pci_dev *dev) dev->match_driver = true; retval = device_attach(&dev->dev); - if (retval < 0 && retval != -EPROBE_DEFER) { + if (retval < 0 && retval != -EPROBE_DEFER) pci_warn(dev, "device attach failed (%d)\n", retval); - pci_proc_detach_device(dev); - pci_remove_sysfs_dev_files(dev); - return; - } pci_dev_assign_added(dev, true); } diff --git a/drivers/pci/controller/dwc/pci-meson.c b/drivers/pci/controller/dwc/pci-meson.c index b927a92e3463..8c9f88704874 100644 --- a/drivers/pci/controller/dwc/pci-meson.c +++ b/drivers/pci/controller/dwc/pci-meson.c @@ -301,11 +301,11 @@ static void meson_pcie_init_dw(struct meson_pcie *mp) meson_cfg_writel(mp, val, PCIE_CFG0); val = meson_elb_readl(mp, PCIE_PORT_LINK_CTRL_OFF); - val &= ~LINK_CAPABLE_MASK; + val &= ~(LINK_CAPABLE_MASK | FAST_LINK_MODE); meson_elb_writel(mp, val, PCIE_PORT_LINK_CTRL_OFF); val = meson_elb_readl(mp, PCIE_PORT_LINK_CTRL_OFF); - val |= LINK_CAPABLE_X1 | FAST_LINK_MODE; + val |= LINK_CAPABLE_X1; meson_elb_writel(mp, val, PCIE_PORT_LINK_CTRL_OFF); val = meson_elb_readl(mp, PCIE_GEN2_CTRL_OFF); diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c index b796c63a81ca..8027cf55193c 100644 --- a/drivers/pci/controller/dwc/pcie-designware-host.c +++ b/drivers/pci/controller/dwc/pcie-designware-host.c @@ -263,6 +263,8 @@ int dw_pcie_allocate_domains(struct pcie_port *pp) return -ENOMEM; } + irq_domain_update_bus_token(pp->irq_domain, DOMAIN_BUS_NEXUS); + pp->msi_domain = pci_msi_create_irq_domain(fwnode, &dw_pcie_msi_domain_info, pp->irq_domain); diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c index 70ded8900e28..374db5d59cf8 100644 --- a/drivers/pci/controller/dwc/pcie-qcom.c +++ b/drivers/pci/controller/dwc/pcie-qcom.c @@ -45,7 +45,13 @@ #define PCIE_CAP_CPL_TIMEOUT_DISABLE 0x10 #define PCIE20_PARF_PHY_CTRL 0x40 +#define PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK GENMASK(20, 16) +#define PHY_CTRL_PHY_TX0_TERM_OFFSET(x) ((x) << 16) + #define PCIE20_PARF_PHY_REFCLK 0x4C +#define PHY_REFCLK_SSP_EN BIT(16) +#define PHY_REFCLK_USE_PAD BIT(12) + #define PCIE20_PARF_DBI_BASE_ADDR 0x168 #define PCIE20_PARF_SLV_ADDR_SPACE_SIZE 0x16C #define PCIE20_PARF_MHI_CLOCK_RESET_CTRL 0x174 @@ -76,6 +82,18 @@ #define DBI_RO_WR_EN 1 #define PERST_DELAY_US 1000 +/* PARF registers */ +#define PCIE20_PARF_PCS_DEEMPH 0x34 +#define PCS_DEEMPH_TX_DEEMPH_GEN1(x) ((x) << 16) +#define PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(x) ((x) << 8) +#define PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(x) ((x) << 0) + +#define PCIE20_PARF_PCS_SWING 0x38 +#define PCS_SWING_TX_SWING_FULL(x) ((x) << 8) +#define PCS_SWING_TX_SWING_LOW(x) ((x) << 0) + +#define PCIE20_PARF_CONFIG_BITS 0x50 +#define PHY_RX0_EQ(x) ((x) << 24) #define PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE 0x358 #define SLV_ADDR_SPACE_SZ 0x10000000 @@ -85,11 +103,14 @@ struct qcom_pcie_resources_2_1_0 { struct clk *iface_clk; struct clk *core_clk; struct clk *phy_clk; + struct clk *aux_clk; + struct clk *ref_clk; struct reset_control *pci_reset; struct reset_control *axi_reset; struct reset_control *ahb_reset; struct reset_control *por_reset; struct reset_control *phy_reset; + struct reset_control *ext_reset; struct regulator_bulk_data supplies[QCOM_PCIE_2_1_0_MAX_SUPPLY]; }; @@ -235,6 +256,14 @@ static int qcom_pcie_get_resources_2_1_0(struct qcom_pcie 
*pcie) if (IS_ERR(res->phy_clk)) return PTR_ERR(res->phy_clk); + res->aux_clk = devm_clk_get_optional(dev, "aux"); + if (IS_ERR(res->aux_clk)) + return PTR_ERR(res->aux_clk); + + res->ref_clk = devm_clk_get_optional(dev, "ref"); + if (IS_ERR(res->ref_clk)) + return PTR_ERR(res->ref_clk); + res->pci_reset = devm_reset_control_get_exclusive(dev, "pci"); if (IS_ERR(res->pci_reset)) return PTR_ERR(res->pci_reset); @@ -251,6 +280,10 @@ static int qcom_pcie_get_resources_2_1_0(struct qcom_pcie *pcie) if (IS_ERR(res->por_reset)) return PTR_ERR(res->por_reset); + res->ext_reset = devm_reset_control_get_optional_exclusive(dev, "ext"); + if (IS_ERR(res->ext_reset)) + return PTR_ERR(res->ext_reset); + res->phy_reset = devm_reset_control_get_exclusive(dev, "phy"); return PTR_ERR_OR_ZERO(res->phy_reset); } @@ -259,14 +292,17 @@ static void qcom_pcie_deinit_2_1_0(struct qcom_pcie *pcie) { struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0; + clk_disable_unprepare(res->phy_clk); reset_control_assert(res->pci_reset); reset_control_assert(res->axi_reset); reset_control_assert(res->ahb_reset); reset_control_assert(res->por_reset); - reset_control_assert(res->pci_reset); + reset_control_assert(res->ext_reset); + reset_control_assert(res->phy_reset); clk_disable_unprepare(res->iface_clk); clk_disable_unprepare(res->core_clk); - clk_disable_unprepare(res->phy_clk); + clk_disable_unprepare(res->aux_clk); + clk_disable_unprepare(res->ref_clk); regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies); } @@ -275,6 +311,7 @@ static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie) struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0; struct dw_pcie *pci = pcie->pci; struct device *dev = pci->dev; + struct device_node *node = dev->of_node; u32 val; int ret; @@ -296,32 +333,64 @@ static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie) goto err_assert_ahb; } - ret = clk_prepare_enable(res->phy_clk); - if (ret) { - dev_err(dev, "cannot prepare/enable phy clock\n"); - goto err_clk_phy; - } - ret = clk_prepare_enable(res->core_clk); if (ret) { dev_err(dev, "cannot prepare/enable core clock\n"); goto err_clk_core; } + ret = clk_prepare_enable(res->aux_clk); + if (ret) { + dev_err(dev, "cannot prepare/enable aux clock\n"); + goto err_clk_aux; + } + + ret = clk_prepare_enable(res->ref_clk); + if (ret) { + dev_err(dev, "cannot prepare/enable ref clock\n"); + goto err_clk_ref; + } + ret = reset_control_deassert(res->ahb_reset); if (ret) { dev_err(dev, "cannot deassert ahb reset\n"); goto err_deassert_ahb; } + ret = reset_control_deassert(res->ext_reset); + if (ret) { + dev_err(dev, "cannot deassert ext reset\n"); + goto err_deassert_ahb; + } + /* enable PCIe clocks and resets */ val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL); val &= ~BIT(0); writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL); + if (of_device_is_compatible(node, "qcom,pcie-ipq8064")) { + writel(PCS_DEEMPH_TX_DEEMPH_GEN1(24) | + PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(24) | + PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(34), + pcie->parf + PCIE20_PARF_PCS_DEEMPH); + writel(PCS_SWING_TX_SWING_FULL(120) | + PCS_SWING_TX_SWING_LOW(120), + pcie->parf + PCIE20_PARF_PCS_SWING); + writel(PHY_RX0_EQ(4), pcie->parf + PCIE20_PARF_CONFIG_BITS); + } + + if (of_device_is_compatible(node, "qcom,pcie-ipq8064")) { + /* set TX termination offset */ + val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL); + val &= ~PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK; + val |= PHY_CTRL_PHY_TX0_TERM_OFFSET(7); + writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL); + } + /* enable external reference clock */ val = 
readl(pcie->parf + PCIE20_PARF_PHY_REFCLK); - val |= BIT(16); + val &= ~PHY_REFCLK_USE_PAD; + val |= PHY_REFCLK_SSP_EN; writel(val, pcie->parf + PCIE20_PARF_PHY_REFCLK); ret = reset_control_deassert(res->phy_reset); @@ -348,6 +417,12 @@ static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie) return ret; } + ret = clk_prepare_enable(res->phy_clk); + if (ret) { + dev_err(dev, "cannot prepare/enable phy clock\n"); + goto err_deassert_ahb; + } + /* wait for clock acquisition */ usleep_range(1000, 1500); @@ -361,10 +436,12 @@ static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie) return 0; err_deassert_ahb: + clk_disable_unprepare(res->ref_clk); +err_clk_ref: + clk_disable_unprepare(res->aux_clk); +err_clk_aux: clk_disable_unprepare(res->core_clk); err_clk_core: - clk_disable_unprepare(res->phy_clk); -err_clk_phy: clk_disable_unprepare(res->iface_clk); err_assert_ahb: regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies); diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c index 97245e076548..f2481e80e272 100644 --- a/drivers/pci/controller/pci-aardvark.c +++ b/drivers/pci/controller/pci-aardvark.c @@ -344,10 +344,6 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie) advk_pcie_wait_for_link(pcie); - reg = PCIE_CORE_LINK_L0S_ENTRY | - (1 << PCIE_CORE_LINK_WIDTH_SHIFT); - advk_writel(pcie, reg, PCIE_CORE_LINK_CTRL_STAT_REG); - reg = advk_readl(pcie, PCIE_CORE_CMD_STATUS_REG); reg |= PCIE_CORE_CMD_MEM_ACCESS_EN | PCIE_CORE_CMD_IO_ACCESS_EN | diff --git a/drivers/pci/controller/pci-tegra.c b/drivers/pci/controller/pci-tegra.c index ac93f5a0398e..b71e753419c2 100644 --- a/drivers/pci/controller/pci-tegra.c +++ b/drivers/pci/controller/pci-tegra.c @@ -181,13 +181,6 @@ #define AFI_PEXBIAS_CTRL_0 0x168 -#define RP_PRIV_XP_DL 0x00000494 -#define RP_PRIV_XP_DL_GEN2_UPD_FC_TSHOLD (0x1ff << 1) - -#define RP_RX_HDR_LIMIT 0x00000e00 -#define RP_RX_HDR_LIMIT_PW_MASK (0xff << 8) -#define RP_RX_HDR_LIMIT_PW (0x0e << 8) - #define RP_ECTL_2_R1 0x00000e84 #define RP_ECTL_2_R1_RX_CTLE_1C_MASK 0xffff @@ -323,7 +316,6 @@ struct tegra_pcie_soc { bool program_uphy; bool update_clamp_threshold; bool program_deskew_time; - bool raw_violation_fixup; bool update_fc_timer; bool has_cache_bars; struct { @@ -669,23 +661,6 @@ static void tegra_pcie_apply_sw_fixup(struct tegra_pcie_port *port) writel(value, port->base + RP_VEND_CTL0); } - /* Fixup for read after write violation. 
*/ - if (soc->raw_violation_fixup) { - value = readl(port->base + RP_RX_HDR_LIMIT); - value &= ~RP_RX_HDR_LIMIT_PW_MASK; - value |= RP_RX_HDR_LIMIT_PW; - writel(value, port->base + RP_RX_HDR_LIMIT); - - value = readl(port->base + RP_PRIV_XP_DL); - value |= RP_PRIV_XP_DL_GEN2_UPD_FC_TSHOLD; - writel(value, port->base + RP_PRIV_XP_DL); - - value = readl(port->base + RP_VEND_XP); - value &= ~RP_VEND_XP_UPDATE_FC_THRESHOLD_MASK; - value |= soc->update_fc_threshold; - writel(value, port->base + RP_VEND_XP); - } - if (soc->update_fc_timer) { value = readl(port->base + RP_VEND_XP); value &= ~RP_VEND_XP_UPDATE_FC_THRESHOLD_MASK; @@ -2511,7 +2486,6 @@ static const struct tegra_pcie_soc tegra20_pcie = { .program_uphy = true, .update_clamp_threshold = false, .program_deskew_time = false, - .raw_violation_fixup = false, .update_fc_timer = false, .has_cache_bars = true, .ectl.enable = false, @@ -2541,7 +2515,6 @@ static const struct tegra_pcie_soc tegra30_pcie = { .program_uphy = true, .update_clamp_threshold = false, .program_deskew_time = false, - .raw_violation_fixup = false, .update_fc_timer = false, .has_cache_bars = false, .ectl.enable = false, @@ -2554,8 +2527,6 @@ static const struct tegra_pcie_soc tegra124_pcie = { .pads_pll_ctl = PADS_PLL_CTL_TEGRA30, .tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN, .pads_refclk_cfg0 = 0x44ac44ac, - /* FC threshold is bit[25:18] */ - .update_fc_threshold = 0x03fc0000, .has_pex_clkreq_en = true, .has_pex_bias_ctrl = true, .has_intr_prsnt_sense = true, @@ -2565,7 +2536,6 @@ static const struct tegra_pcie_soc tegra124_pcie = { .program_uphy = true, .update_clamp_threshold = true, .program_deskew_time = false, - .raw_violation_fixup = true, .update_fc_timer = false, .has_cache_bars = false, .ectl.enable = false, @@ -2589,7 +2559,6 @@ static const struct tegra_pcie_soc tegra210_pcie = { .program_uphy = true, .update_clamp_threshold = true, .program_deskew_time = true, - .raw_violation_fixup = false, .update_fc_timer = true, .has_cache_bars = false, .ectl = { @@ -2631,7 +2600,6 @@ static const struct tegra_pcie_soc tegra186_pcie = { .program_uphy = false, .update_clamp_threshold = false, .program_deskew_time = false, - .raw_violation_fixup = false, .update_fc_timer = false, .has_cache_bars = false, .ectl.enable = false, diff --git a/drivers/pci/controller/pci-v3-semi.c b/drivers/pci/controller/pci-v3-semi.c index d219404bad92..9a86bb7448ac 100644 --- a/drivers/pci/controller/pci-v3-semi.c +++ b/drivers/pci/controller/pci-v3-semi.c @@ -743,7 +743,7 @@ static int v3_pci_probe(struct platform_device *pdev) int ret; LIST_HEAD(res); - host = pci_alloc_host_bridge(sizeof(*v3)); + host = devm_pci_alloc_host_bridge(dev, sizeof(*v3)); if (!host) return -ENOMEM; diff --git a/drivers/pci/controller/pcie-cadence-host.c b/drivers/pci/controller/pcie-cadence-host.c index 97e251090b4f..0dfc778f40a7 100644 --- a/drivers/pci/controller/pcie-cadence-host.c +++ b/drivers/pci/controller/pcie-cadence-host.c @@ -102,6 +102,7 @@ static int cdns_pcie_host_init_root_port(struct cdns_pcie_rc *rc) { struct cdns_pcie *pcie = &rc->pcie; u32 value, ctrl; + u32 id; /* * Set the root complex BAR configuration register: @@ -121,8 +122,12 @@ static int cdns_pcie_host_init_root_port(struct cdns_pcie_rc *rc) cdns_pcie_writel(pcie, CDNS_PCIE_LM_RC_BAR_CFG, value); /* Set root port configuration space */ - if (rc->vendor_id != 0xffff) - cdns_pcie_rp_writew(pcie, PCI_VENDOR_ID, rc->vendor_id); + if (rc->vendor_id != 0xffff) { + id = CDNS_PCIE_LM_ID_VENDOR(rc->vendor_id) | + 
CDNS_PCIE_LM_ID_SUBSYS(rc->vendor_id); + cdns_pcie_writel(pcie, CDNS_PCIE_LM_ID, id); + } + if (rc->device_id != 0xffff) cdns_pcie_rp_writew(pcie, PCI_DEVICE_ID, rc->device_id); diff --git a/drivers/pci/controller/pcie-rcar.c b/drivers/pci/controller/pcie-rcar.c index 1ad0b56f11b4..04114352d0e7 100644 --- a/drivers/pci/controller/pcie-rcar.c +++ b/drivers/pci/controller/pcie-rcar.c @@ -335,11 +335,12 @@ static struct pci_ops rcar_pcie_ops = { }; static void rcar_pcie_setup_window(int win, struct rcar_pcie *pcie, - struct resource *res) + struct resource_entry *window) { /* Setup PCIe address space mappings for each resource */ resource_size_t size; resource_size_t res_start; + struct resource *res = window->res; u32 mask; rcar_pci_write_reg(pcie, 0x00000000, PCIEPTCTLR(win)); @@ -353,9 +354,9 @@ static void rcar_pcie_setup_window(int win, struct rcar_pcie *pcie, rcar_pci_write_reg(pcie, mask << 7, PCIEPAMR(win)); if (res->flags & IORESOURCE_IO) - res_start = pci_pio_to_address(res->start); + res_start = pci_pio_to_address(res->start) - window->offset; else - res_start = res->start; + res_start = res->start - window->offset; rcar_pci_write_reg(pcie, upper_32_bits(res_start), PCIEPAUR(win)); rcar_pci_write_reg(pcie, lower_32_bits(res_start) & ~0x7F, @@ -384,7 +385,7 @@ static int rcar_pcie_setup(struct list_head *resource, struct rcar_pcie *pci) switch (resource_type(res)) { case IORESOURCE_IO: case IORESOURCE_MEM: - rcar_pcie_setup_window(i, pci, res); + rcar_pcie_setup_window(i, pci, win); i++; break; case IORESOURCE_BUS: diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c index a35d3f3996d7..9966dcf1d112 100644 --- a/drivers/pci/controller/vmd.c +++ b/drivers/pci/controller/vmd.c @@ -593,9 +593,11 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features) if (!membar2) return -ENOMEM; offset[0] = vmd->dev->resource[VMD_MEMBAR1].start - - readq(membar2 + MB2_SHADOW_OFFSET); + (readq(membar2 + MB2_SHADOW_OFFSET) & + PCI_BASE_ADDRESS_MEM_MASK); offset[1] = vmd->dev->resource[VMD_MEMBAR2].start - - readq(membar2 + MB2_SHADOW_OFFSET + 8); + (readq(membar2 + MB2_SHADOW_OFFSET + 8) & + PCI_BASE_ADDRESS_MEM_MASK); pci_iounmap(vmd->dev, membar2); } } @@ -678,9 +680,10 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features) vmd->irq_domain = pci_msi_create_irq_domain(fn, &vmd_msi_domain_info, x86_vector_domain); - irq_domain_free_fwnode(fn); - if (!vmd->irq_domain) + if (!vmd->irq_domain) { + irq_domain_free_fwnode(fn); return -ENODEV; + } pci_add_resource(&resources, &vmd->resources[0]); pci_add_resource_offset(&resources, &vmd->resources[1], offset[0]); @@ -691,6 +694,7 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features) if (!vmd->bus) { pci_free_resource_list(&resources); irq_domain_remove(vmd->irq_domain); + irq_domain_free_fwnode(fn); return -ENODEV; } @@ -805,6 +809,7 @@ static void vmd_cleanup_srcu(struct vmd_dev *vmd) static void vmd_remove(struct pci_dev *dev) { struct vmd_dev *vmd = pci_get_drvdata(dev); + struct fwnode_handle *fn = vmd->irq_domain->fwnode; sysfs_remove_link(&vmd->dev->dev.kobj, "domain"); pci_stop_root_bus(vmd->bus); @@ -813,6 +818,7 @@ static void vmd_remove(struct pci_dev *dev) vmd_teardown_dma_ops(vmd); vmd_detach_resources(vmd); irq_domain_remove(vmd->irq_domain); + irq_domain_free_fwnode(fn); } #ifdef CONFIG_PM_SLEEP @@ -854,6 +860,8 @@ static const struct pci_device_id vmd_ids[] = { {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_28C0), .driver_data = 
VMD_FEAT_HAS_MEMBAR_SHADOW | VMD_FEAT_HAS_BUS_RESTRICTIONS,}, + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_9A0B), + .driver_data = VMD_FEAT_HAS_BUS_RESTRICTIONS,}, {0,} }; MODULE_DEVICE_TABLE(pci, vmd_ids); diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c index b3869951c0eb..6e60b4b1bf53 100644 --- a/drivers/pci/hotplug/acpiphp_glue.c +++ b/drivers/pci/hotplug/acpiphp_glue.c @@ -122,13 +122,21 @@ static struct acpiphp_context *acpiphp_grab_context(struct acpi_device *adev) struct acpiphp_context *context; acpi_lock_hp_context(); + context = acpiphp_get_context(adev); - if (!context || context->func.parent->is_going_away) { - acpi_unlock_hp_context(); - return NULL; + if (!context) + goto unlock; + + if (context->func.parent->is_going_away) { + acpiphp_put_context(context); + context = NULL; + goto unlock; } + get_bridge(context->func.parent); acpiphp_put_context(context); + +unlock: acpi_unlock_hp_context(); return context; } diff --git a/drivers/pci/pci-bridge-emul.c b/drivers/pci/pci-bridge-emul.c index 5fd90105510d..d3b6b9a05618 100644 --- a/drivers/pci/pci-bridge-emul.c +++ b/drivers/pci/pci-bridge-emul.c @@ -195,8 +195,8 @@ static const struct pci_bridge_reg_behavior pcie_cap_regs_behavior[] = { * RO, the rest is reserved */ .w1c = GENMASK(19, 16), - .ro = GENMASK(20, 19), - .rsvd = GENMASK(31, 21), + .ro = GENMASK(21, 20), + .rsvd = GENMASK(31, 22), }, [PCI_EXP_LNKCAP / 4] = { @@ -236,7 +236,7 @@ static const struct pci_bridge_reg_behavior pcie_cap_regs_behavior[] = { PCI_EXP_SLTSTA_CC | PCI_EXP_SLTSTA_DLLSC) << 16, .ro = (PCI_EXP_SLTSTA_MRLSS | PCI_EXP_SLTSTA_PDS | PCI_EXP_SLTSTA_EIS) << 16, - .rsvd = GENMASK(15, 12) | (GENMASK(15, 9) << 16), + .rsvd = GENMASK(15, 13) | (GENMASK(15, 9) << 16), }, [PCI_EXP_RTCTL / 4] = { diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index c73e8095a849..b1b2c8ddbc92 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -802,7 +802,9 @@ static inline bool platform_pci_need_resume(struct pci_dev *dev) static inline bool platform_pci_bridge_d3(struct pci_dev *dev) { - return pci_platform_pm ? pci_platform_pm->bridge_d3(dev) : false; + if (pci_platform_pm && pci_platform_pm->bridge_d3) + return pci_platform_pm->bridge_d3(dev); + return false; } /** diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c index 5a1bbf2cb7e9..7624c71011c6 100644 --- a/drivers/pci/pcie/aspm.c +++ b/drivers/pci/pcie/aspm.c @@ -628,16 +628,6 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist) /* Setup initial capable state. Will be updated later */ link->aspm_capable = link->aspm_support; - /* - * If the downstream component has pci bridge function, don't - * do ASPM for now. 
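/*
 * The pci-bridge-emul changes above adjust which bit ranges are treated as
 * read-only versus reserved. A tiny standalone check of what GENMASK(h, l)
 * covers, using a 32-bit equivalent of the kernel macro for illustration:
 */
#include <stdint.h>
#include <stdio.h>

#define GENMASK(h, l) \
	((~UINT32_C(0) >> (31 - (h))) & (~UINT32_C(0) << (l)))

int main(void)
{
	printf("GENMASK(21, 20) = 0x%08x\n", GENMASK(21, 20)); /* 0x00300000 */
	printf("GENMASK(31, 22) = 0x%08x\n", GENMASK(31, 22)); /* 0xffc00000 */
	printf("GENMASK(15, 13) = 0x%08x\n", GENMASK(15, 13)); /* 0x0000e000 */
	return 0;
}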
- */ - list_for_each_entry(child, &linkbus->devices, bus_list) { - if (pci_pcie_type(child) == PCI_EXP_TYPE_PCI_BRIDGE) { - link->aspm_disable = ASPM_STATE_ALL; - break; - } - } /* Get and check endpoint acceptable latencies */ list_for_each_entry(child, &linkbus->devices, bus_list) { @@ -1167,6 +1157,7 @@ static int pcie_aspm_get_policy(char *buffer, const struct kernel_param *kp) cnt += sprintf(buffer + cnt, "[%s] ", policy_str[i]); else cnt += sprintf(buffer + cnt, "%s ", policy_str[i]); + cnt += sprintf(buffer + cnt, "\n"); return cnt; } diff --git a/drivers/pci/pcie/ptm.c b/drivers/pci/pcie/ptm.c index 9361f3aa26ab..357a454cafa0 100644 --- a/drivers/pci/pcie/ptm.c +++ b/drivers/pci/pcie/ptm.c @@ -39,10 +39,6 @@ void pci_ptm_init(struct pci_dev *dev) if (!pci_is_pcie(dev)) return; - pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_PTM); - if (!pos) - return; - /* * Enable PTM only on interior devices (root ports, switch ports, * etc.) on the assumption that it causes no link traffic until an @@ -52,6 +48,23 @@ void pci_ptm_init(struct pci_dev *dev) pci_pcie_type(dev) == PCI_EXP_TYPE_RC_END)) return; + /* + * Switch Downstream Ports are not permitted to have a PTM + * capability; their PTM behavior is controlled by the Upstream + * Port (PCIe r5.0, sec 7.9.16). + */ + ups = pci_upstream_bridge(dev); + if (pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM && + ups && ups->ptm_enabled) { + dev->ptm_granularity = ups->ptm_granularity; + dev->ptm_enabled = 1; + return; + } + + pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_PTM); + if (!pos) + return; + pci_read_config_dword(dev, pos + PCI_PTM_CAP, &cap); local_clock = (cap & PCI_PTM_GRANULARITY_MASK) >> 8; @@ -61,7 +74,6 @@ void pci_ptm_init(struct pci_dev *dev) * the spec recommendation (PCIe r3.1, sec 7.32.3), select the * furthest upstream Time Source as the PTM Root. */ - ups = pci_upstream_bridge(dev); if (ups && ups->ptm_enabled) { ctrl = PCI_PTM_CTRL_ENABLE; if (ups->ptm_granularity == 0) diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index d3033873395d..8fa13486f2f1 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -867,9 +867,10 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge) goto free; err = device_register(&bridge->dev); - if (err) + if (err) { put_device(&bridge->dev); - + goto free; + } bus->bridge = get_device(&bridge->dev); device_enable_async_suspend(bus->bridge); pci_set_bus_of_node(bus); @@ -1777,7 +1778,7 @@ int pci_setup_device(struct pci_dev *dev) /* Device class may be changed after fixup */ class = dev->class >> 8; - if (dev->non_compliant_bars) { + if (dev->non_compliant_bars && !dev->mmio_always_on) { pci_read_config_word(dev, PCI_COMMAND, &cmd); if (cmd & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) { pci_info(dev, "device has non-compliant BARs; disabling IO/MEM decoding\n"); @@ -1889,13 +1890,33 @@ static void pci_configure_mps(struct pci_dev *dev) struct pci_dev *bridge = pci_upstream_bridge(dev); int mps, mpss, p_mps, rc; - if (!pci_is_pcie(dev) || !bridge || !pci_is_pcie(bridge)) + if (!pci_is_pcie(dev)) return; /* MPS and MRRS fields are of type 'RsvdP' for VFs, short-circuit out */ if (dev->is_virtfn) return; + /* + * For Root Complex Integrated Endpoints, program the maximum + * supported value unless limited by the PCIE_BUS_PEER2PEER case. 
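/*
 * The new Root Complex Integrated Endpoint branch above derives the Max
 * Payload Size from the device's MPSS capability field (MPS = 128 bytes
 * << MPSS) unless the global PCIE_BUS_PEER2PEER policy pins it at 128.
 * A standalone illustration of that calculation; the helper name is ours,
 * not the kernel's:
 */
#include <stdbool.h>
#include <stdio.h>

static int rciep_mps(unsigned int pcie_mpss, bool peer2peer)
{
	return peer2peer ? 128 : 128 << pcie_mpss;
}

int main(void)
{
	/* MPSS encodings 0..5 correspond to 128..4096 byte payloads */
	for (unsigned int mpss = 0; mpss <= 5; mpss++)
		printf("mpss=%u -> mps=%d\n", mpss, rciep_mps(mpss, false));
	printf("peer2peer -> mps=%d\n", rciep_mps(3, true));
	return 0;
}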
+ */ + if (pci_pcie_type(dev) == PCI_EXP_TYPE_RC_END) { + if (pcie_bus_config == PCIE_BUS_PEER2PEER) + mps = 128; + else + mps = 128 << dev->pcie_mpss; + rc = pcie_set_mps(dev, mps); + if (rc) { + pci_warn(dev, "can't set Max Payload Size to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n", + mps); + } + return; + } + + if (!bridge || !pci_is_pcie(bridge)) + return; + mps = pcie_get_mps(dev); p_mps = pcie_get_mps(bridge); diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 029add7cf75d..654f8a1e7d61 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -2334,6 +2334,19 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f1, quirk_disable_aspm_l0s); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f4, quirk_disable_aspm_l0s); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1508, quirk_disable_aspm_l0s); +static void quirk_disable_aspm_l0s_l1(struct pci_dev *dev) +{ + pci_info(dev, "Disabling ASPM L0s/L1\n"); + pci_disable_link_state(dev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1); +} + +/* + * ASM1083/1085 PCIe-PCI bridge devices cause AER timeout errors on the + * upstream PCIe root port when ASPM is enabled. At least L0s mode is affected; + * disable both L0s and L1 for now to be safe. + */ +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ASMEDIA, 0x1080, quirk_disable_aspm_l0s_l1); + /* * Some Pericom PCIe-to-PCI bridges in reverse mode need the PCIe Retrain * Link bit cleared after starting the link retrain process to allow this @@ -4442,6 +4455,8 @@ static int pci_quirk_amd_sb_acs(struct pci_dev *dev, u16 acs_flags) if (ACPI_FAILURE(status)) return -ENODEV; + acpi_put_table(header); + /* Filter out flags not applicable to multifunction */ acs_flags &= (PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC | PCI_ACS_DT); @@ -4692,6 +4707,20 @@ static int pci_quirk_mf_endpoint_acs(struct pci_dev *dev, u16 acs_flags) PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_DT); } +static int pci_quirk_rciep_acs(struct pci_dev *dev, u16 acs_flags) +{ + /* + * Intel RCiEP's are required to allow p2p only on translated + * addresses. Refer to Intel VT-d specification, r3.1, sec 3.16, + * "Root-Complex Peer to Peer Considerations". 
+ */ + if (pci_pcie_type(dev) != PCI_EXP_TYPE_RC_END) + return -ENOTTY; + + return pci_acs_ctrl_enabled(acs_flags, + PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF); +} + static int pci_quirk_brcm_acs(struct pci_dev *dev, u16 acs_flags) { /* @@ -4774,6 +4803,7 @@ static const struct pci_dev_acs_enabled { /* I219 */ { PCI_VENDOR_ID_INTEL, 0x15b7, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_INTEL, 0x15b8, pci_quirk_mf_endpoint_acs }, + { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_rciep_acs }, /* QCOM QDF2xxx root ports */ { PCI_VENDOR_ID_QCOM, 0x0400, pci_quirk_qcom_rp_acs }, { PCI_VENDOR_ID_QCOM, 0x0401, pci_quirk_qcom_rp_acs }, @@ -5139,13 +5169,25 @@ static void quirk_intel_qat_vf_cap(struct pci_dev *pdev) } DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x443, quirk_intel_qat_vf_cap); -/* FLR may cause some 82579 devices to hang */ -static void quirk_intel_no_flr(struct pci_dev *dev) +/* + * FLR may cause the following to devices to hang: + * + * AMD Starship/Matisse HD Audio Controller 0x1487 + * AMD Starship USB 3.0 Host Controller 0x148c + * AMD Matisse USB 3.0 Host Controller 0x149c + * Intel 82579LM Gigabit Ethernet Controller 0x1502 + * Intel 82579V Gigabit Ethernet Controller 0x1503 + * + */ +static void quirk_no_flr(struct pci_dev *dev) { dev->dev_flags |= PCI_DEV_FLAGS_NO_FLR_RESET; } -DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1502, quirk_intel_no_flr); -DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1503, quirk_intel_no_flr); +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x1487, quirk_no_flr); +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x148c, quirk_no_flr); +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x149c, quirk_no_flr); +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1502, quirk_no_flr); +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1503, quirk_no_flr); static void quirk_no_ext_tags(struct pci_dev *pdev) { @@ -5175,7 +5217,8 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0422, quirk_no_ext_tags); */ static void quirk_amd_harvest_no_ats(struct pci_dev *pdev) { - if (pdev->device == 0x7340 && pdev->revision != 0xc5) + if ((pdev->device == 0x7312 && pdev->revision != 0x00) || + (pdev->device == 0x7340 && pdev->revision != 0xc5)) return; pci_info(pdev, "disabling ATS\n"); @@ -5186,6 +5229,8 @@ static void quirk_amd_harvest_no_ats(struct pci_dev *pdev) DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x98e4, quirk_amd_harvest_no_ats); /* AMD Iceland dGPU */ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6900, quirk_amd_harvest_no_ats); +/* AMD Navi10 dGPU */ +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7312, quirk_amd_harvest_no_ats); /* AMD Navi14 dGPU */ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7340, quirk_amd_harvest_no_ats); #endif /* CONFIG_PCI_ATS */ @@ -5560,6 +5605,19 @@ static void pci_fixup_no_d0_pme(struct pci_dev *dev) } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ASMEDIA, 0x2142, pci_fixup_no_d0_pme); +/* + * Device [12d8:0x400e] and [12d8:0x400f] + * These devices advertise PME# support in all power states but don't + * reliably assert it. 
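/*
 * pci_quirk_rciep_acs() above reports Source Validation, Request Redirect,
 * Completion Redirect and Upstream Forwarding as effectively enabled for
 * Intel RCiEPs. The underlying test is simply "are all requested ACS bits
 * in the provided set"; a standalone sketch of that check, with the flag
 * values and helper name below as illustrative stand-ins:
 */
#include <stdio.h>

#define DEMO_ACS_SV 0x0001
#define DEMO_ACS_RR 0x0004
#define DEMO_ACS_CR 0x0008
#define DEMO_ACS_UF 0x0010
#define DEMO_ACS_DT 0x0040

static int acs_flags_satisfied(unsigned short requested, unsigned short provided)
{
	return (requested & provided) == requested;
}

int main(void)
{
	unsigned short provided = DEMO_ACS_SV | DEMO_ACS_RR |
				  DEMO_ACS_CR | DEMO_ACS_UF;

	printf("RR|UF satisfied: %d\n",
	       acs_flags_satisfied(DEMO_ACS_RR | DEMO_ACS_UF, provided));
	printf("DT    satisfied: %d\n",
	       acs_flags_satisfied(DEMO_ACS_DT, provided));
	return 0;
}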
+ */ +static void pci_fixup_no_pme(struct pci_dev *dev) +{ + pci_info(dev, "PME# is unreliable, disabling it\n"); + dev->pme_support = 0; +} +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_PERICOM, 0x400e, pci_fixup_no_pme); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_PERICOM, 0x400f, pci_fixup_no_pme); + static void apex_pci_fixup_class(struct pci_dev *pdev) { pdev->class = (PCI_CLASS_SYSTEM_OTHER << 8) | pdev->class; diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c index d8ca40a97693..d21fa04fa44d 100644 --- a/drivers/pci/setup-res.c +++ b/drivers/pci/setup-res.c @@ -439,10 +439,11 @@ int pci_resize_resource(struct pci_dev *dev, int resno, int size) res->end = res->start + pci_rebar_size_to_bytes(size) - 1; /* Check if the new config works by trying to assign everything. */ - ret = pci_reassign_bridge_resources(dev->bus->self, res->flags); - if (ret) - goto error_resize; - + if (dev->bus->self) { + ret = pci_reassign_bridge_resources(dev->bus->self, res->flags); + if (ret) + goto error_resize; + } return 0; error_resize: diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c index ae4aa0e1f2f4..1f087746b7bb 100644 --- a/drivers/pci/slot.c +++ b/drivers/pci/slot.c @@ -304,13 +304,16 @@ placeholder: slot_name = make_slot_name(name); if (!slot_name) { err = -ENOMEM; + kfree(slot); goto err; } err = kobject_init_and_add(&slot->kobj, &pci_slot_ktype, NULL, "%s", slot_name); - if (err) + if (err) { + kobject_put(&slot->kobj); goto err; + } INIT_LIST_HEAD(&slot->list); list_add(&slot->list, &parent->slots); @@ -329,7 +332,6 @@ out: mutex_unlock(&pci_slot_mutex); return slot; err: - kfree(slot); slot = ERR_PTR(err); goto out; } diff --git a/drivers/perf/arm-cci.c b/drivers/perf/arm-cci.c index 8f8606b9bc9e..aca4570f78a8 100644 --- a/drivers/perf/arm-cci.c +++ b/drivers/perf/arm-cci.c @@ -1720,6 +1720,7 @@ static struct platform_driver cci_pmu_driver = { .driver = { .name = DRIVER_NAME, .of_match_table = arm_cci_pmu_matches, + .suppress_bind_attrs = true, }, .probe = cci_pmu_probe, .remove = cci_pmu_remove, diff --git a/drivers/perf/arm-ccn.c b/drivers/perf/arm-ccn.c index 6fc0273b6129..336948b41bd1 100644 --- a/drivers/perf/arm-ccn.c +++ b/drivers/perf/arm-ccn.c @@ -1545,6 +1545,7 @@ static struct platform_driver arm_ccn_driver = { .driver = { .name = "arm-ccn", .of_match_table = arm_ccn_match, + .suppress_bind_attrs = true, }, .probe = arm_ccn_probe, .remove = arm_ccn_remove, diff --git a/drivers/perf/arm_dsu_pmu.c b/drivers/perf/arm_dsu_pmu.c index 70968c8c09d7..4594e2ed13d5 100644 --- a/drivers/perf/arm_dsu_pmu.c +++ b/drivers/perf/arm_dsu_pmu.c @@ -759,6 +759,7 @@ static struct platform_driver dsu_pmu_driver = { .driver = { .name = DRVNAME, .of_match_table = of_match_ptr(dsu_pmu_of_match), + .suppress_bind_attrs = true, }, .probe = dsu_pmu_device_probe, .remove = dsu_pmu_device_remove, diff --git a/drivers/perf/arm_smmuv3_pmu.c b/drivers/perf/arm_smmuv3_pmu.c index 2f8787276d9b..9cdd89b29334 100644 --- a/drivers/perf/arm_smmuv3_pmu.c +++ b/drivers/perf/arm_smmuv3_pmu.c @@ -742,6 +742,7 @@ static int smmu_pmu_probe(struct platform_device *pdev) platform_set_drvdata(pdev, smmu_pmu); smmu_pmu->pmu = (struct pmu) { + .module = THIS_MODULE, .task_ctx_nr = perf_invalid_context, .pmu_enable = smmu_pmu_enable, .pmu_disable = smmu_pmu_disable, @@ -815,7 +816,7 @@ static int smmu_pmu_probe(struct platform_device *pdev) if (err) { dev_err(dev, "Error %d registering hotplug, PMU @%pa\n", err, &res_0->start); - return err; + goto out_clear_affinity; } err = perf_pmu_register(&smmu_pmu->pmu, name, -1); @@ 
-834,6 +835,8 @@ static int smmu_pmu_probe(struct platform_device *pdev) out_unregister: cpuhp_state_remove_instance_nocalls(cpuhp_state_num, &smmu_pmu->node); +out_clear_affinity: + irq_set_affinity_hint(smmu_pmu->irq, NULL); return err; } @@ -843,6 +846,7 @@ static int smmu_pmu_remove(struct platform_device *pdev) perf_pmu_unregister(&smmu_pmu->pmu); cpuhp_state_remove_instance_nocalls(cpuhp_state_num, &smmu_pmu->node); + irq_set_affinity_hint(smmu_pmu->irq, NULL); return 0; } @@ -857,6 +861,7 @@ static void smmu_pmu_shutdown(struct platform_device *pdev) static struct platform_driver smmu_pmu_driver = { .driver = { .name = "arm-smmu-v3-pmcg", + .suppress_bind_attrs = true, }, .probe = smmu_pmu_probe, .remove = smmu_pmu_remove, diff --git a/drivers/perf/arm_spe_pmu.c b/drivers/perf/arm_spe_pmu.c index 4e4984a55cd1..079701e8de18 100644 --- a/drivers/perf/arm_spe_pmu.c +++ b/drivers/perf/arm_spe_pmu.c @@ -1228,6 +1228,7 @@ static struct platform_driver arm_spe_pmu_driver = { .driver = { .name = DRVNAME, .of_match_table = of_match_ptr(arm_spe_pmu_of_match), + .suppress_bind_attrs = true, }, .probe = arm_spe_pmu_device_probe, .remove = arm_spe_pmu_device_remove, diff --git a/drivers/perf/fsl_imx8_ddr_perf.c b/drivers/perf/fsl_imx8_ddr_perf.c index 0d9c7e228606..73fb2ba48cbe 100644 --- a/drivers/perf/fsl_imx8_ddr_perf.c +++ b/drivers/perf/fsl_imx8_ddr_perf.c @@ -652,6 +652,7 @@ static void ddr_perf_init(struct ddr_pmu *pmu, void __iomem *base, { *pmu = (struct ddr_pmu) { .pmu = (struct pmu) { + .module = THIS_MODULE, .capabilities = PERF_PMU_CAP_NO_EXCLUDE, .task_ctx_nr = perf_invalid_context, .event_init = ddr_perf_event_init, @@ -895,6 +896,7 @@ static struct platform_driver imx_ddr_pmu_driver = { .driver = { .name = "imx-ddr-pmu", .of_match_table = imx_ddr_pmu_dt_ids, + .suppress_bind_attrs = true, }, .probe = ddr_perf_probe, .remove = ddr_perf_remove, diff --git a/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c b/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c index e42d4464c2cf..b79c96b14328 100644 --- a/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c +++ b/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c @@ -381,6 +381,7 @@ static int hisi_ddrc_pmu_probe(struct platform_device *pdev) ddrc_pmu->sccl_id, ddrc_pmu->index_id); ddrc_pmu->pmu = (struct pmu) { .name = name, + .module = THIS_MODULE, .task_ctx_nr = perf_invalid_context, .event_init = hisi_uncore_pmu_event_init, .pmu_enable = hisi_uncore_pmu_enable, @@ -419,6 +420,7 @@ static struct platform_driver hisi_ddrc_pmu_driver = { .driver = { .name = "hisi_ddrc_pmu", .acpi_match_table = ACPI_PTR(hisi_ddrc_pmu_acpi_match), + .suppress_bind_attrs = true, }, .probe = hisi_ddrc_pmu_probe, .remove = hisi_ddrc_pmu_remove, diff --git a/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c b/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c index f28063873e11..78865b4ac4a6 100644 --- a/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c +++ b/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c @@ -285,7 +285,7 @@ static struct attribute *hisi_hha_pmu_events_attr[] = { HISI_PMU_EVENT_ATTR(rx_wbip, 0x05), HISI_PMU_EVENT_ATTR(rx_wtistash, 0x11), HISI_PMU_EVENT_ATTR(rd_ddr_64b, 0x1c), - HISI_PMU_EVENT_ATTR(wr_dr_64b, 0x1d), + HISI_PMU_EVENT_ATTR(wr_ddr_64b, 0x1d), HISI_PMU_EVENT_ATTR(rd_ddr_128b, 0x1e), HISI_PMU_EVENT_ATTR(wr_ddr_128b, 0x1f), HISI_PMU_EVENT_ATTR(spill_num, 0x20), @@ -392,6 +392,7 @@ static int hisi_hha_pmu_probe(struct platform_device *pdev) hha_pmu->sccl_id, hha_pmu->index_id); hha_pmu->pmu = (struct pmu) { .name = name, + .module = THIS_MODULE, .task_ctx_nr = 
perf_invalid_context, .event_init = hisi_uncore_pmu_event_init, .pmu_enable = hisi_uncore_pmu_enable, @@ -430,6 +431,7 @@ static struct platform_driver hisi_hha_pmu_driver = { .driver = { .name = "hisi_hha_pmu", .acpi_match_table = ACPI_PTR(hisi_hha_pmu_acpi_match), + .suppress_bind_attrs = true, }, .probe = hisi_hha_pmu_probe, .remove = hisi_hha_pmu_remove, diff --git a/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c b/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c index 078b8dc57250..9dd50c3bc74e 100644 --- a/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c +++ b/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c @@ -35,7 +35,7 @@ /* L3C has 8-counters */ #define L3C_NR_COUNTERS 0x8 -#define L3C_PERF_CTRL_EN 0x20000 +#define L3C_PERF_CTRL_EN 0x10000 #define L3C_EVTYPE_NONE 0xff /* @@ -382,6 +382,7 @@ static int hisi_l3c_pmu_probe(struct platform_device *pdev) l3c_pmu->sccl_id, l3c_pmu->index_id); l3c_pmu->pmu = (struct pmu) { .name = name, + .module = THIS_MODULE, .task_ctx_nr = perf_invalid_context, .event_init = hisi_uncore_pmu_event_init, .pmu_enable = hisi_uncore_pmu_enable, @@ -420,6 +421,7 @@ static struct platform_driver hisi_l3c_pmu_driver = { .driver = { .name = "hisi_l3c_pmu", .acpi_match_table = ACPI_PTR(hisi_l3c_pmu_acpi_match), + .suppress_bind_attrs = true, }, .probe = hisi_l3c_pmu_probe, .remove = hisi_l3c_pmu_remove, diff --git a/drivers/perf/qcom_l2_pmu.c b/drivers/perf/qcom_l2_pmu.c index 21d6991dbe0b..4da37f650f98 100644 --- a/drivers/perf/qcom_l2_pmu.c +++ b/drivers/perf/qcom_l2_pmu.c @@ -1028,6 +1028,7 @@ static struct platform_driver l2_cache_pmu_driver = { .driver = { .name = "qcom-l2cache-pmu", .acpi_match_table = ACPI_PTR(l2_cache_pmu_acpi_match), + .suppress_bind_attrs = true, }, .probe = l2_cache_pmu_probe, .remove = l2_cache_pmu_remove, diff --git a/drivers/perf/qcom_l3_pmu.c b/drivers/perf/qcom_l3_pmu.c index 656e830798d9..9ddb577c542b 100644 --- a/drivers/perf/qcom_l3_pmu.c +++ b/drivers/perf/qcom_l3_pmu.c @@ -814,6 +814,7 @@ static struct platform_driver qcom_l3_cache_pmu_driver = { .driver = { .name = "qcom-l3cache-pmu", .acpi_match_table = ACPI_PTR(qcom_l3_cache_pmu_acpi_match), + .suppress_bind_attrs = true, }, .probe = qcom_l3_cache_pmu_probe, }; diff --git a/drivers/perf/thunderx2_pmu.c b/drivers/perf/thunderx2_pmu.c index 43d76c85da56..9e1c3c7eeba9 100644 --- a/drivers/perf/thunderx2_pmu.c +++ b/drivers/perf/thunderx2_pmu.c @@ -816,6 +816,7 @@ static struct platform_driver tx2_uncore_driver = { .driver = { .name = "tx2-uncore-pmu", .acpi_match_table = ACPI_PTR(tx2_uncore_acpi_match), + .suppress_bind_attrs = true, }, .probe = tx2_uncore_probe, .remove = tx2_uncore_remove, diff --git a/drivers/perf/xgene_pmu.c b/drivers/perf/xgene_pmu.c index 7e328d6385c3..328aea9f6be3 100644 --- a/drivers/perf/xgene_pmu.c +++ b/drivers/perf/xgene_pmu.c @@ -1981,6 +1981,7 @@ static struct platform_driver xgene_pmu_driver = { .name = "xgene-pmu", .of_match_table = xgene_pmu_of_match, .acpi_match_table = ACPI_PTR(xgene_pmu_acpi_match), + .suppress_bind_attrs = true, }, }; diff --git a/drivers/phy/allwinner/phy-sun4i-usb.c b/drivers/phy/allwinner/phy-sun4i-usb.c index 856927382248..e5842e48a5e0 100644 --- a/drivers/phy/allwinner/phy-sun4i-usb.c +++ b/drivers/phy/allwinner/phy-sun4i-usb.c @@ -545,13 +545,14 @@ static void sun4i_usb_phy0_id_vbus_det_scan(struct work_struct *work) struct sun4i_usb_phy_data *data = container_of(work, struct sun4i_usb_phy_data, detect.work); struct phy *phy0 = data->phys[0].phy; - struct sun4i_usb_phy *phy = phy_get_drvdata(phy0); + struct sun4i_usb_phy 
*phy; bool force_session_end, id_notify = false, vbus_notify = false; int id_det, vbus_det; - if (phy0 == NULL) + if (!phy0) return; + phy = phy_get_drvdata(phy0); id_det = sun4i_usb_phy0_get_id_det(data); vbus_det = sun4i_usb_phy0_get_vbus_det(data); diff --git a/drivers/phy/broadcom/phy-bcm-sr-usb.c b/drivers/phy/broadcom/phy-bcm-sr-usb.c index fe6c58910e4c..7c7862b4f41f 100644 --- a/drivers/phy/broadcom/phy-bcm-sr-usb.c +++ b/drivers/phy/broadcom/phy-bcm-sr-usb.c @@ -16,8 +16,6 @@ enum bcm_usb_phy_version { }; enum bcm_usb_phy_reg { - PLL_NDIV_FRAC, - PLL_NDIV_INT, PLL_CTRL, PHY_CTRL, PHY_PLL_CTRL, @@ -31,18 +29,11 @@ static const u8 bcm_usb_combo_phy_ss[] = { }; static const u8 bcm_usb_combo_phy_hs[] = { - [PLL_NDIV_FRAC] = 0x04, - [PLL_NDIV_INT] = 0x08, [PLL_CTRL] = 0x0c, [PHY_CTRL] = 0x10, }; -#define HSPLL_NDIV_INT_VAL 0x13 -#define HSPLL_NDIV_FRAC_VAL 0x1005 - static const u8 bcm_usb_hs_phy[] = { - [PLL_NDIV_FRAC] = 0x0, - [PLL_NDIV_INT] = 0x4, [PLL_CTRL] = 0x8, [PHY_CTRL] = 0xc, }; @@ -52,7 +43,6 @@ enum pll_ctrl_bits { SSPLL_SUSPEND_EN, PLL_SEQ_START, PLL_LOCK, - PLL_PDIV, }; static const u8 u3pll_ctrl[] = { @@ -66,29 +56,17 @@ static const u8 u3pll_ctrl[] = { #define HSPLL_PDIV_VAL 0x1 static const u8 u2pll_ctrl[] = { - [PLL_PDIV] = 1, [PLL_RESETB] = 5, [PLL_LOCK] = 6, }; enum bcm_usb_phy_ctrl_bits { CORERDY, - AFE_LDO_PWRDWNB, - AFE_PLL_PWRDWNB, - AFE_BG_PWRDWNB, - PHY_ISO, PHY_RESETB, PHY_PCTL, }; #define PHY_PCTL_MASK 0xffff -/* - * 0x0806 of PCTL_VAL has below bits set - * BIT-8 : refclk divider 1 - * BIT-3:2: device mode; mode is not effect - * BIT-1: soft reset active low - */ -#define HSPHY_PCTL_VAL 0x0806 #define SSPHY_PCTL_VAL 0x0006 static const u8 u3phy_ctrl[] = { @@ -98,10 +76,6 @@ static const u8 u3phy_ctrl[] = { static const u8 u2phy_ctrl[] = { [CORERDY] = 0, - [AFE_LDO_PWRDWNB] = 1, - [AFE_PLL_PWRDWNB] = 2, - [AFE_BG_PWRDWNB] = 3, - [PHY_ISO] = 4, [PHY_RESETB] = 5, [PHY_PCTL] = 6, }; @@ -186,38 +160,13 @@ static int bcm_usb_hs_phy_init(struct bcm_usb_phy_cfg *phy_cfg) int ret = 0; void __iomem *regs = phy_cfg->regs; const u8 *offset; - u32 rd_data; offset = phy_cfg->offset; - writel(HSPLL_NDIV_INT_VAL, regs + offset[PLL_NDIV_INT]); - writel(HSPLL_NDIV_FRAC_VAL, regs + offset[PLL_NDIV_FRAC]); - - rd_data = readl(regs + offset[PLL_CTRL]); - rd_data &= ~(HSPLL_PDIV_MASK << u2pll_ctrl[PLL_PDIV]); - rd_data |= (HSPLL_PDIV_VAL << u2pll_ctrl[PLL_PDIV]); - writel(rd_data, regs + offset[PLL_CTRL]); - - /* Set Core Ready high */ - bcm_usb_reg32_setbits(regs + offset[PHY_CTRL], - BIT(u2phy_ctrl[CORERDY])); - - /* Maximum timeout for Core Ready done */ - msleep(30); - + bcm_usb_reg32_clrbits(regs + offset[PLL_CTRL], + BIT(u2pll_ctrl[PLL_RESETB])); bcm_usb_reg32_setbits(regs + offset[PLL_CTRL], BIT(u2pll_ctrl[PLL_RESETB])); - bcm_usb_reg32_setbits(regs + offset[PHY_CTRL], - BIT(u2phy_ctrl[PHY_RESETB])); - - - rd_data = readl(regs + offset[PHY_CTRL]); - rd_data &= ~(PHY_PCTL_MASK << u2phy_ctrl[PHY_PCTL]); - rd_data |= (HSPHY_PCTL_VAL << u2phy_ctrl[PHY_PCTL]); - writel(rd_data, regs + offset[PHY_CTRL]); - - /* Maximum timeout for PLL reset done */ - msleep(30); ret = bcm_usb_pll_lock_check(regs + offset[PLL_CTRL], BIT(u2pll_ctrl[PLL_LOCK])); diff --git a/drivers/phy/marvell/phy-armada38x-comphy.c b/drivers/phy/marvell/phy-armada38x-comphy.c index 6960dfd8ad8c..0fe408964334 100644 --- a/drivers/phy/marvell/phy-armada38x-comphy.c +++ b/drivers/phy/marvell/phy-armada38x-comphy.c @@ -41,6 +41,7 @@ struct a38x_comphy_lane { struct a38x_comphy { void __iomem *base; + void __iomem 
*conf; struct device *dev; struct a38x_comphy_lane lane[MAX_A38X_COMPHY]; }; @@ -54,6 +55,21 @@ static const u8 gbe_mux[MAX_A38X_COMPHY][MAX_A38X_PORTS] = { { 0, 0, 3 }, }; +static void a38x_set_conf(struct a38x_comphy_lane *lane, bool enable) +{ + struct a38x_comphy *priv = lane->priv; + u32 conf; + + if (priv->conf) { + conf = readl_relaxed(priv->conf); + if (enable) + conf |= BIT(lane->port); + else + conf &= ~BIT(lane->port); + writel(conf, priv->conf); + } +} + static void a38x_comphy_set_reg(struct a38x_comphy_lane *lane, unsigned int offset, u32 mask, u32 value) { @@ -97,6 +113,7 @@ static int a38x_comphy_set_mode(struct phy *phy, enum phy_mode mode, int sub) { struct a38x_comphy_lane *lane = phy_get_drvdata(phy); unsigned int gen; + int ret; if (mode != PHY_MODE_ETHERNET) return -EINVAL; @@ -115,13 +132,20 @@ static int a38x_comphy_set_mode(struct phy *phy, enum phy_mode mode, int sub) return -EINVAL; } + a38x_set_conf(lane, false); + a38x_comphy_set_speed(lane, gen, gen); - return a38x_comphy_poll(lane, COMPHY_STAT1, - COMPHY_STAT1_PLL_RDY_TX | - COMPHY_STAT1_PLL_RDY_RX, - COMPHY_STAT1_PLL_RDY_TX | - COMPHY_STAT1_PLL_RDY_RX); + ret = a38x_comphy_poll(lane, COMPHY_STAT1, + COMPHY_STAT1_PLL_RDY_TX | + COMPHY_STAT1_PLL_RDY_RX, + COMPHY_STAT1_PLL_RDY_TX | + COMPHY_STAT1_PLL_RDY_RX); + + if (ret == 0) + a38x_set_conf(lane, true); + + return ret; } static const struct phy_ops a38x_comphy_ops = { @@ -174,14 +198,21 @@ static int a38x_comphy_probe(struct platform_device *pdev) if (!priv) return -ENOMEM; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - base = devm_ioremap_resource(&pdev->dev, res); + base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(base)) return PTR_ERR(base); priv->dev = &pdev->dev; priv->base = base; + /* Optional */ + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "conf"); + if (res) { + priv->conf = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(priv->conf)) + return PTR_ERR(priv->conf); + } + for_each_available_child_of_node(pdev->dev.of_node, child) { struct phy *phy; int ret; diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.c b/drivers/phy/qualcomm/phy-qcom-qmp.c index 27dd20a7fe13..5ddbf9a1f328 100644 --- a/drivers/phy/qualcomm/phy-qcom-qmp.c +++ b/drivers/phy/qualcomm/phy-qcom-qmp.c @@ -402,8 +402,8 @@ static const struct qmp_phy_init_tbl ipq8074_pcie_serdes_tbl[] = { QMP_PHY_INIT_CFG(QSERDES_COM_BG_TRIM, 0xf), QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP_EN, 0x1), QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_MAP, 0x0), - QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_TIMER1, 0x1f), - QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_TIMER2, 0x3f), + QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_TIMER1, 0xff), + QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_TIMER2, 0x1f), QMP_PHY_INIT_CFG(QSERDES_COM_CMN_CONFIG, 0x6), QMP_PHY_INIT_CFG(QSERDES_COM_PLL_IVCO, 0xf), QMP_PHY_INIT_CFG(QSERDES_COM_HSCLK_SEL, 0x0), @@ -429,7 +429,6 @@ static const struct qmp_phy_init_tbl ipq8074_pcie_serdes_tbl[] = { QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x0), QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x80), QMP_PHY_INIT_CFG(QSERDES_COM_BIAS_EN_CTRL_BY_PSM, 0x1), - QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_CTRL, 0xa), QMP_PHY_INIT_CFG(QSERDES_COM_SSC_EN_CENTER, 0x1), QMP_PHY_INIT_CFG(QSERDES_COM_SSC_PER1, 0x31), QMP_PHY_INIT_CFG(QSERDES_COM_SSC_PER2, 0x1), @@ -438,7 +437,6 @@ static const struct qmp_phy_init_tbl ipq8074_pcie_serdes_tbl[] = { QMP_PHY_INIT_CFG(QSERDES_COM_SSC_STEP_SIZE1, 0x2f), QMP_PHY_INIT_CFG(QSERDES_COM_SSC_STEP_SIZE2, 0x19), QMP_PHY_INIT_CFG(QSERDES_COM_CLK_EP_DIV, 
0x19), - QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_CNTRL, 0x7), }; static const struct qmp_phy_init_tbl ipq8074_pcie_tx_tbl[] = { @@ -446,6 +444,8 @@ static const struct qmp_phy_init_tbl ipq8074_pcie_tx_tbl[] = { QMP_PHY_INIT_CFG(QSERDES_TX_LANE_MODE, 0x6), QMP_PHY_INIT_CFG(QSERDES_TX_RES_CODE_LANE_OFFSET, 0x2), QMP_PHY_INIT_CFG(QSERDES_TX_RCV_DETECT_LVL_2, 0x12), + QMP_PHY_INIT_CFG(QSERDES_TX_EMP_POST1_LVL, 0x36), + QMP_PHY_INIT_CFG(QSERDES_TX_SLEW_CNTL, 0x0a), }; static const struct qmp_phy_init_tbl ipq8074_pcie_rx_tbl[] = { @@ -456,7 +456,6 @@ static const struct qmp_phy_init_tbl ipq8074_pcie_rx_tbl[] = { QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL4, 0xdb), QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x4b), QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SO_GAIN, 0x4), - QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SO_GAIN_HALF, 0x4), }; static const struct qmp_phy_init_tbl ipq8074_pcie_pcs_tbl[] = { @@ -1107,6 +1106,9 @@ static const struct qmp_phy_cfg msm8996_usb3phy_cfg = { .pwrdn_ctrl = SW_PWRDN, }; +static const char * const ipq8074_pciephy_clk_l[] = { + "aux", "cfg_ahb", +}; /* list of resets */ static const char * const ipq8074_pciephy_reset_l[] = { "phy", "common", @@ -1124,8 +1126,8 @@ static const struct qmp_phy_cfg ipq8074_pciephy_cfg = { .rx_tbl_num = ARRAY_SIZE(ipq8074_pcie_rx_tbl), .pcs_tbl = ipq8074_pcie_pcs_tbl, .pcs_tbl_num = ARRAY_SIZE(ipq8074_pcie_pcs_tbl), - .clk_list = NULL, - .num_clks = 0, + .clk_list = ipq8074_pciephy_clk_l, + .num_clks = ARRAY_SIZE(ipq8074_pciephy_clk_l), .reset_list = ipq8074_pciephy_reset_l, .num_resets = ARRAY_SIZE(ipq8074_pciephy_reset_l), .vreg_list = NULL, diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.h b/drivers/phy/qualcomm/phy-qcom-qmp.h index 335ea5d7ef40..f6b1e6359b8c 100644 --- a/drivers/phy/qualcomm/phy-qcom-qmp.h +++ b/drivers/phy/qualcomm/phy-qcom-qmp.h @@ -77,6 +77,8 @@ #define QSERDES_COM_CORECLK_DIV_MODE1 0x1bc /* Only for QMP V2 PHY - TX registers */ +#define QSERDES_TX_EMP_POST1_LVL 0x018 +#define QSERDES_TX_SLEW_CNTL 0x040 #define QSERDES_TX_RES_CODE_LANE_OFFSET 0x054 #define QSERDES_TX_DEBUG_BUS_SEL 0x064 #define QSERDES_TX_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN 0x068 diff --git a/drivers/phy/renesas/phy-rcar-gen3-usb2.c b/drivers/phy/renesas/phy-rcar-gen3-usb2.c index bfb22f868857..5087b7c44d55 100644 --- a/drivers/phy/renesas/phy-rcar-gen3-usb2.c +++ b/drivers/phy/renesas/phy-rcar-gen3-usb2.c @@ -111,6 +111,7 @@ struct rcar_gen3_chan { struct work_struct work; struct mutex lock; /* protects rphys[...].powered */ enum usb_dr_mode dr_mode; + int irq; bool extcon_host; bool is_otg_channel; bool uses_otg_pins; @@ -389,12 +390,38 @@ static void rcar_gen3_init_otg(struct rcar_gen3_chan *ch) rcar_gen3_device_recognition(ch); } +static irqreturn_t rcar_gen3_phy_usb2_irq(int irq, void *_ch) +{ + struct rcar_gen3_chan *ch = _ch; + void __iomem *usb2_base = ch->base; + u32 status = readl(usb2_base + USB2_OBINTSTA); + irqreturn_t ret = IRQ_NONE; + + if (status & USB2_OBINT_BITS) { + dev_vdbg(ch->dev, "%s: %08x\n", __func__, status); + writel(USB2_OBINT_BITS, usb2_base + USB2_OBINTSTA); + rcar_gen3_device_recognition(ch); + ret = IRQ_HANDLED; + } + + return ret; +} + static int rcar_gen3_phy_usb2_init(struct phy *p) { struct rcar_gen3_phy *rphy = phy_get_drvdata(p); struct rcar_gen3_chan *channel = rphy->ch; void __iomem *usb2_base = channel->base; u32 val; + int ret; + + if (!rcar_gen3_is_any_rphy_initialized(channel) && channel->irq >= 0) { + INIT_WORK(&channel->work, rcar_gen3_phy_usb2_work); + ret = request_irq(channel->irq, rcar_gen3_phy_usb2_irq, 
+ IRQF_SHARED, dev_name(channel->dev), channel); + if (ret < 0) + dev_err(channel->dev, "No irq handler (%d)\n", channel->irq); + } /* Initialize USB2 part */ val = readl(usb2_base + USB2_INT_ENABLE); @@ -433,6 +460,9 @@ static int rcar_gen3_phy_usb2_exit(struct phy *p) val &= ~USB2_INT_ENABLE_UCOM_INTEN; writel(val, usb2_base + USB2_INT_ENABLE); + if (channel->irq >= 0 && !rcar_gen3_is_any_rphy_initialized(channel)) + free_irq(channel->irq, channel); + return 0; } @@ -503,23 +533,6 @@ static const struct phy_ops rz_g1c_phy_usb2_ops = { .owner = THIS_MODULE, }; -static irqreturn_t rcar_gen3_phy_usb2_irq(int irq, void *_ch) -{ - struct rcar_gen3_chan *ch = _ch; - void __iomem *usb2_base = ch->base; - u32 status = readl(usb2_base + USB2_OBINTSTA); - irqreturn_t ret = IRQ_NONE; - - if (status & USB2_OBINT_BITS) { - dev_vdbg(ch->dev, "%s: %08x\n", __func__, status); - writel(USB2_OBINT_BITS, usb2_base + USB2_OBINTSTA); - rcar_gen3_device_recognition(ch); - ret = IRQ_HANDLED; - } - - return ret; -} - static const struct of_device_id rcar_gen3_phy_usb2_match_table[] = { { .compatible = "renesas,usb2-phy-r8a77470", @@ -598,7 +611,7 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev) struct phy_provider *provider; struct resource *res; const struct phy_ops *phy_usb2_ops; - int irq, ret = 0, i; + int ret = 0, i; if (!dev->of_node) { dev_err(dev, "This driver needs device tree\n"); @@ -614,16 +627,8 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev) if (IS_ERR(channel->base)) return PTR_ERR(channel->base); - /* call request_irq for OTG */ - irq = platform_get_irq_optional(pdev, 0); - if (irq >= 0) { - INIT_WORK(&channel->work, rcar_gen3_phy_usb2_work); - irq = devm_request_irq(dev, irq, rcar_gen3_phy_usb2_irq, - IRQF_SHARED, dev_name(dev), channel); - if (irq < 0) - dev_err(dev, "No irq handler (%d)\n", irq); - } - + /* get irq number here and request_irq for OTG in phy_init */ + channel->irq = platform_get_irq_optional(pdev, 0); channel->dr_mode = rcar_gen3_get_dr_mode(dev->of_node); if (channel->dr_mode != USB_DR_MODE_UNKNOWN) { int ret; diff --git a/drivers/phy/samsung/phy-exynos5-usbdrd.c b/drivers/phy/samsung/phy-exynos5-usbdrd.c index e510732afb8b..7f6279fb4f8f 100644 --- a/drivers/phy/samsung/phy-exynos5-usbdrd.c +++ b/drivers/phy/samsung/phy-exynos5-usbdrd.c @@ -714,7 +714,9 @@ static int exynos5_usbdrd_phy_calibrate(struct phy *phy) struct phy_usb_instance *inst = phy_get_drvdata(phy); struct exynos5_usbdrd_phy *phy_drd = to_usbdrd_phy(inst); - return exynos5420_usbdrd_phy_calibrate(phy_drd); + if (inst->phy_cfg->id == EXYNOS5_DRDPHY_UTMI) + return exynos5420_usbdrd_phy_calibrate(phy_drd); + return 0; } static const struct phy_ops exynos5_usbdrd_phy_ops = { diff --git a/drivers/pinctrl/freescale/pinctrl-imx.c b/drivers/pinctrl/freescale/pinctrl-imx.c index 03987cbb9d7d..507e4affcd73 100644 --- a/drivers/pinctrl/freescale/pinctrl-imx.c +++ b/drivers/pinctrl/freescale/pinctrl-imx.c @@ -774,16 +774,6 @@ static int imx_pinctrl_probe_dt(struct platform_device *pdev, return 0; } -/* - * imx_free_resources() - free memory used by this driver - * @info: info driver instance - */ -static void imx_free_resources(struct imx_pinctrl *ipctl) -{ - if (ipctl->pctl) - pinctrl_unregister(ipctl->pctl); -} - int imx_pinctrl_probe(struct platform_device *pdev, const struct imx_pinctrl_soc_info *info) { @@ -874,23 +864,18 @@ int imx_pinctrl_probe(struct platform_device *pdev, &ipctl->pctl); if (ret) { dev_err(&pdev->dev, "could not register IMX pinctrl driver\n"); - goto free; 
+ return ret; } ret = imx_pinctrl_probe_dt(pdev, ipctl); if (ret) { dev_err(&pdev->dev, "fail to probe dt properties\n"); - goto free; + return ret; } dev_info(&pdev->dev, "initialized IMX pinctrl driver\n"); return pinctrl_enable(ipctl->pctl); - -free: - imx_free_resources(ipctl); - - return ret; } EXPORT_SYMBOL_GPL(imx_pinctrl_probe); diff --git a/drivers/pinctrl/freescale/pinctrl-imx1-core.c b/drivers/pinctrl/freescale/pinctrl-imx1-core.c index 7e29e3fecdb2..5bb183c0ce31 100644 --- a/drivers/pinctrl/freescale/pinctrl-imx1-core.c +++ b/drivers/pinctrl/freescale/pinctrl-imx1-core.c @@ -638,7 +638,6 @@ int imx1_pinctrl_core_probe(struct platform_device *pdev, ret = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev); if (ret) { - pinctrl_unregister(ipctl->pctl); dev_err(&pdev->dev, "Failed to populate subdevices\n"); return ret; } diff --git a/drivers/pinctrl/pinctrl-amd.h b/drivers/pinctrl/pinctrl-amd.h index 3e5760f1a715..d4a192df5fab 100644 --- a/drivers/pinctrl/pinctrl-amd.h +++ b/drivers/pinctrl/pinctrl-amd.h @@ -252,7 +252,7 @@ static const struct amd_pingroup kerncz_groups[] = { { .name = "uart0", .pins = uart0_pins, - .npins = 9, + .npins = 5, }, { .name = "uart1", diff --git a/drivers/pinctrl/pinctrl-ingenic.c b/drivers/pinctrl/pinctrl-ingenic.c index 6e2683016c1f..8bd0a078bfc4 100644 --- a/drivers/pinctrl/pinctrl-ingenic.c +++ b/drivers/pinctrl/pinctrl-ingenic.c @@ -1500,9 +1500,9 @@ static void ingenic_gpio_irq_ack(struct irq_data *irqd) */ high = ingenic_gpio_get_value(jzgc, irq); if (high) - irq_set_type(jzgc, irq, IRQ_TYPE_EDGE_FALLING); + irq_set_type(jzgc, irq, IRQ_TYPE_LEVEL_LOW); else - irq_set_type(jzgc, irq, IRQ_TYPE_EDGE_RISING); + irq_set_type(jzgc, irq, IRQ_TYPE_LEVEL_HIGH); } if (jzgc->jzpc->version >= ID_JZ4760) @@ -1538,7 +1538,7 @@ static int ingenic_gpio_irq_set_type(struct irq_data *irqd, unsigned int type) */ bool high = ingenic_gpio_get_value(jzgc, irqd->hwirq); - type = high ? IRQ_TYPE_EDGE_FALLING : IRQ_TYPE_EDGE_RISING; + type = high ? 
IRQ_TYPE_LEVEL_LOW : IRQ_TYPE_LEVEL_HIGH; } irq_set_type(jzgc, irqd->hwirq, type); @@ -1644,7 +1644,8 @@ static int ingenic_gpio_get_direction(struct gpio_chip *gc, unsigned int offset) unsigned int pin = gc->base + offset; if (jzpc->version >= ID_JZ4760) - return ingenic_get_pin_config(jzpc, pin, JZ4760_GPIO_PAT1); + return ingenic_get_pin_config(jzpc, pin, JZ4760_GPIO_INT) || + ingenic_get_pin_config(jzpc, pin, JZ4760_GPIO_PAT1); if (ingenic_get_pin_config(jzpc, pin, JZ4740_GPIO_SELECT)) return true; diff --git a/drivers/pinctrl/pinctrl-ocelot.c b/drivers/pinctrl/pinctrl-ocelot.c index fb76fb2e9ea5..0a951a75c82b 100644 --- a/drivers/pinctrl/pinctrl-ocelot.c +++ b/drivers/pinctrl/pinctrl-ocelot.c @@ -711,11 +711,12 @@ static void ocelot_irq_handler(struct irq_desc *desc) struct irq_chip *parent_chip = irq_desc_get_chip(desc); struct gpio_chip *chip = irq_desc_get_handler_data(desc); struct ocelot_pinctrl *info = gpiochip_get_data(chip); + unsigned int id_reg = OCELOT_GPIO_INTR_IDENT * info->stride; unsigned int reg = 0, irq, i; unsigned long irqs; for (i = 0; i < info->stride; i++) { - regmap_read(info->map, OCELOT_GPIO_INTR_IDENT + 4 * i, ®); + regmap_read(info->map, id_reg + 4 * i, ®); if (!reg) continue; diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c index dc0bbf198cbc..1bd8840e11a6 100644 --- a/drivers/pinctrl/pinctrl-rockchip.c +++ b/drivers/pinctrl/pinctrl-rockchip.c @@ -506,8 +506,8 @@ static int rockchip_dt_node_to_map(struct pinctrl_dev *pctldev, } map_num += grp->npins; - new_map = devm_kcalloc(pctldev->dev, map_num, sizeof(*new_map), - GFP_KERNEL); + + new_map = kcalloc(map_num, sizeof(*new_map), GFP_KERNEL); if (!new_map) return -ENOMEM; @@ -517,7 +517,7 @@ static int rockchip_dt_node_to_map(struct pinctrl_dev *pctldev, /* create mux map */ parent = of_get_parent(np); if (!parent) { - devm_kfree(pctldev->dev, new_map); + kfree(new_map); return -EINVAL; } new_map[0].type = PIN_MAP_TYPE_MUX_GROUP; @@ -544,6 +544,7 @@ static int rockchip_dt_node_to_map(struct pinctrl_dev *pctldev, static void rockchip_dt_free_map(struct pinctrl_dev *pctldev, struct pinctrl_map *map, unsigned num_maps) { + kfree(map); } static const struct pinctrl_ops rockchip_pctrl_ops = { diff --git a/drivers/pinctrl/pinctrl-rza1.c b/drivers/pinctrl/pinctrl-rza1.c index 017fc6b3e27e..ca9da61cfc4e 100644 --- a/drivers/pinctrl/pinctrl-rza1.c +++ b/drivers/pinctrl/pinctrl-rza1.c @@ -418,7 +418,7 @@ static const struct rza1_bidir_entry rza1l_bidir_entries[RZA1_NPORTS] = { }; static const struct rza1_swio_entry rza1l_swio_entries[] = { - [0] = { ARRAY_SIZE(rza1h_swio_pins), rza1h_swio_pins }, + [0] = { ARRAY_SIZE(rza1l_swio_pins), rza1l_swio_pins }, }; /* RZ/A1L (r7s72102x) pinmux flags table */ diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c index 1e0614daee9b..a9d511982780 100644 --- a/drivers/pinctrl/pinctrl-single.c +++ b/drivers/pinctrl/pinctrl-single.c @@ -916,7 +916,7 @@ static int pcs_parse_pinconf(struct pcs_device *pcs, struct device_node *np, /* If pinconf isn't supported, don't parse properties in below. 
*/ if (!PCS_HAS_PINCONF) - return 0; + return -ENOTSUPP; /* cacluate how much properties are supported in current node */ for (i = 0; i < ARRAY_SIZE(prop2); i++) { @@ -928,7 +928,7 @@ static int pcs_parse_pinconf(struct pcs_device *pcs, struct device_node *np, nconfs++; } if (!nconfs) - return 0; + return -ENOTSUPP; func->conf = devm_kcalloc(pcs->dev, nconfs, sizeof(struct pcs_conf_vals), @@ -1056,9 +1056,12 @@ static int pcs_parse_one_pinctrl_entry(struct pcs_device *pcs, if (PCS_HAS_PINCONF && function) { res = pcs_parse_pinconf(pcs, np, function, map); - if (res) + if (res == 0) + *num_maps = 2; + else if (res == -ENOTSUPP) + *num_maps = 1; + else goto free_pingroups; - *num_maps = 2; } else { *num_maps = 1; } diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c index f1fece5b9c06..3769ad08eadf 100644 --- a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c +++ b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c @@ -170,6 +170,7 @@ struct pmic_gpio_state { struct regmap *map; struct pinctrl_dev *ctrl; struct gpio_chip chip; + struct irq_chip irq; }; static const struct pinconf_generic_params pmic_gpio_bindings[] = { @@ -917,16 +918,6 @@ static int pmic_gpio_populate(struct pmic_gpio_state *state, return 0; } -static struct irq_chip pmic_gpio_irq_chip = { - .name = "spmi-gpio", - .irq_ack = irq_chip_ack_parent, - .irq_mask = irq_chip_mask_parent, - .irq_unmask = irq_chip_unmask_parent, - .irq_set_type = irq_chip_set_type_parent, - .irq_set_wake = irq_chip_set_wake_parent, - .flags = IRQCHIP_MASK_ON_SUSPEND, -}; - static int pmic_gpio_domain_translate(struct irq_domain *domain, struct irq_fwspec *fwspec, unsigned long *hwirq, @@ -1053,8 +1044,16 @@ static int pmic_gpio_probe(struct platform_device *pdev) if (!parent_domain) return -ENXIO; + state->irq.name = "spmi-gpio", + state->irq.irq_ack = irq_chip_ack_parent, + state->irq.irq_mask = irq_chip_mask_parent, + state->irq.irq_unmask = irq_chip_unmask_parent, + state->irq.irq_set_type = irq_chip_set_type_parent, + state->irq.irq_set_wake = irq_chip_set_wake_parent, + state->irq.flags = IRQCHIP_MASK_ON_SUSPEND, + girq = &state->chip.irq; - girq->chip = &pmic_gpio_irq_chip; + girq->chip = &state->irq; girq->default_type = IRQ_TYPE_NONE; girq->handler = handle_level_irq; girq->fwnode = of_node_to_fwnode(state->dev->of_node); diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.c b/drivers/pinctrl/samsung/pinctrl-exynos.c index 0599f5127b01..84501c785473 100644 --- a/drivers/pinctrl/samsung/pinctrl-exynos.c +++ b/drivers/pinctrl/samsung/pinctrl-exynos.c @@ -40,6 +40,8 @@ struct exynos_irq_chip { u32 eint_pend; u32 eint_wake_mask_value; u32 eint_wake_mask_reg; + void (*set_eint_wakeup_mask)(struct samsung_pinctrl_drv_data *drvdata, + struct exynos_irq_chip *irq_chip); }; static inline struct exynos_irq_chip *to_exynos_irq_chip(struct irq_chip *chip) @@ -265,6 +267,7 @@ struct exynos_eint_gpio_save { u32 eint_con; u32 eint_fltcon0; u32 eint_fltcon1; + u32 eint_mask; }; /* @@ -342,6 +345,47 @@ static int exynos_wkup_irq_set_wake(struct irq_data *irqd, unsigned int on) return 0; } +static void +exynos_pinctrl_set_eint_wakeup_mask(struct samsung_pinctrl_drv_data *drvdata, + struct exynos_irq_chip *irq_chip) +{ + struct regmap *pmu_regs; + + if (!drvdata->retention_ctrl || !drvdata->retention_ctrl->priv) { + dev_warn(drvdata->dev, + "No retention data configured bank with external wakeup interrupt. 
Wake-up mask will not be set.\n"); + return; + } + + pmu_regs = drvdata->retention_ctrl->priv; + dev_info(drvdata->dev, + "Setting external wakeup interrupt mask: 0x%x\n", + irq_chip->eint_wake_mask_value); + + regmap_write(pmu_regs, irq_chip->eint_wake_mask_reg, + irq_chip->eint_wake_mask_value); +} + +static void +s5pv210_pinctrl_set_eint_wakeup_mask(struct samsung_pinctrl_drv_data *drvdata, + struct exynos_irq_chip *irq_chip) + +{ + void __iomem *clk_base; + + if (!drvdata->retention_ctrl || !drvdata->retention_ctrl->priv) { + dev_warn(drvdata->dev, + "No retention data configured bank with external wakeup interrupt. Wake-up mask will not be set.\n"); + return; + } + + + clk_base = (void __iomem *) drvdata->retention_ctrl->priv; + + __raw_writel(irq_chip->eint_wake_mask_value, + clk_base + irq_chip->eint_wake_mask_reg); +} + /* * irq_chip for wakeup interrupts */ @@ -360,8 +404,9 @@ static const struct exynos_irq_chip s5pv210_wkup_irq_chip __initconst = { .eint_mask = EXYNOS_WKUP_EMASK_OFFSET, .eint_pend = EXYNOS_WKUP_EPEND_OFFSET, .eint_wake_mask_value = EXYNOS_EINT_WAKEUP_MASK_DISABLED, - /* Only difference with exynos4210_wkup_irq_chip: */ + /* Only differences with exynos4210_wkup_irq_chip: */ .eint_wake_mask_reg = S5PV210_EINT_WAKEUP_MASK, + .set_eint_wakeup_mask = s5pv210_pinctrl_set_eint_wakeup_mask, }; static const struct exynos_irq_chip exynos4210_wkup_irq_chip __initconst = { @@ -380,6 +425,7 @@ static const struct exynos_irq_chip exynos4210_wkup_irq_chip __initconst = { .eint_pend = EXYNOS_WKUP_EPEND_OFFSET, .eint_wake_mask_value = EXYNOS_EINT_WAKEUP_MASK_DISABLED, .eint_wake_mask_reg = EXYNOS_EINT_WAKEUP_MASK, + .set_eint_wakeup_mask = exynos_pinctrl_set_eint_wakeup_mask, }; static const struct exynos_irq_chip exynos7_wkup_irq_chip __initconst = { @@ -398,6 +444,7 @@ static const struct exynos_irq_chip exynos7_wkup_irq_chip __initconst = { .eint_pend = EXYNOS7_WKUP_EPEND_OFFSET, .eint_wake_mask_value = EXYNOS_EINT_WAKEUP_MASK_DISABLED, .eint_wake_mask_reg = EXYNOS5433_EINT_WAKEUP_MASK, + .set_eint_wakeup_mask = exynos_pinctrl_set_eint_wakeup_mask, }; /* list of external wakeup controllers supported */ @@ -574,27 +621,6 @@ int exynos_eint_wkup_init(struct samsung_pinctrl_drv_data *d) return 0; } -static void -exynos_pinctrl_set_eint_wakeup_mask(struct samsung_pinctrl_drv_data *drvdata, - struct exynos_irq_chip *irq_chip) -{ - struct regmap *pmu_regs; - - if (!drvdata->retention_ctrl || !drvdata->retention_ctrl->priv) { - dev_warn(drvdata->dev, - "No retention data configured bank with external wakeup interrupt. 
Wake-up mask will not be set.\n"); - return; - } - - pmu_regs = drvdata->retention_ctrl->priv; - dev_info(drvdata->dev, - "Setting external wakeup interrupt mask: 0x%x\n", - irq_chip->eint_wake_mask_value); - - regmap_write(pmu_regs, irq_chip->eint_wake_mask_reg, - irq_chip->eint_wake_mask_value); -} - static void exynos_pinctrl_suspend_bank( struct samsung_pinctrl_drv_data *drvdata, struct samsung_pin_bank *bank) @@ -608,10 +634,13 @@ static void exynos_pinctrl_suspend_bank( + 2 * bank->eint_offset); save->eint_fltcon1 = readl(regs + EXYNOS_GPIO_EFLTCON_OFFSET + 2 * bank->eint_offset + 4); + save->eint_mask = readl(regs + bank->irq_chip->eint_mask + + bank->eint_offset); pr_debug("%s: save con %#010x\n", bank->name, save->eint_con); pr_debug("%s: save fltcon0 %#010x\n", bank->name, save->eint_fltcon0); pr_debug("%s: save fltcon1 %#010x\n", bank->name, save->eint_fltcon1); + pr_debug("%s: save mask %#010x\n", bank->name, save->eint_mask); } void exynos_pinctrl_suspend(struct samsung_pinctrl_drv_data *drvdata) @@ -626,8 +655,8 @@ void exynos_pinctrl_suspend(struct samsung_pinctrl_drv_data *drvdata) else if (bank->eint_type == EINT_TYPE_WKUP) { if (!irq_chip) { irq_chip = bank->irq_chip; - exynos_pinctrl_set_eint_wakeup_mask(drvdata, - irq_chip); + irq_chip->set_eint_wakeup_mask(drvdata, + irq_chip); } else if (bank->irq_chip != irq_chip) { dev_warn(drvdata->dev, "More than one external wakeup interrupt chip configured (bank: %s). This is not supported by hardware nor by driver.\n", @@ -653,6 +682,9 @@ static void exynos_pinctrl_resume_bank( pr_debug("%s: fltcon1 %#010x => %#010x\n", bank->name, readl(regs + EXYNOS_GPIO_EFLTCON_OFFSET + 2 * bank->eint_offset + 4), save->eint_fltcon1); + pr_debug("%s: mask %#010x => %#010x\n", bank->name, + readl(regs + bank->irq_chip->eint_mask + + bank->eint_offset), save->eint_mask); writel(save->eint_con, regs + EXYNOS_GPIO_ECON_OFFSET + bank->eint_offset); @@ -660,6 +692,8 @@ static void exynos_pinctrl_resume_bank( + 2 * bank->eint_offset); writel(save->eint_fltcon1, regs + EXYNOS_GPIO_EFLTCON_OFFSET + 2 * bank->eint_offset + 4); + writel(save->eint_mask, regs + bank->irq_chip->eint_mask + + bank->eint_offset); } void exynos_pinctrl_resume(struct samsung_pinctrl_drv_data *drvdata) diff --git a/drivers/pinctrl/tegra/pinctrl-tegra.c b/drivers/pinctrl/tegra/pinctrl-tegra.c index e9a7cbb9aa33..01bcef2c01bc 100644 --- a/drivers/pinctrl/tegra/pinctrl-tegra.c +++ b/drivers/pinctrl/tegra/pinctrl-tegra.c @@ -685,8 +685,8 @@ static int tegra_pinctrl_resume(struct device *dev) } const struct dev_pm_ops tegra_pinctrl_pm = { - .suspend = &tegra_pinctrl_suspend, - .resume = &tegra_pinctrl_resume + .suspend_noirq = &tegra_pinctrl_suspend, + .resume_noirq = &tegra_pinctrl_resume }; static bool gpio_node_has_range(const char *compatible) diff --git a/drivers/platform/chrome/cros_ec_ishtp.c b/drivers/platform/chrome/cros_ec_ishtp.c index 25ca2c894b4d..ab0662a33b41 100644 --- a/drivers/platform/chrome/cros_ec_ishtp.c +++ b/drivers/platform/chrome/cros_ec_ishtp.c @@ -645,8 +645,10 @@ static int cros_ec_ishtp_probe(struct ishtp_cl_device *cl_device) /* Register croc_ec_dev mfd */ rv = cros_ec_dev_init(client_data); - if (rv) + if (rv) { + down_write(&init_lock); goto end_cros_ec_dev_init_error; + } return 0; diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c index 0d42477946f3..59b78a181723 100644 --- a/drivers/platform/x86/asus-nb-wmi.c +++ b/drivers/platform/x86/asus-nb-wmi.c @@ -110,6 +110,16 @@ static struct quirk_entry 
quirk_asus_forceals = { .wmi_force_als_set = true, }; +static struct quirk_entry quirk_asus_ga401i = { + .wmi_backlight_power = true, + .wmi_backlight_set_devstate = true, +}; + +static struct quirk_entry quirk_asus_ga502i = { + .wmi_backlight_power = true, + .wmi_backlight_set_devstate = true, +}; + static int dmi_matched(const struct dmi_system_id *dmi) { pr_info("Identified laptop model '%s'\n", dmi->ident); @@ -411,6 +421,78 @@ static const struct dmi_system_id asus_quirks[] = { }, .driver_data = &quirk_asus_forceals, }, + { + .callback = dmi_matched, + .ident = "ASUSTeK COMPUTER INC. GA401IH", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "GA401IH"), + }, + .driver_data = &quirk_asus_ga401i, + }, + { + .callback = dmi_matched, + .ident = "ASUSTeK COMPUTER INC. GA401II", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "GA401II"), + }, + .driver_data = &quirk_asus_ga401i, + }, + { + .callback = dmi_matched, + .ident = "ASUSTeK COMPUTER INC. GA401IU", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "GA401IU"), + }, + .driver_data = &quirk_asus_ga401i, + }, + { + .callback = dmi_matched, + .ident = "ASUSTeK COMPUTER INC. GA401IV", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "GA401IV"), + }, + .driver_data = &quirk_asus_ga401i, + }, + { + .callback = dmi_matched, + .ident = "ASUSTeK COMPUTER INC. GA401IVC", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "GA401IVC"), + }, + .driver_data = &quirk_asus_ga401i, + }, + { + .callback = dmi_matched, + .ident = "ASUSTeK COMPUTER INC. GA502II", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "GA502II"), + }, + .driver_data = &quirk_asus_ga502i, + }, + { + .callback = dmi_matched, + .ident = "ASUSTeK COMPUTER INC. GA502IU", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "GA502IU"), + }, + .driver_data = &quirk_asus_ga502i, + }, + { + .callback = dmi_matched, + .ident = "ASUSTeK COMPUTER INC. GA502IV", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "GA502IV"), + }, + .driver_data = &quirk_asus_ga502i, + }, {}, }; diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c index 41e28552b2ce..ed83fb135bab 100644 --- a/drivers/platform/x86/asus-wmi.c +++ b/drivers/platform/x86/asus-wmi.c @@ -111,6 +111,8 @@ struct bios_args { u32 arg0; u32 arg1; u32 arg2; /* At least TUF Gaming series uses 3 dword input buffer. */ + u32 arg4; + u32 arg5; } __packed; /* @@ -422,6 +424,7 @@ static int asus_wmi_battery_add(struct power_supply *battery) * battery is named BATT. 
*/ if (strcmp(battery->desc->name, "BAT0") != 0 && + strcmp(battery->desc->name, "BAT1") != 0 && strcmp(battery->desc->name, "BATT") != 0) return -ENODEV; diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c index 74e988f839e8..4c1dd1d4e60b 100644 --- a/drivers/platform/x86/dell-laptop.c +++ b/drivers/platform/x86/dell-laptop.c @@ -2204,10 +2204,13 @@ static int __init dell_init(void) dell_laptop_register_notifier(&dell_laptop_notifier); - micmute_led_cdev.brightness = ledtrig_audio_get(LED_AUDIO_MICMUTE); - ret = led_classdev_register(&platform_device->dev, &micmute_led_cdev); - if (ret < 0) - goto fail_led; + if (dell_smbios_find_token(GLOBAL_MIC_MUTE_DISABLE) && + dell_smbios_find_token(GLOBAL_MIC_MUTE_ENABLE)) { + micmute_led_cdev.brightness = ledtrig_audio_get(LED_AUDIO_MICMUTE); + ret = led_classdev_register(&platform_device->dev, &micmute_led_cdev); + if (ret < 0) + goto fail_led; + } if (acpi_video_get_backlight_type() != acpi_backlight_vendor) return 0; diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c index a881b709af25..a44a2ec33287 100644 --- a/drivers/platform/x86/hp-wmi.c +++ b/drivers/platform/x86/hp-wmi.c @@ -461,8 +461,14 @@ static ssize_t postcode_show(struct device *dev, struct device_attribute *attr, static ssize_t als_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - u32 tmp = simple_strtoul(buf, NULL, 10); - int ret = hp_wmi_perform_query(HPWMI_ALS_QUERY, HPWMI_WRITE, &tmp, + u32 tmp; + int ret; + + ret = kstrtou32(buf, 10, &tmp); + if (ret) + return ret; + + ret = hp_wmi_perform_query(HPWMI_ALS_QUERY, HPWMI_WRITE, &tmp, sizeof(tmp), sizeof(tmp)); if (ret) return ret < 0 ? ret : -EINVAL; diff --git a/drivers/platform/x86/intel-hid.c b/drivers/platform/x86/intel-hid.c index ef6d4bd77b1a..ad1399dcb21f 100644 --- a/drivers/platform/x86/intel-hid.c +++ b/drivers/platform/x86/intel-hid.c @@ -77,6 +77,13 @@ static const struct dmi_system_id button_array_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "Wacom MobileStudio Pro 16"), }, }, + { + .ident = "HP Spectre x2 (2015)", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "HP"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP Spectre x2 Detachable"), + }, + }, { } }; @@ -563,7 +570,7 @@ check_acpi_dev(acpi_handle handle, u32 lvl, void *context, void **rv) return AE_OK; if (acpi_match_device_ids(dev, ids) == 0) - if (acpi_create_platform_device(dev, NULL)) + if (!IS_ERR_OR_NULL(acpi_create_platform_device(dev, NULL))) dev_info(&dev->dev, "intel-hid: created platform device\n"); diff --git a/drivers/platform/x86/intel-vbtn.c b/drivers/platform/x86/intel-vbtn.c index b74932307d69..3393ee95077f 100644 --- a/drivers/platform/x86/intel-vbtn.c +++ b/drivers/platform/x86/intel-vbtn.c @@ -39,28 +39,51 @@ static const struct key_entry intel_vbtn_keymap[] = { { KE_IGNORE, 0xC7, { KEY_VOLUMEDOWN } }, /* volume-down key release */ { KE_KEY, 0xC8, { KEY_ROTATE_LOCK_TOGGLE } }, /* rotate-lock key press */ { KE_KEY, 0xC9, { KEY_ROTATE_LOCK_TOGGLE } }, /* rotate-lock key release */ +}; + +static const struct key_entry intel_vbtn_switchmap[] = { { KE_SW, 0xCA, { .sw = { SW_DOCK, 1 } } }, /* Docked */ { KE_SW, 0xCB, { .sw = { SW_DOCK, 0 } } }, /* Undocked */ { KE_SW, 0xCC, { .sw = { SW_TABLET_MODE, 1 } } }, /* Tablet */ { KE_SW, 0xCD, { .sw = { SW_TABLET_MODE, 0 } } }, /* Laptop */ - { KE_END }, }; +#define KEYMAP_LEN \ + (ARRAY_SIZE(intel_vbtn_keymap) + ARRAY_SIZE(intel_vbtn_switchmap) + 1) + struct intel_vbtn_priv { + struct key_entry keymap[KEYMAP_LEN]; struct input_dev 
*input_dev; + bool has_switches; bool wakeup_mode; }; static int intel_vbtn_input_setup(struct platform_device *device) { struct intel_vbtn_priv *priv = dev_get_drvdata(&device->dev); - int ret; + int ret, keymap_len = 0; + + if (true) { + memcpy(&priv->keymap[keymap_len], intel_vbtn_keymap, + ARRAY_SIZE(intel_vbtn_keymap) * + sizeof(struct key_entry)); + keymap_len += ARRAY_SIZE(intel_vbtn_keymap); + } + + if (priv->has_switches) { + memcpy(&priv->keymap[keymap_len], intel_vbtn_switchmap, + ARRAY_SIZE(intel_vbtn_switchmap) * + sizeof(struct key_entry)); + keymap_len += ARRAY_SIZE(intel_vbtn_switchmap); + } + + priv->keymap[keymap_len].type = KE_END; priv->input_dev = devm_input_allocate_device(&device->dev); if (!priv->input_dev) return -ENOMEM; - ret = sparse_keymap_setup(priv->input_dev, intel_vbtn_keymap, NULL); + ret = sparse_keymap_setup(priv->input_dev, priv->keymap, NULL); if (ret) return ret; @@ -115,31 +138,40 @@ out_unknown: static void detect_tablet_mode(struct platform_device *device) { - const char *chassis_type = dmi_get_system_info(DMI_CHASSIS_TYPE); struct intel_vbtn_priv *priv = dev_get_drvdata(&device->dev); acpi_handle handle = ACPI_HANDLE(&device->dev); - struct acpi_buffer vgbs_output = { ACPI_ALLOCATE_BUFFER, NULL }; - union acpi_object *obj; + unsigned long long vgbs; acpi_status status; int m; - if (!(chassis_type && strcmp(chassis_type, "31") == 0)) - goto out; - - status = acpi_evaluate_object(handle, "VGBS", NULL, &vgbs_output); + status = acpi_evaluate_integer(handle, "VGBS", NULL, &vgbs); if (ACPI_FAILURE(status)) - goto out; + return; - obj = vgbs_output.pointer; - if (!(obj && obj->type == ACPI_TYPE_INTEGER)) - goto out; - - m = !(obj->integer.value & TABLET_MODE_FLAG); + m = !(vgbs & TABLET_MODE_FLAG); input_report_switch(priv->input_dev, SW_TABLET_MODE, m); - m = (obj->integer.value & DOCK_MODE_FLAG) ? 1 : 0; + m = (vgbs & DOCK_MODE_FLAG) ? 1 : 0; input_report_switch(priv->input_dev, SW_DOCK, m); -out: - kfree(vgbs_output.pointer); +} + +static bool intel_vbtn_has_switches(acpi_handle handle) +{ + const char *chassis_type = dmi_get_system_info(DMI_CHASSIS_TYPE); + unsigned long long vgbs; + acpi_status status; + + /* + * Some normal laptops have a VGBS method despite being non-convertible + * and their VGBS method always returns 0, causing detect_tablet_mode() + * to report SW_TABLET_MODE=1 to userspace, which causes issues. + * These laptops have a DMI chassis_type of 9 ("Laptop"), do not report + * switches on any devices with a DMI chassis_type of 9. 
+ */ + if (chassis_type && strcmp(chassis_type, "9") == 0) + return false; + + status = acpi_evaluate_integer(handle, "VGBS", NULL, &vgbs); + return ACPI_SUCCESS(status); } static int intel_vbtn_probe(struct platform_device *device) @@ -160,13 +192,16 @@ static int intel_vbtn_probe(struct platform_device *device) return -ENOMEM; dev_set_drvdata(&device->dev, priv); + priv->has_switches = intel_vbtn_has_switches(handle); + err = intel_vbtn_input_setup(device); if (err) { pr_err("Failed to setup Intel Virtual Button\n"); return err; } - detect_tablet_mode(device); + if (priv->has_switches) + detect_tablet_mode(device); status = acpi_install_notify_handler(handle, ACPI_DEVICE_NOTIFY, @@ -251,7 +286,7 @@ check_acpi_dev(acpi_handle handle, u32 lvl, void *context, void **rv) return AE_OK; if (acpi_match_device_ids(dev, ids) == 0) - if (acpi_create_platform_device(dev, NULL)) + if (!IS_ERR_OR_NULL(acpi_create_platform_device(dev, NULL))) dev_info(&dev->dev, "intel-vbtn: created platform device\n"); diff --git a/drivers/platform/x86/intel_speed_select_if/isst_if_common.h b/drivers/platform/x86/intel_speed_select_if/isst_if_common.h index 1409a5bb5582..4f6f7f0761fc 100644 --- a/drivers/platform/x86/intel_speed_select_if/isst_if_common.h +++ b/drivers/platform/x86/intel_speed_select_if/isst_if_common.h @@ -13,6 +13,9 @@ #define INTEL_RAPL_PRIO_DEVID_0 0x3451 #define INTEL_CFG_MBOX_DEVID_0 0x3459 +#define INTEL_RAPL_PRIO_DEVID_1 0x3251 +#define INTEL_CFG_MBOX_DEVID_1 0x3259 + /* * Validate maximum commands in a single request. * This is enough to handle command to every core in one ioctl, or all diff --git a/drivers/platform/x86/intel_speed_select_if/isst_if_mbox_pci.c b/drivers/platform/x86/intel_speed_select_if/isst_if_mbox_pci.c index de4169d0796b..9a055fd54053 100644 --- a/drivers/platform/x86/intel_speed_select_if/isst_if_mbox_pci.c +++ b/drivers/platform/x86/intel_speed_select_if/isst_if_mbox_pci.c @@ -148,6 +148,7 @@ static long isst_if_mbox_proc_cmd(u8 *cmd_ptr, int *write_only, int resume) static const struct pci_device_id isst_if_mbox_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_INTEL, INTEL_CFG_MBOX_DEVID_0)}, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, INTEL_CFG_MBOX_DEVID_1)}, { 0 }, }; MODULE_DEVICE_TABLE(pci, isst_if_mbox_ids); diff --git a/drivers/platform/x86/intel_speed_select_if/isst_if_mmio.c b/drivers/platform/x86/intel_speed_select_if/isst_if_mmio.c index ad8c7c0df4d9..e3778204b7a6 100644 --- a/drivers/platform/x86/intel_speed_select_if/isst_if_mmio.c +++ b/drivers/platform/x86/intel_speed_select_if/isst_if_mmio.c @@ -72,6 +72,7 @@ static long isst_if_mmio_rd_wr(u8 *cmd_ptr, int *write_only, int resume) static const struct pci_device_id isst_if_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_INTEL, INTEL_RAPL_PRIO_DEVID_0)}, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, INTEL_RAPL_PRIO_DEVID_1)}, { 0 }, }; MODULE_DEVICE_TABLE(pci, isst_if_ids); diff --git a/drivers/power/reset/vexpress-poweroff.c b/drivers/power/reset/vexpress-poweroff.c index 90cbaa8341e3..0bf9ab8653ae 100644 --- a/drivers/power/reset/vexpress-poweroff.c +++ b/drivers/power/reset/vexpress-poweroff.c @@ -143,6 +143,7 @@ static struct platform_driver vexpress_reset_driver = { .driver = { .name = "vexpress-reset", .of_match_table = vexpress_reset_of_match, + .suppress_bind_attrs = true, }, }; diff --git a/drivers/power/supply/88pm860x_battery.c b/drivers/power/supply/88pm860x_battery.c index 5ca047b3f58f..23e7d6447ae9 100644 --- a/drivers/power/supply/88pm860x_battery.c +++ b/drivers/power/supply/88pm860x_battery.c @@ -433,7 +433,7 @@ static void 
pm860x_init_battery(struct pm860x_battery_info *info) int ret; int data; int bat_remove; - int soc; + int soc = 0; /* measure enable on GPADC1 */ data = MEAS1_GP1; @@ -496,7 +496,9 @@ static void pm860x_init_battery(struct pm860x_battery_info *info) } mutex_unlock(&info->lock); - calc_soc(info, OCV_MODE_ACTIVE, &soc); + ret = calc_soc(info, OCV_MODE_ACTIVE, &soc); + if (ret < 0) + goto out; data = pm860x_reg_read(info->i2c, PM8607_POWER_UP_LOG); bat_remove = data & BAT_WU_LOG; diff --git a/drivers/power/supply/Kconfig b/drivers/power/supply/Kconfig index 67cf7d28eed1..a074fc75c14e 100644 --- a/drivers/power/supply/Kconfig +++ b/drivers/power/supply/Kconfig @@ -585,7 +585,7 @@ config CHARGER_BQ24257 tristate "TI BQ24250/24251/24257 battery charger driver" depends on I2C depends on GPIOLIB || COMPILE_TEST - depends on REGMAP_I2C + select REGMAP_I2C help Say Y to enable support for the TI BQ24250, BQ24251, and BQ24257 battery chargers. diff --git a/drivers/power/supply/lp8788-charger.c b/drivers/power/supply/lp8788-charger.c index 84a206f42a8e..e7931ffb7151 100644 --- a/drivers/power/supply/lp8788-charger.c +++ b/drivers/power/supply/lp8788-charger.c @@ -572,27 +572,14 @@ static void lp8788_setup_adc_channel(struct device *dev, return; /* ADC channel for battery voltage */ - chan = iio_channel_get(dev, pdata->adc_vbatt); + chan = devm_iio_channel_get(dev, pdata->adc_vbatt); pchg->chan[LP8788_VBATT] = IS_ERR(chan) ? NULL : chan; /* ADC channel for battery temperature */ - chan = iio_channel_get(dev, pdata->adc_batt_temp); + chan = devm_iio_channel_get(dev, pdata->adc_batt_temp); pchg->chan[LP8788_BATT_TEMP] = IS_ERR(chan) ? NULL : chan; } -static void lp8788_release_adc_channel(struct lp8788_charger *pchg) -{ - int i; - - for (i = 0; i < LP8788_NUM_CHG_ADC; i++) { - if (!pchg->chan[i]) - continue; - - iio_channel_release(pchg->chan[i]); - pchg->chan[i] = NULL; - } -} - static ssize_t lp8788_show_charger_status(struct device *dev, struct device_attribute *attr, char *buf) { @@ -735,7 +722,6 @@ static int lp8788_charger_remove(struct platform_device *pdev) flush_work(&pchg->charger_work); lp8788_irq_unregister(pdev, pchg); lp8788_psy_unregister(pchg); - lp8788_release_adc_channel(pchg); return 0; } diff --git a/drivers/power/supply/power_supply_hwmon.c b/drivers/power/supply/power_supply_hwmon.c index 75cf861ba492..2e7e2b73b012 100644 --- a/drivers/power/supply/power_supply_hwmon.c +++ b/drivers/power/supply/power_supply_hwmon.c @@ -144,7 +144,7 @@ static int power_supply_hwmon_read_string(struct device *dev, u32 attr, int channel, const char **str) { - *str = channel ? "temp" : "temp ambient"; + *str = channel ? 
"temp ambient" : "temp"; return 0; } @@ -304,7 +304,7 @@ int power_supply_add_hwmon_sysfs(struct power_supply *psy) goto error; } - ret = devm_add_action(dev, power_supply_hwmon_bitmap_free, + ret = devm_add_action_or_reset(dev, power_supply_hwmon_bitmap_free, psyhw->props); if (ret) goto error; diff --git a/drivers/power/supply/smb347-charger.c b/drivers/power/supply/smb347-charger.c index c1d124b8be0c..d102921b3ab2 100644 --- a/drivers/power/supply/smb347-charger.c +++ b/drivers/power/supply/smb347-charger.c @@ -1138,6 +1138,7 @@ static bool smb347_volatile_reg(struct device *dev, unsigned int reg) switch (reg) { case IRQSTAT_A: case IRQSTAT_C: + case IRQSTAT_D: case IRQSTAT_E: case IRQSTAT_F: case STAT_A: diff --git a/drivers/pwm/pwm-bcm-iproc.c b/drivers/pwm/pwm-bcm-iproc.c index 1f829edd8ee7..d392a828fc49 100644 --- a/drivers/pwm/pwm-bcm-iproc.c +++ b/drivers/pwm/pwm-bcm-iproc.c @@ -85,8 +85,6 @@ static void iproc_pwmc_get_state(struct pwm_chip *chip, struct pwm_device *pwm, u64 tmp, multi, rate; u32 value, prescale; - rate = clk_get_rate(ip->clk); - value = readl(ip->base + IPROC_PWM_CTRL_OFFSET); if (value & BIT(IPROC_PWM_CTRL_EN_SHIFT(pwm->hwpwm))) @@ -99,6 +97,13 @@ static void iproc_pwmc_get_state(struct pwm_chip *chip, struct pwm_device *pwm, else state->polarity = PWM_POLARITY_INVERSED; + rate = clk_get_rate(ip->clk); + if (rate == 0) { + state->period = 0; + state->duty_cycle = 0; + return; + } + value = readl(ip->base + IPROC_PWM_PRESCALE_OFFSET); prescale = value >> IPROC_PWM_PRESCALE_SHIFT(pwm->hwpwm); prescale &= IPROC_PWM_PRESCALE_MAX; diff --git a/drivers/pwm/pwm-img.c b/drivers/pwm/pwm-img.c index c9e57bd109fb..599a0f66a384 100644 --- a/drivers/pwm/pwm-img.c +++ b/drivers/pwm/pwm-img.c @@ -129,8 +129,10 @@ static int img_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, duty = DIV_ROUND_UP(timebase * duty_ns, period_ns); ret = pm_runtime_get_sync(chip->dev); - if (ret < 0) + if (ret < 0) { + pm_runtime_put_autosuspend(chip->dev); return ret; + } val = img_pwm_readl(pwm_chip, PWM_CTRL_CFG); val &= ~(PWM_CTRL_CFG_DIV_MASK << PWM_CTRL_CFG_DIV_SHIFT(pwm->hwpwm)); @@ -331,8 +333,10 @@ static int img_pwm_remove(struct platform_device *pdev) int ret; ret = pm_runtime_get_sync(&pdev->dev); - if (ret < 0) + if (ret < 0) { + pm_runtime_put(&pdev->dev); return ret; + } for (i = 0; i < pwm_chip->chip.npwm; i++) { val = img_pwm_readl(pwm_chip, PWM_CTRL_CFG); diff --git a/drivers/pwm/pwm-jz4740.c b/drivers/pwm/pwm-jz4740.c index 9d78cc21cb12..77c28313e95f 100644 --- a/drivers/pwm/pwm-jz4740.c +++ b/drivers/pwm/pwm-jz4740.c @@ -92,11 +92,12 @@ static int jz4740_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, { struct jz4740_pwm_chip *jz4740 = to_jz4740(pwm->chip); unsigned long long tmp; - unsigned long period, duty; + unsigned long rate, period, duty; unsigned int prescaler = 0; uint16_t ctrl; - tmp = (unsigned long long)clk_get_rate(jz4740->clk) * state->period; + rate = clk_get_rate(jz4740->clk); + tmp = rate * state->period; do_div(tmp, 1000000000); period = tmp; @@ -108,8 +109,8 @@ static int jz4740_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, if (prescaler == 6) return -EINVAL; - tmp = (unsigned long long)period * state->duty_cycle; - do_div(tmp, state->period); + tmp = (unsigned long long)rate * state->duty_cycle; + do_div(tmp, NSEC_PER_SEC); duty = period - tmp; if (duty >= period) diff --git a/drivers/rapidio/Kconfig b/drivers/rapidio/Kconfig index 677d1aff61b7..788e7830771b 100644 --- a/drivers/rapidio/Kconfig +++ b/drivers/rapidio/Kconfig @@ 
-37,7 +37,7 @@ config RAPIDIO_ENABLE_RX_TX_PORTS config RAPIDIO_DMA_ENGINE bool "DMA Engine support for RapidIO" depends on RAPIDIO - select DMADEVICES + depends on DMADEVICES select DMA_ENGINE help Say Y here if you want to use DMA Engine frameork for RapidIO data diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index 8cfb35b58f7a..df75edbe734e 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c @@ -235,8 +235,8 @@ static bool regulator_supply_is_couple(struct regulator_dev *rdev) static void regulator_unlock_recursive(struct regulator_dev *rdev, unsigned int n_coupled) { - struct regulator_dev *c_rdev; - int i; + struct regulator_dev *c_rdev, *supply_rdev; + int i, supply_n_coupled; for (i = n_coupled; i > 0; i--) { c_rdev = rdev->coupling_desc.coupled_rdevs[i - 1]; @@ -244,10 +244,13 @@ static void regulator_unlock_recursive(struct regulator_dev *rdev, if (!c_rdev) continue; - if (c_rdev->supply && !regulator_supply_is_couple(c_rdev)) - regulator_unlock_recursive( - c_rdev->supply->rdev, - c_rdev->coupling_desc.n_coupled); + if (c_rdev->supply && !regulator_supply_is_couple(c_rdev)) { + supply_rdev = c_rdev->supply->rdev; + supply_n_coupled = supply_rdev->coupling_desc.n_coupled; + + regulator_unlock_recursive(supply_rdev, + supply_n_coupled); + } regulator_unlock(c_rdev); } @@ -1456,7 +1459,7 @@ static int set_consumer_device_supply(struct regulator_dev *rdev, const char *consumer_dev_name, const char *supply) { - struct regulator_map *node; + struct regulator_map *node, *new_node; int has_dev; if (supply == NULL) @@ -1467,6 +1470,22 @@ static int set_consumer_device_supply(struct regulator_dev *rdev, else has_dev = 0; + new_node = kzalloc(sizeof(struct regulator_map), GFP_KERNEL); + if (new_node == NULL) + return -ENOMEM; + + new_node->regulator = rdev; + new_node->supply = supply; + + if (has_dev) { + new_node->dev_name = kstrdup(consumer_dev_name, GFP_KERNEL); + if (new_node->dev_name == NULL) { + kfree(new_node); + return -ENOMEM; + } + } + + mutex_lock(®ulator_list_mutex); list_for_each_entry(node, ®ulator_map_list, list) { if (node->dev_name && consumer_dev_name) { if (strcmp(node->dev_name, consumer_dev_name) != 0) @@ -1484,26 +1503,19 @@ static int set_consumer_device_supply(struct regulator_dev *rdev, node->regulator->desc->name, supply, dev_name(&rdev->dev), rdev_get_name(rdev)); - return -EBUSY; + goto fail; } - node = kzalloc(sizeof(struct regulator_map), GFP_KERNEL); - if (node == NULL) - return -ENOMEM; + list_add(&new_node->list, ®ulator_map_list); + mutex_unlock(®ulator_list_mutex); - node->regulator = rdev; - node->supply = supply; - - if (has_dev) { - node->dev_name = kstrdup(consumer_dev_name, GFP_KERNEL); - if (node->dev_name == NULL) { - kfree(node); - return -ENOMEM; - } - } - - list_add(&node->list, ®ulator_map_list); return 0; + +fail: + mutex_unlock(®ulator_list_mutex); + kfree(new_node->dev_name); + kfree(new_node); + return -EBUSY; } static void unset_regulator_supplies(struct regulator_dev *rdev) @@ -1575,44 +1587,53 @@ static struct regulator *create_regulator(struct regulator_dev *rdev, const char *supply_name) { struct regulator *regulator; - char buf[REG_STR_SIZE]; - int err, size; + int err; + + if (dev) { + char buf[REG_STR_SIZE]; + int size; + + size = snprintf(buf, REG_STR_SIZE, "%s-%s", + dev->kobj.name, supply_name); + if (size >= REG_STR_SIZE) + return NULL; + + supply_name = kstrdup(buf, GFP_KERNEL); + if (supply_name == NULL) + return NULL; + } else { + supply_name = kstrdup_const(supply_name, GFP_KERNEL); + if 
(supply_name == NULL) + return NULL; + } regulator = kzalloc(sizeof(*regulator), GFP_KERNEL); - if (regulator == NULL) + if (regulator == NULL) { + kfree(supply_name); return NULL; + } + + regulator->rdev = rdev; + regulator->supply_name = supply_name; regulator_lock(rdev); - regulator->rdev = rdev; list_add(®ulator->list, &rdev->consumer_list); + regulator_unlock(rdev); if (dev) { regulator->dev = dev; /* Add a link to the device sysfs entry */ - size = snprintf(buf, REG_STR_SIZE, "%s-%s", - dev->kobj.name, supply_name); - if (size >= REG_STR_SIZE) - goto overflow_err; - - regulator->supply_name = kstrdup(buf, GFP_KERNEL); - if (regulator->supply_name == NULL) - goto overflow_err; - err = sysfs_create_link_nowarn(&rdev->dev.kobj, &dev->kobj, - buf); + supply_name); if (err) { rdev_dbg(rdev, "could not add device link %s err %d\n", dev->kobj.name, err); /* non-fatal */ } - } else { - regulator->supply_name = kstrdup_const(supply_name, GFP_KERNEL); - if (regulator->supply_name == NULL) - goto overflow_err; } - regulator->debugfs = debugfs_create_dir(regulator->supply_name, + regulator->debugfs = debugfs_create_dir(supply_name, rdev->debugfs); if (!regulator->debugfs) { rdev_dbg(rdev, "Failed to create debugfs directory\n"); @@ -1637,13 +1658,7 @@ static struct regulator *create_regulator(struct regulator_dev *rdev, _regulator_is_enabled(rdev)) regulator->always_on = true; - regulator_unlock(rdev); return regulator; -overflow_err: - list_del(®ulator->list); - kfree(regulator); - regulator_unlock(rdev); - return NULL; } static int _regulator_get_enable_time(struct regulator_dev *rdev) @@ -2217,10 +2232,13 @@ EXPORT_SYMBOL_GPL(regulator_bulk_unregister_supply_alias); static int regulator_ena_gpio_request(struct regulator_dev *rdev, const struct regulator_config *config) { - struct regulator_enable_gpio *pin; + struct regulator_enable_gpio *pin, *new_pin; struct gpio_desc *gpiod; gpiod = config->ena_gpiod; + new_pin = kzalloc(sizeof(*new_pin), GFP_KERNEL); + + mutex_lock(®ulator_list_mutex); list_for_each_entry(pin, ®ulator_ena_gpio_list, list) { if (pin->gpiod == gpiod) { @@ -2229,9 +2247,13 @@ static int regulator_ena_gpio_request(struct regulator_dev *rdev, } } - pin = kzalloc(sizeof(struct regulator_enable_gpio), GFP_KERNEL); - if (pin == NULL) + if (new_pin == NULL) { + mutex_unlock(®ulator_list_mutex); return -ENOMEM; + } + + pin = new_pin; + new_pin = NULL; pin->gpiod = gpiod; list_add(&pin->list, ®ulator_ena_gpio_list); @@ -2239,6 +2261,10 @@ static int regulator_ena_gpio_request(struct regulator_dev *rdev, update_ena_gpio_to_rdev: pin->request_count++; rdev->ena_pin = pin; + + mutex_unlock(®ulator_list_mutex); + kfree(new_pin); + return 0; } @@ -4868,13 +4894,9 @@ static void regulator_resolve_coupling(struct regulator_dev *rdev) return; } - regulator_lock(c_rdev); - c_desc->coupled_rdevs[i] = c_rdev; c_desc->n_resolved++; - regulator_unlock(c_rdev); - regulator_resolve_coupling(c_rdev); } } @@ -4959,7 +4981,10 @@ static int regulator_init_coupling(struct regulator_dev *rdev) if (!of_check_coupling_data(rdev)) return -EPERM; + mutex_lock(®ulator_list_mutex); rdev->coupling_desc.coupler = regulator_find_coupler(rdev); + mutex_unlock(®ulator_list_mutex); + if (IS_ERR(rdev->coupling_desc.coupler)) { err = PTR_ERR(rdev->coupling_desc.coupler); rdev_err(rdev, "failed to get coupler: %d\n", err); @@ -5005,7 +5030,6 @@ regulator_register(const struct regulator_desc *regulator_desc, struct regulator_dev *rdev; bool dangling_cfg_gpiod = false; bool dangling_of_gpiod = false; - bool 
reg_device_fail = false; struct device *dev; int ret, i; @@ -5055,6 +5079,7 @@ regulator_register(const struct regulator_desc *regulator_desc, ret = -ENOMEM; goto rinse; } + device_initialize(&rdev->dev); /* * Duplicate the config so the driver could override it after @@ -5062,9 +5087,8 @@ regulator_register(const struct regulator_desc *regulator_desc, */ config = kmemdup(cfg, sizeof(*cfg), GFP_KERNEL); if (config == NULL) { - kfree(rdev); ret = -ENOMEM; - goto rinse; + goto clean; } init_data = regulator_of_get_init_data(dev, regulator_desc, config, @@ -5076,10 +5100,8 @@ regulator_register(const struct regulator_desc *regulator_desc, * from a gpio extender or something else. */ if (PTR_ERR(init_data) == -EPROBE_DEFER) { - kfree(config); - kfree(rdev); ret = -EPROBE_DEFER; - goto rinse; + goto clean; } /* @@ -5120,9 +5142,7 @@ regulator_register(const struct regulator_desc *regulator_desc, } if (config->ena_gpiod) { - mutex_lock(®ulator_list_mutex); ret = regulator_ena_gpio_request(rdev, config); - mutex_unlock(®ulator_list_mutex); if (ret != 0) { rdev_err(rdev, "Failed to request enable GPIO: %d\n", ret); @@ -5138,6 +5158,7 @@ regulator_register(const struct regulator_desc *regulator_desc, rdev->dev.parent = dev; dev_set_name(&rdev->dev, "regulator.%lu", (unsigned long) atomic_inc_return(®ulator_no)); + dev_set_drvdata(&rdev->dev, rdev); /* set regulator constraints */ if (init_data) @@ -5160,27 +5181,22 @@ regulator_register(const struct regulator_desc *regulator_desc, if (ret < 0) goto wash; - mutex_lock(®ulator_list_mutex); ret = regulator_init_coupling(rdev); - mutex_unlock(®ulator_list_mutex); if (ret < 0) goto wash; /* add consumers devices */ if (init_data) { - mutex_lock(®ulator_list_mutex); for (i = 0; i < init_data->num_consumer_supplies; i++) { ret = set_consumer_device_supply(rdev, init_data->consumer_supplies[i].dev_name, init_data->consumer_supplies[i].supply); if (ret < 0) { - mutex_unlock(®ulator_list_mutex); dev_err(dev, "Failed to set supply %s\n", init_data->consumer_supplies[i].supply); goto unset_supplies; } } - mutex_unlock(®ulator_list_mutex); } if (!rdev->desc->ops->get_voltage && @@ -5188,12 +5204,9 @@ regulator_register(const struct regulator_desc *regulator_desc, !rdev->desc->fixed_uV) rdev->is_switch = true; - dev_set_drvdata(&rdev->dev, rdev); - ret = device_register(&rdev->dev); - if (ret != 0) { - reg_device_fail = true; + ret = device_add(&rdev->dev); + if (ret != 0) goto unset_supplies; - } rdev_init_debugfs(rdev); @@ -5215,18 +5228,14 @@ unset_supplies: mutex_unlock(®ulator_list_mutex); wash: kfree(rdev->coupling_desc.coupled_rdevs); - kfree(rdev->constraints); mutex_lock(®ulator_list_mutex); regulator_ena_gpio_free(rdev); mutex_unlock(®ulator_list_mutex); clean: if (dangling_of_gpiod) gpiod_put(config->ena_gpiod); - if (reg_device_fail) - put_device(&rdev->dev); - else - kfree(rdev); kfree(config); + put_device(&rdev->dev); rinse: if (dangling_cfg_gpiod) gpiod_put(cfg->ena_gpiod); diff --git a/drivers/regulator/pfuze100-regulator.c b/drivers/regulator/pfuze100-regulator.c index c34588d9f3aa..0398b57692e9 100644 --- a/drivers/regulator/pfuze100-regulator.c +++ b/drivers/regulator/pfuze100-regulator.c @@ -220,6 +220,19 @@ static const struct regulator_ops pfuze100_swb_regulator_ops = { }; +static const struct regulator_ops pfuze3000_sw_regulator_ops = { + .enable = regulator_enable_regmap, + .disable = regulator_disable_regmap, + .is_enabled = regulator_is_enabled_regmap, + .list_voltage = regulator_list_voltage_table, + .map_voltage = 
regulator_map_voltage_ascend, + .set_voltage_sel = regulator_set_voltage_sel_regmap, + .get_voltage_sel = regulator_get_voltage_sel_regmap, + .set_voltage_time_sel = regulator_set_voltage_time_sel, + .set_ramp_delay = pfuze100_set_ramp_delay, + +}; + #define PFUZE100_FIXED_REG(_chip, _name, base, voltage) \ [_chip ## _ ## _name] = { \ .desc = { \ @@ -329,23 +342,28 @@ static const struct regulator_ops pfuze100_swb_regulator_ops = { .stby_mask = 0x20, \ } - -#define PFUZE3000_SW2_REG(_chip, _name, base, min, max, step) { \ - .desc = { \ - .name = #_name,\ - .n_voltages = ((max) - (min)) / (step) + 1, \ - .ops = &pfuze100_sw_regulator_ops, \ - .type = REGULATOR_VOLTAGE, \ - .id = _chip ## _ ## _name, \ - .owner = THIS_MODULE, \ - .min_uV = (min), \ - .uV_step = (step), \ - .vsel_reg = (base) + PFUZE100_VOL_OFFSET, \ - .vsel_mask = 0x7, \ - }, \ - .stby_reg = (base) + PFUZE100_STANDBY_OFFSET, \ - .stby_mask = 0x7, \ -} +/* No linear case for some switches of PFUZE3000 */ +#define PFUZE3000_SW_REG(_chip, _name, base, mask, voltages) \ + [_chip ## _ ## _name] = { \ + .desc = { \ + .name = #_name, \ + .n_voltages = ARRAY_SIZE(voltages), \ + .ops = &pfuze3000_sw_regulator_ops, \ + .type = REGULATOR_VOLTAGE, \ + .id = _chip ## _ ## _name, \ + .owner = THIS_MODULE, \ + .volt_table = voltages, \ + .vsel_reg = (base) + PFUZE100_VOL_OFFSET, \ + .vsel_mask = (mask), \ + .enable_reg = (base) + PFUZE100_MODE_OFFSET, \ + .enable_mask = 0xf, \ + .enable_val = 0x8, \ + .enable_time = 500, \ + }, \ + .stby_reg = (base) + PFUZE100_STANDBY_OFFSET, \ + .stby_mask = (mask), \ + .sw_reg = true, \ + } #define PFUZE3000_SW3_REG(_chip, _name, base, min, max, step) { \ .desc = { \ @@ -402,9 +420,9 @@ static struct pfuze_regulator pfuze200_regulators[] = { }; static struct pfuze_regulator pfuze3000_regulators[] = { - PFUZE100_SWB_REG(PFUZE3000, SW1A, PFUZE100_SW1ABVOL, 0x1f, pfuze3000_sw1a), + PFUZE3000_SW_REG(PFUZE3000, SW1A, PFUZE100_SW1ABVOL, 0x1f, pfuze3000_sw1a), PFUZE100_SW_REG(PFUZE3000, SW1B, PFUZE100_SW1CVOL, 700000, 1475000, 25000), - PFUZE100_SWB_REG(PFUZE3000, SW2, PFUZE100_SW2VOL, 0x7, pfuze3000_sw2lo), + PFUZE3000_SW_REG(PFUZE3000, SW2, PFUZE100_SW2VOL, 0x7, pfuze3000_sw2lo), PFUZE3000_SW3_REG(PFUZE3000, SW3, PFUZE100_SW3AVOL, 900000, 1650000, 50000), PFUZE100_SWB_REG(PFUZE3000, SWBST, PFUZE100_SWBSTCON1, 0x3, pfuze100_swbst), PFUZE100_SWB_REG(PFUZE3000, VSNVS, PFUZE100_VSNVSVOL, 0x7, pfuze100_vsnvs), @@ -418,8 +436,8 @@ static struct pfuze_regulator pfuze3000_regulators[] = { }; static struct pfuze_regulator pfuze3001_regulators[] = { - PFUZE100_SWB_REG(PFUZE3001, SW1, PFUZE100_SW1ABVOL, 0x1f, pfuze3000_sw1a), - PFUZE100_SWB_REG(PFUZE3001, SW2, PFUZE100_SW2VOL, 0x7, pfuze3000_sw2lo), + PFUZE3000_SW_REG(PFUZE3001, SW1, PFUZE100_SW1ABVOL, 0x1f, pfuze3000_sw1a), + PFUZE3000_SW_REG(PFUZE3001, SW2, PFUZE100_SW2VOL, 0x7, pfuze3000_sw2lo), PFUZE3000_SW3_REG(PFUZE3001, SW3, PFUZE100_SW3AVOL, 900000, 1650000, 50000), PFUZE100_SWB_REG(PFUZE3001, VSNVS, PFUZE100_VSNVSVOL, 0x7, pfuze100_vsnvs), PFUZE100_VGEN_REG(PFUZE3001, VLDO1, PFUZE100_VGEN1VOL, 1800000, 3300000, 100000), diff --git a/drivers/regulator/pwm-regulator.c b/drivers/regulator/pwm-regulator.c index e74e11101fc1..0a9d61a91f43 100644 --- a/drivers/regulator/pwm-regulator.c +++ b/drivers/regulator/pwm-regulator.c @@ -279,7 +279,7 @@ static int pwm_regulator_init_table(struct platform_device *pdev, return ret; } - drvdata->state = -EINVAL; + drvdata->state = -ENOTRECOVERABLE; drvdata->duty_cycle_table = duty_cycle_table; drvdata->desc.ops = 
&pwm_regulator_voltage_table_ops; drvdata->desc.n_voltages = length / sizeof(*duty_cycle_table); diff --git a/drivers/regulator/qcom-rpmh-regulator.c b/drivers/regulator/qcom-rpmh-regulator.c index 0246b6f99fb5..f11e4bfbc91b 100644 --- a/drivers/regulator/qcom-rpmh-regulator.c +++ b/drivers/regulator/qcom-rpmh-regulator.c @@ -832,11 +832,11 @@ static const struct rpmh_vreg_init_data pm8150_vreg_data[] = { RPMH_VREG("ldo10", "ldo%s10", &pmic5_pldo, "vdd-l2-l10"), RPMH_VREG("ldo11", "ldo%s11", &pmic5_nldo, "vdd-l1-l8-l11"), RPMH_VREG("ldo12", "ldo%s12", &pmic5_pldo_lv, "vdd-l7-l12-l14-l15"), - RPMH_VREG("ldo13", "ldo%s13", &pmic5_pldo, "vdd-l13-l6-l17"), + RPMH_VREG("ldo13", "ldo%s13", &pmic5_pldo, "vdd-l13-l16-l17"), RPMH_VREG("ldo14", "ldo%s14", &pmic5_pldo_lv, "vdd-l7-l12-l14-l15"), RPMH_VREG("ldo15", "ldo%s15", &pmic5_pldo_lv, "vdd-l7-l12-l14-l15"), - RPMH_VREG("ldo16", "ldo%s16", &pmic5_pldo, "vdd-l13-l6-l17"), - RPMH_VREG("ldo17", "ldo%s17", &pmic5_pldo, "vdd-l13-l6-l17"), + RPMH_VREG("ldo16", "ldo%s16", &pmic5_pldo, "vdd-l13-l16-l17"), + RPMH_VREG("ldo17", "ldo%s17", &pmic5_pldo, "vdd-l13-l16-l17"), RPMH_VREG("ldo18", "ldo%s18", &pmic5_nldo, "vdd-l3-l4-l5-l18"), {}, }; @@ -857,7 +857,7 @@ static const struct rpmh_vreg_init_data pm8150l_vreg_data[] = { RPMH_VREG("ldo5", "ldo%s5", &pmic5_pldo, "vdd-l4-l5-l6"), RPMH_VREG("ldo6", "ldo%s6", &pmic5_pldo, "vdd-l4-l5-l6"), RPMH_VREG("ldo7", "ldo%s7", &pmic5_pldo, "vdd-l7-l11"), - RPMH_VREG("ldo8", "ldo%s8", &pmic5_pldo_lv, "vdd-l1-l8-l11"), + RPMH_VREG("ldo8", "ldo%s8", &pmic5_pldo_lv, "vdd-l1-l8"), RPMH_VREG("ldo9", "ldo%s9", &pmic5_pldo, "vdd-l9-l10"), RPMH_VREG("ldo10", "ldo%s10", &pmic5_pldo, "vdd-l9-l10"), RPMH_VREG("ldo11", "ldo%s11", &pmic5_pldo, "vdd-l7-l11"), diff --git a/drivers/remoteproc/qcom_q6v5.c b/drivers/remoteproc/qcom_q6v5.c index cb0f4a0be032..eaeb6aee6da5 100644 --- a/drivers/remoteproc/qcom_q6v5.c +++ b/drivers/remoteproc/qcom_q6v5.c @@ -151,6 +151,8 @@ int qcom_q6v5_request_stop(struct qcom_q6v5 *q6v5) { int ret; + q6v5->running = false; + qcom_smem_state_update_bits(q6v5->state, BIT(q6v5->stop_bit), BIT(q6v5->stop_bit)); diff --git a/drivers/remoteproc/qcom_q6v5_mss.c b/drivers/remoteproc/qcom_q6v5_mss.c index 6ba065d5c4d9..a67c55785b4d 100644 --- a/drivers/remoteproc/qcom_q6v5_mss.c +++ b/drivers/remoteproc/qcom_q6v5_mss.c @@ -381,6 +381,12 @@ static int q6v5_load(struct rproc *rproc, const struct firmware *fw) { struct q6v5 *qproc = rproc->priv; + /* MBA is restricted to a maximum size of 1M */ + if (fw->size > qproc->mba_size || fw->size > SZ_1M) { + dev_err(qproc->dev, "MBA firmware load failed\n"); + return -EINVAL; + } + memcpy(qproc->mba_region, fw->data, fw->size); return 0; @@ -1005,7 +1011,13 @@ static int q6v5_mpss_load(struct q6v5 *qproc) goto release_firmware; } - ptr = qproc->mpss_region + offset; + ptr = ioremap_wc(qproc->mpss_phys + offset, phdr->p_memsz); + if (!ptr) { + dev_err(qproc->dev, + "unable to map memory region: %pa+%zx-%x\n", + &qproc->mpss_phys, offset, phdr->p_memsz); + goto release_firmware; + } if (phdr->p_filesz && phdr->p_offset < fw->size) { /* Firmware is large enough to be non-split */ @@ -1014,6 +1026,7 @@ static int q6v5_mpss_load(struct q6v5 *qproc) "failed to load segment %d from truncated file %s\n", i, fw_name); ret = -EINVAL; + iounmap(ptr); goto release_firmware; } @@ -1021,14 +1034,14 @@ static int q6v5_mpss_load(struct q6v5 *qproc) } else if (phdr->p_filesz) { /* Replace "xxx.xxx" with "xxx.bxx" */ sprintf(fw_name + fw_name_len - 3, "b%02d", i); - ret = 
request_firmware(&seg_fw, fw_name, qproc->dev); + ret = request_firmware_into_buf(&seg_fw, fw_name, qproc->dev, + ptr, phdr->p_filesz); if (ret) { dev_err(qproc->dev, "failed to load %s\n", fw_name); + iounmap(ptr); goto release_firmware; } - memcpy(ptr, seg_fw->data, seg_fw->size); - release_firmware(seg_fw); } @@ -1036,6 +1049,7 @@ static int q6v5_mpss_load(struct q6v5 *qproc) memset(ptr + phdr->p_filesz, 0, phdr->p_memsz - phdr->p_filesz); } + iounmap(ptr); size += phdr->p_memsz; } @@ -1075,7 +1089,8 @@ static void qcom_q6v5_dump_segment(struct rproc *rproc, int ret = 0; struct q6v5 *qproc = rproc->priv; unsigned long mask = BIT((unsigned long)segment->priv); - void *ptr = rproc_da_to_va(rproc, segment->da, segment->size); + int offset = segment->da - qproc->mpss_reloc; + void *ptr = NULL; /* Unlock mba before copying segments */ if (!qproc->dump_mba_loaded) { @@ -1089,10 +1104,15 @@ static void qcom_q6v5_dump_segment(struct rproc *rproc, } } - if (!ptr || ret) - memset(dest, 0xff, segment->size); - else + if (!ret) + ptr = ioremap_wc(qproc->mpss_phys + offset, segment->size); + + if (ptr) { memcpy(dest, ptr, segment->size); + iounmap(ptr); + } else { + memset(dest, 0xff, segment->size); + } qproc->dump_segment_mask |= mask; @@ -1393,12 +1413,6 @@ static int q6v5_alloc_memory_region(struct q6v5 *qproc) qproc->mpss_phys = qproc->mpss_reloc = r.start; qproc->mpss_size = resource_size(&r); - qproc->mpss_region = devm_ioremap_wc(qproc->dev, qproc->mpss_phys, qproc->mpss_size); - if (!qproc->mpss_region) { - dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n", - &r.start, qproc->mpss_size); - return -EBUSY; - } return 0; } diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c index b91a0fb1a8ce..8b5a3465d23e 100644 --- a/drivers/remoteproc/remoteproc_core.c +++ b/drivers/remoteproc/remoteproc_core.c @@ -2096,6 +2096,7 @@ struct rproc *rproc_alloc(struct device *dev, const char *name, rproc->dev.type = &rproc_type; rproc->dev.class = &rproc_class; rproc->dev.driver_data = rproc; + idr_init(&rproc->notifyids); /* Assign a unique device index and name */ rproc->index = ida_simple_get(&rproc_dev_index, 0, 0, GFP_KERNEL); @@ -2120,8 +2121,6 @@ struct rproc *rproc_alloc(struct device *dev, const char *name, mutex_init(&rproc->lock); - idr_init(&rproc->notifyids); - INIT_LIST_HEAD(&rproc->carveouts); INIT_LIST_HEAD(&rproc->mappings); INIT_LIST_HEAD(&rproc->traces); diff --git a/drivers/rtc/rtc-goldfish.c b/drivers/rtc/rtc-goldfish.c index 1a3420ee6a4d..d5083b013fbc 100644 --- a/drivers/rtc/rtc-goldfish.c +++ b/drivers/rtc/rtc-goldfish.c @@ -73,6 +73,7 @@ static int goldfish_rtc_set_alarm(struct device *dev, rtc_alarm64 = rtc_tm_to_time64(&alrm->time) * NSEC_PER_SEC; writel((rtc_alarm64 >> 32), base + TIMER_ALARM_HIGH); writel(rtc_alarm64, base + TIMER_ALARM_LOW); + writel(1, base + TIMER_IRQ_ENABLED); } else { /* * if this function was called with enabled=0 diff --git a/drivers/rtc/rtc-mc13xxx.c b/drivers/rtc/rtc-mc13xxx.c index afce2c0b4bd6..d6802e6191cb 100644 --- a/drivers/rtc/rtc-mc13xxx.c +++ b/drivers/rtc/rtc-mc13xxx.c @@ -308,8 +308,10 @@ static int __init mc13xxx_rtc_probe(struct platform_device *pdev) mc13xxx_unlock(mc13xxx); ret = rtc_register_device(priv->rtc); - if (ret) + if (ret) { + mc13xxx_lock(mc13xxx); goto err_irq_request; + } return 0; diff --git a/drivers/rtc/rtc-rv3028.c b/drivers/rtc/rtc-rv3028.c index 2b316661a578..bbdfebd70644 100644 --- a/drivers/rtc/rtc-rv3028.c +++ b/drivers/rtc/rtc-rv3028.c @@ -625,6 +625,8 @@ static int 
rv3028_probe(struct i2c_client *client) return -ENOMEM; rv3028->regmap = devm_regmap_init_i2c(client, &regmap_config); + if (IS_ERR(rv3028->regmap)) + return PTR_ERR(rv3028->regmap); i2c_set_clientdata(client, rv3028); diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c index 8d4971645cf1..f7ae03fd36cb 100644 --- a/drivers/s390/block/dasd_diag.c +++ b/drivers/s390/block/dasd_diag.c @@ -319,7 +319,7 @@ dasd_diag_check_device(struct dasd_device *device) struct dasd_diag_characteristics *rdc_data; struct vtoc_cms_label *label; struct dasd_block *block; - struct dasd_diag_bio bio; + struct dasd_diag_bio *bio; unsigned int sb, bsize; blocknum_t end_block; int rc; @@ -395,29 +395,36 @@ dasd_diag_check_device(struct dasd_device *device) rc = -ENOMEM; goto out; } + bio = kzalloc(sizeof(*bio), GFP_KERNEL); + if (bio == NULL) { + DBF_DEV_EVENT(DBF_WARNING, device, "%s", + "No memory to allocate initialization bio"); + rc = -ENOMEM; + goto out_label; + } rc = 0; end_block = 0; /* try all sizes - needed for ECKD devices */ for (bsize = 512; bsize <= PAGE_SIZE; bsize <<= 1) { mdsk_init_io(device, bsize, 0, &end_block); - memset(&bio, 0, sizeof (struct dasd_diag_bio)); - bio.type = MDSK_READ_REQ; - bio.block_number = private->pt_block + 1; - bio.buffer = label; + memset(bio, 0, sizeof(*bio)); + bio->type = MDSK_READ_REQ; + bio->block_number = private->pt_block + 1; + bio->buffer = label; memset(&private->iob, 0, sizeof (struct dasd_diag_rw_io)); private->iob.dev_nr = rdc_data->dev_nr; private->iob.key = 0; private->iob.flags = 0; /* do synchronous io */ private->iob.block_count = 1; private->iob.interrupt_params = 0; - private->iob.bio_list = &bio; + private->iob.bio_list = bio; private->iob.flaga = DASD_DIAG_FLAGA_DEFAULT; rc = dia250(&private->iob, RW_BIO); if (rc == 3) { pr_warn("%s: A 64-bit DIAG call failed\n", dev_name(&device->cdev->dev)); rc = -EOPNOTSUPP; - goto out_label; + goto out_bio; } mdsk_term_io(device); if (rc == 0) @@ -427,7 +434,7 @@ dasd_diag_check_device(struct dasd_device *device) pr_warn("%s: Accessing the DASD failed because of an incorrect format (rc=%d)\n", dev_name(&device->cdev->dev), rc); rc = -EIO; - goto out_label; + goto out_bio; } /* check for label block */ if (memcmp(label->label_id, DASD_DIAG_CMS1, @@ -457,6 +464,8 @@ dasd_diag_check_device(struct dasd_device *device) (rc == 4) ? ", read-only device" : ""); rc = 0; } +out_bio: + kfree(bio); out_label: free_page((long) label); out: diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c index 831850435c23..5734a78dbb8e 100644 --- a/drivers/s390/cio/css.c +++ b/drivers/s390/cio/css.c @@ -677,6 +677,11 @@ static int slow_eval_known_fn(struct subchannel *sch, void *data) rc = css_evaluate_known_subchannel(sch, 1); if (rc == -EAGAIN) css_schedule_eval(sch->schid); + /* + * The loop might take long time for platforms with lots of + * known devices. Allow scheduling here.
+ */ + cond_resched(); } return 0; } diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h index a58b45df95d7..3b0a4483a252 100644 --- a/drivers/s390/cio/qdio.h +++ b/drivers/s390/cio/qdio.h @@ -372,7 +372,6 @@ static inline int multicast_outbound(struct qdio_q *q) extern u64 last_ai_time; /* prototypes for thin interrupt */ -void qdio_setup_thinint(struct qdio_irq *irq_ptr); int qdio_establish_thinint(struct qdio_irq *irq_ptr); void qdio_shutdown_thinint(struct qdio_irq *irq_ptr); void tiqdio_add_input_queues(struct qdio_irq *irq_ptr); diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c index ee0b3c586211..9dc56aa3ae55 100644 --- a/drivers/s390/cio/qdio_setup.c +++ b/drivers/s390/cio/qdio_setup.c @@ -479,7 +479,6 @@ int qdio_setup_irq(struct qdio_initialize *init_data) setup_queues(irq_ptr, init_data); setup_qib(irq_ptr, init_data); - qdio_setup_thinint(irq_ptr); set_impl_params(irq_ptr, init_data->qib_param_field_format, init_data->qib_param_field, init_data->input_slib_elements, diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c index 93ee067c10ca..ddf780b12d40 100644 --- a/drivers/s390/cio/qdio_thinint.c +++ b/drivers/s390/cio/qdio_thinint.c @@ -268,17 +268,19 @@ int __init tiqdio_register_thinints(void) int qdio_establish_thinint(struct qdio_irq *irq_ptr) { + int rc; + if (!is_thinint_irq(irq_ptr)) return 0; - return set_subchannel_ind(irq_ptr, 0); -} -void qdio_setup_thinint(struct qdio_irq *irq_ptr) -{ - if (!is_thinint_irq(irq_ptr)) - return; irq_ptr->dsci = get_indicator(); DBF_HEX(&irq_ptr->dsci, sizeof(void *)); + + rc = set_subchannel_ind(irq_ptr, 0); + if (rc) + put_indicator(irq_ptr->dsci); + + return rc; } void qdio_shutdown_thinint(struct qdio_irq *irq_ptr) diff --git a/drivers/s390/crypto/zcrypt_ccamisc.c b/drivers/s390/crypto/zcrypt_ccamisc.c index 110fe9d0cb91..03999b06affd 100644 --- a/drivers/s390/crypto/zcrypt_ccamisc.c +++ b/drivers/s390/crypto/zcrypt_ccamisc.c @@ -1684,9 +1684,9 @@ int cca_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain, *nr_apqns = 0; /* fetch status of all crypto cards */ - device_status = kmalloc_array(MAX_ZDEV_ENTRIES_EXT, - sizeof(struct zcrypt_device_status_ext), - GFP_KERNEL); + device_status = kvmalloc_array(MAX_ZDEV_ENTRIES_EXT, + sizeof(struct zcrypt_device_status_ext), + GFP_KERNEL); if (!device_status) return -ENOMEM; zcrypt_device_status_mask_ext(device_status); @@ -1754,7 +1754,7 @@ int cca_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain, verify = 0; } - kfree(device_status); + kvfree(device_status); return rc; } EXPORT_SYMBOL(cca_findcard2); diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index fe70e9875bde..5043f0fcf399 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -4163,9 +4163,6 @@ static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card, int fallback = *(int *)reply->param; QETH_CARD_TEXT(card, 4, "setaccb"); - if (cmd->hdr.return_code) - return -EIO; - qeth_setadpparms_inspect_rc(cmd); access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl; QETH_CARD_TEXT_(card, 2, "rc=%d", @@ -4175,7 +4172,7 @@ static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card, QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_CTRL(%#x) on device %x: %#x\n", access_ctrl_req->subcmd_code, CARD_DEVID(card), cmd->data.setadapterparms.hdr.return_code); - switch (cmd->data.setadapterparms.hdr.return_code) { + switch (qeth_setadpparms_inspect_rc(cmd)) { case 
SET_ACCESS_CTRL_RC_SUCCESS: if (card->options.isolation == ISOLATION_MODE_NONE) { dev_info(&card->gdev->dev, diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index 92bace3b28fd..4ce28aa490cd 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -1199,6 +1199,10 @@ static void qeth_bridge_state_change(struct qeth_card *card, int extrasize; QETH_CARD_TEXT(card, 2, "brstchng"); + if (qports->num_entries == 0) { + QETH_CARD_TEXT(card, 2, "BPempty"); + return; + } if (qports->entry_length != sizeof(struct qeth_sbp_port_entry)) { QETH_CARD_TEXT_(card, 2, "BPsz%04x", qports->entry_length); return; diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c index cb84125ab80d..08dc2efb7d8a 100644 --- a/drivers/s390/scsi/zfcp_erp.c +++ b/drivers/s390/scsi/zfcp_erp.c @@ -576,7 +576,10 @@ static void zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *act) ZFCP_STATUS_ERP_TIMEDOUT)) { req->status |= ZFCP_STATUS_FSFREQ_DISMISSED; zfcp_dbf_rec_run("erscf_1", act); - req->erp_action = NULL; + /* lock-free concurrent access with + * zfcp_erp_timeout_handler() + */ + WRITE_ONCE(req->erp_action, NULL); } if (act->status & ZFCP_STATUS_ERP_TIMEDOUT) zfcp_dbf_rec_run("erscf_2", act); @@ -612,8 +615,14 @@ void zfcp_erp_notify(struct zfcp_erp_action *erp_action, unsigned long set_mask) void zfcp_erp_timeout_handler(struct timer_list *t) { struct zfcp_fsf_req *fsf_req = from_timer(fsf_req, t, timer); - struct zfcp_erp_action *act = fsf_req->erp_action; + struct zfcp_erp_action *act; + if (fsf_req->status & ZFCP_STATUS_FSFREQ_DISMISSED) + return; + /* lock-free concurrent access with zfcp_erp_strategy_check_fsfreq() */ + act = READ_ONCE(fsf_req->erp_action); + if (!act) + return; zfcp_erp_notify(act, ZFCP_STATUS_ERP_TIMEDOUT); } diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index cf63916814cc..5c652deb6fed 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c @@ -409,7 +409,7 @@ static void zfcp_fsf_req_complete(struct zfcp_fsf_req *req) return; } - del_timer(&req->timer); + del_timer_sync(&req->timer); zfcp_fsf_protstatus_eval(req); zfcp_fsf_fsfstatus_eval(req); req->handler(req); @@ -762,7 +762,7 @@ static int zfcp_fsf_req_send(struct zfcp_fsf_req *req) req->qdio_req.qdio_outb_usage = atomic_read(&qdio->req_q_free); req->issued = get_tod_clock(); if (zfcp_qdio_send(qdio, &req->qdio_req)) { - del_timer(&req->timer); + del_timer_sync(&req->timer); /* lookup request again, list might have changed */ zfcp_reqlist_find_rm(adapter->req_list, req_id); zfcp_erp_adapter_reopen(adapter, 0, "fsrs__1"); diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index 90cf4691b8c3..9ea30fcb4428 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig @@ -114,15 +114,6 @@ config BLK_DEV_SR . The module will be called sr_mod. -config BLK_DEV_SR_VENDOR - bool "Enable vendor-specific extensions (for SCSI CDROM)" - depends on BLK_DEV_SR - help - This enables the usage of vendor specific SCSI commands. This is - required to support multisession CDs with old NEC/TOSHIBA cdrom - drives (and HP Writers). If you have such a drive and get the first - session only, try saying Y here; everybody else says N. 
- config CHR_DEV_SG tristate "SCSI generic support" depends on SCSI diff --git a/drivers/scsi/arm/acornscsi.c b/drivers/scsi/arm/acornscsi.c index d12dd89538df..deab66598910 100644 --- a/drivers/scsi/arm/acornscsi.c +++ b/drivers/scsi/arm/acornscsi.c @@ -2911,8 +2911,10 @@ static int acornscsi_probe(struct expansion_card *ec, const struct ecard_id *id) ashost->base = ecardm_iomap(ec, ECARD_RES_MEMC, 0, 0); ashost->fast = ecardm_iomap(ec, ECARD_RES_IOCFAST, 0, 0); - if (!ashost->base || !ashost->fast) + if (!ashost->base || !ashost->fast) { + ret = -ENOMEM; goto out_put; + } host->irq = ec->irq; ashost->host = host; diff --git a/drivers/scsi/arm/cumana_2.c b/drivers/scsi/arm/cumana_2.c index a1f3e9ee4e63..14e1d001253c 100644 --- a/drivers/scsi/arm/cumana_2.c +++ b/drivers/scsi/arm/cumana_2.c @@ -450,7 +450,7 @@ static int cumanascsi2_probe(struct expansion_card *ec, if (info->info.scsi.dma != NO_DMA) free_dma(info->info.scsi.dma); - free_irq(ec->irq, host); + free_irq(ec->irq, info); out_release: fas216_release(host); diff --git a/drivers/scsi/arm/eesox.c b/drivers/scsi/arm/eesox.c index 134f040d58e2..f441ec8eb93d 100644 --- a/drivers/scsi/arm/eesox.c +++ b/drivers/scsi/arm/eesox.c @@ -571,7 +571,7 @@ static int eesoxscsi_probe(struct expansion_card *ec, const struct ecard_id *id) if (info->info.scsi.dma != NO_DMA) free_dma(info->info.scsi.dma); - free_irq(ec->irq, host); + free_irq(ec->irq, info); out_remove: fas216_remove(host); diff --git a/drivers/scsi/arm/powertec.c b/drivers/scsi/arm/powertec.c index c795537a671c..2dc0df005cb3 100644 --- a/drivers/scsi/arm/powertec.c +++ b/drivers/scsi/arm/powertec.c @@ -378,7 +378,7 @@ static int powertecscsi_probe(struct expansion_card *ec, if (info->info.scsi.dma != NO_DMA) free_dma(info->info.scsi.dma); - free_irq(ec->irq, host); + free_irq(ec->irq, info); out_release: fas216_release(host); diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c index 524cdbcd29aa..ec7d01f6e2d5 100644 --- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c +++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c @@ -959,6 +959,7 @@ static int init_act_open(struct cxgbi_sock *csk) struct net_device *ndev = cdev->ports[csk->port_id]; struct cxgbi_hba *chba = cdev->hbas[csk->port_id]; struct sk_buff *skb = NULL; + int ret; log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx.\n", csk, csk->state, csk->flags); @@ -979,16 +980,16 @@ static int init_act_open(struct cxgbi_sock *csk) csk->atid = cxgb3_alloc_atid(t3dev, &t3_client, csk); if (csk->atid < 0) { pr_err("NO atid available.\n"); - return -EINVAL; + ret = -EINVAL; + goto put_sock; } cxgbi_sock_set_flag(csk, CTPF_HAS_ATID); cxgbi_sock_get(csk); skb = alloc_wr(sizeof(struct cpl_act_open_req), 0, GFP_KERNEL); if (!skb) { - cxgb3_free_atid(t3dev, csk->atid); - cxgbi_sock_put(csk); - return -ENOMEM; + ret = -ENOMEM; + goto free_atid; } skb->sk = (struct sock *)csk; set_arp_failure_handler(skb, act_open_arp_failure); @@ -1010,6 +1011,15 @@ static int init_act_open(struct cxgbi_sock *csk) cxgbi_sock_set_state(csk, CTP_ACTIVE_OPEN); send_act_open_req(csk, skb, csk->l2t); return 0; + +free_atid: + cxgb3_free_atid(t3dev, csk->atid); +put_sock: + cxgbi_sock_put(csk); + l2t_release(t3dev, csk->l2t); + csk->l2t = NULL; + + return ret; } cxgb3_cpl_handler_func cxgb3i_cpl_handlers[NUM_CPL_CMDS] = { diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c index 1791a393795d..07a0dadc75bf 100644 --- a/drivers/scsi/fcoe/fcoe_ctlr.c +++ b/drivers/scsi/fcoe/fcoe_ctlr.c @@ -255,9 +255,9 @@ static 
void fcoe_sysfs_fcf_del(struct fcoe_fcf *new) WARN_ON(!fcf_dev); new->fcf_dev = NULL; fcoe_fcf_device_delete(fcf_dev); - kfree(new); mutex_unlock(&cdev->lock); } + kfree(new); } /** diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c index 6f4692f0d714..031aa4043c5e 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_main.c +++ b/drivers/scsi/hisi_sas/hisi_sas_main.c @@ -904,8 +904,11 @@ void hisi_sas_phy_oob_ready(struct hisi_hba *hisi_hba, int phy_no) struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; struct device *dev = hisi_hba->dev; + dev_dbg(dev, "phy%d OOB ready\n", phy_no); + if (phy->phy_attached) + return; + if (!timer_pending(&phy->timer)) { - dev_dbg(dev, "phy%d OOB ready\n", phy_no); phy->timer.expires = jiffies + HISI_SAS_WAIT_PHYUP_TIMEOUT * HZ; add_timer(&phy->timer); } diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c index 59f0f1030c54..c5711c659b51 100644 --- a/drivers/scsi/ibmvscsi/ibmvscsi.c +++ b/drivers/scsi/ibmvscsi/ibmvscsi.c @@ -415,6 +415,8 @@ static int ibmvscsi_reenable_crq_queue(struct crq_queue *queue, int rc = 0; struct vio_dev *vdev = to_vio_dev(hostdata->dev); + set_adapter_info(hostdata); + /* Re-enable the CRQ */ do { if (rc) diff --git a/drivers/scsi/iscsi_boot_sysfs.c b/drivers/scsi/iscsi_boot_sysfs.c index e4857b728033..a64abe38db2d 100644 --- a/drivers/scsi/iscsi_boot_sysfs.c +++ b/drivers/scsi/iscsi_boot_sysfs.c @@ -352,7 +352,7 @@ iscsi_boot_create_kobj(struct iscsi_boot_kset *boot_kset, boot_kobj->kobj.kset = boot_kset->kset; if (kobject_init_and_add(&boot_kobj->kobj, &iscsi_boot_ktype, NULL, name, index)) { - kfree(boot_kobj); + kobject_put(&boot_kobj->kobj); return NULL; } boot_kobj->data = data; diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c index 2b865c6423e2..589ddf003886 100644 --- a/drivers/scsi/libfc/fc_disc.c +++ b/drivers/scsi/libfc/fc_disc.c @@ -581,8 +581,12 @@ static void fc_disc_gpn_id_resp(struct fc_seq *sp, struct fc_frame *fp, if (PTR_ERR(fp) == -FC_EX_CLOSED) goto out; - if (IS_ERR(fp)) - goto redisc; + if (IS_ERR(fp)) { + mutex_lock(&disc->disc_mutex); + fc_disc_restart(disc); + mutex_unlock(&disc->disc_mutex); + goto out; + } cp = fc_frame_payload_get(fp, sizeof(*cp)); if (!cp) @@ -609,7 +613,7 @@ static void fc_disc_gpn_id_resp(struct fc_seq *sp, struct fc_frame *fp, new_rdata->disc_id = disc->disc_id; fc_rport_login(new_rdata); } - goto out; + goto free_fp; } rdata->disc_id = disc->disc_id; mutex_unlock(&rdata->rp_mutex); @@ -626,10 +630,10 @@ redisc: fc_disc_restart(disc); mutex_unlock(&disc->disc_mutex); } +free_fp: + fc_frame_free(fp); out: kref_put(&rdata->kref, fc_rport_destroy); - if (!IS_ERR(fp)) - fc_frame_free(fp); } /** diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c index e9e00740f7ca..dd755a56cf52 100644 --- a/drivers/scsi/libsas/sas_ata.c +++ b/drivers/scsi/libsas/sas_ata.c @@ -208,7 +208,10 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc) task->num_scatter = si; } - task->data_dir = qc->dma_dir; + if (qc->tf.protocol == ATA_PROT_NODATA) + task->data_dir = DMA_NONE; + else + task->data_dir = qc->dma_dir; task->scatter = qc->sg; task->ata_task.retry_count = 1; task->task_state_flags = SAS_TASK_STATE_PENDING; diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c index d7302c2052f9..10975f3f7ff6 100644 --- a/drivers/scsi/libsas/sas_discover.c +++ b/drivers/scsi/libsas/sas_discover.c @@ -182,10 +182,11 @@ int sas_notify_lldd_dev_found(struct domain_device 
*dev) pr_warn("driver on host %s cannot handle device %llx, error:%d\n", dev_name(sas_ha->dev), SAS_ADDR(dev->sas_addr), res); + return res; } set_bit(SAS_DEV_FOUND, &dev->state); kref_get(&dev->kref); - return res; + return 0; } diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index 66f8867dd837..4e994a693e3f 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -4442,7 +4442,9 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, out: if (ndlp && NLP_CHK_NODE_ACT(ndlp) && shost) { spin_lock_irq(shost->host_lock); - ndlp->nlp_flag &= ~(NLP_ACC_REGLOGIN | NLP_RM_DFLT_RPI); + if (mbox) + ndlp->nlp_flag &= ~NLP_ACC_REGLOGIN; + ndlp->nlp_flag &= ~NLP_RM_DFLT_RPI; spin_unlock_irq(shost->host_lock); /* If the node is not being used by another discovery thread, @@ -8394,6 +8396,8 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, spin_lock_irq(shost->host_lock); if (ndlp->nlp_flag & NLP_IN_DEV_LOSS) { spin_unlock_irq(shost->host_lock); + if (newnode) + lpfc_nlp_put(ndlp); goto dropit; } spin_unlock_irq(shost->host_lock); diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 14d9f41977f1..95abffd9ad10 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -11542,7 +11542,8 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba) lpfc_sli4_xri_exchange_busy_wait(phba); /* per-phba callback de-registration for hotplug event */ - lpfc_cpuhp_remove(phba); + if (phba->pport) + lpfc_cpuhp_remove(phba); /* Disable PCI subsystem interrupt */ lpfc_sli4_disable_intr(phba); diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c index 9884228800a5..f14394ab0e03 100644 --- a/drivers/scsi/lpfc/lpfc_nvmet.c +++ b/drivers/scsi/lpfc/lpfc_nvmet.c @@ -1923,7 +1923,7 @@ lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba) } tgtp->tport_unreg_cmp = &tport_unreg_cmp; nvmet_fc_unregister_targetport(phba->targetport); - if (!wait_for_completion_timeout(tgtp->tport_unreg_cmp, + if (!wait_for_completion_timeout(&tport_unreg_cmp, msecs_to_jiffies(LPFC_NVMET_WAIT_TMO))) lpfc_printf_log(phba, KERN_ERR, LOG_NVME, "6179 Unreg targetport x%px timeout " diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c index b76646357980..d0296f7cf45f 100644 --- a/drivers/scsi/lpfc/lpfc_vport.c +++ b/drivers/scsi/lpfc/lpfc_vport.c @@ -642,27 +642,16 @@ lpfc_vport_delete(struct fc_vport *fc_vport) vport->port_state < LPFC_VPORT_READY) return -EAGAIN; } + /* - * This is a bit of a mess. We want to ensure the shost doesn't get - * torn down until we're done with the embedded lpfc_vport structure. - * - * Beyond holding a reference for this function, we also need a - * reference for outstanding I/O requests we schedule during delete - * processing. But once we scsi_remove_host() we can no longer obtain - * a reference through scsi_host_get(). - * - * So we take two references here. We release one reference at the - * bottom of the function -- after delinking the vport. And we - * release the other at the completion of the unreg_vpi that get's - * initiated after we've disposed of all other resources associated - * with the port. + * Take early refcount for outstanding I/O requests we schedule during + * delete processing for unreg_vpi. Always keep this before + * scsi_remove_host() as we can no longer obtain a reference through + * scsi_host_get() after scsi_host_remove as shost is set to SHOST_DEL. 
*/ if (!scsi_host_get(shost)) return VPORT_INVAL; - if (!scsi_host_get(shost)) { - scsi_host_put(shost); - return VPORT_INVAL; - } + lpfc_free_sysfs_attr(vport); lpfc_debugfs_terminate(vport); @@ -809,8 +798,9 @@ skip_logo: if (!(vport->vpi_state & LPFC_VPI_REGISTERED) || lpfc_mbx_unreg_vpi(vport)) scsi_host_put(shost); - } else + } else { scsi_host_put(shost); + } lpfc_free_vpi(phba, vport->vpi); vport->work_port_events = 0; diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index 0cbe6740e0c9..2c2966a297c7 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -5586,9 +5586,13 @@ megasas_setup_irqs_msix(struct megasas_instance *instance, u8 is_probe) &instance->irq_context[i])) { dev_err(&instance->pdev->dev, "Failed to register IRQ for vector %d.\n", i); - for (j = 0; j < i; j++) + for (j = 0; j < i; j++) { + if (j < instance->low_latency_index_start) + irq_set_affinity_hint( + pci_irq_vector(pdev, j), NULL); free_irq(pci_irq_vector(pdev, j), &instance->irq_context[j]); + } /* Retry irq register for IO_APIC*/ instance->msix_vectors = 0; instance->msix_load_balance = false; @@ -5626,6 +5630,9 @@ megasas_destroy_irqs(struct megasas_instance *instance) { if (instance->msix_vectors) for (i = 0; i < instance->msix_vectors; i++) { + if (i < instance->low_latency_index_start) + irq_set_affinity_hint( + pci_irq_vector(instance->pdev, i), NULL); free_irq(pci_irq_vector(instance->pdev, i), &instance->irq_context[i]); } diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index 3d48024082ba..5dcd7b9b72ce 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -3738,7 +3738,7 @@ int megasas_irqpoll(struct irq_poll *irqpoll, int budget) instance = irq_ctx->instance; if (irq_ctx->irq_line_enable) { - disable_irq(irq_ctx->os_irq); + disable_irq_nosync(irq_ctx->os_irq); irq_ctx->irq_line_enable = false; } @@ -3787,10 +3787,8 @@ static irqreturn_t megasas_isr_fusion(int irq, void *devp) if (instance->mask_interrupts) return IRQ_NONE; -#if defined(ENABLE_IRQ_POLL) if (irq_context->irq_poll_scheduled) return IRQ_HANDLED; -#endif if (!instance->msix_vectors) { mfiStatus = instance->instancet->clear_intr(instance); diff --git a/drivers/scsi/mesh.c b/drivers/scsi/mesh.c index 74fb50644678..4dd50db90677 100644 --- a/drivers/scsi/mesh.c +++ b/drivers/scsi/mesh.c @@ -1045,6 +1045,8 @@ static void handle_error(struct mesh_state *ms) while ((in_8(&mr->bus_status1) & BS1_RST) != 0) udelay(1); printk("done\n"); + if (ms->dma_started) + halt_dma(ms); handle_reset(ms); /* request_q is empty, no point in mesh_start() */ return; @@ -1357,7 +1359,8 @@ static void halt_dma(struct mesh_state *ms) ms->conn_tgt, ms->data_ptr, scsi_bufflen(cmd), ms->tgts[ms->conn_tgt].data_goes_out); } - scsi_dma_unmap(cmd); + if (cmd) + scsi_dma_unmap(cmd); ms->dma_started = 0; } @@ -1712,6 +1715,9 @@ static int mesh_host_reset(struct scsi_cmnd *cmd) spin_lock_irqsave(ms->host->host_lock, flags); + if (ms->dma_started) + halt_dma(ms); + /* Reset the controller & dbdma channel */ out_le32(&md->control, (RUN|PAUSE|FLUSH|WAKE) << 16); /* stop dma */ out_8(&mr->exception, 0xff); /* clear all exception bits */ diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c index 752b71cfbe12..b7e44634d0dc 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.c +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c @@ -1632,7 +1632,7 @@ 
_base_irqpoll(struct irq_poll *irqpoll, int budget) reply_q = container_of(irqpoll, struct adapter_reply_queue, irqpoll); if (reply_q->irq_line_enable) { - disable_irq(reply_q->os_irq); + disable_irq_nosync(reply_q->os_irq); reply_q->irq_line_enable = false; } num_entries = _base_process_reply_queue(reply_q); @@ -4777,7 +4777,9 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc) } kfree(ioc->hpr_lookup); + ioc->hpr_lookup = NULL; kfree(ioc->internal_lookup); + ioc->internal_lookup = NULL; if (ioc->chain_lookup) { for (i = 0; i < ioc->scsiio_depth; i++) { for (j = ioc->chains_per_prp_buffer; diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c index b95f7d062ea4..d5a62fea8fe3 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c +++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c @@ -2921,19 +2921,18 @@ BRM_status_show(struct device *cdev, struct device_attribute *attr, if (!ioc->is_warpdrive) { ioc_err(ioc, "%s: BRM attribute is only for warpdrive\n", __func__); - goto out; + return 0; } /* pci_access_mutex lock acquired by sysfs show path */ mutex_lock(&ioc->pci_access_mutex); - if (ioc->pci_error_recovery || ioc->remove_host) { - mutex_unlock(&ioc->pci_access_mutex); - return 0; - } + if (ioc->pci_error_recovery || ioc->remove_host) + goto out; /* allocate upto GPIOVal 36 entries */ sz = offsetof(Mpi2IOUnitPage3_t, GPIOVal) + (sizeof(u16) * 36); io_unit_pg3 = kzalloc(sz, GFP_KERNEL); if (!io_unit_pg3) { + rc = -ENOMEM; ioc_err(ioc, "%s: failed allocating memory for iounit_pg3: (%d) bytes\n", __func__, sz); goto out; @@ -2943,6 +2942,7 @@ BRM_status_show(struct device *cdev, struct device_attribute *attr, 0) { ioc_err(ioc, "%s: failed reading iounit_pg3\n", __func__); + rc = -EINVAL; goto out; } @@ -2950,12 +2950,14 @@ BRM_status_show(struct device *cdev, struct device_attribute *attr, if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { ioc_err(ioc, "%s: iounit_pg3 failed with ioc_status(0x%04x)\n", __func__, ioc_status); + rc = -EINVAL; goto out; } if (io_unit_pg3->GPIOCount < 25) { ioc_err(ioc, "%s: iounit_pg3->GPIOCount less than 25 entries, detected (%d) entries\n", __func__, io_unit_pg3->GPIOCount); + rc = -EINVAL; goto out; } diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c index 7e48154e11c3..027bf5b2981b 100644 --- a/drivers/scsi/pm8001/pm8001_sas.c +++ b/drivers/scsi/pm8001/pm8001_sas.c @@ -816,7 +816,7 @@ pm8001_exec_internal_task_abort(struct pm8001_hba_info *pm8001_ha, res = pm8001_tag_alloc(pm8001_ha, &ccb_tag); if (res) - return res; + goto ex_err; ccb = &pm8001_ha->ccb_info[ccb_tag]; ccb->device = pm8001_dev; ccb->ccb_tag = ccb_tag; diff --git a/drivers/scsi/qedf/qedf.h b/drivers/scsi/qedf/qedf.h index f3f399fe10c8..0da4e16fb23a 100644 --- a/drivers/scsi/qedf/qedf.h +++ b/drivers/scsi/qedf/qedf.h @@ -355,6 +355,7 @@ struct qedf_ctx { #define QEDF_GRCDUMP_CAPTURE 4 #define QEDF_IN_RECOVERY 5 #define QEDF_DBG_STOP_IO 6 +#define QEDF_PROBING 8 unsigned long flags; /* Miscellaneous state flags */ int fipvlan_retries; u8 num_queues; diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c index 59ca98f12afd..3d0e345947c1 100644 --- a/drivers/scsi/qedf/qedf_main.c +++ b/drivers/scsi/qedf/qedf_main.c @@ -3153,7 +3153,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode) { int rc = -EINVAL; struct fc_lport *lport; - struct qedf_ctx *qedf; + struct qedf_ctx *qedf = NULL; struct Scsi_Host *host; bool is_vf = false; struct qed_ll2_params params; @@ -3183,6 +3183,7 @@ static int __qedf_probe(struct pci_dev *pdev, 
int mode) /* Initialize qedf_ctx */ qedf = lport_priv(lport); + set_bit(QEDF_PROBING, &qedf->flags); qedf->lport = lport; qedf->ctlr.lp = lport; qedf->pdev = pdev; @@ -3206,9 +3207,12 @@ static int __qedf_probe(struct pci_dev *pdev, int mode) } else { /* Init pointers during recovery */ qedf = pci_get_drvdata(pdev); + set_bit(QEDF_PROBING, &qedf->flags); lport = qedf->lport; } + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Probe started.\n"); + host = lport->host; /* Allocate mempool for qedf_io_work structs */ @@ -3513,6 +3517,10 @@ static int __qedf_probe(struct pci_dev *pdev, int mode) else fc_fabric_login(lport); + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Probe done.\n"); + + clear_bit(QEDF_PROBING, &qedf->flags); + /* All good */ return 0; @@ -3538,6 +3546,11 @@ err2: err1: scsi_host_put(lport->host); err0: + if (qedf) { + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Probe done.\n"); + + clear_bit(QEDF_PROBING, &qedf->flags); + } return rc; } @@ -3687,11 +3700,25 @@ void qedf_get_protocol_tlv_data(void *dev, void *data) { struct qedf_ctx *qedf = dev; struct qed_mfw_tlv_fcoe *fcoe = data; - struct fc_lport *lport = qedf->lport; - struct Scsi_Host *host = lport->host; - struct fc_host_attrs *fc_host = shost_to_fc_host(host); + struct fc_lport *lport; + struct Scsi_Host *host; + struct fc_host_attrs *fc_host; struct fc_host_statistics *hst; + if (!qedf) { + QEDF_ERR(NULL, "qedf is null.\n"); + return; + } + + if (test_bit(QEDF_PROBING, &qedf->flags)) { + QEDF_ERR(&qedf->dbg_ctx, "Function is still probing.\n"); + return; + } + + lport = qedf->lport; + host = lport->host; + fc_host = shost_to_fc_host(host); + /* Force a refresh of the fc_host stats including offload stats */ hst = qedf_fc_get_host_stats(host); diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c index 8829880a54c3..0f57c8073406 100644 --- a/drivers/scsi/qedi/qedi_iscsi.c +++ b/drivers/scsi/qedi/qedi_iscsi.c @@ -997,7 +997,8 @@ static void qedi_ep_disconnect(struct iscsi_endpoint *ep) if (qedi_ep->state == EP_STATE_OFLDCONN_START) goto ep_exit_recover; - flush_work(&qedi_ep->offload_work); + if (qedi_ep->state != EP_STATE_OFLDCONN_NONE) + flush_work(&qedi_ep->offload_work); if (qedi_ep->conn) { qedi_conn = qedi_ep->conn; @@ -1214,6 +1215,10 @@ static int qedi_set_path(struct Scsi_Host *shost, struct iscsi_path *path_data) } iscsi_cid = (u32)path_data->handle; + if (iscsi_cid >= qedi->max_active_conns) { + ret = -EINVAL; + goto set_path_exit; + } qedi_ep = qedi->ep_tbl[iscsi_cid]; QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, "iscsi_cid=0x%x, qedi_ep=%p\n", iscsi_cid, qedi_ep); diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c index 84bb4a048016..fc6e12fb7d77 100644 --- a/drivers/scsi/qla2xxx/qla_gs.c +++ b/drivers/scsi/qla2xxx/qla_gs.c @@ -3638,7 +3638,9 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp) qla2x00_clear_loop_id(fcport); fcport->flags |= FCF_FABRIC_DEVICE; } else if (fcport->d_id.b24 != rp->id.b24 || - fcport->scan_needed) { + (fcport->scan_needed && + fcport->port_type != FCT_INITIATOR && + fcport->port_type != FCT_NVME_INITIATOR)) { qlt_schedule_sess_for_deletion(fcport); } fcport->d_id.b24 = rp->id.b24; @@ -3671,10 +3673,22 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp) } if (fcport->scan_state != QLA_FCPORT_FOUND) { + bool do_delete = false; + + if (fcport->scan_needed && + fcport->disc_state == DSC_LOGIN_PEND) { + /* Cable got disconnected after we sent + * a login. Do delete to prevent timeout. 
+ */ + fcport->logout_on_delete = 1; + do_delete = true; + } + fcport->scan_needed = 0; - if ((qla_dual_mode_enabled(vha) || - qla_ini_mode_enabled(vha)) && - atomic_read(&fcport->state) == FCS_ONLINE) { + if (((qla_dual_mode_enabled(vha) || + qla_ini_mode_enabled(vha)) && + atomic_read(&fcport->state) == FCS_ONLINE) || + do_delete) { if (fcport->loop_id != FC_NO_LOOP_ID) { if (fcport->flags & FCF_FCP2_DEVICE) fcport->logout_on_delete = 0; diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index 62a16463f025..c1631e42d35d 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -335,14 +335,6 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) if (time_after(jiffies, wait_time)) break; - /* - * Check if it's UNLOADING, cause we cannot poll in - * this case, or else a NULL pointer dereference - * is triggered. - */ - if (unlikely(test_bit(UNLOADING, &base_vha->dpc_flags))) - return QLA_FUNCTION_TIMEOUT; - /* Check for pending interrupts. */ qla2x00_poll(ha->rsp_q_map[0]); diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c index 941aa53363f5..f4815a4084d8 100644 --- a/drivers/scsi/qla2xxx/qla_nvme.c +++ b/drivers/scsi/qla2xxx/qla_nvme.c @@ -535,6 +535,11 @@ static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport, struct nvme_private *priv = fd->private; struct qla_nvme_rport *qla_rport = rport->private; + if (!priv) { + /* nvme association has been torn down */ + return rval; + } + fcport = qla_rport->fcport; if (!qpair || !fcport || (qpair && !qpair->fw_started) || diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 03d272a09e26..67b1e74fcd1e 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -1993,6 +1993,11 @@ skip_pio: /* Determine queue resources */ ha->max_req_queues = ha->max_rsp_queues = 1; ha->msix_count = QLA_BASE_VECTORS; + + /* Check if FW supports MQ or not */ + if (!(ha->fw_attributes & BIT_6)) + goto mqiobase_exit; + if (!ql2xmqsupport || !ql2xnvmeenable || (!IS_QLA25XX(ha) && !IS_QLA81XX(ha))) goto mqiobase_exit; @@ -2804,10 +2809,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) /* This may fail but that's ok */ pci_enable_pcie_error_reporting(pdev); - /* Turn off T10-DIF when FC-NVMe is enabled */ - if (ql2xnvmeenable) - ql2xenabledif = 0; - ha = kzalloc(sizeof(struct qla_hw_data), GFP_KERNEL); if (!ha) { ql_log_pci(ql_log_fatal, pdev, 0x0009, @@ -6295,6 +6296,7 @@ qla2x00_do_dpc(void *data) if (do_reset && !(test_and_set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags))) { + base_vha->flags.online = 1; ql_dbg(ql_dbg_dpc, base_vha, 0x4007, "ISP abort scheduled.\n"); if (ha->isp_ops->abort_isp(base_vha)) { diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index cb8a892e2d39..b75e6e4d58c0 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -1262,7 +1262,7 @@ void qlt_schedule_sess_for_deletion(struct fc_port *sess) qla24xx_chk_fcp_state(sess); - ql_dbg(ql_dbg_tgt, sess->vha, 0xe001, + ql_dbg(ql_dbg_disc, sess->vha, 0xe001, "Scheduling sess %p for deletion %8phC\n", sess, sess->port_name); diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c index abe7f79bb789..744cd93189da 100644 --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c +++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c @@ -926,6 +926,7 @@ static ssize_t tcm_qla2xxx_tpg_enable_store(struct config_item *item, atomic_set(&tpg->lport_tpg_enabled, 0); 
qlt_stop_phase1(vha->vha_tgt.qla_tgt); + qlt_stop_phase2(vha->vha_tgt.qla_tgt); } return count; @@ -1088,6 +1089,7 @@ static ssize_t tcm_qla2xxx_npiv_tpg_enable_store(struct config_item *item, atomic_set(&tpg->lport_tpg_enabled, 0); qlt_stop_phase1(vha->vha_tgt.qla_tgt); + qlt_stop_phase2(vha->vha_tgt.qla_tgt); } return count; diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index 32965ec76965..44181a2cbf18 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c @@ -5296,6 +5296,12 @@ static int __init scsi_debug_init(void) pr_err("submit_queues must be 1 or more\n"); return -EINVAL; } + + if ((sdebug_max_queue > SDEBUG_CANQUEUE) || (sdebug_max_queue < 1)) { + pr_err("max_queue must be in range [1, %d]\n", SDEBUG_CANQUEUE); + return -EINVAL; + } + sdebug_q_arr = kcalloc(submit_queues, sizeof(struct sdebug_queue), GFP_KERNEL); if (sdebug_q_arr == NULL) diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c index df14597752ec..fb5a7832353c 100644 --- a/drivers/scsi/scsi_devinfo.c +++ b/drivers/scsi/scsi_devinfo.c @@ -239,6 +239,7 @@ static struct { {"LSI", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, {"ENGENIO", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, {"LENOVO", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, + {"FUJITSU", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, {"SanDisk", "Cruzer Blade", NULL, BLIST_TRY_VPD_PAGES | BLIST_INQUIRY_36}, {"SMSC", "USB 2 HS-CF", NULL, BLIST_SPARSELUN | BLIST_INQUIRY_36}, diff --git a/drivers/scsi/scsi_dh.c b/drivers/scsi/scsi_dh.c index 42f0550d6b11..6f41e4b5a2b8 100644 --- a/drivers/scsi/scsi_dh.c +++ b/drivers/scsi/scsi_dh.c @@ -63,6 +63,7 @@ static const struct scsi_dh_blist scsi_dh_blist[] = { {"LSI", "INF-01-00", "rdac", }, {"ENGENIO", "INF-01-00", "rdac", }, {"LENOVO", "DE_Series", "rdac", }, + {"FUJITSU", "ETERNUS_AHB", "rdac", }, {NULL, NULL, NULL }, }; diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 91c007d26c1e..e6944e1cba2b 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -551,7 +551,7 @@ static void scsi_uninit_cmd(struct scsi_cmnd *cmd) } } -static void scsi_mq_free_sgtables(struct scsi_cmnd *cmd) +static void scsi_free_sgtables(struct scsi_cmnd *cmd) { if (cmd->sdb.table.nents) sg_free_table_chained(&cmd->sdb.table, @@ -563,11 +563,20 @@ static void scsi_mq_free_sgtables(struct scsi_cmnd *cmd) static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd) { - scsi_mq_free_sgtables(cmd); + scsi_free_sgtables(cmd); scsi_uninit_cmd(cmd); scsi_del_cmd_from_list(cmd); } +static void scsi_run_queue_async(struct scsi_device *sdev) +{ + if (scsi_target(sdev)->single_lun || + !list_empty(&sdev->host->starved_list)) + kblockd_schedule_work(&sdev->requeue_work); + else + blk_mq_run_hw_queues(sdev->request_queue, true); +} + /* Returns false when no more bytes to process, true if there are more */ static bool scsi_end_request(struct request *req, blk_status_t error, unsigned int bytes) @@ -612,11 +621,7 @@ static bool scsi_end_request(struct request *req, blk_status_t error, __blk_mq_end_request(req, error); - if (scsi_target(sdev)->single_lun || - !list_empty(&sdev->host->starved_list)) - kblockd_schedule_work(&sdev->requeue_work); - else - blk_mq_run_hw_queues(q, true); + scsi_run_queue_async(sdev); percpu_ref_put(&q->q_usage_counter); return false; @@ -1063,7 +1068,7 @@ blk_status_t scsi_init_io(struct scsi_cmnd *cmd) return BLK_STS_OK; out_free_sgtables: - scsi_mq_free_sgtables(cmd); + scsi_free_sgtables(cmd); return ret; } EXPORT_SYMBOL(scsi_init_io); @@ -1214,6 
+1219,7 @@ static blk_status_t scsi_setup_cmnd(struct scsi_device *sdev, struct request *req) { struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req); + blk_status_t ret; if (!blk_rq_bytes(req)) cmd->sc_data_direction = DMA_NONE; @@ -1223,9 +1229,14 @@ static blk_status_t scsi_setup_cmnd(struct scsi_device *sdev, cmd->sc_data_direction = DMA_FROM_DEVICE; if (blk_rq_is_scsi(req)) - return scsi_setup_scsi_cmnd(sdev, req); + ret = scsi_setup_scsi_cmnd(sdev, req); else - return scsi_setup_fs_cmnd(sdev, req); + ret = scsi_setup_fs_cmnd(sdev, req); + + if (ret != BLK_STS_OK) + scsi_free_sgtables(cmd); + + return ret; } static blk_status_t @@ -1723,6 +1734,7 @@ out_put_budget: */ if (req->rq_flags & RQF_DONTPREP) scsi_mq_uninit_cmd(cmd); + scsi_run_queue_async(sdev); break; } return ret; diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index a5c78b38d302..dbad926e8f87 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c @@ -3174,7 +3174,7 @@ static int iscsi_set_flashnode_param(struct iscsi_transport *transport, pr_err("%s could not find host no %u\n", __func__, ev->u.set_flashnode.host_no); err = -ENODEV; - goto put_host; + goto exit_set_fnode; } idx = ev->u.set_flashnode.flashnode_idx; diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c index f8661062ef95..f3d5b1bbd5aa 100644 --- a/drivers/scsi/scsi_transport_spi.c +++ b/drivers/scsi/scsi_transport_spi.c @@ -339,7 +339,7 @@ store_spi_transport_##field(struct device *dev, \ struct spi_transport_attrs *tp \ = (struct spi_transport_attrs *)&starget->starget_data; \ \ - if (i->f->set_##field) \ + if (!i->f->set_##field) \ return -EINVAL; \ val = simple_strtoul(buf, NULL, 0); \ if (val > tp->max_##field) \ diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c index 4664fdf75c0f..70a28f6fb1d0 100644 --- a/drivers/scsi/sr.c +++ b/drivers/scsi/sr.c @@ -750,7 +750,7 @@ static int sr_probe(struct device *dev) cd->cdi.disk = disk; if (register_cdrom(&cd->cdi)) - goto fail_put; + goto fail_minor; /* * Initialize block layer runtime PM stuffs before the @@ -768,6 +768,10 @@ static int sr_probe(struct device *dev) return 0; +fail_minor: + spin_lock(&sr_index_lock); + clear_bit(minor, sr_index_bits); + spin_unlock(&sr_index_lock); fail_put: put_disk(disk); fail_free: diff --git a/drivers/scsi/sr_vendor.c b/drivers/scsi/sr_vendor.c index e3b0ce25162b..b9db2ec6d036 100644 --- a/drivers/scsi/sr_vendor.c +++ b/drivers/scsi/sr_vendor.c @@ -66,9 +66,6 @@ void sr_vendor_init(Scsi_CD *cd) { -#ifndef CONFIG_BLK_DEV_SR_VENDOR - cd->vendor = VENDOR_SCSI3; -#else const char *vendor = cd->device->vendor; const char *model = cd->device->model; @@ -100,7 +97,6 @@ void sr_vendor_init(Scsi_CD *cd) cd->vendor = VENDOR_TOSHIBA; } -#endif } @@ -114,10 +110,8 @@ int sr_set_blocklength(Scsi_CD *cd, int blocklength) struct ccs_modesel_head *modesel; int rc, density = 0; -#ifdef CONFIG_BLK_DEV_SR_VENDOR if (cd->vendor == VENDOR_TOSHIBA) density = (blocklength > 2048) ? 
0x81 : 0x83; -#endif buffer = kmalloc(512, GFP_KERNEL | GFP_DMA); if (!buffer) @@ -205,7 +199,6 @@ int sr_cd_check(struct cdrom_device_info *cdi) } break; -#ifdef CONFIG_BLK_DEV_SR_VENDOR case VENDOR_NEC:{ unsigned long min, sec, frame; cgc.cmd[0] = 0xde; @@ -298,7 +291,6 @@ int sr_cd_check(struct cdrom_device_info *cdi) sector = buffer[11] + (buffer[10] << 8) + (buffer[9] << 16) + (buffer[8] << 24); break; -#endif /* CONFIG_BLK_DEV_SR_VENDOR */ default: /* should not happen */ diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c index 411ef60b2c14..c49e9f6c46f8 100644 --- a/drivers/scsi/ufs/ufs-qcom.c +++ b/drivers/scsi/ufs/ufs-qcom.c @@ -1546,11 +1546,11 @@ static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba) /* sleep a bit intermittently as we are dumping too much data */ ufs_qcom_print_hw_debug_reg_all(hba, NULL, ufs_qcom_dump_regs_wrapper); - usleep_range(1000, 1100); + udelay(1000); ufs_qcom_testbus_read(hba); - usleep_range(1000, 1100); + udelay(1000); ufs_qcom_print_unipro_testbus(hba); - usleep_range(1000, 1100); + udelay(1000); } /** diff --git a/drivers/scsi/ufs/ufs_bsg.c b/drivers/scsi/ufs/ufs_bsg.c index d2197a31abe5..bad366e49159 100644 --- a/drivers/scsi/ufs/ufs_bsg.c +++ b/drivers/scsi/ufs/ufs_bsg.c @@ -106,8 +106,10 @@ static int ufs_bsg_request(struct bsg_job *job) desc_op = bsg_request->upiu_req.qr.opcode; ret = ufs_bsg_alloc_desc_buffer(hba, job, &desc_buff, &desc_len, desc_op); - if (ret) + if (ret) { + pm_runtime_put_sync(hba->dev); goto out; + } /* fall through */ case UPIU_TRANSACTION_NOP_OUT: diff --git a/drivers/scsi/ufs/ufs_quirks.h b/drivers/scsi/ufs/ufs_quirks.h index fe6cad9b2a0d..03985919150b 100644 --- a/drivers/scsi/ufs/ufs_quirks.h +++ b/drivers/scsi/ufs/ufs_quirks.h @@ -12,6 +12,7 @@ #define UFS_ANY_VENDOR 0xFFFF #define UFS_ANY_MODEL "ANY_MODEL" +#define UFS_VENDOR_MICRON 0x12C #define UFS_VENDOR_TOSHIBA 0x198 #define UFS_VENDOR_SAMSUNG 0x1CE #define UFS_VENDOR_SKHYNIX 0x1AD diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index bc73181b0405..5e502e160554 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -217,6 +217,8 @@ ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state, static struct ufs_dev_fix ufs_fixups[] = { /* UFS cards deviations table */ + UFS_FIX(UFS_VENDOR_MICRON, UFS_ANY_MODEL, + UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM), UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL, UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM), UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL, @@ -1514,6 +1516,7 @@ unblock_reqs: int ufshcd_hold(struct ufs_hba *hba, bool async) { int rc = 0; + bool flush_result; unsigned long flags; if (!ufshcd_is_clkgating_allowed(hba)) @@ -1545,7 +1548,9 @@ start: break; } spin_unlock_irqrestore(hba->host->host_lock, flags); - flush_work(&hba->clk_gating.ungate_work); + flush_result = flush_work(&hba->clk_gating.ungate_work); + if (hba->clk_gating.is_suspended && !flush_result) + goto out; spin_lock_irqsave(hba->host->host_lock, flags); goto start; } @@ -5101,7 +5106,6 @@ static int ufshcd_bkops_ctrl(struct ufs_hba *hba, err = ufshcd_enable_auto_bkops(hba); else err = ufshcd_disable_auto_bkops(hba); - hba->urgent_bkops_lvl = curr_status; out: return err; } @@ -5608,7 +5612,7 @@ static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status) */ static irqreturn_t ufshcd_intr(int irq, void *__hba) { - u32 intr_status, enabled_intr_status; + u32 intr_status, enabled_intr_status = 0; irqreturn_t retval = IRQ_NONE; struct ufs_hba *hba = __hba; int retries = hba->nutrs; @@ -5622,7 
+5626,7 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba) * read, make sure we handle them by checking the interrupt status * again in a loop until we process all of the reqs before returning. */ - do { + while (intr_status && retries--) { enabled_intr_status = intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE); if (intr_status) @@ -5633,7 +5637,7 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba) } intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS); - } while (intr_status && --retries); + } spin_unlock(hba->host->host_lock); return retval; @@ -6136,7 +6140,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd) /* command completed already */ dev_err(hba->dev, "%s: cmd at tag %d successfully cleared from DB.\n", __func__, tag); - goto out; + goto cleanup; } else { dev_err(hba->dev, "%s: no response from device. tag = %d, err %d\n", @@ -6170,6 +6174,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd) goto out; } +cleanup: scsi_dma_unmap(cmd); spin_lock_irqsave(host->host_lock, flags); diff --git a/drivers/slimbus/core.c b/drivers/slimbus/core.c index 526e3215d8fe..63ee96eb58c6 100644 --- a/drivers/slimbus/core.c +++ b/drivers/slimbus/core.c @@ -283,6 +283,7 @@ EXPORT_SYMBOL_GPL(slim_register_controller); /* slim_remove_device: Remove the effect of slim_add_device() */ static void slim_remove_device(struct slim_device *sbdev) { + of_node_put(sbdev->dev.of_node); device_unregister(&sbdev->dev); } diff --git a/drivers/slimbus/qcom-ngd-ctrl.c b/drivers/slimbus/qcom-ngd-ctrl.c index 29fbab55c3b3..01a17d84b606 100644 --- a/drivers/slimbus/qcom-ngd-ctrl.c +++ b/drivers/slimbus/qcom-ngd-ctrl.c @@ -1354,7 +1354,6 @@ static int of_qcom_slim_ngd_register(struct device *parent, ngd->pdev->driver_override = QCOM_SLIM_NGD_DRV_NAME; ngd->pdev->dev.of_node = node; ctrl->ngd = ngd; - platform_set_drvdata(ngd->pdev, ctrl); platform_device_add(ngd->pdev); ngd->base = ctrl->base + ngd->id * data->offset + @@ -1369,12 +1368,13 @@ static int of_qcom_slim_ngd_register(struct device *parent, static int qcom_slim_ngd_probe(struct platform_device *pdev) { - struct qcom_slim_ngd_ctrl *ctrl = platform_get_drvdata(pdev); struct device *dev = &pdev->dev; + struct qcom_slim_ngd_ctrl *ctrl = dev_get_drvdata(dev->parent); int ret; ctrl->ctrl.dev = dev; + platform_set_drvdata(pdev, ctrl); pm_runtime_use_autosuspend(dev); pm_runtime_set_autosuspend_delay(dev, QCOM_SLIM_NGD_AUTOSUSPEND); pm_runtime_set_suspended(dev); diff --git a/drivers/soc/qcom/rpmh-rsc.c b/drivers/soc/qcom/rpmh-rsc.c index e278fc11fe5c..8924fcd9f5f5 100644 --- a/drivers/soc/qcom/rpmh-rsc.c +++ b/drivers/soc/qcom/rpmh-rsc.c @@ -148,7 +148,7 @@ int rpmh_rsc_invalidate(struct rsc_drv *drv) static struct tcs_group *get_tcs_for_msg(struct rsc_drv *drv, const struct tcs_request *msg) { - int type, ret; + int type; struct tcs_group *tcs; switch (msg->state) { @@ -169,19 +169,10 @@ static struct tcs_group *get_tcs_for_msg(struct rsc_drv *drv, * If we are making an active request on a RSC that does not have a * dedicated TCS for active state use, then re-purpose a wake TCS to * send active votes. - * NOTE: The driver must be aware that this RSC does not have a - * dedicated AMC, and therefore would invalidate the sleep and wake - * TCSes before making an active state request. 
*/ tcs = get_tcs_of_type(drv, type); - if (msg->state == RPMH_ACTIVE_ONLY_STATE && !tcs->num_tcs) { + if (msg->state == RPMH_ACTIVE_ONLY_STATE && !tcs->num_tcs) tcs = get_tcs_of_type(drv, WAKE_TCS); - if (tcs->num_tcs) { - ret = rpmh_rsc_invalidate(drv); - if (ret) - return ERR_PTR(ret); - } - } return tcs; } @@ -201,6 +192,42 @@ static const struct tcs_request *get_req_from_tcs(struct rsc_drv *drv, return NULL; } +static void __tcs_set_trigger(struct rsc_drv *drv, int tcs_id, bool trigger) +{ + u32 enable; + + /* + * HW req: Clear the DRV_CONTROL and enable TCS again + * While clearing ensure that the AMC mode trigger is cleared + * and then the mode enable is cleared. + */ + enable = read_tcs_reg(drv, RSC_DRV_CONTROL, tcs_id, 0); + enable &= ~TCS_AMC_MODE_TRIGGER; + write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable); + enable &= ~TCS_AMC_MODE_ENABLE; + write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable); + + if (trigger) { + /* Enable the AMC mode on the TCS and then trigger the TCS */ + enable = TCS_AMC_MODE_ENABLE; + write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable); + enable |= TCS_AMC_MODE_TRIGGER; + write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable); + } +} + +static void enable_tcs_irq(struct rsc_drv *drv, int tcs_id, bool enable) +{ + u32 data; + + data = read_tcs_reg(drv, RSC_DRV_IRQ_ENABLE, 0, 0); + if (enable) + data |= BIT(tcs_id); + else + data &= ~BIT(tcs_id); + write_tcs_reg(drv, RSC_DRV_IRQ_ENABLE, 0, data); +} + /** * tcs_tx_done: TX Done interrupt handler */ @@ -237,6 +264,14 @@ static irqreturn_t tcs_tx_done(int irq, void *p) } trace_rpmh_tx_done(drv, i, req, err); + + /* + * If wake tcs was re-purposed for sending active + * votes, clear AMC trigger & enable modes and + * disable interrupt for this TCS + */ + if (!drv->tcs[ACTIVE_TCS].num_tcs) + __tcs_set_trigger(drv, i, false); skip: /* Reclaim the TCS */ write_tcs_reg(drv, RSC_DRV_CMD_ENABLE, i, 0); @@ -244,6 +279,13 @@ skip: write_tcs_reg(drv, RSC_DRV_IRQ_CLEAR, 0, BIT(i)); spin_lock(&drv->lock); clear_bit(i, drv->tcs_in_use); + /* + * Disable interrupt for WAKE TCS to avoid being + * spammed with interrupts coming when the solver + * sends its wake votes. + */ + if (!drv->tcs[ACTIVE_TCS].num_tcs) + enable_tcs_irq(drv, i, false); spin_unlock(&drv->lock); if (req) rpmh_tx_done(req, err); @@ -285,28 +327,6 @@ static void __tcs_buffer_write(struct rsc_drv *drv, int tcs_id, int cmd_id, write_tcs_reg(drv, RSC_DRV_CMD_ENABLE, tcs_id, cmd_enable); } -static void __tcs_trigger(struct rsc_drv *drv, int tcs_id) -{ - u32 enable; - - /* - * HW req: Clear the DRV_CONTROL and enable TCS again - * While clearing ensure that the AMC mode trigger is cleared - * and then the mode enable is cleared. 
- */ - enable = read_tcs_reg(drv, RSC_DRV_CONTROL, tcs_id, 0); - enable &= ~TCS_AMC_MODE_TRIGGER; - write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable); - enable &= ~TCS_AMC_MODE_ENABLE; - write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable); - - /* Enable the AMC mode on the TCS and then trigger the TCS */ - enable = TCS_AMC_MODE_ENABLE; - write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable); - enable |= TCS_AMC_MODE_TRIGGER; - write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable); -} - static int check_for_req_inflight(struct rsc_drv *drv, struct tcs_group *tcs, const struct tcs_request *msg) { @@ -377,10 +397,20 @@ static int tcs_write(struct rsc_drv *drv, const struct tcs_request *msg) tcs->req[tcs_id - tcs->offset] = msg; set_bit(tcs_id, drv->tcs_in_use); + if (msg->state == RPMH_ACTIVE_ONLY_STATE && tcs->type != ACTIVE_TCS) { + /* + * Clear previously programmed WAKE commands in selected + * repurposed TCS to avoid triggering them. tcs->slots will be + * cleaned from rpmh_flush() by invoking rpmh_rsc_invalidate() + */ + write_tcs_reg_sync(drv, RSC_DRV_CMD_ENABLE, tcs_id, 0); + write_tcs_reg_sync(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, tcs_id, 0); + enable_tcs_irq(drv, tcs_id, true); + } spin_unlock(&drv->lock); __tcs_buffer_write(drv, tcs_id, 0, msg); - __tcs_trigger(drv, tcs_id); + __tcs_set_trigger(drv, tcs_id, true); done_write: spin_unlock_irqrestore(&tcs->lock, flags); @@ -685,6 +715,7 @@ static struct platform_driver rpmh_driver = { .driver = { .name = "rpmh", .of_match_table = rpmh_drv_match, + .suppress_bind_attrs = true, }, }; diff --git a/drivers/soc/qcom/rpmh.c b/drivers/soc/qcom/rpmh.c index 035091fd44b8..13e1b450f296 100644 --- a/drivers/soc/qcom/rpmh.c +++ b/drivers/soc/qcom/rpmh.c @@ -119,6 +119,7 @@ static struct cache_req *cache_rpm_request(struct rpmh_ctrlr *ctrlr, { struct cache_req *req; unsigned long flags; + u32 old_sleep_val, old_wake_val; spin_lock_irqsave(&ctrlr->cache_lock, flags); req = __find_req(ctrlr, cmd->addr); @@ -133,26 +134,27 @@ static struct cache_req *cache_rpm_request(struct rpmh_ctrlr *ctrlr, req->addr = cmd->addr; req->sleep_val = req->wake_val = UINT_MAX; - INIT_LIST_HEAD(&req->list); list_add_tail(&req->list, &ctrlr->cache); existing: + old_sleep_val = req->sleep_val; + old_wake_val = req->wake_val; + switch (state) { case RPMH_ACTIVE_ONLY_STATE: - if (req->sleep_val != UINT_MAX) - req->wake_val = cmd->data; - break; case RPMH_WAKE_ONLY_STATE: req->wake_val = cmd->data; break; case RPMH_SLEEP_STATE: req->sleep_val = cmd->data; break; - default: - break; } - ctrlr->dirty = true; + ctrlr->dirty |= (req->sleep_val != old_sleep_val || + req->wake_val != old_wake_val) && + req->sleep_val != UINT_MAX && + req->wake_val != UINT_MAX; + unlock: spin_unlock_irqrestore(&ctrlr->cache_lock, flags); @@ -287,6 +289,7 @@ static void cache_batch(struct rpmh_ctrlr *ctrlr, struct batch_cache_req *req) spin_lock_irqsave(&ctrlr->cache_lock, flags); list_add_tail(&req->list, &ctrlr->batch_cache); + ctrlr->dirty = true; spin_unlock_irqrestore(&ctrlr->cache_lock, flags); } @@ -314,18 +317,6 @@ static int flush_batch(struct rpmh_ctrlr *ctrlr) return ret; } -static void invalidate_batch(struct rpmh_ctrlr *ctrlr) -{ - struct batch_cache_req *req, *tmp; - unsigned long flags; - - spin_lock_irqsave(&ctrlr->cache_lock, flags); - list_for_each_entry_safe(req, tmp, &ctrlr->batch_cache, list) - kfree(req); - INIT_LIST_HEAD(&ctrlr->batch_cache); - spin_unlock_irqrestore(&ctrlr->cache_lock, flags); -} - /** * rpmh_write_batch: Write multiple sets of RPMH commands and 
wait for the * batch to finish. @@ -465,6 +456,13 @@ int rpmh_flush(const struct device *dev) return 0; } + /* Invalidate the TCSes first to avoid stale data */ + do { + ret = rpmh_rsc_invalidate(ctrlr_to_drv(ctrlr)); + } while (ret == -EAGAIN); + if (ret) + return ret; + /* First flush the cached batch requests */ ret = flush_batch(ctrlr); if (ret) @@ -496,25 +494,25 @@ int rpmh_flush(const struct device *dev) EXPORT_SYMBOL(rpmh_flush); /** - * rpmh_invalidate: Invalidate all sleep and active sets - * sets. + * rpmh_invalidate: Invalidate sleep and wake sets in batch_cache * * @dev: The device making the request * - * Invalidate the sleep and active values in the TCS blocks. + * Invalidate the sleep and wake values in batch_cache. */ int rpmh_invalidate(const struct device *dev) { struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev); - int ret; + struct batch_cache_req *req, *tmp; + unsigned long flags; - invalidate_batch(ctrlr); + spin_lock_irqsave(&ctrlr->cache_lock, flags); + list_for_each_entry_safe(req, tmp, &ctrlr->batch_cache, list) + kfree(req); + INIT_LIST_HEAD(&ctrlr->batch_cache); ctrlr->dirty = true; + spin_unlock_irqrestore(&ctrlr->cache_lock, flags); - do { - ret = rpmh_rsc_invalidate(ctrlr_to_drv(ctrlr)); - } while (ret == -EAGAIN); - - return ret; + return 0; } EXPORT_SYMBOL(rpmh_invalidate); diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c index a39ea5061dc5..176696f8f38d 100644 --- a/drivers/soc/qcom/socinfo.c +++ b/drivers/soc/qcom/socinfo.c @@ -428,6 +428,8 @@ static int qcom_socinfo_probe(struct platform_device *pdev) qs->attr.family = "Snapdragon"; qs->attr.machine = socinfo_machine(&pdev->dev, le32_to_cpu(info->id)); + qs->attr.soc_id = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%u", + le32_to_cpu(info->id)); qs->attr.revision = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%u.%u", SOCINFO_MAJOR(le32_to_cpu(info->ver)), SOCINFO_MINOR(le32_to_cpu(info->ver))); diff --git a/drivers/soc/tegra/Kconfig b/drivers/soc/tegra/Kconfig index c8ef05d6b8c7..25df4406ce52 100644 --- a/drivers/soc/tegra/Kconfig +++ b/drivers/soc/tegra/Kconfig @@ -130,6 +130,7 @@ config SOC_TEGRA_FLOWCTRL config SOC_TEGRA_PMC bool + select GENERIC_PINCONF config SOC_TEGRA_POWERGATE_BPMP def_bool y diff --git a/drivers/soundwire/intel.c b/drivers/soundwire/intel.c index d1839707128a..243af8198d1c 100644 --- a/drivers/soundwire/intel.c +++ b/drivers/soundwire/intel.c @@ -842,8 +842,9 @@ static int intel_create_dai(struct sdw_cdns *cdns, /* TODO: Read supported rates/formats from hardware */ for (i = off; i < (off + num); i++) { - dais[i].name = kasprintf(GFP_KERNEL, "SDW%d Pin%d", - cdns->instance, i); + dais[i].name = devm_kasprintf(cdns->dev, GFP_KERNEL, + "SDW%d Pin%d", + cdns->instance, i); if (!dais[i].name) return -ENOMEM; diff --git a/drivers/soundwire/slave.c b/drivers/soundwire/slave.c index 6473fa602f82..611f4f5bc36a 100644 --- a/drivers/soundwire/slave.c +++ b/drivers/soundwire/slave.c @@ -57,6 +57,8 @@ static int sdw_slave_add(struct sdw_bus *bus, list_del(&slave->node); mutex_unlock(&bus->bus_lock); put_device(&slave->dev); + + return ret; } sdw_slave_debugfs_init(slave); diff --git a/drivers/soundwire/stream.c b/drivers/soundwire/stream.c index e69f94a8c3a8..de7c57e17710 100644 --- a/drivers/soundwire/stream.c +++ b/drivers/soundwire/stream.c @@ -702,6 +702,7 @@ error: kfree(wbuf); error_1: kfree(wr_msg); + bus->defer_msg.msg = NULL; return ret; } @@ -825,9 +826,10 @@ static int do_bank_switch(struct sdw_stream_runtime *stream) error: list_for_each_entry(m_rt, &stream->master_list, 
stream_node) { bus = m_rt->bus; - - kfree(bus->defer_msg.msg->buf); - kfree(bus->defer_msg.msg); + if (bus->defer_msg.msg) { + kfree(bus->defer_msg.msg->buf); + kfree(bus->defer_msg.msg); + } } msg_unlock: diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index 6f7fdcbb9151..5bf754208777 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig @@ -944,4 +944,7 @@ config SPI_SLAVE_SYSTEM_CONTROL endif # SPI_SLAVE +config SPI_DYNAMIC + def_bool ACPI || OF_DYNAMIC || SPI_SLAVE + endif # SPI diff --git a/drivers/spi/spi-dw-mid.c b/drivers/spi/spi-dw-mid.c index 2663bb12d9ce..b07710c76fc9 100644 --- a/drivers/spi/spi-dw-mid.c +++ b/drivers/spi/spi-dw-mid.c @@ -147,6 +147,7 @@ static struct dma_async_tx_descriptor *dw_spi_dma_prepare_tx(struct dw_spi *dws, if (!xfer->tx_buf) return NULL; + memset(&txconf, 0, sizeof(txconf)); txconf.direction = DMA_MEM_TO_DEV; txconf.dst_addr = dws->dma_addr; txconf.dst_maxburst = 16; @@ -193,6 +194,7 @@ static struct dma_async_tx_descriptor *dw_spi_dma_prepare_rx(struct dw_spi *dws, if (!xfer->rx_buf) return NULL; + memset(&rxconf, 0, sizeof(rxconf)); rxconf.direction = DMA_DEV_TO_MEM; rxconf.src_addr = dws->dma_addr; rxconf.src_maxburst = 16; @@ -218,19 +220,23 @@ static struct dma_async_tx_descriptor *dw_spi_dma_prepare_rx(struct dw_spi *dws, static int mid_spi_dma_setup(struct dw_spi *dws, struct spi_transfer *xfer) { - u16 dma_ctrl = 0; + u16 imr = 0, dma_ctrl = 0; dw_writel(dws, DW_SPI_DMARDLR, 0xf); dw_writel(dws, DW_SPI_DMATDLR, 0x10); - if (xfer->tx_buf) + if (xfer->tx_buf) { dma_ctrl |= SPI_DMA_TDMAE; - if (xfer->rx_buf) + imr |= SPI_INT_TXOI; + } + if (xfer->rx_buf) { dma_ctrl |= SPI_DMA_RDMAE; + imr |= SPI_INT_RXUI | SPI_INT_RXOI; + } dw_writel(dws, DW_SPI_DMACR, dma_ctrl); /* Set the interrupt mask */ - spi_umask_intr(dws, SPI_INT_TXOI | SPI_INT_RXUI | SPI_INT_RXOI); + spi_umask_intr(dws, imr); dws->transfer_handler = dma_transfer; @@ -260,7 +266,7 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, struct spi_transfer *xfer) dma_async_issue_pending(dws->txchan); } - return 0; + return 1; } static void mid_spi_dma_stop(struct dw_spi *dws) diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c index 82c5c027ec4c..c2f96941ad04 100644 --- a/drivers/spi/spi-dw.c +++ b/drivers/spi/spi-dw.c @@ -381,11 +381,8 @@ static int dw_spi_transfer_one(struct spi_controller *master, spi_enable_chip(dws, 1); - if (dws->dma_mapped) { - ret = dws->dma_ops->dma_transfer(dws, transfer); - if (ret < 0) - return ret; - } + if (dws->dma_mapped) + return dws->dma_ops->dma_transfer(dws, transfer); if (chip->poll_mode) return poll_transfer(dws); @@ -529,6 +526,7 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws) dws->dma_inited = 0; } else { master->can_dma = dws->dma_ops->can_dma; + master->flags |= SPI_CONTROLLER_MUST_TX; } } diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c index 68e33457c814..c7560d7d1627 100644 --- a/drivers/spi/spi-fsl-dspi.c +++ b/drivers/spi/spi-fsl-dspi.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0+ // // Copyright 2013 Freescale Semiconductor, Inc. 
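[Editorial sketch] The spi-dw-mid hunks above zero the DMA slave configuration before filling it in, so members the driver never sets are not handed to the DMA engine as stack garbage. A condensed sketch of that pattern, assuming a TX-only channel; the device address and burst size are placeholders, not the driver's values.

#include <linux/dmaengine.h>
#include <linux/string.h>

static struct dma_async_tx_descriptor *
prepare_tx_desc(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int nents, dma_addr_t dar)
{
	struct dma_slave_config cfg;

	/* clear everything first: unset fields (e.g. the src_* side) stay 0 */
	memset(&cfg, 0, sizeof(cfg));
	cfg.direction = DMA_MEM_TO_DEV;
	cfg.dst_addr = dar;
	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
	cfg.dst_maxburst = 16;
	cfg.device_fc = false;

	if (dmaengine_slave_config(chan, &cfg))
		return NULL;

	return dmaengine_prep_slave_sg(chan, sgl, nents, DMA_MEM_TO_DEV,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
}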
+// Copyright 2020 NXP // // Freescale DSPI driver // This file contains a driver for the Freescale DSPI @@ -33,6 +34,9 @@ #define SPI_MCR_CLR_TXF BIT(11) #define SPI_MCR_CLR_RXF BIT(10) #define SPI_MCR_XSPI BIT(3) +#define SPI_MCR_DIS_TXF BIT(13) +#define SPI_MCR_DIS_RXF BIT(12) +#define SPI_MCR_HALT BIT(0) #define SPI_TCR 0x08 #define SPI_TCR_GET_TCNT(x) (((x) & GENMASK(31, 16)) >> 16) @@ -901,6 +905,8 @@ static int dspi_suspend(struct device *dev) struct spi_controller *ctlr = dev_get_drvdata(dev); struct fsl_dspi *dspi = spi_controller_get_devdata(ctlr); + if (dspi->irq) + disable_irq(dspi->irq); spi_controller_suspend(ctlr); clk_disable_unprepare(dspi->clk); @@ -921,6 +927,8 @@ static int dspi_resume(struct device *dev) if (ret) return ret; spi_controller_resume(ctlr); + if (dspi->irq) + enable_irq(dspi->irq); return 0; } @@ -1108,8 +1116,8 @@ static int dspi_probe(struct platform_device *pdev) goto poll_mode; } - ret = devm_request_irq(&pdev->dev, dspi->irq, dspi_interrupt, - IRQF_SHARED, pdev->name, dspi); + ret = request_threaded_irq(dspi->irq, dspi_interrupt, NULL, + IRQF_SHARED, pdev->name, dspi); if (ret < 0) { dev_err(&pdev->dev, "Unable to attach DSPI interrupt\n"); goto out_clk_put; @@ -1122,7 +1130,7 @@ poll_mode: ret = dspi_request_dma(dspi, res->start); if (ret < 0) { dev_err(&pdev->dev, "can't get dma channels\n"); - goto out_clk_put; + goto out_free_irq; } } @@ -1134,11 +1142,14 @@ poll_mode: ret = spi_register_controller(ctlr); if (ret != 0) { dev_err(&pdev->dev, "Problem registering DSPI ctlr\n"); - goto out_clk_put; + goto out_free_irq; } return ret; +out_free_irq: + if (dspi->irq) + free_irq(dspi->irq, dspi); out_clk_put: clk_disable_unprepare(dspi->clk); out_ctlr_put: @@ -1153,13 +1164,29 @@ static int dspi_remove(struct platform_device *pdev) struct fsl_dspi *dspi = spi_controller_get_devdata(ctlr); /* Disconnect from the SPI framework */ - dspi_release_dma(dspi); - clk_disable_unprepare(dspi->clk); spi_unregister_controller(dspi->ctlr); + /* Disable RX and TX */ + regmap_update_bits(dspi->regmap, SPI_MCR, + SPI_MCR_DIS_TXF | SPI_MCR_DIS_RXF, + SPI_MCR_DIS_TXF | SPI_MCR_DIS_RXF); + + /* Stop Running */ + regmap_update_bits(dspi->regmap, SPI_MCR, SPI_MCR_HALT, SPI_MCR_HALT); + + dspi_release_dma(dspi); + if (dspi->irq) + free_irq(dspi->irq, dspi); + clk_disable_unprepare(dspi->clk); + return 0; } +static void dspi_shutdown(struct platform_device *pdev) +{ + dspi_remove(pdev); +} + static struct platform_driver fsl_dspi_driver = { .driver.name = DRIVER_NAME, .driver.of_match_table = fsl_dspi_dt_ids, @@ -1167,6 +1194,7 @@ static struct platform_driver fsl_dspi_driver = { .driver.pm = &dspi_pm, .probe = dspi_probe, .remove = dspi_remove, + .shutdown = dspi_shutdown, }; module_platform_driver(fsl_dspi_driver); diff --git a/drivers/spi/spi-lantiq-ssc.c b/drivers/spi/spi-lantiq-ssc.c index 9dfe8b04e688..f9bc1705c0d4 100644 --- a/drivers/spi/spi-lantiq-ssc.c +++ b/drivers/spi/spi-lantiq-ssc.c @@ -184,6 +184,7 @@ struct lantiq_ssc_spi { unsigned int tx_fifo_size; unsigned int rx_fifo_size; unsigned int base_cs; + unsigned int fdx_tx_level; }; static u32 lantiq_ssc_readl(const struct lantiq_ssc_spi *spi, u32 reg) @@ -481,6 +482,7 @@ static void tx_fifo_write(struct lantiq_ssc_spi *spi) u32 data; unsigned int tx_free = tx_fifo_free(spi); + spi->fdx_tx_level = 0; while (spi->tx_todo && tx_free) { switch (spi->bits_per_word) { case 2 ... 
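[Editorial sketch] The spi-mem change above widens the bus-width capability check so a dual or quad request is also satisfied by a controller that only advertises octal support. A condensed sketch of that check; the function name is illustrative, not the exact spi-mem internal.

#include <linux/spi/spi.h>

static bool spi_mode_supports_buswidth(u32 mode, u8 buswidth, bool tx)
{
	switch (buswidth) {
	case 1:
		return true;	/* a single data line is always available */
	case 2:
		/* any wider TX/RX capability implies dual is possible */
		return tx ? !!(mode & (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL))
			  : !!(mode & (SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL));
	case 4:
		return tx ? !!(mode & (SPI_TX_QUAD | SPI_TX_OCTAL))
			  : !!(mode & (SPI_RX_QUAD | SPI_RX_OCTAL));
	case 8:
		return tx ? !!(mode & SPI_TX_OCTAL) : !!(mode & SPI_RX_OCTAL);
	default:
		return false;
	}
}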
8: @@ -509,6 +511,7 @@ static void tx_fifo_write(struct lantiq_ssc_spi *spi) lantiq_ssc_writel(spi, data, LTQ_SPI_TB); tx_free--; + spi->fdx_tx_level++; } } @@ -520,6 +523,13 @@ static void rx_fifo_read_full_duplex(struct lantiq_ssc_spi *spi) u32 data; unsigned int rx_fill = rx_fifo_level(spi); + /* + * Wait until all expected data to be shifted in. + * Otherwise, rx overrun may occur. + */ + while (rx_fill != spi->fdx_tx_level) + rx_fill = rx_fifo_level(spi); + while (rx_fill) { data = lantiq_ssc_readl(spi, LTQ_SPI_RB); @@ -907,7 +917,7 @@ static int lantiq_ssc_probe(struct platform_device *pdev) master->bits_per_word_mask = SPI_BPW_RANGE_MASK(2, 8) | SPI_BPW_MASK(16) | SPI_BPW_MASK(32); - spi->wq = alloc_ordered_workqueue(dev_name(dev), 0); + spi->wq = alloc_ordered_workqueue(dev_name(dev), WQ_MEM_RECLAIM); if (!spi->wq) { err = -ENOMEM; goto err_clk_put; diff --git a/drivers/spi/spi-loopback-test.c b/drivers/spi/spi-loopback-test.c index 6f18d4952767..51633b2b6437 100644 --- a/drivers/spi/spi-loopback-test.c +++ b/drivers/spi/spi-loopback-test.c @@ -90,7 +90,7 @@ static struct spi_test spi_tests[] = { { .description = "tx/rx-transfer - crossing PAGE_SIZE", .fill_option = FILL_COUNT_8, - .iterate_len = { ITERATE_MAX_LEN }, + .iterate_len = { ITERATE_LEN }, .iterate_tx_align = ITERATE_ALIGN, .iterate_rx_align = ITERATE_ALIGN, .transfer_count = 1, diff --git a/drivers/spi/spi-mem.c b/drivers/spi/spi-mem.c index 9f0fa9f3116d..de0ba3e5449f 100644 --- a/drivers/spi/spi-mem.c +++ b/drivers/spi/spi-mem.c @@ -108,15 +108,17 @@ static int spi_check_buswidth_req(struct spi_mem *mem, u8 buswidth, bool tx) return 0; case 2: - if ((tx && (mode & (SPI_TX_DUAL | SPI_TX_QUAD))) || - (!tx && (mode & (SPI_RX_DUAL | SPI_RX_QUAD)))) + if ((tx && + (mode & (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL))) || + (!tx && + (mode & (SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL)))) return 0; break; case 4: - if ((tx && (mode & SPI_TX_QUAD)) || - (!tx && (mode & SPI_RX_QUAD))) + if ((tx && (mode & (SPI_TX_QUAD | SPI_TX_OCTAL))) || + (!tx && (mode & (SPI_RX_QUAD | SPI_RX_OCTAL)))) return 0; break; diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c index 6888a4dcff6d..8acf24f7c5d4 100644 --- a/drivers/spi/spi-mt65xx.c +++ b/drivers/spi/spi-mt65xx.c @@ -36,7 +36,6 @@ #define SPI_CFG0_SCK_LOW_OFFSET 8 #define SPI_CFG0_CS_HOLD_OFFSET 16 #define SPI_CFG0_CS_SETUP_OFFSET 24 -#define SPI_ADJUST_CFG0_SCK_LOW_OFFSET 16 #define SPI_ADJUST_CFG0_CS_HOLD_OFFSET 0 #define SPI_ADJUST_CFG0_CS_SETUP_OFFSET 16 @@ -48,6 +47,8 @@ #define SPI_CFG1_CS_IDLE_MASK 0xff #define SPI_CFG1_PACKET_LOOP_MASK 0xff00 #define SPI_CFG1_PACKET_LENGTH_MASK 0x3ff0000 +#define SPI_CFG2_SCK_HIGH_OFFSET 0 +#define SPI_CFG2_SCK_LOW_OFFSET 16 #define SPI_CMD_ACT BIT(0) #define SPI_CMD_RESUME BIT(1) @@ -279,7 +280,7 @@ static void mtk_spi_set_cs(struct spi_device *spi, bool enable) static void mtk_spi_prepare_transfer(struct spi_master *master, struct spi_transfer *xfer) { - u32 spi_clk_hz, div, sck_time, cs_time, reg_val = 0; + u32 spi_clk_hz, div, sck_time, cs_time, reg_val; struct mtk_spi *mdata = spi_master_get_devdata(master); spi_clk_hz = clk_get_rate(mdata->spi_clk); @@ -292,18 +293,18 @@ static void mtk_spi_prepare_transfer(struct spi_master *master, cs_time = sck_time * 2; if (mdata->dev_comp->enhance_timing) { + reg_val = (((sck_time - 1) & 0xffff) + << SPI_CFG2_SCK_HIGH_OFFSET); reg_val |= (((sck_time - 1) & 0xffff) - << SPI_CFG0_SCK_HIGH_OFFSET); - reg_val |= (((sck_time - 1) & 0xffff) - << SPI_ADJUST_CFG0_SCK_LOW_OFFSET); + << 
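[Editorial sketch] The spi-stm32 change above derives the FIFO threshold from both the word size and the transfer length, so a short transfer never programs a threshold larger than the data that will ever arrive, and the result is clamped to at least 1. The arithmetic as a standalone sketch, assuming the same inputs the driver uses (FIFO size in bytes, bits per word, transfer length in bytes):

static u32 fifo_threshold(u32 fifo_size, u32 bits_per_word, u32 xfer_len)
{
	u32 half_fifo = fifo_size / 2;
	/* a data packet must not exceed half the FIFO nor the transfer */
	u32 packet = (half_fifo > xfer_len) ? xfer_len : half_fifo;
	u32 fthlv;

	if (bits_per_word <= 8)
		fthlv = packet;
	else if (bits_per_word <= 16)
		fthlv = packet / 2;
	else
		fthlv = packet / 4;

	/* align with 16-bit or 8-bit data register accesses */
	if (bits_per_word > 8)
		fthlv -= fthlv % 2;
	else
		fthlv -= fthlv % 4;

	return fthlv ? fthlv : 1;	/* never program a zero threshold */
}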
SPI_CFG2_SCK_LOW_OFFSET); writel(reg_val, mdata->base + SPI_CFG2_REG); - reg_val |= (((cs_time - 1) & 0xffff) + reg_val = (((cs_time - 1) & 0xffff) << SPI_ADJUST_CFG0_CS_HOLD_OFFSET); reg_val |= (((cs_time - 1) & 0xffff) << SPI_ADJUST_CFG0_CS_SETUP_OFFSET); writel(reg_val, mdata->base + SPI_CFG0_REG); } else { - reg_val |= (((sck_time - 1) & 0xff) + reg_val = (((sck_time - 1) & 0xff) << SPI_CFG0_SCK_HIGH_OFFSET); reg_val |= (((sck_time - 1) & 0xff) << SPI_CFG0_SCK_LOW_OFFSET); reg_val |= (((cs_time - 1) & 0xff) << SPI_CFG0_CS_HOLD_OFFSET); diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c index d0d6f1bda1b6..7f4285e2ae68 100644 --- a/drivers/spi/spi-pxa2xx.c +++ b/drivers/spi/spi-pxa2xx.c @@ -148,6 +148,7 @@ static const struct lpss_config lpss_platforms[] = { .tx_threshold_hi = 48, .cs_sel_shift = 8, .cs_sel_mask = 3 << 8, + .cs_clk_stays_gated = true, }, { /* LPSS_CNL_SSP */ .offset = 0x200, diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c index 2cc6d9951b52..008b64f4e031 100644 --- a/drivers/spi/spi-rockchip.c +++ b/drivers/spi/spi-rockchip.c @@ -286,7 +286,7 @@ static void rockchip_spi_pio_writer(struct rockchip_spi *rs) static void rockchip_spi_pio_reader(struct rockchip_spi *rs) { u32 words = readl_relaxed(rs->regs + ROCKCHIP_SPI_RXFLR); - u32 rx_left = rs->rx_left - words; + u32 rx_left = (rs->rx_left > words) ? rs->rx_left - words : 0; /* the hardware doesn't allow us to change fifo threshold * level while spi is enabled, so instead make sure to leave diff --git a/drivers/spi/spi-sprd-adi.c b/drivers/spi/spi-sprd-adi.c index 9613cfe3c0a2..09f983524d51 100644 --- a/drivers/spi/spi-sprd-adi.c +++ b/drivers/spi/spi-sprd-adi.c @@ -384,9 +384,9 @@ static int sprd_adi_restart_handler(struct notifier_block *this, sprd_adi_write(sadi, sadi->slave_pbase + REG_WDG_LOCK, WDG_UNLOCK_KEY); /* Load the watchdog timeout value, 50ms is always enough. 
*/ + sprd_adi_write(sadi, sadi->slave_pbase + REG_WDG_LOAD_HIGH, 0); sprd_adi_write(sadi, sadi->slave_pbase + REG_WDG_LOAD_LOW, WDG_LOAD_VAL & WDG_LOAD_MASK); - sprd_adi_write(sadi, sadi->slave_pbase + REG_WDG_LOAD_HIGH, 0); /* Start the watchdog to reset system */ sprd_adi_read(sadi, sadi->slave_pbase + REG_WDG_CTRL, &val); diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c index b222ce8d083e..9d8ceb63f7db 100644 --- a/drivers/spi/spi-stm32.c +++ b/drivers/spi/spi-stm32.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include @@ -442,7 +443,8 @@ static int stm32_spi_prepare_mbr(struct stm32_spi *spi, u32 speed_hz, { u32 div, mbrdiv; - div = DIV_ROUND_UP(spi->clk_rate, speed_hz); + /* Ensure spi->clk_rate is even */ + div = DIV_ROUND_UP(spi->clk_rate & ~0x1, speed_hz); /* * SPI framework set xfer->speed_hz to master->max_speed_hz if @@ -468,20 +470,27 @@ static int stm32_spi_prepare_mbr(struct stm32_spi *spi, u32 speed_hz, /** * stm32h7_spi_prepare_fthlv - Determine FIFO threshold level * @spi: pointer to the spi controller data structure + * @xfer_len: length of the message to be transferred */ -static u32 stm32h7_spi_prepare_fthlv(struct stm32_spi *spi) +static u32 stm32h7_spi_prepare_fthlv(struct stm32_spi *spi, u32 xfer_len) { - u32 fthlv, half_fifo; + u32 fthlv, half_fifo, packet; /* data packet should not exceed 1/2 of fifo space */ half_fifo = (spi->fifo_size / 2); - if (spi->cur_bpw <= 8) - fthlv = half_fifo; - else if (spi->cur_bpw <= 16) - fthlv = half_fifo / 2; + /* data_packet should not exceed transfer length */ + if (half_fifo > xfer_len) + packet = xfer_len; else - fthlv = half_fifo / 4; + packet = half_fifo; + + if (spi->cur_bpw <= 8) + fthlv = packet; + else if (spi->cur_bpw <= 16) + fthlv = packet / 2; + else + fthlv = packet / 4; /* align packet size with data registers access */ if (spi->cur_bpw > 8) @@ -489,6 +498,9 @@ static u32 stm32h7_spi_prepare_fthlv(struct stm32_spi *spi) else fthlv -= (fthlv % 4); /* multiple of 4 */ + if (!fthlv) + fthlv = 1; + return fthlv; } @@ -919,7 +931,11 @@ static irqreturn_t stm32h7_spi_irq_thread(int irq, void *dev_id) } if (sr & STM32H7_SPI_SR_SUSP) { - dev_warn(spi->dev, "Communication suspended\n"); + static DEFINE_RATELIMIT_STATE(rs, + DEFAULT_RATELIMIT_INTERVAL * 10, + 1); + if (__ratelimit(&rs)) + dev_dbg_ratelimited(spi->dev, "Communication suspended\n"); if (!spi->cur_usedma && (spi->rx_buf && (spi->rx_len > 0))) stm32h7_spi_read_rxfifo(spi, false); /* @@ -961,13 +977,13 @@ static irqreturn_t stm32h7_spi_irq_thread(int irq, void *dev_id) if (!spi->cur_usedma && (spi->rx_buf && (spi->rx_len > 0))) stm32h7_spi_read_rxfifo(spi, false); - writel_relaxed(mask, spi->base + STM32H7_SPI_IFCR); + writel_relaxed(sr & mask, spi->base + STM32H7_SPI_IFCR); spin_unlock_irqrestore(&spi->lock, flags); if (end) { - spi_finalize_current_transfer(master); stm32h7_spi_disable(spi); + spi_finalize_current_transfer(master); } return IRQ_HANDLED; @@ -1395,7 +1411,7 @@ static void stm32h7_spi_set_bpw(struct stm32_spi *spi) cfg1_setb |= (bpw << STM32H7_SPI_CFG1_DSIZE_SHIFT) & STM32H7_SPI_CFG1_DSIZE; - spi->cur_fthlv = stm32h7_spi_prepare_fthlv(spi); + spi->cur_fthlv = stm32h7_spi_prepare_fthlv(spi, spi->cur_xferlen); fthlv = spi->cur_fthlv - 1; cfg1_clrb |= STM32H7_SPI_CFG1_FTHLV; @@ -1578,39 +1594,33 @@ static int stm32_spi_transfer_one_setup(struct stm32_spi *spi, unsigned long flags; unsigned int comm_type; int nb_words, ret = 0; + int mbr; spin_lock_irqsave(&spi->lock, flags); - if (spi->cur_bpw != 
transfer->bits_per_word) { - spi->cur_bpw = transfer->bits_per_word; - spi->cfg->set_bpw(spi); + spi->cur_xferlen = transfer->len; + + spi->cur_bpw = transfer->bits_per_word; + spi->cfg->set_bpw(spi); + + /* Update spi->cur_speed with real clock speed */ + mbr = stm32_spi_prepare_mbr(spi, transfer->speed_hz, + spi->cfg->baud_rate_div_min, + spi->cfg->baud_rate_div_max); + if (mbr < 0) { + ret = mbr; + goto out; } - if (spi->cur_speed != transfer->speed_hz) { - int mbr; - - /* Update spi->cur_speed with real clock speed */ - mbr = stm32_spi_prepare_mbr(spi, transfer->speed_hz, - spi->cfg->baud_rate_div_min, - spi->cfg->baud_rate_div_max); - if (mbr < 0) { - ret = mbr; - goto out; - } - - transfer->speed_hz = spi->cur_speed; - stm32_spi_set_mbr(spi, mbr); - } + transfer->speed_hz = spi->cur_speed; + stm32_spi_set_mbr(spi, mbr); comm_type = stm32_spi_communication_type(spi_dev, transfer); - if (spi->cur_comm != comm_type) { - ret = spi->cfg->set_mode(spi, comm_type); + ret = spi->cfg->set_mode(spi, comm_type); + if (ret < 0) + goto out; - if (ret < 0) - goto out; - - spi->cur_comm = comm_type; - } + spi->cur_comm = comm_type; if (spi->cfg->set_data_idleness) spi->cfg->set_data_idleness(spi, transfer->len); @@ -1628,8 +1638,6 @@ static int stm32_spi_transfer_one_setup(struct stm32_spi *spi, goto out; } - spi->cur_xferlen = transfer->len; - dev_dbg(spi->dev, "transfer communication mode set to %d\n", spi->cur_comm); dev_dbg(spi->dev, @@ -1986,6 +1994,8 @@ static int stm32_spi_remove(struct platform_device *pdev) pm_runtime_disable(&pdev->dev); + pinctrl_pm_select_sleep_state(&pdev->dev); + return 0; } @@ -1997,13 +2007,18 @@ static int stm32_spi_runtime_suspend(struct device *dev) clk_disable_unprepare(spi->clk); - return 0; + return pinctrl_pm_select_sleep_state(dev); } static int stm32_spi_runtime_resume(struct device *dev) { struct spi_master *master = dev_get_drvdata(dev); struct stm32_spi *spi = spi_master_get_devdata(master); + int ret; + + ret = pinctrl_pm_select_default_state(dev); + if (ret) + return ret; return clk_prepare_enable(spi->clk); } @@ -2033,10 +2048,23 @@ static int stm32_spi_resume(struct device *dev) return ret; ret = spi_master_resume(master); - if (ret) + if (ret) { clk_disable_unprepare(spi->clk); + return ret; + } - return ret; + ret = pm_runtime_get_sync(dev); + if (ret < 0) { + dev_err(dev, "Unable to power device:%d\n", ret); + return ret; + } + + spi->cfg->config(spi); + + pm_runtime_mark_last_busy(dev); + pm_runtime_put_autosuspend(dev); + + return 0; } #endif diff --git a/drivers/spi/spi-sun6i.c b/drivers/spi/spi-sun6i.c index ec7967be9e2f..956df79035d5 100644 --- a/drivers/spi/spi-sun6i.c +++ b/drivers/spi/spi-sun6i.c @@ -198,7 +198,7 @@ static int sun6i_spi_transfer_one(struct spi_master *master, struct spi_transfer *tfr) { struct sun6i_spi *sspi = spi_master_get_devdata(master); - unsigned int mclk_rate, div, timeout; + unsigned int mclk_rate, div, div_cdr1, div_cdr2, timeout; unsigned int start, end, tx_time; unsigned int trig_level; unsigned int tx_len = 0; @@ -287,14 +287,12 @@ static int sun6i_spi_transfer_one(struct spi_master *master, * First try CDR2, and if we can't reach the expected * frequency, fall back to CDR1. 
*/ - div = mclk_rate / (2 * tfr->speed_hz); - if (div <= (SUN6I_CLK_CTL_CDR2_MASK + 1)) { - if (div > 0) - div--; - - reg = SUN6I_CLK_CTL_CDR2(div) | SUN6I_CLK_CTL_DRS; + div_cdr1 = DIV_ROUND_UP(mclk_rate, tfr->speed_hz); + div_cdr2 = DIV_ROUND_UP(div_cdr1, 2); + if (div_cdr2 <= (SUN6I_CLK_CTL_CDR2_MASK + 1)) { + reg = SUN6I_CLK_CTL_CDR2(div_cdr2 - 1) | SUN6I_CLK_CTL_DRS; } else { - div = ilog2(mclk_rate) - ilog2(tfr->speed_hz); + div = min(SUN6I_CLK_CTL_CDR1_MASK, order_base_2(div_cdr1)); reg = SUN6I_CLK_CTL_CDR1(div); } diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index 6bfbf0cfcf63..982753ac1bf6 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -475,6 +475,12 @@ static LIST_HEAD(spi_controller_list); */ static DEFINE_MUTEX(board_lock); +/* + * Prevents addition of devices with same chip select and + * addition of devices below an unregistering controller. + */ +static DEFINE_MUTEX(spi_add_lock); + /** * spi_alloc_device - Allocate a new SPI device * @ctlr: Controller to which device is connected @@ -553,7 +559,6 @@ static int spi_dev_check(struct device *dev, void *data) */ int spi_add_device(struct spi_device *spi) { - static DEFINE_MUTEX(spi_add_lock); struct spi_controller *ctlr = spi->controller; struct device *dev = ctlr->dev.parent; int status; @@ -581,6 +586,13 @@ int spi_add_device(struct spi_device *spi) goto done; } + /* Controller may unregister concurrently */ + if (IS_ENABLED(CONFIG_SPI_DYNAMIC) && + !device_is_registered(&ctlr->dev)) { + status = -ENODEV; + goto done; + } + /* Descriptors take precedence */ if (ctlr->cs_gpiods) spi->cs_gpiod = ctlr->cs_gpiods[spi->chip_select]; @@ -1229,8 +1241,6 @@ out: if (msg->status && ctlr->handle_err) ctlr->handle_err(ctlr, msg); - spi_res_release(ctlr, msg); - spi_finalize_current_message(ctlr); return ret; @@ -1513,6 +1523,13 @@ void spi_finalize_current_message(struct spi_controller *ctlr) spi_unmap_msg(ctlr, mesg); + /* In the prepare_messages callback the spi bus has the opportunity to + * split a transfer to smaller chunks. + * Release splited transfers here since spi_map_msg is done on the + * splited transfers. 
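[Editorial sketch] The spi-sun6i change above rounds the divider up, so the generated SCK never exceeds the requested rate, and falls back from the linear CDR2 divider to the power-of-two CDR1 divider when CDR2 cannot reach the target. A worked sketch of that selection; the CDR mask values are illustrative placeholders for the driver's register fields.

#include <linux/kernel.h>
#include <linux/log2.h>

#define DEMO_CDR2_MAX	0xff	/* placeholder for the CDR2 field mask */
#define DEMO_CDR1_MAX	0xf	/* placeholder for the CDR1 field mask */

/* Returns the divider field value; *use_cdr2 reports which scheme applies. */
static unsigned int pick_spi_divider(unsigned long mclk_rate,
				     unsigned long speed_hz, bool *use_cdr2)
{
	/* round up so the resulting clock is never faster than requested */
	unsigned int div_cdr1 = DIV_ROUND_UP(mclk_rate, speed_hz);
	unsigned int div_cdr2 = DIV_ROUND_UP(div_cdr1, 2);

	if (div_cdr2 <= DEMO_CDR2_MAX + 1) {
		*use_cdr2 = true;
		return div_cdr2 - 1;	/* CDR2: SCK = mclk / (2 * (n + 1)) */
	}

	*use_cdr2 = false;
	/* CDR1: SCK = mclk / 2^n, clamp the exponent to the field width */
	return min_t(unsigned int, DEMO_CDR1_MAX, order_base_2(div_cdr1));
}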
+ */ + spi_res_release(ctlr, mesg); + if (ctlr->cur_msg_prepared && ctlr->unprepare_message) { ret = ctlr->unprepare_message(ctlr, mesg); if (ret) { @@ -1950,6 +1967,7 @@ static int acpi_spi_add_resource(struct acpi_resource *ares, void *data) } lookup->max_speed_hz = sb->connection_speed; + lookup->bits_per_word = sb->data_bit_length; if (sb->clock_phase == ACPI_SPI_SECOND_PHASE) lookup->mode |= SPI_CPHA; @@ -2581,6 +2599,10 @@ void spi_unregister_controller(struct spi_controller *ctlr) struct spi_controller *found; int id = ctlr->bus_num; + /* Prevent addition of new devices, unregister existing ones */ + if (IS_ENABLED(CONFIG_SPI_DYNAMIC)) + mutex_lock(&spi_add_lock); + device_for_each_child(&ctlr->dev, NULL, __unregister); /* First make sure that this controller was ever added */ @@ -2601,6 +2623,9 @@ void spi_unregister_controller(struct spi_controller *ctlr) if (found == ctlr) idr_remove(&spi_master_idr, id); mutex_unlock(&board_lock); + + if (IS_ENABLED(CONFIG_SPI_DYNAMIC)) + mutex_unlock(&spi_add_lock); } EXPORT_SYMBOL_GPL(spi_unregister_controller); diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c index ab2c3848f5bf..be503a0e6ef7 100644 --- a/drivers/spi/spidev.c +++ b/drivers/spi/spidev.c @@ -223,6 +223,11 @@ static int spidev_message(struct spidev_data *spidev, for (n = n_xfers, k_tmp = k_xfers, u_tmp = u_xfers; n; n--, k_tmp++, u_tmp++) { + /* Ensure that also following allocations from rx_buf/tx_buf will meet + * DMA alignment requirements. + */ + unsigned int len_aligned = ALIGN(u_tmp->len, ARCH_KMALLOC_MINALIGN); + k_tmp->len = u_tmp->len; total += k_tmp->len; @@ -238,17 +243,17 @@ static int spidev_message(struct spidev_data *spidev, if (u_tmp->rx_buf) { /* this transfer needs space in RX bounce buffer */ - rx_total += k_tmp->len; + rx_total += len_aligned; if (rx_total > bufsiz) { status = -EMSGSIZE; goto done; } k_tmp->rx_buf = rx_buf; - rx_buf += k_tmp->len; + rx_buf += len_aligned; } if (u_tmp->tx_buf) { /* this transfer needs space in TX bounce buffer */ - tx_total += k_tmp->len; + tx_total += len_aligned; if (tx_total > bufsiz) { status = -EMSGSIZE; goto done; @@ -258,7 +263,7 @@ static int spidev_message(struct spidev_data *spidev, (uintptr_t) u_tmp->tx_buf, u_tmp->len)) goto done; - tx_buf += k_tmp->len; + tx_buf += len_aligned; } k_tmp->cs_change = !!u_tmp->cs_change; @@ -290,16 +295,16 @@ static int spidev_message(struct spidev_data *spidev, goto done; /* copy any rx data out of bounce buffer */ - rx_buf = spidev->rx_buffer; - for (n = n_xfers, u_tmp = u_xfers; n; n--, u_tmp++) { + for (n = n_xfers, k_tmp = k_xfers, u_tmp = u_xfers; + n; + n--, k_tmp++, u_tmp++) { if (u_tmp->rx_buf) { if (copy_to_user((u8 __user *) - (uintptr_t) u_tmp->rx_buf, rx_buf, + (uintptr_t) u_tmp->rx_buf, k_tmp->rx_buf, u_tmp->len)) { status = -EFAULT; goto done; } - rx_buf += u_tmp->len; } } status = total; @@ -605,15 +610,20 @@ err_find_dev: static int spidev_release(struct inode *inode, struct file *filp) { struct spidev_data *spidev; + int dofree; mutex_lock(&device_list_lock); spidev = filp->private_data; filp->private_data = NULL; + spin_lock_irq(&spidev->spi_lock); + /* ... after we unbound from the underlying device? */ + dofree = (spidev->spi == NULL); + spin_unlock_irq(&spidev->spi_lock); + /* last close? 
*/ spidev->users--; if (!spidev->users) { - int dofree; kfree(spidev->tx_buffer); spidev->tx_buffer = NULL; @@ -621,19 +631,14 @@ static int spidev_release(struct inode *inode, struct file *filp) kfree(spidev->rx_buffer); spidev->rx_buffer = NULL; - spin_lock_irq(&spidev->spi_lock); - if (spidev->spi) - spidev->speed_hz = spidev->spi->max_speed_hz; - - /* ... after we unbound from the underlying device? */ - dofree = (spidev->spi == NULL); - spin_unlock_irq(&spidev->spi_lock); - if (dofree) kfree(spidev); + else + spidev->speed_hz = spidev->spi->max_speed_hz; } #ifdef CONFIG_SPI_SLAVE - spi_slave_abort(spidev->spi); + if (!dofree) + spi_slave_abort(spidev->spi); #endif mutex_unlock(&device_list_lock); @@ -783,13 +788,13 @@ static int spidev_remove(struct spi_device *spi) { struct spidev_data *spidev = spi_get_drvdata(spi); + /* prevent new opens */ + mutex_lock(&device_list_lock); /* make sure ops on existing fds can abort cleanly */ spin_lock_irq(&spidev->spi_lock); spidev->spi = NULL; spin_unlock_irq(&spidev->spi_lock); - /* prevent new opens */ - mutex_lock(&device_list_lock); list_del(&spidev->device_entry); device_destroy(spidev_class, spidev->devt); clear_bit(MINOR(spidev->devt), minors); diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c index c6695354b123..19b0cc5ea33f 100644 --- a/drivers/staging/android/ashmem.c +++ b/drivers/staging/android/ashmem.c @@ -95,6 +95,15 @@ static DEFINE_MUTEX(ashmem_mutex); static struct kmem_cache *ashmem_area_cachep __read_mostly; static struct kmem_cache *ashmem_range_cachep __read_mostly; +/* + * A separate lockdep class for the backing shmem inodes to resolve the lockdep + * warning about the race between kswapd taking fs_reclaim before inode_lock + * and write syscall taking inode_lock and then fs_reclaim. + * Note that such race is impossible because ashmem does not support write + * syscalls operating on the backing shmem. 
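[Editorial sketch] The spidev change above reserves bounce-buffer space per transfer in ARCH_KMALLOC_MINALIGN-sized steps, so each transfer's slice starts on a DMA-safe boundary even when the previous transfer had an odd length. A reduced sketch of that packing; the helper name and parameters are hypothetical.

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>

/*
 * Assign each transfer an offset into a shared bounce buffer of size bufsiz,
 * rounding every slice up to the minimum kmalloc alignment so the following
 * slices remain DMA-safe. Returns the number placed, or -EMSGSIZE on overflow.
 */
static int pack_bounce_offsets(const unsigned int *len, unsigned int n,
			       size_t bufsiz, size_t *offset)
{
	size_t used = 0;
	unsigned int i;

	for (i = 0; i < n; i++) {
		size_t aligned = ALIGN(len[i], ARCH_KMALLOC_MINALIGN);

		if (used + aligned > bufsiz)
			return -EMSGSIZE;
		offset[i] = used;
		used += aligned;	/* next slice starts aligned */
	}

	return i;
}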
+ */ +static struct lock_class_key backing_shmem_inode_class; + static inline unsigned long range_size(struct ashmem_range *range) { return range->pgend - range->pgstart + 1; @@ -396,6 +405,7 @@ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma) if (!asma->file) { char *name = ASHMEM_NAME_DEF; struct file *vmfile; + struct inode *inode; if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0') name = asma->name; @@ -407,6 +417,8 @@ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma) goto out; } vmfile->f_mode |= FMODE_LSEEK; + inode = file_inode(vmfile); + lockdep_set_class(&inode->i_rwsem, &backing_shmem_inode_class); asma->file = vmfile; /* * override mmap operation of the vmfile so that it can't be diff --git a/drivers/staging/android/ion/ion_heap.c b/drivers/staging/android/ion/ion_heap.c index 473b465724f1..0755b11348ed 100644 --- a/drivers/staging/android/ion/ion_heap.c +++ b/drivers/staging/android/ion/ion_heap.c @@ -99,12 +99,12 @@ int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer, static int ion_heap_clear_pages(struct page **pages, int num, pgprot_t pgprot) { - void *addr = vm_map_ram(pages, num, -1, pgprot); + void *addr = vmap(pages, num, VM_MAP, pgprot); if (!addr) return -ENOMEM; memset(addr, 0, PAGE_SIZE * num); - vm_unmap_ram(addr, num); + vunmap(addr); return 0; } diff --git a/drivers/staging/comedi/drivers/addi_apci_1032.c b/drivers/staging/comedi/drivers/addi_apci_1032.c index 560649be9d13..e035c9f757a1 100644 --- a/drivers/staging/comedi/drivers/addi_apci_1032.c +++ b/drivers/staging/comedi/drivers/addi_apci_1032.c @@ -106,14 +106,22 @@ static int apci1032_cos_insn_config(struct comedi_device *dev, unsigned int *data) { struct apci1032_private *devpriv = dev->private; - unsigned int shift, oldmask; + unsigned int shift, oldmask, himask, lomask; switch (data[0]) { case INSN_CONFIG_DIGITAL_TRIG: if (data[1] != 0) return -EINVAL; shift = data[3]; - oldmask = (1U << shift) - 1; + if (shift < 32) { + oldmask = (1U << shift) - 1; + himask = data[4] << shift; + lomask = data[5] << shift; + } else { + oldmask = 0xffffffffu; + himask = 0; + lomask = 0; + } switch (data[2]) { case COMEDI_DIGITAL_TRIG_DISABLE: devpriv->ctrl = 0; @@ -136,8 +144,8 @@ static int apci1032_cos_insn_config(struct comedi_device *dev, devpriv->mode2 &= oldmask; } /* configure specified channels */ - devpriv->mode1 |= data[4] << shift; - devpriv->mode2 |= data[5] << shift; + devpriv->mode1 |= himask; + devpriv->mode2 |= lomask; break; case COMEDI_DIGITAL_TRIG_ENABLE_LEVELS: if (devpriv->ctrl != (APCI1032_CTRL_INT_ENA | @@ -154,8 +162,8 @@ static int apci1032_cos_insn_config(struct comedi_device *dev, devpriv->mode2 &= oldmask; } /* configure specified channels */ - devpriv->mode1 |= data[4] << shift; - devpriv->mode2 |= data[5] << shift; + devpriv->mode1 |= himask; + devpriv->mode2 |= lomask; break; default: return -EINVAL; diff --git a/drivers/staging/comedi/drivers/addi_apci_1500.c b/drivers/staging/comedi/drivers/addi_apci_1500.c index 45ad4ba92f94..816dd25b9d0e 100644 --- a/drivers/staging/comedi/drivers/addi_apci_1500.c +++ b/drivers/staging/comedi/drivers/addi_apci_1500.c @@ -452,13 +452,14 @@ static int apci1500_di_cfg_trig(struct comedi_device *dev, struct apci1500_private *devpriv = dev->private; unsigned int trig = data[1]; unsigned int shift = data[3]; - unsigned int hi_mask = data[4] << shift; - unsigned int lo_mask = data[5] << shift; - unsigned int chan_mask = hi_mask | lo_mask; - unsigned int old_mask = (1 << shift) - 1; - unsigned int pm = 
devpriv->pm[trig] & old_mask; - unsigned int pt = devpriv->pt[trig] & old_mask; - unsigned int pp = devpriv->pp[trig] & old_mask; + unsigned int hi_mask; + unsigned int lo_mask; + unsigned int chan_mask; + unsigned int old_mask; + unsigned int pm; + unsigned int pt; + unsigned int pp; + unsigned int invalid_chan; if (trig > 1) { dev_dbg(dev->class_dev, @@ -466,11 +467,28 @@ static int apci1500_di_cfg_trig(struct comedi_device *dev, return -EINVAL; } - if (chan_mask > 0xffff) { + if (shift <= 16) { + hi_mask = data[4] << shift; + lo_mask = data[5] << shift; + old_mask = (1U << shift) - 1; + invalid_chan = (data[4] | data[5]) >> (16 - shift); + } else { + hi_mask = 0; + lo_mask = 0; + old_mask = 0xffff; + invalid_chan = data[4] | data[5]; + } + chan_mask = hi_mask | lo_mask; + + if (invalid_chan) { dev_dbg(dev->class_dev, "invalid digital trigger channel\n"); return -EINVAL; } + pm = devpriv->pm[trig] & old_mask; + pt = devpriv->pt[trig] & old_mask; + pp = devpriv->pp[trig] & old_mask; + switch (data[2]) { case COMEDI_DIGITAL_TRIG_DISABLE: /* clear trigger configuration */ diff --git a/drivers/staging/comedi/drivers/addi_apci_1564.c b/drivers/staging/comedi/drivers/addi_apci_1564.c index 10501fe6bb25..1268ba34be5f 100644 --- a/drivers/staging/comedi/drivers/addi_apci_1564.c +++ b/drivers/staging/comedi/drivers/addi_apci_1564.c @@ -331,14 +331,22 @@ static int apci1564_cos_insn_config(struct comedi_device *dev, unsigned int *data) { struct apci1564_private *devpriv = dev->private; - unsigned int shift, oldmask; + unsigned int shift, oldmask, himask, lomask; switch (data[0]) { case INSN_CONFIG_DIGITAL_TRIG: if (data[1] != 0) return -EINVAL; shift = data[3]; - oldmask = (1U << shift) - 1; + if (shift < 32) { + oldmask = (1U << shift) - 1; + himask = data[4] << shift; + lomask = data[5] << shift; + } else { + oldmask = 0xffffffffu; + himask = 0; + lomask = 0; + } switch (data[2]) { case COMEDI_DIGITAL_TRIG_DISABLE: devpriv->ctrl = 0; @@ -362,8 +370,8 @@ static int apci1564_cos_insn_config(struct comedi_device *dev, devpriv->mode2 &= oldmask; } /* configure specified channels */ - devpriv->mode1 |= data[4] << shift; - devpriv->mode2 |= data[5] << shift; + devpriv->mode1 |= himask; + devpriv->mode2 |= lomask; break; case COMEDI_DIGITAL_TRIG_ENABLE_LEVELS: if (devpriv->ctrl != (APCI1564_DI_IRQ_ENA | @@ -380,8 +388,8 @@ static int apci1564_cos_insn_config(struct comedi_device *dev, devpriv->mode2 &= oldmask; } /* configure specified channels */ - devpriv->mode1 |= data[4] << shift; - devpriv->mode2 |= data[5] << shift; + devpriv->mode1 |= himask; + devpriv->mode2 |= lomask; break; default: return -EINVAL; diff --git a/drivers/staging/comedi/drivers/ni_6527.c b/drivers/staging/comedi/drivers/ni_6527.c index 4d1eccb5041d..4518c2680b7c 100644 --- a/drivers/staging/comedi/drivers/ni_6527.c +++ b/drivers/staging/comedi/drivers/ni_6527.c @@ -332,7 +332,7 @@ static int ni6527_intr_insn_config(struct comedi_device *dev, case COMEDI_DIGITAL_TRIG_ENABLE_EDGES: /* check shift amount */ shift = data[3]; - if (shift >= s->n_chan) { + if (shift >= 32) { mask = 0; rising = 0; falling = 0; diff --git a/drivers/staging/gasket/gasket_sysfs.c b/drivers/staging/gasket/gasket_sysfs.c index 5f0e089573a2..af26bc9f184a 100644 --- a/drivers/staging/gasket/gasket_sysfs.c +++ b/drivers/staging/gasket/gasket_sysfs.c @@ -339,6 +339,7 @@ void gasket_sysfs_put_attr(struct device *device, dev_err(device, "Unable to put unknown attribute: %s\n", attr->attr.attr.name); + put_mapping(mapping); } EXPORT_SYMBOL(gasket_sysfs_put_attr); 
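[Editorial sketch] The comedi changes above (apci_1032, apci_1500, apci_1564, ni_6527) all avoid shifting a 32-bit value by 32 or more bits, which is undefined behaviour in C; the masks are only computed when the requested shift is in range. A minimal sketch of the guarded mask computation, with the channel bits taken from the instruction data words as in the drivers above.

static void build_trigger_masks(unsigned int shift,
				unsigned int hi_bits, unsigned int lo_bits,
				unsigned int *oldmask,
				unsigned int *himask, unsigned int *lomask)
{
	if (shift < 32) {
		*oldmask = (1U << shift) - 1;	/* channels below 'shift' keep their config */
		*himask = hi_bits << shift;
		*lomask = lo_bits << shift;
	} else {
		/* shift out of range: keep everything, configure nothing new */
		*oldmask = 0xffffffffu;
		*himask = 0;
		*lomask = 0;
	}
}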
@@ -372,6 +373,7 @@ ssize_t gasket_sysfs_register_store(struct device *device, gasket_dev = mapping->gasket_dev; if (!gasket_dev) { dev_err(device, "Device driver may have been removed\n"); + put_mapping(mapping); return 0; } diff --git a/drivers/staging/greybus/audio_topology.c b/drivers/staging/greybus/audio_topology.c index 4ac30accf226..cc329b990e16 100644 --- a/drivers/staging/greybus/audio_topology.c +++ b/drivers/staging/greybus/audio_topology.c @@ -460,6 +460,15 @@ static int gbcodec_mixer_dapm_ctl_put(struct snd_kcontrol *kcontrol, val = ucontrol->value.integer.value[0] & mask; connect = !!val; + ret = gb_pm_runtime_get_sync(bundle); + if (ret) + return ret; + + ret = gb_audio_gb_get_control(module->mgmt_connection, data->ctl_id, + GB_AUDIO_INVALID_INDEX, &gbvalue); + if (ret) + goto exit; + /* update ucontrol */ if (gbvalue.value.integer_value[0] != val) { for (wi = 0; wi < wlist->num_widgets; wi++) { @@ -473,25 +482,17 @@ static int gbcodec_mixer_dapm_ctl_put(struct snd_kcontrol *kcontrol, gbvalue.value.integer_value[0] = cpu_to_le32(ucontrol->value.integer.value[0]); - ret = gb_pm_runtime_get_sync(bundle); - if (ret) - return ret; - ret = gb_audio_gb_set_control(module->mgmt_connection, data->ctl_id, GB_AUDIO_INVALID_INDEX, &gbvalue); - - gb_pm_runtime_put_autosuspend(bundle); - - if (ret) { - dev_err_ratelimited(codec->dev, - "%d:Error in %s for %s\n", ret, - __func__, kcontrol->id.name); - return ret; - } } - return 0; +exit: + gb_pm_runtime_put_autosuspend(bundle); + if (ret) + dev_err_ratelimited(codec_dev, "%d:Error in %s for %s\n", ret, + __func__, kcontrol->id.name); + return ret; } #define SOC_DAPM_MIXER_GB(xname, kcount, data) \ diff --git a/drivers/staging/greybus/light.c b/drivers/staging/greybus/light.c index d6ba25f21d80..d2672b65c3f4 100644 --- a/drivers/staging/greybus/light.c +++ b/drivers/staging/greybus/light.c @@ -1026,7 +1026,8 @@ static int gb_lights_light_config(struct gb_lights *glights, u8 id) light->channels_count = conf.channel_count; light->name = kstrndup(conf.name, NAMES_MAX, GFP_KERNEL); - + if (!light->name) + return -ENOMEM; light->channels = kcalloc(light->channels_count, sizeof(struct gb_channel), GFP_KERNEL); if (!light->channels) diff --git a/drivers/staging/greybus/sdio.c b/drivers/staging/greybus/sdio.c index 68c5718be827..c4b16bb5c1a4 100644 --- a/drivers/staging/greybus/sdio.c +++ b/drivers/staging/greybus/sdio.c @@ -411,6 +411,7 @@ static int gb_sdio_command(struct gb_sdio_host *host, struct mmc_command *cmd) struct gb_sdio_command_request request = {0}; struct gb_sdio_command_response response; struct mmc_data *data = host->mrq->data; + unsigned int timeout_ms; u8 cmd_flags; u8 cmd_type; int i; @@ -469,9 +470,12 @@ static int gb_sdio_command(struct gb_sdio_host *host, struct mmc_command *cmd) request.data_blksz = cpu_to_le16(data->blksz); } - ret = gb_operation_sync(host->connection, GB_SDIO_TYPE_COMMAND, - &request, sizeof(request), &response, - sizeof(response)); + timeout_ms = cmd->busy_timeout ? 
cmd->busy_timeout : + GB_OPERATION_TIMEOUT_DEFAULT; + + ret = gb_operation_sync_timeout(host->connection, GB_SDIO_TYPE_COMMAND, + &request, sizeof(request), &response, + sizeof(response), timeout_ms); if (ret < 0) goto out; diff --git a/drivers/staging/media/imx/imx7-mipi-csis.c b/drivers/staging/media/imx/imx7-mipi-csis.c index 6f628195c4da..021bbd420390 100644 --- a/drivers/staging/media/imx/imx7-mipi-csis.c +++ b/drivers/staging/media/imx/imx7-mipi-csis.c @@ -657,28 +657,6 @@ static int mipi_csis_init_cfg(struct v4l2_subdev *mipi_sd, return 0; } -static struct csis_pix_format const * -mipi_csis_try_format(struct v4l2_subdev *mipi_sd, struct v4l2_mbus_framefmt *mf) -{ - struct csi_state *state = mipi_sd_to_csis_state(mipi_sd); - struct csis_pix_format const *csis_fmt; - - csis_fmt = find_csis_format(mf->code); - if (!csis_fmt) - csis_fmt = &mipi_csis_formats[0]; - - v4l_bound_align_image(&mf->width, 1, CSIS_MAX_PIX_WIDTH, - csis_fmt->pix_width_alignment, - &mf->height, 1, CSIS_MAX_PIX_HEIGHT, 1, - 0); - - state->format_mbus.code = csis_fmt->code; - state->format_mbus.width = mf->width; - state->format_mbus.height = mf->height; - - return csis_fmt; -} - static struct v4l2_mbus_framefmt * mipi_csis_get_format(struct csi_state *state, struct v4l2_subdev_pad_config *cfg, @@ -691,40 +669,6 @@ mipi_csis_get_format(struct csi_state *state, return &state->format_mbus; } -static int mipi_csis_set_fmt(struct v4l2_subdev *mipi_sd, - struct v4l2_subdev_pad_config *cfg, - struct v4l2_subdev_format *sdformat) -{ - struct csi_state *state = mipi_sd_to_csis_state(mipi_sd); - struct csis_pix_format const *csis_fmt; - struct v4l2_mbus_framefmt *fmt; - - if (sdformat->pad >= CSIS_PADS_NUM) - return -EINVAL; - - fmt = mipi_csis_get_format(state, cfg, sdformat->which, sdformat->pad); - - mutex_lock(&state->lock); - if (sdformat->pad == CSIS_PAD_SOURCE) { - sdformat->format = *fmt; - goto unlock; - } - - csis_fmt = mipi_csis_try_format(mipi_sd, &sdformat->format); - - sdformat->format = *fmt; - - if (csis_fmt && sdformat->which == V4L2_SUBDEV_FORMAT_ACTIVE) - state->csis_fmt = csis_fmt; - else - cfg->try_fmt = sdformat->format; - -unlock: - mutex_unlock(&state->lock); - - return 0; -} - static int mipi_csis_get_fmt(struct v4l2_subdev *mipi_sd, struct v4l2_subdev_pad_config *cfg, struct v4l2_subdev_format *sdformat) @@ -733,11 +677,59 @@ static int mipi_csis_get_fmt(struct v4l2_subdev *mipi_sd, struct v4l2_mbus_framefmt *fmt; mutex_lock(&state->lock); + fmt = mipi_csis_get_format(state, cfg, sdformat->which, sdformat->pad); + sdformat->format = *fmt; + mutex_unlock(&state->lock); + + return 0; +} + +static int mipi_csis_set_fmt(struct v4l2_subdev *mipi_sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_format *sdformat) +{ + struct csi_state *state = mipi_sd_to_csis_state(mipi_sd); + struct csis_pix_format const *csis_fmt; + struct v4l2_mbus_framefmt *fmt; + + /* + * The CSIS can't transcode in any way, the source format can't be + * modified. + */ + if (sdformat->pad == CSIS_PAD_SOURCE) + return mipi_csis_get_fmt(mipi_sd, cfg, sdformat); + + if (sdformat->pad != CSIS_PAD_SINK) + return -EINVAL; fmt = mipi_csis_get_format(state, cfg, sdformat->which, sdformat->pad); + mutex_lock(&state->lock); + + /* Validate the media bus code and clamp the size. 
*/ + csis_fmt = find_csis_format(sdformat->format.code); + if (!csis_fmt) + csis_fmt = &mipi_csis_formats[0]; + + fmt->code = csis_fmt->code; + fmt->width = sdformat->format.width; + fmt->height = sdformat->format.height; + + v4l_bound_align_image(&fmt->width, 1, CSIS_MAX_PIX_WIDTH, + csis_fmt->pix_width_alignment, + &fmt->height, 1, CSIS_MAX_PIX_HEIGHT, 1, 0); + sdformat->format = *fmt; + /* Propagate the format from sink to source. */ + fmt = mipi_csis_get_format(state, cfg, sdformat->which, + CSIS_PAD_SOURCE); + *fmt = sdformat->format; + + /* Store the CSIS format descriptor for active formats. */ + if (sdformat->which == V4L2_SUBDEV_FORMAT_ACTIVE) + state->csis_fmt = csis_fmt; + mutex_unlock(&state->lock); return 0; diff --git a/drivers/staging/media/ipu3/ipu3-mmu.c b/drivers/staging/media/ipu3/ipu3-mmu.c index 3d969b0522ab..abcf1f3e5f63 100644 --- a/drivers/staging/media/ipu3/ipu3-mmu.c +++ b/drivers/staging/media/ipu3/ipu3-mmu.c @@ -174,8 +174,10 @@ static u32 *imgu_mmu_get_l2pt(struct imgu_mmu *mmu, u32 l1pt_idx) spin_lock_irqsave(&mmu->lock, flags); l2pt = mmu->l2pts[l1pt_idx]; - if (l2pt) - goto done; + if (l2pt) { + spin_unlock_irqrestore(&mmu->lock, flags); + return l2pt; + } spin_unlock_irqrestore(&mmu->lock, flags); @@ -190,8 +192,9 @@ static u32 *imgu_mmu_get_l2pt(struct imgu_mmu *mmu, u32 l1pt_idx) l2pt = mmu->l2pts[l1pt_idx]; if (l2pt) { + spin_unlock_irqrestore(&mmu->lock, flags); imgu_mmu_free_page_table(new_l2pt); - goto done; + return l2pt; } l2pt = new_l2pt; @@ -200,7 +203,6 @@ static u32 *imgu_mmu_get_l2pt(struct imgu_mmu *mmu, u32 l1pt_idx) pteval = IPU3_ADDR2PTE(virt_to_phys(new_l2pt)); mmu->l1pt[l1pt_idx] = pteval; -done: spin_unlock_irqrestore(&mmu->lock, flags); return l2pt; } diff --git a/drivers/staging/media/ipu3/ipu3-v4l2.c b/drivers/staging/media/ipu3/ipu3-v4l2.c index 3c7ad1eed434..c764cb55dc8d 100644 --- a/drivers/staging/media/ipu3/ipu3-v4l2.c +++ b/drivers/staging/media/ipu3/ipu3-v4l2.c @@ -367,8 +367,10 @@ static void imgu_vb2_buf_queue(struct vb2_buffer *vb) vb2_set_plane_payload(vb, 0, need_bytes); + mutex_lock(&imgu->streaming_lock); if (imgu->streaming) imgu_queue_buffers(imgu, false, node->pipe); + mutex_unlock(&imgu->streaming_lock); dev_dbg(&imgu->pci_dev->dev, "%s for pipe %u node %u", __func__, node->pipe, node->id); @@ -468,10 +470,13 @@ static int imgu_vb2_start_streaming(struct vb2_queue *vq, unsigned int count) dev_dbg(dev, "%s node name %s pipe %u id %u", __func__, node->name, node->pipe, node->id); + mutex_lock(&imgu->streaming_lock); if (imgu->streaming) { r = -EBUSY; + mutex_unlock(&imgu->streaming_lock); goto fail_return_bufs; } + mutex_unlock(&imgu->streaming_lock); if (!node->enabled) { dev_err(dev, "IMGU node is not enabled"); @@ -498,9 +503,11 @@ static int imgu_vb2_start_streaming(struct vb2_queue *vq, unsigned int count) /* Start streaming of the whole pipeline now */ dev_dbg(dev, "IMGU streaming is ready to start"); + mutex_lock(&imgu->streaming_lock); r = imgu_s_stream(imgu, true); if (!r) imgu->streaming = true; + mutex_unlock(&imgu->streaming_lock); return 0; @@ -532,6 +539,7 @@ static void imgu_vb2_stop_streaming(struct vb2_queue *vq) dev_err(&imgu->pci_dev->dev, "failed to stop subdev streaming\n"); + mutex_lock(&imgu->streaming_lock); /* Was this the first node with streaming disabled? 
*/ if (imgu->streaming && imgu_all_nodes_streaming(imgu, node)) { /* Yes, really stop streaming now */ @@ -542,6 +550,8 @@ static void imgu_vb2_stop_streaming(struct vb2_queue *vq) } imgu_return_all_buffers(imgu, node, VB2_BUF_STATE_ERROR); + mutex_unlock(&imgu->streaming_lock); + media_pipeline_stop(&node->vdev.entity); } diff --git a/drivers/staging/media/ipu3/ipu3.c b/drivers/staging/media/ipu3/ipu3.c index 06a61f31ca50..08eb6791918b 100644 --- a/drivers/staging/media/ipu3/ipu3.c +++ b/drivers/staging/media/ipu3/ipu3.c @@ -261,6 +261,7 @@ int imgu_queue_buffers(struct imgu_device *imgu, bool initial, unsigned int pipe ivb = list_first_entry(&imgu_pipe->nodes[node].buffers, struct imgu_vb2_buffer, list); + list_del(&ivb->list); vb = &ivb->vbb.vb2_buf; r = imgu_css_set_parameters(&imgu->css, pipe, vb2_plane_vaddr(vb, 0)); @@ -274,7 +275,6 @@ int imgu_queue_buffers(struct imgu_device *imgu, bool initial, unsigned int pipe vb2_buffer_done(vb, VB2_BUF_STATE_DONE); dev_dbg(&imgu->pci_dev->dev, "queue user parameters %d to css.", vb->index); - list_del(&ivb->list); } else if (imgu_pipe->queue_enabled[node]) { struct imgu_css_buffer *buf = imgu_queue_getbuf(imgu, node, pipe); @@ -663,6 +663,7 @@ static int imgu_pci_probe(struct pci_dev *pci_dev, return r; mutex_init(&imgu->lock); + mutex_init(&imgu->streaming_lock); atomic_set(&imgu->qbuf_barrier, 0); init_waitqueue_head(&imgu->buf_drain_wq); @@ -726,6 +727,7 @@ out_mmu_exit: out_css_powerdown: imgu_css_set_powerdown(&pci_dev->dev, imgu->base); out_mutex_destroy: + mutex_destroy(&imgu->streaming_lock); mutex_destroy(&imgu->lock); return r; @@ -743,6 +745,7 @@ static void imgu_pci_remove(struct pci_dev *pci_dev) imgu_css_set_powerdown(&pci_dev->dev, imgu->base); imgu_dmamap_exit(imgu); imgu_mmu_exit(imgu->mmu); + mutex_destroy(&imgu->streaming_lock); mutex_destroy(&imgu->lock); } diff --git a/drivers/staging/media/ipu3/ipu3.h b/drivers/staging/media/ipu3/ipu3.h index 73b123b2b8a2..8cd6a0077d99 100644 --- a/drivers/staging/media/ipu3/ipu3.h +++ b/drivers/staging/media/ipu3/ipu3.h @@ -146,6 +146,10 @@ struct imgu_device { * vid_buf.list and css->queue */ struct mutex lock; + + /* Lock to protect writes to streaming flag in this struct */ + struct mutex streaming_lock; + /* Forbid streaming and buffer queuing during system suspend. */ atomic_t qbuf_barrier; /* Indicate if system suspend take place while imgu is streaming. 
*/ diff --git a/drivers/staging/media/sunxi/cedrus/cedrus.c b/drivers/staging/media/sunxi/cedrus/cedrus.c index 3439f6ad6338..e80e82a276e9 100644 --- a/drivers/staging/media/sunxi/cedrus/cedrus.c +++ b/drivers/staging/media/sunxi/cedrus/cedrus.c @@ -159,6 +159,7 @@ static int cedrus_request_validate(struct media_request *req) struct v4l2_ctrl *ctrl_test; unsigned int count; unsigned int i; + int ret = 0; list_for_each_entry(obj, &req->objects, list) { struct vb2_buffer *vb; @@ -203,12 +204,16 @@ static int cedrus_request_validate(struct media_request *req) if (!ctrl_test) { v4l2_info(&ctx->dev->v4l2_dev, "Missing required codec control\n"); - return -ENOENT; + ret = -ENOENT; + break; } } v4l2_ctrl_request_hdl_put(hdl); + if (ret) + return ret; + return vb2_request_validate(req); } diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_dec.c b/drivers/staging/media/sunxi/cedrus/cedrus_dec.c index 56ca4c9ad01c..47940f02457b 100644 --- a/drivers/staging/media/sunxi/cedrus/cedrus_dec.c +++ b/drivers/staging/media/sunxi/cedrus/cedrus_dec.c @@ -65,6 +65,8 @@ void cedrus_device_run(void *priv) v4l2_m2m_buf_copy_metadata(run.src, run.dst, true); + cedrus_dst_format_set(dev, &ctx->dst_fmt); + dev->dec_ops[ctx->current_codec]->setup(ctx, &run); /* Complete request(s) controls if needed. */ diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_video.c b/drivers/staging/media/sunxi/cedrus/cedrus_video.c index eeee3efd247b..966f9f3ed9d3 100644 --- a/drivers/staging/media/sunxi/cedrus/cedrus_video.c +++ b/drivers/staging/media/sunxi/cedrus/cedrus_video.c @@ -286,7 +286,6 @@ static int cedrus_s_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f) { struct cedrus_ctx *ctx = cedrus_file2ctx(file); - struct cedrus_dev *dev = ctx->dev; struct vb2_queue *vq; int ret; @@ -300,8 +299,6 @@ static int cedrus_s_fmt_vid_cap(struct file *file, void *priv, ctx->dst_fmt = f->fmt.pix; - cedrus_dst_format_set(dev, &ctx->dst_fmt); - return 0; } diff --git a/drivers/staging/rtl8188eu/core/rtw_mlme.c b/drivers/staging/rtl8188eu/core/rtw_mlme.c index 1ec3b237212e..7cee7b4d5270 100644 --- a/drivers/staging/rtl8188eu/core/rtw_mlme.c +++ b/drivers/staging/rtl8188eu/core/rtw_mlme.c @@ -1729,9 +1729,11 @@ int rtw_restruct_sec_ie(struct adapter *adapter, u8 *in_ie, u8 *out_ie, uint in_ if ((ndisauthmode == Ndis802_11AuthModeWPA) || (ndisauthmode == Ndis802_11AuthModeWPAPSK)) authmode = _WPA_IE_ID_; - if ((ndisauthmode == Ndis802_11AuthModeWPA2) || + else if ((ndisauthmode == Ndis802_11AuthModeWPA2) || (ndisauthmode == Ndis802_11AuthModeWPA2PSK)) authmode = _WPA2_IE_ID_; + else + authmode = 0x0; if (check_fwstate(pmlmepriv, WIFI_UNDER_WPS)) { memcpy(out_ie+ielength, psecuritypriv->wps_ie, psecuritypriv->wps_ie_len); diff --git a/drivers/staging/rtl8192u/r8192U_core.c b/drivers/staging/rtl8192u/r8192U_core.c index 511136dce3a4..ddc09616248a 100644 --- a/drivers/staging/rtl8192u/r8192U_core.c +++ b/drivers/staging/rtl8192u/r8192U_core.c @@ -2401,7 +2401,7 @@ static int rtl8192_read_eeprom_info(struct net_device *dev) ret = eprom_read(dev, (EEPROM_TX_PW_INDEX_CCK >> 1)); if (ret < 0) return ret; - priv->EEPROMTxPowerLevelCCK = ((u16)ret & 0xff) >> 8; + priv->EEPROMTxPowerLevelCCK = ((u16)ret & 0xff00) >> 8; } else priv->EEPROMTxPowerLevelCCK = 0x10; RT_TRACE(COMP_EPROM, "CCK Tx Power Levl: 0x%02x\n", priv->EEPROMTxPowerLevelCCK); diff --git a/drivers/staging/rtl8712/hal_init.c b/drivers/staging/rtl8712/hal_init.c index 40145c0338e4..42c0a3c947f1 100644 --- a/drivers/staging/rtl8712/hal_init.c +++ 
b/drivers/staging/rtl8712/hal_init.c @@ -33,7 +33,6 @@ static void rtl871x_load_fw_cb(const struct firmware *firmware, void *context) { struct _adapter *adapter = context; - complete(&adapter->rtl8712_fw_ready); if (!firmware) { struct usb_device *udev = adapter->dvobjpriv.pusbdev; struct usb_interface *usb_intf = adapter->pusb_intf; @@ -41,11 +40,13 @@ static void rtl871x_load_fw_cb(const struct firmware *firmware, void *context) dev_err(&udev->dev, "r8712u: Firmware request failed\n"); usb_put_dev(udev); usb_set_intfdata(usb_intf, NULL); + complete(&adapter->rtl8712_fw_ready); return; } adapter->fw = firmware; /* firmware available - start netdev */ register_netdev(adapter->pnetdev); + complete(&adapter->rtl8712_fw_ready); } static const char firmware_file[] = "rtlwifi/rtl8712u.bin"; diff --git a/drivers/staging/rtl8712/usb_intf.c b/drivers/staging/rtl8712/usb_intf.c index a87562f632a7..2fcd65260f4c 100644 --- a/drivers/staging/rtl8712/usb_intf.c +++ b/drivers/staging/rtl8712/usb_intf.c @@ -595,13 +595,17 @@ static void r871xu_dev_remove(struct usb_interface *pusb_intf) if (pnetdev) { struct _adapter *padapter = netdev_priv(pnetdev); - usb_set_intfdata(pusb_intf, NULL); - release_firmware(padapter->fw); /* never exit with a firmware callback pending */ wait_for_completion(&padapter->rtl8712_fw_ready); + pnetdev = usb_get_intfdata(pusb_intf); + usb_set_intfdata(pusb_intf, NULL); + if (!pnetdev) + goto firmware_load_fail; + release_firmware(padapter->fw); if (drvpriv.drv_registered) padapter->surprise_removed = true; - unregister_netdev(pnetdev); /* will call netdev_close() */ + if (pnetdev->reg_state != NETREG_UNINITIALIZED) + unregister_netdev(pnetdev); /* will call netdev_close() */ flush_scheduled_work(); udelay(1); /* Stop driver mlme relation timer */ @@ -614,6 +618,7 @@ static void r871xu_dev_remove(struct usb_interface *pusb_intf) */ usb_put_dev(udev); } +firmware_load_fail: /* If we didn't unplug usb dongle and remove/insert module, driver * fails on sitesurvey for the first time when device is up. * Reset usb port for sitesurvey fail issue. 
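[Editorial sketch] The rtl8712 changes above signal the firmware-ready completion only after the netdev registration decision has been made, and the disconnect path waits on that completion before tearing anything down, so remove() can never race the asynchronous firmware callback. A stripped-down sketch of that request_firmware_nowait() ordering; the structure and names are hypothetical and only mirror the pattern.

#include <linux/completion.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/netdevice.h>

struct demo_adapter {
	struct net_device *netdev;
	const struct firmware *fw;
	struct completion fw_ready;
};

static void demo_load_fw_cb(const struct firmware *fw, void *context)
{
	struct demo_adapter *adapter = context;

	if (!fw) {
		/* failure: signal waiters only after the cleanup decisions */
		complete(&adapter->fw_ready);
		return;
	}

	if (register_netdev(adapter->netdev))
		release_firmware(fw);	/* registration failed, drop the blob */
	else
		adapter->fw = fw;
	/* complete() last, so a concurrent remove() sees a settled state */
	complete(&adapter->fw_ready);
}

static int demo_request_fw(struct demo_adapter *adapter, struct device *dev)
{
	init_completion(&adapter->fw_ready);
	return request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
				       "demo/fw.bin", dev, GFP_KERNEL,
				       adapter, demo_load_fw_cb);
}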
diff --git a/drivers/staging/rtl8723bs/core/rtw_wlan_util.c b/drivers/staging/rtl8723bs/core/rtw_wlan_util.c index ea3ea2a6b314..f6678ba6d4bc 100644 --- a/drivers/staging/rtl8723bs/core/rtw_wlan_util.c +++ b/drivers/staging/rtl8723bs/core/rtw_wlan_util.c @@ -1845,12 +1845,14 @@ int update_sta_support_rate(struct adapter *padapter, u8 *pvar_ie, uint var_ie_l pIE = (struct ndis_80211_var_ie *)rtw_get_ie(pvar_ie, _SUPPORTEDRATES_IE_, &ie_len, var_ie_len); if (!pIE) return _FAIL; + if (ie_len > sizeof(pmlmeinfo->FW_sta_info[cam_idx].SupportedRates)) + return _FAIL; memcpy(pmlmeinfo->FW_sta_info[cam_idx].SupportedRates, pIE->data, ie_len); supportRateNum = ie_len; pIE = (struct ndis_80211_var_ie *)rtw_get_ie(pvar_ie, _EXT_SUPPORTEDRATES_IE_, &ie_len, var_ie_len); - if (pIE) + if (pIE && (ie_len <= sizeof(pmlmeinfo->FW_sta_info[cam_idx].SupportedRates) - supportRateNum)) memcpy((pmlmeinfo->FW_sta_info[cam_idx].SupportedRates + supportRateNum), pIE->data, ie_len); return _SUCCESS; diff --git a/drivers/staging/sm750fb/sm750.c b/drivers/staging/sm750fb/sm750.c index 59568d18ce23..5b72aa81d94c 100644 --- a/drivers/staging/sm750fb/sm750.c +++ b/drivers/staging/sm750fb/sm750.c @@ -898,6 +898,7 @@ static int lynxfb_set_fbinfo(struct fb_info *info, int index) fix->visual = FB_VISUAL_PSEUDOCOLOR; break; case 16: + case 24: case 32: fix->visual = FB_VISUAL_TRUECOLOR; break; diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c index af6bf0736b52..eb76cc2cbfd8 100644 --- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c +++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c @@ -3257,6 +3257,7 @@ failed_platform_init: static int vchiq_remove(struct platform_device *pdev) { + platform_device_unregister(bcm2835_audio); platform_device_unregister(bcm2835_camera); vchiq_debugfs_deinit(); device_destroy(vchiq_class, vchiq_devid); diff --git a/drivers/staging/wilc1000/wilc_hif.c b/drivers/staging/wilc1000/wilc_hif.c index 77d0732f451b..221e3d93db14 100644 --- a/drivers/staging/wilc1000/wilc_hif.c +++ b/drivers/staging/wilc1000/wilc_hif.c @@ -12,6 +12,8 @@ #define WILC_FALSE_FRMWR_CHANNEL 100 #define WILC_MAX_RATES_SUPPORTED 12 +#define WILC_SCAN_WID_LIST_SIZE 6 + struct wilc_rcvd_mac_info { u8 status; }; @@ -233,7 +235,7 @@ int wilc_scan(struct wilc_vif *vif, u8 scan_source, u8 scan_type, void *user_arg, struct cfg80211_scan_request *request) { int result = 0; - struct wid wid_list[5]; + struct wid wid_list[WILC_SCAN_WID_LIST_SIZE]; u32 index = 0; u32 i, scan_timeout; u8 *buffer; diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c index e29c14e0ed49..ed4ff78dd02a 100644 --- a/drivers/staging/wlan-ng/hfa384x_usb.c +++ b/drivers/staging/wlan-ng/hfa384x_usb.c @@ -526,13 +526,8 @@ static void hfa384x_usb_defer(struct work_struct *data) */ void hfa384x_create(struct hfa384x *hw, struct usb_device *usb) { - memset(hw, 0, sizeof(*hw)); hw->usb = usb; - /* set up the endpoints */ - hw->endp_in = usb_rcvbulkpipe(usb, 1); - hw->endp_out = usb_sndbulkpipe(usb, 2); - /* Set up the waitq */ init_waitqueue_head(&hw->cmdq); diff --git a/drivers/staging/wlan-ng/prism2usb.c b/drivers/staging/wlan-ng/prism2usb.c index d8d86761b790..9eee72aff723 100644 --- a/drivers/staging/wlan-ng/prism2usb.c +++ b/drivers/staging/wlan-ng/prism2usb.c @@ -61,11 +61,16 @@ static int prism2sta_probe_usb(struct usb_interface *interface, const struct usb_device_id *id) { struct usb_device *dev; - + 
struct usb_endpoint_descriptor *bulk_in, *bulk_out; + struct usb_host_interface *iface_desc = interface->cur_altsetting; struct wlandevice *wlandev = NULL; struct hfa384x *hw = NULL; int result = 0; + result = usb_find_common_endpoints(iface_desc, &bulk_in, &bulk_out, NULL, NULL); + if (result) + goto failed; + dev = interface_to_usbdev(interface); wlandev = create_wlan(); if (!wlandev) { @@ -82,6 +87,8 @@ static int prism2sta_probe_usb(struct usb_interface *interface, } /* Initialize the hw data */ + hw->endp_in = usb_rcvbulkpipe(dev, bulk_in->bEndpointAddress); + hw->endp_out = usb_sndbulkpipe(dev, bulk_out->bEndpointAddress); hfa384x_create(hw, dev); hw->wlandev = wlandev; diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index d1ce94c608a9..bca183369ad8 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -1386,14 +1386,27 @@ static u32 iscsit_do_crypto_hash_sg( sg = cmd->first_data_sg; page_off = cmd->first_data_sg_off; + if (data_length && page_off) { + struct scatterlist first_sg; + u32 len = min_t(u32, data_length, sg->length - page_off); + + sg_init_table(&first_sg, 1); + sg_set_page(&first_sg, sg_page(sg), len, sg->offset + page_off); + + ahash_request_set_crypt(hash, &first_sg, NULL, len); + crypto_ahash_update(hash); + + data_length -= len; + sg = sg_next(sg); + } + while (data_length) { - u32 cur_len = min_t(u32, data_length, (sg->length - page_off)); + u32 cur_len = min_t(u32, data_length, sg->length); ahash_request_set_crypt(hash, sg, NULL, cur_len); crypto_ahash_update(hash); data_length -= cur_len; - page_off = 0; /* iscsit_map_iovec has already checked for invalid sg pointers */ sg = sg_next(sg); } diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c index 731ee67fe914..0cc5ea195273 100644 --- a/drivers/target/iscsi/iscsi_target_login.c +++ b/drivers/target/iscsi/iscsi_target_login.c @@ -1172,7 +1172,7 @@ void iscsit_free_conn(struct iscsi_conn *conn) } void iscsi_target_login_sess_out(struct iscsi_conn *conn, - struct iscsi_np *np, bool zero_tsih, bool new_sess) + bool zero_tsih, bool new_sess) { if (!new_sess) goto old_sess_out; @@ -1190,7 +1190,6 @@ void iscsi_target_login_sess_out(struct iscsi_conn *conn, conn->sess = NULL; old_sess_out: - iscsi_stop_login_thread_timer(np); /* * If login negotiation fails check if the Time2Retain timer * needs to be restarted. 
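[Editorial sketch] The prism2usb change above discovers the bulk endpoints from the interface descriptor instead of hard-coding endpoint numbers 1 and 2. A minimal probe-time sketch of that lookup; the helper name is hypothetical.

#include <linux/usb.h>

/*
 * Let the USB core find one bulk-IN and one bulk-OUT endpoint and turn them
 * into pipe handles; fails cleanly when the altsetting lacks either one.
 */
static int demo_find_bulk_pipes(struct usb_interface *intf,
				unsigned int *pipe_in, unsigned int *pipe_out)
{
	struct usb_device *udev = interface_to_usbdev(intf);
	struct usb_endpoint_descriptor *bulk_in, *bulk_out;
	int ret;

	ret = usb_find_common_endpoints(intf->cur_altsetting,
					&bulk_in, &bulk_out, NULL, NULL);
	if (ret)
		return ret;	/* missing endpoint */

	*pipe_in = usb_rcvbulkpipe(udev, bulk_in->bEndpointAddress);
	*pipe_out = usb_sndbulkpipe(udev, bulk_out->bEndpointAddress);
	return 0;
}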
@@ -1430,8 +1429,9 @@ static int __iscsi_target_login_thread(struct iscsi_np *np) new_sess_out: new_sess = true; old_sess_out: + iscsi_stop_login_thread_timer(np); tpg_np = conn->tpg_np; - iscsi_target_login_sess_out(conn, np, zero_tsih, new_sess); + iscsi_target_login_sess_out(conn, zero_tsih, new_sess); new_sess = false; if (tpg) { diff --git a/drivers/target/iscsi/iscsi_target_login.h b/drivers/target/iscsi/iscsi_target_login.h index 3b8e3639ff5d..fc95e6150253 100644 --- a/drivers/target/iscsi/iscsi_target_login.h +++ b/drivers/target/iscsi/iscsi_target_login.h @@ -22,8 +22,7 @@ extern int iscsit_put_login_tx(struct iscsi_conn *, struct iscsi_login *, u32); extern void iscsit_free_conn(struct iscsi_conn *); extern int iscsit_start_kthreads(struct iscsi_conn *); extern void iscsi_post_login_handler(struct iscsi_np *, struct iscsi_conn *, u8); -extern void iscsi_target_login_sess_out(struct iscsi_conn *, struct iscsi_np *, - bool, bool); +extern void iscsi_target_login_sess_out(struct iscsi_conn *, bool, bool); extern int iscsi_target_login_thread(void *); extern void iscsi_handle_login_thread_timeout(struct timer_list *t); diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c index 685d771b51d4..e32d93b92742 100644 --- a/drivers/target/iscsi/iscsi_target_nego.c +++ b/drivers/target/iscsi/iscsi_target_nego.c @@ -535,12 +535,11 @@ static bool iscsi_target_sk_check_and_clear(struct iscsi_conn *conn, unsigned in static void iscsi_target_login_drop(struct iscsi_conn *conn, struct iscsi_login *login) { - struct iscsi_np *np = login->np; bool zero_tsih = login->zero_tsih; iscsi_remove_failed_auth_entry(conn); iscsi_target_nego_release(conn); - iscsi_target_login_sess_out(conn, np, zero_tsih, true); + iscsi_target_login_sess_out(conn, zero_tsih, true); } struct conn_timeout { diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c index 3305b47fdf53..16d5a4e117a2 100644 --- a/drivers/target/loopback/tcm_loop.c +++ b/drivers/target/loopback/tcm_loop.c @@ -545,32 +545,15 @@ static int tcm_loop_write_pending(struct se_cmd *se_cmd) return 0; } -static int tcm_loop_queue_data_in(struct se_cmd *se_cmd) +static int tcm_loop_queue_data_or_status(const char *func, + struct se_cmd *se_cmd, u8 scsi_status) { struct tcm_loop_cmd *tl_cmd = container_of(se_cmd, struct tcm_loop_cmd, tl_se_cmd); struct scsi_cmnd *sc = tl_cmd->sc; pr_debug("%s() called for scsi_cmnd: %p cdb: 0x%02x\n", - __func__, sc, sc->cmnd[0]); - - sc->result = SAM_STAT_GOOD; - set_host_byte(sc, DID_OK); - if ((se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) || - (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT)) - scsi_set_resid(sc, se_cmd->residual_count); - sc->scsi_done(sc); - return 0; -} - -static int tcm_loop_queue_status(struct se_cmd *se_cmd) -{ - struct tcm_loop_cmd *tl_cmd = container_of(se_cmd, - struct tcm_loop_cmd, tl_se_cmd); - struct scsi_cmnd *sc = tl_cmd->sc; - - pr_debug("%s() called for scsi_cmnd: %p cdb: 0x%02x\n", - __func__, sc, sc->cmnd[0]); + func, sc, sc->cmnd[0]); if (se_cmd->sense_buffer && ((se_cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) || @@ -581,7 +564,7 @@ static int tcm_loop_queue_status(struct se_cmd *se_cmd) sc->result = SAM_STAT_CHECK_CONDITION; set_driver_byte(sc, DRIVER_SENSE); } else - sc->result = se_cmd->scsi_status; + sc->result = scsi_status; set_host_byte(sc, DID_OK); if ((se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) || @@ -591,6 +574,17 @@ static int tcm_loop_queue_status(struct se_cmd *se_cmd) return 0; } +static int 
tcm_loop_queue_data_in(struct se_cmd *se_cmd) +{ + return tcm_loop_queue_data_or_status(__func__, se_cmd, SAM_STAT_GOOD); +} + +static int tcm_loop_queue_status(struct se_cmd *se_cmd) +{ + return tcm_loop_queue_data_or_status(__func__, + se_cmd, se_cmd->scsi_status); +} + static void tcm_loop_queue_tm_rsp(struct se_cmd *se_cmd) { struct tcm_loop_cmd *tl_cmd = container_of(se_cmd, diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h index 853344415963..e7b3c6e5d574 100644 --- a/drivers/target/target_core_internal.h +++ b/drivers/target/target_core_internal.h @@ -138,6 +138,7 @@ int init_se_kmem_caches(void); void release_se_kmem_caches(void); u32 scsi_get_new_index(scsi_index_t); void transport_subsystem_check_init(void); +void transport_uninit_session(struct se_session *); unsigned char *transport_dump_cmd_direction(struct se_cmd *); void transport_dump_dev_state(struct se_device *, char *, int *); void transport_dump_dev_info(struct se_device *, struct se_lun *, diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 7c78a5d02c08..b1f4be055f83 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -236,6 +236,11 @@ int transport_init_session(struct se_session *se_sess) } EXPORT_SYMBOL(transport_init_session); +void transport_uninit_session(struct se_session *se_sess) +{ + percpu_ref_exit(&se_sess->cmd_count); +} + /** * transport_alloc_session - allocate a session object and initialize it * @sup_prot_ops: bitmask that defines which T10-PI modes are supported. @@ -579,7 +584,7 @@ void transport_free_session(struct se_session *se_sess) sbitmap_queue_free(&se_sess->sess_tag_pool); kvfree(se_sess->sess_cmd_map); } - percpu_ref_exit(&se_sess->cmd_count); + transport_uninit_session(se_sess); kmem_cache_free(se_sess_cache, se_sess); } EXPORT_SYMBOL(transport_free_session); diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c index 9425354aef99..ea925b102b32 100644 --- a/drivers/target/target_core_user.c +++ b/drivers/target/target_core_user.c @@ -601,7 +601,7 @@ static inline void tcmu_flush_dcache_range(void *vaddr, size_t size) size = round_up(size+offset, PAGE_SIZE); while (size) { - flush_dcache_page(virt_to_page(start)); + flush_dcache_page(vmalloc_to_page(start)); start += PAGE_SIZE; size -= PAGE_SIZE; } @@ -676,8 +676,10 @@ static void scatter_data_area(struct tcmu_dev *udev, from = kmap_atomic(sg_page(sg)) + sg->offset; while (sg_remaining > 0) { if (block_remaining == 0) { - if (to) + if (to) { + flush_dcache_page(page); kunmap_atomic(to); + } block_remaining = DATA_BLOCK_SIZE; dbi = tcmu_cmd_get_dbi(tcmu_cmd); @@ -722,7 +724,6 @@ static void scatter_data_area(struct tcmu_dev *udev, memcpy(to + offset, from + sg->length - sg_remaining, copy_bytes); - tcmu_flush_dcache_range(to, copy_bytes); } sg_remaining -= copy_bytes; @@ -731,8 +732,10 @@ static void scatter_data_area(struct tcmu_dev *udev, kunmap_atomic(from - sg->offset); } - if (to) + if (to) { + flush_dcache_page(page); kunmap_atomic(to); + } } static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd, @@ -778,13 +781,13 @@ static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd, dbi = tcmu_cmd_get_dbi(cmd); page = tcmu_get_block_page(udev, dbi); from = kmap_atomic(page); + flush_dcache_page(page); } copy_bytes = min_t(size_t, sg_remaining, block_remaining); if (read_len < copy_bytes) copy_bytes = read_len; offset = DATA_BLOCK_SIZE - 
block_remaining; - tcmu_flush_dcache_range(from, copy_bytes); memcpy(to + sg->length - sg_remaining, from + offset, copy_bytes); @@ -882,41 +885,24 @@ static inline size_t tcmu_cmd_get_cmd_size(struct tcmu_cmd *tcmu_cmd, return command_size; } -static int tcmu_setup_cmd_timer(struct tcmu_cmd *tcmu_cmd, unsigned int tmo, - struct timer_list *timer) +static void tcmu_setup_cmd_timer(struct tcmu_cmd *tcmu_cmd, unsigned int tmo, + struct timer_list *timer) { - struct tcmu_dev *udev = tcmu_cmd->tcmu_dev; - int cmd_id; - - if (tcmu_cmd->cmd_id) - goto setup_timer; - - cmd_id = idr_alloc(&udev->commands, tcmu_cmd, 1, USHRT_MAX, GFP_NOWAIT); - if (cmd_id < 0) { - pr_err("tcmu: Could not allocate cmd id.\n"); - return cmd_id; - } - tcmu_cmd->cmd_id = cmd_id; - - pr_debug("allocated cmd %u for dev %s tmo %lu\n", tcmu_cmd->cmd_id, - udev->name, tmo / MSEC_PER_SEC); - -setup_timer: if (!tmo) - return 0; + return; tcmu_cmd->deadline = round_jiffies_up(jiffies + msecs_to_jiffies(tmo)); if (!timer_pending(timer)) mod_timer(timer, tcmu_cmd->deadline); - return 0; + pr_debug("Timeout set up for cmd %p, dev = %s, tmo = %lu\n", tcmu_cmd, + tcmu_cmd->tcmu_dev->name, tmo / MSEC_PER_SEC); } static int add_to_qfull_queue(struct tcmu_cmd *tcmu_cmd) { struct tcmu_dev *udev = tcmu_cmd->tcmu_dev; unsigned int tmo; - int ret; /* * For backwards compat if qfull_time_out is not set use @@ -931,13 +917,11 @@ static int add_to_qfull_queue(struct tcmu_cmd *tcmu_cmd) else tmo = TCMU_TIME_OUT; - ret = tcmu_setup_cmd_timer(tcmu_cmd, tmo, &udev->qfull_timer); - if (ret) - return ret; + tcmu_setup_cmd_timer(tcmu_cmd, tmo, &udev->qfull_timer); list_add_tail(&tcmu_cmd->queue_entry, &udev->qfull_queue); - pr_debug("adding cmd %u on dev %s to ring space wait queue\n", - tcmu_cmd->cmd_id, udev->name); + pr_debug("adding cmd %p on dev %s to ring space wait queue\n", + tcmu_cmd, udev->name); return 0; } @@ -959,7 +943,7 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err) struct tcmu_mailbox *mb; struct tcmu_cmd_entry *entry; struct iovec *iov; - int iov_cnt, ret; + int iov_cnt, cmd_id; uint32_t cmd_head; uint64_t cdb_off; bool copy_to_data_area; @@ -1026,7 +1010,7 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err) entry->hdr.cmd_id = 0; /* not used for PAD */ entry->hdr.kflags = 0; entry->hdr.uflags = 0; - tcmu_flush_dcache_range(entry, sizeof(*entry)); + tcmu_flush_dcache_range(entry, sizeof(entry->hdr)); UPDATE_HEAD(mb->cmd_head, pad_size, udev->cmdr_size); tcmu_flush_dcache_range(mb, sizeof(*mb)); @@ -1060,14 +1044,21 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err) } entry->req.iov_bidi_cnt = iov_cnt; - ret = tcmu_setup_cmd_timer(tcmu_cmd, udev->cmd_time_out, - &udev->cmd_timer); - if (ret) { - tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cnt); + cmd_id = idr_alloc(&udev->commands, tcmu_cmd, 1, USHRT_MAX, GFP_NOWAIT); + if (cmd_id < 0) { + pr_err("tcmu: Could not allocate cmd id.\n"); + tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cnt); *scsi_err = TCM_OUT_OF_RESOURCES; return -1; } + tcmu_cmd->cmd_id = cmd_id; + + pr_debug("allocated cmd id %u for cmd %p dev %s\n", tcmu_cmd->cmd_id, + tcmu_cmd, udev->name); + + tcmu_setup_cmd_timer(tcmu_cmd, udev->cmd_time_out, &udev->cmd_timer); + entry->hdr.cmd_id = tcmu_cmd->cmd_id; /* @@ -1084,7 +1075,7 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err) cdb_off = CMDR_OFF + cmd_head + base_command_size; memcpy((void *) mb + cdb_off, se_cmd->t_task_cdb, 
scsi_command_size(se_cmd->t_task_cdb)); entry->req.cdb_off = cdb_off; - tcmu_flush_dcache_range(entry, sizeof(*entry)); + tcmu_flush_dcache_range(entry, command_size); UPDATE_HEAD(mb->cmd_head, command_size, udev->cmdr_size); tcmu_flush_dcache_range(mb, sizeof(*mb)); @@ -1232,7 +1223,14 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev) struct tcmu_cmd_entry *entry = (void *) mb + CMDR_OFF + udev->cmdr_last_cleaned; - tcmu_flush_dcache_range(entry, sizeof(*entry)); + /* + * Flush max. up to end of cmd ring since current entry might + * be a padding that is shorter than sizeof(*entry) + */ + size_t ring_left = head_to_end(udev->cmdr_last_cleaned, + udev->cmdr_size); + tcmu_flush_dcache_range(entry, ring_left < sizeof(*entry) ? + ring_left : sizeof(*entry)); if (tcmu_hdr_get_op(entry->hdr.len_op) == TCMU_OP_PAD) { UPDATE_HEAD(udev->cmdr_last_cleaned, @@ -1279,50 +1277,39 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev) return handled; } -static int tcmu_check_expired_cmd(int id, void *p, void *data) +static void tcmu_check_expired_ring_cmd(struct tcmu_cmd *cmd) { - struct tcmu_cmd *cmd = p; - struct tcmu_dev *udev = cmd->tcmu_dev; - u8 scsi_status; struct se_cmd *se_cmd; - bool is_running; - - if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) - return 0; if (!time_after(jiffies, cmd->deadline)) - return 0; + return; - is_running = test_bit(TCMU_CMD_BIT_INFLIGHT, &cmd->flags); + set_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags); + list_del_init(&cmd->queue_entry); se_cmd = cmd->se_cmd; + cmd->se_cmd = NULL; - if (is_running) { - /* - * If cmd_time_out is disabled but qfull is set deadline - * will only reflect the qfull timeout. Ignore it. - */ - if (!udev->cmd_time_out) - return 0; + pr_debug("Timing out inflight cmd %u on dev %s.\n", + cmd->cmd_id, cmd->tcmu_dev->name); - set_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags); - /* - * target_complete_cmd will translate this to LUN COMM FAILURE - */ - scsi_status = SAM_STAT_CHECK_CONDITION; - list_del_init(&cmd->queue_entry); - cmd->se_cmd = NULL; - } else { - list_del_init(&cmd->queue_entry); - idr_remove(&udev->commands, id); - tcmu_free_cmd(cmd); - scsi_status = SAM_STAT_TASK_SET_FULL; - } + target_complete_cmd(se_cmd, SAM_STAT_CHECK_CONDITION); +} - pr_debug("Timing out cmd %u on dev %s that is %s.\n", - id, udev->name, is_running ? 
"inflight" : "queued"); +static void tcmu_check_expired_queue_cmd(struct tcmu_cmd *cmd) +{ + struct se_cmd *se_cmd; - target_complete_cmd(se_cmd, scsi_status); - return 0; + if (!time_after(jiffies, cmd->deadline)) + return; + + pr_debug("Timing out queued cmd %p on dev %s.\n", + cmd, cmd->tcmu_dev->name); + + list_del_init(&cmd->queue_entry); + se_cmd = cmd->se_cmd; + tcmu_free_cmd(cmd); + + target_complete_cmd(se_cmd, SAM_STAT_TASK_SET_FULL); } static void tcmu_device_timedout(struct tcmu_dev *udev) @@ -1407,16 +1394,15 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name) return &udev->se_dev; } -static bool run_qfull_queue(struct tcmu_dev *udev, bool fail) +static void run_qfull_queue(struct tcmu_dev *udev, bool fail) { struct tcmu_cmd *tcmu_cmd, *tmp_cmd; LIST_HEAD(cmds); - bool drained = true; sense_reason_t scsi_ret; int ret; if (list_empty(&udev->qfull_queue)) - return true; + return; pr_debug("running %s's cmdr queue forcefail %d\n", udev->name, fail); @@ -1425,11 +1411,10 @@ static bool run_qfull_queue(struct tcmu_dev *udev, bool fail) list_for_each_entry_safe(tcmu_cmd, tmp_cmd, &cmds, queue_entry) { list_del_init(&tcmu_cmd->queue_entry); - pr_debug("removing cmd %u on dev %s from queue\n", - tcmu_cmd->cmd_id, udev->name); + pr_debug("removing cmd %p on dev %s from queue\n", + tcmu_cmd, udev->name); if (fail) { - idr_remove(&udev->commands, tcmu_cmd->cmd_id); /* * We were not able to even start the command, so * fail with busy to allow a retry in case runner @@ -1444,10 +1429,8 @@ static bool run_qfull_queue(struct tcmu_dev *udev, bool fail) ret = queue_cmd_ring(tcmu_cmd, &scsi_ret); if (ret < 0) { - pr_debug("cmd %u on dev %s failed with %u\n", - tcmu_cmd->cmd_id, udev->name, scsi_ret); - - idr_remove(&udev->commands, tcmu_cmd->cmd_id); + pr_debug("cmd %p on dev %s failed with %u\n", + tcmu_cmd, udev->name, scsi_ret); /* * Ignore scsi_ret for now. target_complete_cmd * drops it. 
@@ -1462,13 +1445,11 @@ static bool run_qfull_queue(struct tcmu_dev *udev, bool fail) * the queue */ list_splice_tail(&cmds, &udev->qfull_queue); - drained = false; break; } } tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer); - return drained; } static int tcmu_irqcontrol(struct uio_info *info, s32 irq_on) @@ -1652,6 +1633,8 @@ static void tcmu_dev_kref_release(struct kref *kref) if (tcmu_check_and_free_pending_cmd(cmd) != 0) all_expired = false; } + if (!list_empty(&udev->qfull_queue)) + all_expired = false; idr_destroy(&udev->commands); WARN_ON(!all_expired); @@ -2037,9 +2020,6 @@ static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level) mutex_lock(&udev->cmdr_lock); idr_for_each_entry(&udev->commands, cmd, i) { - if (!test_bit(TCMU_CMD_BIT_INFLIGHT, &cmd->flags)) - continue; - pr_debug("removing cmd %u on dev %s from ring (is expired %d)\n", cmd->cmd_id, udev->name, test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)); @@ -2077,6 +2057,8 @@ static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level) del_timer(&udev->cmd_timer); + run_qfull_queue(udev, false); + mutex_unlock(&udev->cmdr_lock); } @@ -2698,6 +2680,7 @@ static void find_free_blocks(void) static void check_timedout_devices(void) { struct tcmu_dev *udev, *tmp_dev; + struct tcmu_cmd *cmd, *tmp_cmd; LIST_HEAD(devs); spin_lock_bh(&timed_out_udevs_lock); @@ -2708,9 +2691,24 @@ static void check_timedout_devices(void) spin_unlock_bh(&timed_out_udevs_lock); mutex_lock(&udev->cmdr_lock); - idr_for_each(&udev->commands, tcmu_check_expired_cmd, NULL); - tcmu_set_next_deadline(&udev->inflight_queue, &udev->cmd_timer); + /* + * If cmd_time_out is disabled but qfull is set deadline + * will only reflect the qfull timeout. Ignore it. + */ + if (udev->cmd_time_out) { + list_for_each_entry_safe(cmd, tmp_cmd, + &udev->inflight_queue, + queue_entry) { + tcmu_check_expired_ring_cmd(cmd); + } + tcmu_set_next_deadline(&udev->inflight_queue, + &udev->cmd_timer); + } + list_for_each_entry_safe(cmd, tmp_cmd, &udev->qfull_queue, + queue_entry) { + tcmu_check_expired_queue_cmd(cmd); + } tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer); mutex_unlock(&udev->cmdr_lock); diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c index b9b1e92c6f8d..9d24e85b0863 100644 --- a/drivers/target/target_core_xcopy.c +++ b/drivers/target/target_core_xcopy.c @@ -479,7 +479,7 @@ int target_xcopy_setup_pt(void) memset(&xcopy_pt_sess, 0, sizeof(struct se_session)); ret = transport_init_session(&xcopy_pt_sess); if (ret < 0) - return ret; + goto destroy_wq; xcopy_pt_nacl.se_tpg = &xcopy_pt_tpg; xcopy_pt_nacl.nacl_sess = &xcopy_pt_sess; @@ -488,12 +488,19 @@ int target_xcopy_setup_pt(void) xcopy_pt_sess.se_node_acl = &xcopy_pt_nacl; return 0; + +destroy_wq: + destroy_workqueue(xcopy_wq); + xcopy_wq = NULL; + return ret; } void target_xcopy_release_pt(void) { - if (xcopy_wq) + if (xcopy_wq) { destroy_workqueue(xcopy_wq); + transport_uninit_session(&xcopy_pt_sess); + } } /* diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c index 6b9865c786ba..c37886a26712 100644 --- a/drivers/thermal/cpu_cooling.c +++ b/drivers/thermal/cpu_cooling.c @@ -210,11 +210,11 @@ static u32 cpu_power_to_freq(struct cpufreq_cooling_device *cpufreq_cdev, int i; struct freq_table *freq_table = cpufreq_cdev->freq_table; - for (i = 1; i <= cpufreq_cdev->max_level; i++) - if (power > freq_table[i].power) + for (i = 0; i < cpufreq_cdev->max_level; i++) + if (power >= freq_table[i].power) break; - return freq_table[i - 
1].frequency; + return freq_table[i].frequency; } /** diff --git a/drivers/thermal/imx_thermal.c b/drivers/thermal/imx_thermal.c index 6b1e61f4adf0..200f5e78518d 100644 --- a/drivers/thermal/imx_thermal.c +++ b/drivers/thermal/imx_thermal.c @@ -683,7 +683,7 @@ MODULE_DEVICE_TABLE(of, of_imx_thermal_match); static int imx_thermal_register_legacy_cooling(struct imx_thermal_data *data) { struct device_node *np; - int ret; + int ret = 0; data->policy = cpufreq_cpu_get(0); if (!data->policy) { @@ -698,7 +698,7 @@ static int imx_thermal_register_legacy_cooling(struct imx_thermal_data *data) if (IS_ERR(data->cdev[0])) { ret = PTR_ERR(data->cdev[0]); cpufreq_cpu_put(data->policy); - return ret; + goto put_node; } } @@ -710,10 +710,12 @@ static int imx_thermal_register_legacy_cooling(struct imx_thermal_data *data) ret); cpufreq_cooling_unregister(data->cdev[0]); } - return ret; } - return 0; +put_node: + of_node_put(np); + + return ret; } static void imx_thermal_unregister_legacy_cooling(struct imx_thermal_data *data) diff --git a/drivers/thermal/intel/int340x_thermal/int3403_thermal.c b/drivers/thermal/intel/int340x_thermal/int3403_thermal.c index a7bbd8584ae2..e7b6f6f256a9 100644 --- a/drivers/thermal/intel/int340x_thermal/int3403_thermal.c +++ b/drivers/thermal/intel/int340x_thermal/int3403_thermal.c @@ -74,7 +74,7 @@ static void int3403_notify(acpi_handle handle, THERMAL_TRIP_CHANGED); break; default: - dev_err(&priv->pdev->dev, "Unsupported event [0x%x]\n", event); + dev_dbg(&priv->pdev->dev, "Unsupported event [0x%x]\n", event); break; } } diff --git a/drivers/thermal/mtk_thermal.c b/drivers/thermal/mtk_thermal.c index acf4854cbb8b..2783973b101c 100644 --- a/drivers/thermal/mtk_thermal.c +++ b/drivers/thermal/mtk_thermal.c @@ -211,6 +211,9 @@ enum { /* The total number of temperature sensors in the MT8183 */ #define MT8183_NUM_SENSORS 6 +/* The number of banks in the MT8183 */ +#define MT8183_NUM_ZONES 1 + /* The number of sensing points per bank */ #define MT8183_NUM_SENSORS_PER_ZONE 6 @@ -498,7 +501,7 @@ static const struct mtk_thermal_data mt7622_thermal_data = { static const struct mtk_thermal_data mt8183_thermal_data = { .auxadc_channel = MT8183_TEMP_AUXADC_CHANNEL, - .num_banks = MT8183_NUM_SENSORS_PER_ZONE, + .num_banks = MT8183_NUM_ZONES, .num_sensors = MT8183_NUM_SENSORS, .vts_index = mt8183_vts_index, .cali_val = MT8183_CALIBRATION, @@ -591,8 +594,7 @@ static int mtk_thermal_bank_temperature(struct mtk_thermal_bank *bank) u32 raw; for (i = 0; i < conf->bank_data[bank->id].num_sensors; i++) { - raw = readl(mt->thermal_base + - conf->msr[conf->bank_data[bank->id].sensors[i]]); + raw = readl(mt->thermal_base + conf->msr[i]); temp = raw_to_mcelsius(mt, conf->bank_data[bank->id].sensors[i], @@ -733,8 +735,7 @@ static void mtk_thermal_init_bank(struct mtk_thermal *mt, int num, for (i = 0; i < conf->bank_data[num].num_sensors; i++) writel(conf->sensor_mux_values[conf->bank_data[num].sensors[i]], - mt->thermal_base + - conf->adcpnp[conf->bank_data[num].sensors[i]]); + mt->thermal_base + conf->adcpnp[i]); writel((1 << conf->bank_data[num].num_sensors) - 1, controller_base + TEMP_MONCTL0); diff --git a/drivers/thermal/qcom/qcom-spmi-temp-alarm.c b/drivers/thermal/qcom/qcom-spmi-temp-alarm.c index bf7bae42c141..6dc879fea9c8 100644 --- a/drivers/thermal/qcom/qcom-spmi-temp-alarm.c +++ b/drivers/thermal/qcom/qcom-spmi-temp-alarm.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2011-2015, 2017, The Linux Foundation. All rights reserved. 
+ * Copyright (c) 2011-2015, 2017, 2020, The Linux Foundation. All rights reserved. */ #include @@ -191,7 +191,7 @@ static int qpnp_tm_get_temp(void *data, int *temp) chip->temp = mili_celsius; } - *temp = chip->temp < 0 ? 0 : chip->temp; + *temp = chip->temp; return 0; } diff --git a/drivers/thermal/rcar_gen3_thermal.c b/drivers/thermal/rcar_gen3_thermal.c index 755d2b5bd2c2..1ab2ffff4e7c 100644 --- a/drivers/thermal/rcar_gen3_thermal.c +++ b/drivers/thermal/rcar_gen3_thermal.c @@ -169,7 +169,7 @@ static int rcar_gen3_thermal_get_temp(void *devdata, int *temp) { struct rcar_gen3_thermal_tsc *tsc = devdata; int mcelsius, val; - u32 reg; + int reg; /* Read register and convert to mili Celsius */ reg = rcar_gen3_thermal_read(tsc, REG_GEN3_TEMP) & CTEMP_MASK; diff --git a/drivers/thermal/ti-soc-thermal/omap4-thermal-data.c b/drivers/thermal/ti-soc-thermal/omap4-thermal-data.c index 63b02bfb2adf..fdb8a495ab69 100644 --- a/drivers/thermal/ti-soc-thermal/omap4-thermal-data.c +++ b/drivers/thermal/ti-soc-thermal/omap4-thermal-data.c @@ -37,20 +37,21 @@ static struct temp_sensor_data omap4430_mpu_temp_sensor_data = { /* * Temperature values in milli degree celsius - * ADC code values from 530 to 923 + * ADC code values from 13 to 107, see TRM + * "18.4.10.2.3 ADC Codes Versus Temperature". */ static const int omap4430_adc_to_temp[OMAP4430_ADC_END_VALUE - OMAP4430_ADC_START_VALUE + 1] = { - -38000, -35000, -34000, -32000, -30000, -28000, -26000, -24000, -22000, - -20000, -18000, -17000, -15000, -13000, -12000, -10000, -8000, -6000, - -5000, -3000, -1000, 0, 2000, 3000, 5000, 6000, 8000, 10000, 12000, - 13000, 15000, 17000, 19000, 21000, 23000, 25000, 27000, 28000, 30000, - 32000, 33000, 35000, 37000, 38000, 40000, 42000, 43000, 45000, 47000, - 48000, 50000, 52000, 53000, 55000, 57000, 58000, 60000, 62000, 64000, - 66000, 68000, 70000, 71000, 73000, 75000, 77000, 78000, 80000, 82000, - 83000, 85000, 87000, 88000, 90000, 92000, 93000, 95000, 97000, 98000, - 100000, 102000, 103000, 105000, 107000, 109000, 111000, 113000, 115000, - 117000, 118000, 120000, 122000, 123000, + -40000, -38000, -35000, -34000, -32000, -30000, -28000, -26000, -24000, + -22000, -20000, -18500, -17000, -15000, -13500, -12000, -10000, -8000, + -6500, -5000, -3500, -1500, 0, 2000, 3500, 5000, 6500, 8500, 10000, + 12000, 13500, 15000, 17000, 19000, 21000, 23000, 25000, 27000, 28500, + 30000, 32000, 33500, 35000, 37000, 38500, 40000, 42000, 43500, 45000, + 47000, 48500, 50000, 52000, 53500, 55000, 57000, 58500, 60000, 62000, + 64000, 66000, 68000, 70000, 71500, 73500, 75000, 77000, 78500, 80000, + 82000, 83500, 85000, 87000, 88500, 90000, 92000, 93500, 95000, 97000, + 98500, 100000, 102000, 103500, 105000, 107000, 109000, 111000, 113000, + 115000, 117000, 118500, 120000, 122000, 123500, 125000, }; /* OMAP4430 data */ diff --git a/drivers/thermal/ti-soc-thermal/omap4xxx-bandgap.h b/drivers/thermal/ti-soc-thermal/omap4xxx-bandgap.h index a453ff8eb313..9a3955c3853b 100644 --- a/drivers/thermal/ti-soc-thermal/omap4xxx-bandgap.h +++ b/drivers/thermal/ti-soc-thermal/omap4xxx-bandgap.h @@ -53,9 +53,13 @@ * and thresholds for OMAP4430. */ -/* ADC conversion table limits */ -#define OMAP4430_ADC_START_VALUE 0 -#define OMAP4430_ADC_END_VALUE 127 +/* + * ADC conversion table limits. Ignore values outside the TRM listed + * range to avoid bogus thermal shutdowns. See omap4430 TRM chapter + * "18.4.10.2.3 ADC Codes Versus Temperature". 
+ */ +#define OMAP4430_ADC_START_VALUE 13 +#define OMAP4430_ADC_END_VALUE 107 /* bandgap clock limits (no control on 4430) */ #define OMAP4430_MAX_FREQ 32768 #define OMAP4430_MIN_FREQ 32768 diff --git a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c index d3e959d01606..2ce4b19f312a 100644 --- a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c +++ b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c @@ -169,7 +169,7 @@ int ti_thermal_expose_sensor(struct ti_bandgap *bgp, int id, data = ti_bandgap_get_sensor_data(bgp, id); - if (!data || IS_ERR(data)) + if (IS_ERR_OR_NULL(data)) data = ti_thermal_build_data(bgp, id); if (!data) @@ -196,7 +196,7 @@ int ti_thermal_remove_sensor(struct ti_bandgap *bgp, int id) data = ti_bandgap_get_sensor_data(bgp, id); - if (data && data->ti_thermal) { + if (!IS_ERR_OR_NULL(data) && data->ti_thermal) { if (data->our_zone) thermal_zone_device_unregister(data->ti_thermal); } @@ -262,7 +262,7 @@ int ti_thermal_unregister_cpu_cooling(struct ti_bandgap *bgp, int id) data = ti_bandgap_get_sensor_data(bgp, id); - if (data) { + if (!IS_ERR_OR_NULL(data)) { cpufreq_cooling_unregister(data->cool_dev); if (data->policy) cpufreq_cpu_put(data->policy); diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c index 36a3eb4ad4c5..75408b9f232d 100644 --- a/drivers/tty/n_gsm.c +++ b/drivers/tty/n_gsm.c @@ -665,11 +665,10 @@ static struct gsm_msg *gsm_data_alloc(struct gsm_mux *gsm, u8 addr, int len, * FIXME: lock against link layer control transmissions */ -static void gsm_data_kick(struct gsm_mux *gsm) +static void gsm_data_kick(struct gsm_mux *gsm, struct gsm_dlci *dlci) { struct gsm_msg *msg, *nmsg; int len; - int skip_sof = 0; list_for_each_entry_safe(msg, nmsg, &gsm->tx_list, list) { if (gsm->constipated && msg->addr) @@ -691,18 +690,23 @@ static void gsm_data_kick(struct gsm_mux *gsm) print_hex_dump_bytes("gsm_data_kick: ", DUMP_PREFIX_OFFSET, gsm->txframe, len); - - if (gsm->output(gsm, gsm->txframe + skip_sof, - len - skip_sof) < 0) + if (gsm->output(gsm, gsm->txframe, len) < 0) break; /* FIXME: Can eliminate one SOF in many more cases */ gsm->tx_bytes -= msg->len; - /* For a burst of frames skip the extra SOF within the - burst */ - skip_sof = 1; list_del(&msg->list); kfree(msg); + + if (dlci) { + tty_port_tty_wakeup(&dlci->port); + } else { + int i = 0; + + for (i = 0; i < NUM_DLCI; i++) + if (gsm->dlci[i]) + tty_port_tty_wakeup(&gsm->dlci[i]->port); + } } } @@ -754,7 +758,7 @@ static void __gsm_data_queue(struct gsm_dlci *dlci, struct gsm_msg *msg) /* Add to the actual output queue */ list_add_tail(&msg->list, &gsm->tx_list); gsm->tx_bytes += msg->len; - gsm_data_kick(gsm); + gsm_data_kick(gsm, dlci); } /** @@ -1215,7 +1219,7 @@ static void gsm_control_message(struct gsm_mux *gsm, unsigned int command, gsm_control_reply(gsm, CMD_FCON, NULL, 0); /* Kick the link in case it is idling */ spin_lock_irqsave(&gsm->tx_lock, flags); - gsm_data_kick(gsm); + gsm_data_kick(gsm, NULL); spin_unlock_irqrestore(&gsm->tx_lock, flags); break; case CMD_FCOFF: @@ -2525,7 +2529,7 @@ static void gsmld_write_wakeup(struct tty_struct *tty) /* Queue poll */ clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); spin_lock_irqsave(&gsm->tx_lock, flags); - gsm_data_kick(gsm); + gsm_data_kick(gsm, NULL); if (gsm->tx_bytes < TX_THRESH_LO) { gsm_dlci_data_sweep(gsm); } diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c index 28bdbd7b4ab2..2675771a03a0 100644 --- a/drivers/tty/serial/8250/8250_core.c +++ 
b/drivers/tty/serial/8250/8250_core.c @@ -524,6 +524,7 @@ static void __init serial8250_isa_init_ports(void) */ up->mcr_mask = ~ALPHA_KLUDGE_MCR; up->mcr_force = ALPHA_KLUDGE_MCR; + serial8250_set_defaults(up); } /* chain base port ops to support Remote Supervisor Adapter */ @@ -547,7 +548,6 @@ static void __init serial8250_isa_init_ports(void) port->membase = old_serial_port[i].iomem_base; port->iotype = old_serial_port[i].io_type; port->regshift = old_serial_port[i].iomem_reg_shift; - serial8250_set_defaults(up); port->irqflags |= irqflag; if (serial8250_isa_config != NULL) @@ -1026,7 +1026,7 @@ int serial8250_register_8250_port(struct uart_8250_port *up) gpios = mctrl_gpio_init(&uart->port, 0); if (IS_ERR(gpios)) { ret = PTR_ERR(gpios); - goto out_unlock; + goto err; } else { uart->gpios = gpios; } @@ -1075,8 +1075,10 @@ int serial8250_register_8250_port(struct uart_8250_port *up) serial8250_apply_quirks(uart); ret = uart_add_one_port(&serial8250_reg, &uart->port); - if (ret == 0) - ret = uart->port.line; + if (ret) + goto err; + + ret = uart->port.line; } else { dev_info(uart->port.dev, "skipping CIR port at 0x%lx / 0x%llx, IRQ %d\n", @@ -1098,10 +1100,14 @@ int serial8250_register_8250_port(struct uart_8250_port *up) } } -out_unlock: mutex_unlock(&serial_mutex); return ret; + +err: + uart->port.dev = NULL; + mutex_unlock(&serial_mutex); + return ret; } EXPORT_SYMBOL(serial8250_register_8250_port); diff --git a/drivers/tty/serial/8250/8250_exar.c b/drivers/tty/serial/8250/8250_exar.c index e1268646ee56..93367dea4d8a 100644 --- a/drivers/tty/serial/8250/8250_exar.c +++ b/drivers/tty/serial/8250/8250_exar.c @@ -307,7 +307,17 @@ static void setup_gpio(struct pci_dev *pcidev, u8 __iomem *p) * devices will export them as GPIOs, so we pre-configure them safely * as inputs. */ - u8 dir = pcidev->vendor == PCI_VENDOR_ID_EXAR ? 
0xff : 0x00; + + u8 dir = 0x00; + + if ((pcidev->vendor == PCI_VENDOR_ID_EXAR) && + (pcidev->subsystem_vendor != PCI_VENDOR_ID_SEALEVEL)) { + // Configure GPIO as inputs for Commtech adapters + dir = 0xff; + } else { + // Configure GPIO as outputs for SeaLevel adapters + dir = 0x00; + } writeb(0x00, p + UART_EXAR_MPIOINT_7_0); writeb(0x00, p + UART_EXAR_MPIOLVL_7_0); @@ -715,6 +725,24 @@ static const struct exar8250_board pbn_exar_XR17V35x = { .exit = pci_xr17v35x_exit, }; +static const struct exar8250_board pbn_fastcom35x_2 = { + .num_ports = 2, + .setup = pci_xr17v35x_setup, + .exit = pci_xr17v35x_exit, +}; + +static const struct exar8250_board pbn_fastcom35x_4 = { + .num_ports = 4, + .setup = pci_xr17v35x_setup, + .exit = pci_xr17v35x_exit, +}; + +static const struct exar8250_board pbn_fastcom35x_8 = { + .num_ports = 8, + .setup = pci_xr17v35x_setup, + .exit = pci_xr17v35x_exit, +}; + static const struct exar8250_board pbn_exar_XR17V4358 = { .num_ports = 12, .setup = pci_xr17v35x_setup, @@ -785,9 +813,9 @@ static const struct pci_device_id exar_pci_tbl[] = { EXAR_DEVICE(EXAR, EXAR_XR17V358, pbn_exar_XR17V35x), EXAR_DEVICE(EXAR, EXAR_XR17V4358, pbn_exar_XR17V4358), EXAR_DEVICE(EXAR, EXAR_XR17V8358, pbn_exar_XR17V8358), - EXAR_DEVICE(COMMTECH, COMMTECH_4222PCIE, pbn_exar_XR17V35x), - EXAR_DEVICE(COMMTECH, COMMTECH_4224PCIE, pbn_exar_XR17V35x), - EXAR_DEVICE(COMMTECH, COMMTECH_4228PCIE, pbn_exar_XR17V35x), + EXAR_DEVICE(COMMTECH, COMMTECH_4222PCIE, pbn_fastcom35x_2), + EXAR_DEVICE(COMMTECH, COMMTECH_4224PCIE, pbn_fastcom35x_4), + EXAR_DEVICE(COMMTECH, COMMTECH_4228PCIE, pbn_fastcom35x_8), EXAR_DEVICE(COMMTECH, COMMTECH_4222PCI335, pbn_fastcom335_2), EXAR_DEVICE(COMMTECH, COMMTECH_4224PCI335, pbn_fastcom335_4), diff --git a/drivers/tty/serial/8250/8250_mtk.c b/drivers/tty/serial/8250/8250_mtk.c index 4d067f515f74..2b59a4305077 100644 --- a/drivers/tty/serial/8250/8250_mtk.c +++ b/drivers/tty/serial/8250/8250_mtk.c @@ -305,8 +305,21 @@ mtk8250_set_termios(struct uart_port *port, struct ktermios *termios, } #endif + /* + * Store the requested baud rate before calling the generic 8250 + * set_termios method. Standard 8250 port expects bauds to be + * no higher than (uartclk / 16) so the baud will be clamped if it + * gets out of that bound. Mediatek 8250 port supports speed + * higher than that, therefore we'll get original baud rate back + * after calling the generic set_termios method and recalculate + * the speed later in this method. + */ + baud = tty_termios_baud_rate(termios); + serial8250_do_set_termios(port, termios, old); + tty_termios_encode_baud_rate(termios, baud, baud); + /* * Mediatek UARTs use an extra highspeed register (MTK_UART_HIGHS) * @@ -338,6 +351,11 @@ mtk8250_set_termios(struct uart_port *port, struct ktermios *termios, */ spin_lock_irqsave(&port->lock, flags); + /* + * Update the per-port timeout. 
+ */ + uart_update_timeout(port, termios->c_cflag, baud); + /* set DLAB we have cval saved in up->lcr from the call to the core */ serial_port_out(port, UART_LCR, up->lcr | UART_LCR_DLAB); serial_dl_write(up, quot); diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c index 8a01d034f9d1..8814ff38aa67 100644 --- a/drivers/tty/serial/8250/8250_pci.c +++ b/drivers/tty/serial/8250/8250_pci.c @@ -1871,12 +1871,6 @@ pci_moxa_setup(struct serial_private *priv, #define PCIE_DEVICE_ID_WCH_CH384_4S 0x3470 #define PCIE_DEVICE_ID_WCH_CH382_2S 0x3253 -#define PCI_VENDOR_ID_PERICOM 0x12D8 -#define PCI_DEVICE_ID_PERICOM_PI7C9X7951 0x7951 -#define PCI_DEVICE_ID_PERICOM_PI7C9X7952 0x7952 -#define PCI_DEVICE_ID_PERICOM_PI7C9X7954 0x7954 -#define PCI_DEVICE_ID_PERICOM_PI7C9X7958 0x7958 - #define PCI_VENDOR_ID_ACCESIO 0x494f #define PCI_DEVICE_ID_ACCESIO_PCIE_COM_2SDB 0x1051 #define PCI_DEVICE_ID_ACCESIO_MPCIE_COM_2S 0x1053 @@ -5574,6 +5568,17 @@ static const struct pci_device_id serial_pci_tbl[] = { PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_wch384_4 }, + /* + * Realtek RealManage + */ + { PCI_VENDOR_ID_REALTEK, 0x816a, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, pbn_b0_1_115200 }, + + { PCI_VENDOR_ID_REALTEK, 0x816b, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, pbn_b0_1_115200 }, + /* Fintek PCI serial cards */ { PCI_DEVICE(0x1c29, 0x1104), .driver_data = pbn_fintek_4 }, { PCI_DEVICE(0x1c29, 0x1108), .driver_data = pbn_fintek_8 }, diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c index 2c65c775bf5a..90f09ed6e5ad 100644 --- a/drivers/tty/serial/8250/8250_port.c +++ b/drivers/tty/serial/8250/8250_port.c @@ -2198,6 +2198,10 @@ int serial8250_do_startup(struct uart_port *port) if (port->irq && !(up->port.flags & UPF_NO_THRE_TEST)) { unsigned char iir1; + + if (port->irqflags & IRQF_SHARED) + disable_irq_nosync(port->irq); + /* * Test for UARTs that do not reassert THRE when the * transmitter is idle and the interrupt has already @@ -2207,8 +2211,6 @@ int serial8250_do_startup(struct uart_port *port) * allow register changes to become visible. */ spin_lock_irqsave(&port->lock, flags); - if (up->port.irqflags & IRQF_SHARED) - disable_irq_nosync(port->irq); wait_for_xmitr(up, UART_LSR_THRE); serial_port_out_sync(port, UART_IER, UART_IER_THRI); @@ -2220,9 +2222,10 @@ int serial8250_do_startup(struct uart_port *port) iir = serial_port_in(port, UART_IIR); serial_port_out(port, UART_IER, 0); + spin_unlock_irqrestore(&port->lock, flags); + if (port->irqflags & IRQF_SHARED) enable_irq(port->irq); - spin_unlock_irqrestore(&port->lock, flags); /* * If the interrupt is not reasserted, or we otherwise @@ -2539,6 +2542,8 @@ static unsigned int serial8250_get_baud_rate(struct uart_port *port, struct ktermios *termios, struct ktermios *old) { + unsigned int tolerance = port->uartclk / 100; + /* * Ask the core to calculate the divisor for us. 
* Allow 1% tolerance at the upper limit so uart clks marginally @@ -2547,7 +2552,7 @@ static unsigned int serial8250_get_baud_rate(struct uart_port *port, */ return uart_get_baud_rate(port, termios, old, port->uartclk / 16 / UART_DIV_MAX, - port->uartclk); + (port->uartclk + tolerance) / 16); } void diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c index b0b689546395..a8a538b34b53 100644 --- a/drivers/tty/serial/amba-pl011.c +++ b/drivers/tty/serial/amba-pl011.c @@ -2252,9 +2252,8 @@ pl011_console_write(struct console *co, const char *s, unsigned int count) clk_disable(uap->clk); } -static void __init -pl011_console_get_options(struct uart_amba_port *uap, int *baud, - int *parity, int *bits) +static void pl011_console_get_options(struct uart_amba_port *uap, int *baud, + int *parity, int *bits) { if (pl011_read(uap, REG_CR) & UART01x_CR_UARTEN) { unsigned int lcr_h, ibrd, fbrd; @@ -2287,7 +2286,7 @@ pl011_console_get_options(struct uart_amba_port *uap, int *baud, } } -static int __init pl011_console_setup(struct console *co, char *options) +static int pl011_console_setup(struct console *co, char *options) { struct uart_amba_port *uap; int baud = 38400; @@ -2355,8 +2354,8 @@ static int __init pl011_console_setup(struct console *co, char *options) * * Returns 0 if console matches; otherwise non-zero to use default matching */ -static int __init pl011_console_match(struct console *co, char *name, int idx, - char *options) +static int pl011_console_match(struct console *co, char *name, int idx, + char *options) { unsigned char iotype; resource_size_t addr; @@ -2585,6 +2584,7 @@ static int pl011_setup_port(struct device *dev, struct uart_amba_port *uap, uap->port.fifosize = uap->fifosize; uap->port.flags = UPF_BOOT_AUTOCONF; uap->port.line = index; + spin_lock_init(&uap->port.lock); amba_ports[index] = uap; @@ -2593,7 +2593,7 @@ static int pl011_setup_port(struct device *dev, struct uart_amba_port *uap, static int pl011_register_port(struct uart_amba_port *uap) { - int ret; + int ret, i; /* Ensure interrupts from this UART are masked and cleared */ pl011_write(0, uap, REG_IMSC); @@ -2604,6 +2604,9 @@ static int pl011_register_port(struct uart_amba_port *uap) if (ret < 0) { dev_err(uap->port.dev, "Failed to register AMBA-PL011 driver\n"); + for (i = 0; i < ARRAY_SIZE(amba_ports); i++) + if (amba_ports[i] == uap) + amba_ports[i] = NULL; return ret; } } diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c index c7d51b51898f..f5608ad68ae1 100644 --- a/drivers/tty/serial/kgdboc.c +++ b/drivers/tty/serial/kgdboc.c @@ -20,6 +20,7 @@ #include #include #include +#include #define MAX_CONFIG_LEN 40 @@ -27,6 +28,7 @@ static struct kgdb_io kgdboc_io_ops; /* -1 = init not run yet, 0 = unconfigured, 1 = configured. 
*/ static int configured = -1; +static DEFINE_MUTEX(config_mutex); static char config[MAX_CONFIG_LEN]; static struct kparam_string kps = { @@ -38,6 +40,8 @@ static int kgdboc_use_kms; /* 1 if we use kernel mode switching */ static struct tty_driver *kgdb_tty_driver; static int kgdb_tty_line; +static struct platform_device *kgdboc_pdev; + #ifdef CONFIG_KDB_KEYBOARD static int kgdboc_reset_connect(struct input_handler *handler, struct input_dev *dev, @@ -133,11 +137,13 @@ static void kgdboc_unregister_kbd(void) static void cleanup_kgdboc(void) { + if (configured != 1) + return; + if (kgdb_unregister_nmi_console()) return; kgdboc_unregister_kbd(); - if (configured == 1) - kgdb_unregister_io_module(&kgdboc_io_ops); + kgdb_unregister_io_module(&kgdboc_io_ops); } static int configure_kgdboc(void) @@ -200,20 +206,79 @@ nmi_con_failed: kgdb_unregister_io_module(&kgdboc_io_ops); noconfig: kgdboc_unregister_kbd(); - config[0] = 0; configured = 0; - cleanup_kgdboc(); return err; } +static int kgdboc_probe(struct platform_device *pdev) +{ + int ret = 0; + + mutex_lock(&config_mutex); + if (configured != 1) { + ret = configure_kgdboc(); + + /* Convert "no device" to "defer" so we'll keep trying */ + if (ret == -ENODEV) + ret = -EPROBE_DEFER; + } + mutex_unlock(&config_mutex); + + return ret; +} + +static struct platform_driver kgdboc_platform_driver = { + .probe = kgdboc_probe, + .driver = { + .name = "kgdboc", + .suppress_bind_attrs = true, + }, +}; + static int __init init_kgdboc(void) { - /* Already configured? */ - if (configured == 1) + int ret; + + /* + * kgdboc is a little bit of an odd "platform_driver". It can be + * up and running long before the platform_driver object is + * created and thus doesn't actually store anything in it. There's + * only one instance of kgdb so anything is stored as global state. + * The platform_driver is only created so that we can leverage the + * kernel's mechanisms (like -EPROBE_DEFER) to call us when our + * underlying tty is ready. Here we init our platform driver and + * then create the single kgdboc instance. + */ + ret = platform_driver_register(&kgdboc_platform_driver); + if (ret) + return ret; + + kgdboc_pdev = platform_device_alloc("kgdboc", PLATFORM_DEVID_NONE); + if (!kgdboc_pdev) { + ret = -ENOMEM; + goto err_did_register; + } + + ret = platform_device_add(kgdboc_pdev); + if (!ret) return 0; - return configure_kgdboc(); + platform_device_put(kgdboc_pdev); + +err_did_register: + platform_driver_unregister(&kgdboc_platform_driver); + return ret; +} + +static void exit_kgdboc(void) +{ + mutex_lock(&config_mutex); + cleanup_kgdboc(); + mutex_unlock(&config_mutex); + + platform_device_unregister(kgdboc_pdev); + platform_driver_unregister(&kgdboc_platform_driver); } static int kgdboc_get_char(void) @@ -236,24 +301,20 @@ static int param_set_kgdboc_var(const char *kmessage, const struct kernel_param *kp) { size_t len = strlen(kmessage); + int ret = 0; if (len >= MAX_CONFIG_LEN) { pr_err("config string too long\n"); return -ENOSPC; } - /* Only copy in the string if the init function has not run yet */ - if (configured < 0) { - strcpy(config, kmessage); - return 0; - } - if (kgdb_connected) { pr_err("Cannot reconfigure while KGDB is connected.\n"); - return -EBUSY; } + mutex_lock(&config_mutex); + strcpy(config, kmessage); /* Chop out \n char as a result of echo */ if (len && config[len - 1] == '\n') @@ -262,8 +323,30 @@ static int param_set_kgdboc_var(const char *kmessage, if (configured == 1) cleanup_kgdboc(); - /* Go and configure with the new params. 
*/ - return configure_kgdboc(); + /* + * Configure with the new params as long as init already ran. + * Note that we can get called before init if someone loads us + * with "modprobe kgdboc kgdboc=..." or if they happen to use the + * the odd syntax of "kgdboc.kgdboc=..." on the kernel command. + */ + if (configured >= 0) + ret = configure_kgdboc(); + + /* + * If we couldn't configure then clear out the config. Note that + * specifying an invalid config on the kernel command line vs. + * through sysfs have slightly different behaviors. If we fail + * to configure what was specified on the kernel command line + * we'll leave it in the 'config' and return -EPROBE_DEFER from + * our probe. When specified through sysfs userspace is + * responsible for loading the tty driver before setting up. + */ + if (ret) + config[0] = '\0'; + + mutex_unlock(&config_mutex); + + return ret; } static int dbg_restore_graphics; @@ -326,15 +409,8 @@ __setup("kgdboc=", kgdboc_option_setup); /* This is only available if kgdboc is a built in for early debugging */ static int __init kgdboc_early_init(char *opt) { - /* save the first character of the config string because the - * init routine can destroy it. - */ - char save_ch; - kgdboc_option_setup(opt); - save_ch = config[0]; - init_kgdboc(); - config[0] = save_ch; + configure_kgdboc(); return 0; } @@ -342,7 +418,7 @@ early_param("ekgdboc", kgdboc_early_init); #endif /* CONFIG_KGDB_SERIAL_CONSOLE */ module_init(init_kgdboc); -module_exit(cleanup_kgdboc); +module_exit(exit_kgdboc); module_param_call(kgdboc, param_set_kgdboc_var, param_get_string, &kps, 0644); MODULE_PARM_DESC(kgdboc, "[,baud]"); MODULE_DESCRIPTION("KGDB Console TTY Driver"); diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c index e34525970682..5d483e996514 100644 --- a/drivers/tty/serial/mxs-auart.c +++ b/drivers/tty/serial/mxs-auart.c @@ -1701,21 +1701,21 @@ static int mxs_auart_probe(struct platform_device *pdev) irq = platform_get_irq(pdev, 0); if (irq < 0) { ret = irq; - goto out_disable_clks; + goto out_iounmap; } s->port.irq = irq; ret = devm_request_irq(&pdev->dev, irq, mxs_auart_irq_handle, 0, dev_name(&pdev->dev), s); if (ret) - goto out_disable_clks; + goto out_iounmap; platform_set_drvdata(pdev, s); ret = mxs_auart_init_gpios(s, &pdev->dev); if (ret) { dev_err(&pdev->dev, "Failed to initialize GPIOs.\n"); - goto out_disable_clks; + goto out_iounmap; } /* @@ -1723,7 +1723,7 @@ static int mxs_auart_probe(struct platform_device *pdev) */ ret = mxs_auart_request_gpio_irq(s); if (ret) - goto out_disable_clks; + goto out_iounmap; auart_port[s->port.line] = s; @@ -1749,6 +1749,9 @@ out_free_qpio_irq: mxs_auart_free_gpio_irq(s); auart_port[pdev->id] = NULL; +out_iounmap: + iounmap(s->port.membase); + out_disable_clks: if (is_asm9260_auart(s)) { clk_disable_unprepare(s->clk); @@ -1764,6 +1767,7 @@ static int mxs_auart_remove(struct platform_device *pdev) uart_remove_one_port(&auart_driver, &s->port); auart_port[pdev->id] = NULL; mxs_auart_free_gpio_irq(s); + iounmap(s->port.membase); if (is_asm9260_auart(s)) { clk_disable_unprepare(s->clk); clk_disable_unprepare(s->clk_ahb); diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c index f98a79172ad2..0b184256034f 100644 --- a/drivers/tty/serial/qcom_geni_serial.c +++ b/drivers/tty/serial/qcom_geni_serial.c @@ -1063,7 +1063,7 @@ static unsigned int qcom_geni_serial_tx_empty(struct uart_port *uport) } #ifdef CONFIG_SERIAL_QCOM_GENI_CONSOLE -static int __init qcom_geni_console_setup(struct 
console *co, char *options) +static int qcom_geni_console_setup(struct console *co, char *options) { struct uart_port *uport; struct qcom_geni_serial_port *port; diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c index 83fd51607741..71f99e921759 100644 --- a/drivers/tty/serial/samsung.c +++ b/drivers/tty/serial/samsung.c @@ -1791,9 +1791,11 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport, ourport->tx_irq = ret + 1; } - ret = platform_get_irq(platdev, 1); - if (ret > 0) - ourport->tx_irq = ret; + if (!s3c24xx_serial_has_interrupt_mask(port)) { + ret = platform_get_irq(platdev, 1); + if (ret > 0) + ourport->tx_irq = ret; + } /* * DMA is currently supported only on DT platforms, if DMA properties * are specified. diff --git a/drivers/tty/serial/serial-tegra.c b/drivers/tty/serial/serial-tegra.c index 2f599515c133..51c3f579ccd0 100644 --- a/drivers/tty/serial/serial-tegra.c +++ b/drivers/tty/serial/serial-tegra.c @@ -651,11 +651,14 @@ static void tegra_uart_handle_rx_pio(struct tegra_uart_port *tup, ch = (unsigned char) tegra_uart_read(tup, UART_RX); tup->uport.icount.rx++; - if (!uart_handle_sysrq_char(&tup->uport, ch) && tty) - tty_insert_flip_char(tty, ch, flag); + if (uart_handle_sysrq_char(&tup->uport, ch)) + continue; if (tup->uport.ignore_status_mask & UART_LSR_DR) continue; + + if (tty) + tty_insert_flip_char(tty, ch, flag); } while (1); } diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c index 2f72514d63ed..bf83e6c212f5 100644 --- a/drivers/tty/serial/stm32-usart.c +++ b/drivers/tty/serial/stm32-usart.c @@ -937,7 +937,7 @@ static int stm32_init_port(struct stm32_port *stm32port, stm32_init_rs485(port, pdev); if (stm32port->info->cfg.has_wakeup) { - stm32port->wakeirq = platform_get_irq(pdev, 1); + stm32port->wakeirq = platform_get_irq_optional(pdev, 1); if (stm32port->wakeirq <= 0 && stm32port->wakeirq != -ENXIO) return stm32port->wakeirq ? : -ENODEV; } diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c index 3cb9aacfe0b2..8948970f795e 100644 --- a/drivers/tty/serial/xilinx_uartps.c +++ b/drivers/tty/serial/xilinx_uartps.c @@ -1445,7 +1445,6 @@ static int cdns_uart_probe(struct platform_device *pdev) cdns_uart_uart_driver.nr = CDNS_UART_NR_PORTS; #ifdef CONFIG_SERIAL_XILINX_PS_UART_CONSOLE cdns_uart_uart_driver.cons = &cdns_uart_console; - cdns_uart_console.index = id; #endif rc = uart_register_driver(&cdns_uart_uart_driver); @@ -1560,8 +1559,10 @@ static int cdns_uart_probe(struct platform_device *pdev) * If register_console() don't assign value, then console_port pointer * is cleanup. 
*/ - if (!console_port) + if (!console_port) { + cdns_uart_console.index = id; console_port = port; + } #endif rc = uart_add_one_port(&cdns_uart_uart_driver, port); @@ -1574,8 +1575,10 @@ static int cdns_uart_probe(struct platform_device *pdev) #ifdef CONFIG_SERIAL_XILINX_PS_UART_CONSOLE /* This is not port which is used for console that's why clean it up */ if (console_port == port && - !(cdns_uart_uart_driver.cons->flags & CON_ENABLED)) + !(cdns_uart_uart_driver.cons->flags & CON_ENABLED)) { console_port = NULL; + cdns_uart_console.index = -1; + } #endif cdns_uart_data->cts_override = of_property_read_bool(pdev->dev.of_node, diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c index fd0361d72738..d07a9c9c7608 100644 --- a/drivers/tty/vt/vt.c +++ b/drivers/tty/vt/vt.c @@ -1092,10 +1092,19 @@ static const struct tty_port_operations vc_port_ops = { .destruct = vc_port_destruct, }; +/* + * Change # of rows and columns (0 means unchanged/the size of fg_console) + * [this is to be used together with some user program + * like resize that changes the hardware videomode] + */ +#define VC_MAXCOL (32767) +#define VC_MAXROW (32767) + int vc_allocate(unsigned int currcons) /* return 0 on success */ { struct vt_notifier_param param; struct vc_data *vc; + int err; WARN_CONSOLE_UNLOCKED(); @@ -1125,6 +1134,11 @@ int vc_allocate(unsigned int currcons) /* return 0 on success */ if (!*vc->vc_uni_pagedir_loc) con_set_default_unimap(vc); + err = -EINVAL; + if (vc->vc_cols > VC_MAXCOL || vc->vc_rows > VC_MAXROW || + vc->vc_screenbuf_size > KMALLOC_MAX_SIZE || !vc->vc_screenbuf_size) + goto err_free; + err = -ENOMEM; vc->vc_screenbuf = kzalloc(vc->vc_screenbuf_size, GFP_KERNEL); if (!vc->vc_screenbuf) goto err_free; @@ -1143,7 +1157,7 @@ err_free: visual_deinit(vc); kfree(vc); vc_cons[currcons].d = NULL; - return -ENOMEM; + return err; } static inline int resize_screen(struct vc_data *vc, int width, int height, @@ -1158,14 +1172,6 @@ static inline int resize_screen(struct vc_data *vc, int width, int height, return err; } -/* - * Change # of rows and columns (0 means unchanged/the size of fg_console) - * [this is to be used together with some user program - * like resize that changes the hardware videomode] - */ -#define VC_RESIZE_MAXCOL (32767) -#define VC_RESIZE_MAXROW (32767) - /** * vc_do_resize - resizing method for the tty * @tty: tty being resized @@ -1190,7 +1196,7 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc, unsigned int old_rows, old_row_size, first_copied_row; unsigned int new_cols, new_rows, new_row_size, new_screen_size; unsigned int user; - unsigned short *newscreen; + unsigned short *oldscreen, *newscreen; struct uni_screen *new_uniscr = NULL; WARN_CONSOLE_UNLOCKED(); @@ -1201,7 +1207,7 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc, user = vc->vc_resize_user; vc->vc_resize_user = 0; - if (cols > VC_RESIZE_MAXCOL || lines > VC_RESIZE_MAXROW) + if (cols > VC_MAXCOL || lines > VC_MAXROW) return -EINVAL; new_cols = (cols ? 
cols : vc->vc_cols); @@ -1212,7 +1218,7 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc, if (new_cols == vc->vc_cols && new_rows == vc->vc_rows) return 0; - if (new_screen_size > KMALLOC_MAX_SIZE) + if (new_screen_size > KMALLOC_MAX_SIZE || !new_screen_size) return -EINVAL; newscreen = kzalloc(new_screen_size, GFP_USER); if (!newscreen) @@ -1288,10 +1294,11 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc, if (new_scr_end > new_origin) scr_memsetw((void *)new_origin, vc->vc_video_erase_char, new_scr_end - new_origin); - kfree(vc->vc_screenbuf); + oldscreen = vc->vc_screenbuf; vc->vc_screenbuf = newscreen; vc->vc_screenbuf_size = new_screen_size; set_origin(vc); + kfree(oldscreen); /* do part of a reset_terminal() */ vc->vc_top = 0; @@ -3396,6 +3403,7 @@ static int __init con_init(void) INIT_WORK(&vc_cons[currcons].SAK_work, vc_SAK); tty_port_init(&vc->port); visual_init(vc, currcons, 1); + /* Assuming vc->vc_{cols,rows,screenbuf_size} are sane here. */ vc->vc_screenbuf = kzalloc(vc->vc_screenbuf_size, GFP_NOWAIT); vc_init(vc, vc->vc_rows, vc->vc_cols, currcons || !vc->vc_sw->con_save_screen); diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c index daf61c28ba76..cbc85c995d92 100644 --- a/drivers/tty/vt/vt_ioctl.c +++ b/drivers/tty/vt/vt_ioctl.c @@ -893,12 +893,22 @@ int vt_ioctl(struct tty_struct *tty, console_lock(); vcp = vc_cons[i].d; if (vcp) { + int ret; + int save_scan_lines = vcp->vc_scan_lines; + int save_font_height = vcp->vc_font.height; + if (v.v_vlin) vcp->vc_scan_lines = v.v_vlin; if (v.v_clin) vcp->vc_font.height = v.v_clin; vcp->vc_resize_user = 1; - vc_resize(vcp, v.v_cols, v.v_rows); + ret = vc_resize(vcp, v.v_cols, v.v_rows); + if (ret) { + vcp->vc_scan_lines = save_scan_lines; + vcp->vc_font.height = save_font_height; + console_unlock(); + return ret; + } } console_unlock(); } diff --git a/drivers/uio/uio_pdrv_genirq.c b/drivers/uio/uio_pdrv_genirq.c index 1303b165055b..538adf9c47bb 100644 --- a/drivers/uio/uio_pdrv_genirq.c +++ b/drivers/uio/uio_pdrv_genirq.c @@ -152,9 +152,9 @@ static int uio_pdrv_genirq_probe(struct platform_device *pdev) priv->pdev = pdev; if (!uioinfo->irq) { - ret = platform_get_irq(pdev, 0); + ret = platform_get_irq_optional(pdev, 0); uioinfo->irq = ret; - if (ret == -ENXIO && pdev->dev.of_node) + if (ret == -ENXIO) uioinfo->irq = UIO_IRQ_NONE; else if (ret < 0) { dev_err(&pdev->dev, "failed to get IRQ\n"); diff --git a/drivers/usb/c67x00/c67x00-sched.c b/drivers/usb/c67x00/c67x00-sched.c index 633c52de3bb3..9865750bc31e 100644 --- a/drivers/usb/c67x00/c67x00-sched.c +++ b/drivers/usb/c67x00/c67x00-sched.c @@ -486,7 +486,7 @@ c67x00_giveback_urb(struct c67x00_hcd *c67x00, struct urb *urb, int status) c67x00_release_urb(c67x00, urb); usb_hcd_unlink_urb_from_ep(c67x00_hcd_to_hcd(c67x00), urb); spin_unlock(&c67x00->lock); - usb_hcd_giveback_urb(c67x00_hcd_to_hcd(c67x00), urb, urbp->status); + usb_hcd_giveback_urb(c67x00_hcd_to_hcd(c67x00), urb, status); spin_lock(&c67x00->lock); } diff --git a/drivers/usb/cdns3/ep0.c b/drivers/usb/cdns3/ep0.c index e3ae25483d7e..2ecaf8c29907 100644 --- a/drivers/usb/cdns3/ep0.c +++ b/drivers/usb/cdns3/ep0.c @@ -256,7 +256,7 @@ static int cdns3_req_ep0_get_status(struct cdns3_device *priv_dev, return cdns3_ep0_delegate_req(priv_dev, ctrl); case USB_RECIP_ENDPOINT: /* check if endpoint is stalled */ - cdns3_select_ep(priv_dev, ctrl->wIndex); + cdns3_select_ep(priv_dev, le16_to_cpu(ctrl->wIndex)); if (EP_STS_STALL(readl(&priv_dev->regs->ep_sts))) usb_status = 
BIT(USB_ENDPOINT_HALT); break; @@ -377,10 +377,10 @@ static int cdns3_ep0_feature_handle_endpoint(struct cdns3_device *priv_dev, if (!(ctrl->wIndex & ~USB_DIR_IN)) return 0; - index = cdns3_ep_addr_to_index(ctrl->wIndex); + index = cdns3_ep_addr_to_index(le16_to_cpu(ctrl->wIndex)); priv_ep = priv_dev->eps[index]; - cdns3_select_ep(priv_dev, ctrl->wIndex); + cdns3_select_ep(priv_dev, le16_to_cpu(ctrl->wIndex)); if (set) { cdns3_dbg(priv_ep->cdns3_dev, "Stall endpoint %s\n", @@ -477,7 +477,7 @@ static int cdns3_req_ep0_set_sel(struct cdns3_device *priv_dev, if (priv_dev->gadget.state < USB_STATE_ADDRESS) return -EINVAL; - if (ctrl_req->wLength != 6) { + if (le16_to_cpu(ctrl_req->wLength) != 6) { dev_err(priv_dev->dev, "Set SEL should be 6 bytes, got %d\n", ctrl_req->wLength); return -EINVAL; @@ -501,7 +501,7 @@ static int cdns3_req_ep0_set_isoch_delay(struct cdns3_device *priv_dev, if (ctrl_req->wIndex || ctrl_req->wLength) return -EINVAL; - priv_dev->isoch_delay = ctrl_req->wValue; + priv_dev->isoch_delay = le16_to_cpu(ctrl_req->wValue); return 0; } diff --git a/drivers/usb/cdns3/gadget.c b/drivers/usb/cdns3/gadget.c index 32b368a5f01c..dc9af5980f4c 100644 --- a/drivers/usb/cdns3/gadget.c +++ b/drivers/usb/cdns3/gadget.c @@ -189,10 +189,10 @@ int cdns3_allocate_trb_pool(struct cdns3_endpoint *priv_ep) GFP_DMA); if (!priv_ep->trb_pool) return -ENOMEM; - } else { - memset(priv_ep->trb_pool, 0, ring_size); } + memset(priv_ep->trb_pool, 0, ring_size); + if (!priv_ep->num) return 0; diff --git a/drivers/usb/cdns3/trace.h b/drivers/usb/cdns3/trace.h index c7ee01f4bfce..0e883ac5c5c5 100644 --- a/drivers/usb/cdns3/trace.h +++ b/drivers/usb/cdns3/trace.h @@ -286,9 +286,9 @@ DECLARE_EVENT_CLASS(cdns3_log_trb, TP_fast_assign( __assign_str(name, priv_ep->name); __entry->trb = trb; - __entry->buffer = trb->buffer; - __entry->length = trb->length; - __entry->control = trb->control; + __entry->buffer = le32_to_cpu(trb->buffer); + __entry->length = le32_to_cpu(trb->length); + __entry->control = le32_to_cpu(trb->control); __entry->type = usb_endpoint_type(priv_ep->endpoint.desc); ), TP_printk("%s: trb %p, dma buf: 0x%08x, size: %ld, burst: %d ctrl: 0x%08x (%s%s%s%s%s%s%s)", diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c index 579e65ababbd..71cbfdad727c 100644 --- a/drivers/usb/chipidea/core.c +++ b/drivers/usb/chipidea/core.c @@ -1361,6 +1361,29 @@ static void ci_controller_suspend(struct ci_hdrc *ci) enable_irq(ci->irq); } +/* + * Handle the wakeup interrupt triggered by extcon connector + * We need to call ci_irq again for extcon since the first + * interrupt (wakeup int) only let the controller be out of + * low power mode, but not handle any interrupts. 
+ */ +static void ci_extcon_wakeup_int(struct ci_hdrc *ci) +{ + struct ci_hdrc_cable *cable_id, *cable_vbus; + u32 otgsc = hw_read_otgsc(ci, ~0); + + cable_id = &ci->platdata->id_extcon; + cable_vbus = &ci->platdata->vbus_extcon; + + if (!IS_ERR(cable_id->edev) && ci->is_otg && + (otgsc & OTGSC_IDIE) && (otgsc & OTGSC_IDIS)) + ci_irq(ci->irq, ci); + + if (!IS_ERR(cable_vbus->edev) && ci->is_otg && + (otgsc & OTGSC_BSVIE) && (otgsc & OTGSC_BSVIS)) + ci_irq(ci->irq, ci); +} + static int ci_controller_resume(struct device *dev) { struct ci_hdrc *ci = dev_get_drvdata(dev); @@ -1391,6 +1414,7 @@ static int ci_controller_resume(struct device *dev) enable_irq(ci->irq); if (ci_otg_is_fsm_mode(ci)) ci_otg_fsm_wakeup_by_srp(ci); + ci_extcon_wakeup_int(ci); } return 0; diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index f67088bb8218..7499ba118665 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c @@ -378,21 +378,19 @@ static void acm_ctrl_irq(struct urb *urb) if (current_size < expected_size) { /* notification is transmitted fragmented, reassemble */ if (acm->nb_size < expected_size) { - if (acm->nb_size) { - kfree(acm->notification_buffer); - acm->nb_size = 0; - } + u8 *new_buffer; alloc_size = roundup_pow_of_two(expected_size); - /* - * kmalloc ensures a valid notification_buffer after a - * use of kfree in case the previous allocation was too - * small. Final freeing is done on disconnect. - */ - acm->notification_buffer = - kmalloc(alloc_size, GFP_ATOMIC); - if (!acm->notification_buffer) + /* Final freeing is done on disconnect. */ + new_buffer = krealloc(acm->notification_buffer, + alloc_size, GFP_ATOMIC); + if (!new_buffer) { + acm->nb_index = 0; goto exit; + } + + acm->notification_buffer = new_buffer; acm->nb_size = alloc_size; + dr = (struct usb_cdc_notification *)acm->notification_buffer; } copy_size = min(current_size, @@ -1689,6 +1687,8 @@ static int acm_pre_reset(struct usb_interface *intf) static const struct usb_device_id acm_ids[] = { /* quirky and broken devices */ + { USB_DEVICE(0x0424, 0x274e), /* Microchip Technology, Inc. 
(formerly SMSC) */ + .driver_info = DISABLE_ECHO, }, /* DISABLE ECHO in termios flag */ { USB_DEVICE(0x076d, 0x0006), /* Denso Cradle CU-321 */ .driver_info = NO_UNION_NORMAL, },/* has no union descriptor */ { USB_DEVICE(0x17ef, 0x7000), /* Lenovo USB modem */ diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c index 0d8e3f3804a3..67cbd42421be 100644 --- a/drivers/usb/class/usblp.c +++ b/drivers/usb/class/usblp.c @@ -468,7 +468,8 @@ static int usblp_release(struct inode *inode, struct file *file) usb_autopm_put_interface(usblp->intf); if (!usblp->present) /* finish cleanup from disconnect */ - usblp_cleanup(usblp); + usblp_cleanup(usblp); /* any URBs must be dead */ + mutex_unlock(&usblp_mutex); return 0; } @@ -826,6 +827,11 @@ static ssize_t usblp_read(struct file *file, char __user *buffer, size_t len, lo if (rv < 0) return rv; + if (!usblp->present) { + count = -ENODEV; + goto done; + } + if ((avail = usblp->rstatus) < 0) { printk(KERN_ERR "usblp%d: error %d reading from printer\n", usblp->minor, (int)avail); @@ -1375,9 +1381,11 @@ static void usblp_disconnect(struct usb_interface *intf) usblp_unlink_urbs(usblp); mutex_unlock(&usblp->mut); + usb_poison_anchored_urbs(&usblp->urbs); if (!usblp->used) usblp_cleanup(usblp); + mutex_unlock(&usblp_mutex); } diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c index a1ac2f0723b0..041c68ea329f 100644 --- a/drivers/usb/core/message.c +++ b/drivers/usb/core/message.c @@ -1204,6 +1204,34 @@ void usb_disable_interface(struct usb_device *dev, struct usb_interface *intf, } } +/* + * usb_disable_device_endpoints -- Disable all endpoints for a device + * @dev: the device whose endpoints are being disabled + * @skip_ep0: 0 to disable endpoint 0, 1 to skip it. + */ +static void usb_disable_device_endpoints(struct usb_device *dev, int skip_ep0) +{ + struct usb_hcd *hcd = bus_to_hcd(dev->bus); + int i; + + if (hcd->driver->check_bandwidth) { + /* First pass: Cancel URBs, leave endpoint pointers intact. */ + for (i = skip_ep0; i < 16; ++i) { + usb_disable_endpoint(dev, i, false); + usb_disable_endpoint(dev, i + USB_DIR_IN, false); + } + /* Remove endpoints from the host controller internal state */ + mutex_lock(hcd->bandwidth_mutex); + usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL); + mutex_unlock(hcd->bandwidth_mutex); + } + /* Second pass: remove endpoint pointers */ + for (i = skip_ep0; i < 16; ++i) { + usb_disable_endpoint(dev, i, true); + usb_disable_endpoint(dev, i + USB_DIR_IN, true); + } +} + /** * usb_disable_device - Disable all the endpoints for a USB device * @dev: the device whose endpoints are being disabled @@ -1217,7 +1245,6 @@ void usb_disable_interface(struct usb_device *dev, struct usb_interface *intf, void usb_disable_device(struct usb_device *dev, int skip_ep0) { int i; - struct usb_hcd *hcd = bus_to_hcd(dev->bus); /* getting rid of interfaces will disconnect * any drivers bound to them (a key side effect) @@ -1263,22 +1290,8 @@ void usb_disable_device(struct usb_device *dev, int skip_ep0) dev_dbg(&dev->dev, "%s nuking %s URBs\n", __func__, skip_ep0 ? "non-ep0" : "all"); - if (hcd->driver->check_bandwidth) { - /* First pass: Cancel URBs, leave endpoint pointers intact. 
*/ - for (i = skip_ep0; i < 16; ++i) { - usb_disable_endpoint(dev, i, false); - usb_disable_endpoint(dev, i + USB_DIR_IN, false); - } - /* Remove endpoints from the host controller internal state */ - mutex_lock(hcd->bandwidth_mutex); - usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL); - mutex_unlock(hcd->bandwidth_mutex); - /* Second pass: remove endpoint pointers */ - } - for (i = skip_ep0; i < 16; ++i) { - usb_disable_endpoint(dev, i, true); - usb_disable_endpoint(dev, i + USB_DIR_IN, true); - } + + usb_disable_device_endpoints(dev, skip_ep0); } /** @@ -1521,6 +1534,9 @@ EXPORT_SYMBOL_GPL(usb_set_interface); * The caller must own the device lock. * * Return: Zero on success, else a negative error code. + * + * If this routine fails the device will probably be in an unusable state + * with endpoints disabled, and interfaces only partially enabled. */ int usb_reset_configuration(struct usb_device *dev) { @@ -1536,10 +1552,7 @@ int usb_reset_configuration(struct usb_device *dev) * calls during probe() are fine */ - for (i = 1; i < 16; ++i) { - usb_disable_endpoint(dev, i, true); - usb_disable_endpoint(dev, i + USB_DIR_IN, true); - } + usb_disable_device_endpoints(dev, 1); /* skip ep0*/ config = dev->actconfig; retval = 0; @@ -1552,34 +1565,10 @@ int usb_reset_configuration(struct usb_device *dev) mutex_unlock(hcd->bandwidth_mutex); return -ENOMEM; } - /* Make sure we have enough bandwidth for each alternate setting 0 */ - for (i = 0; i < config->desc.bNumInterfaces; i++) { - struct usb_interface *intf = config->interface[i]; - struct usb_host_interface *alt; - alt = usb_altnum_to_altsetting(intf, 0); - if (!alt) - alt = &intf->altsetting[0]; - if (alt != intf->cur_altsetting) - retval = usb_hcd_alloc_bandwidth(dev, NULL, - intf->cur_altsetting, alt); - if (retval < 0) - break; - } - /* If not, reinstate the old alternate settings */ + /* xHCI adds all endpoints in usb_hcd_alloc_bandwidth */ + retval = usb_hcd_alloc_bandwidth(dev, config, NULL, NULL); if (retval < 0) { -reset_old_alts: - for (i--; i >= 0; i--) { - struct usb_interface *intf = config->interface[i]; - struct usb_host_interface *alt; - - alt = usb_altnum_to_altsetting(intf, 0); - if (!alt) - alt = &intf->altsetting[0]; - if (alt != intf->cur_altsetting) - usb_hcd_alloc_bandwidth(dev, NULL, - alt, intf->cur_altsetting); - } usb_enable_lpm(dev); mutex_unlock(hcd->bandwidth_mutex); return retval; @@ -1588,8 +1577,12 @@ reset_old_alts: USB_REQ_SET_CONFIGURATION, 0, config->desc.bConfigurationValue, 0, NULL, 0, USB_CTRL_SET_TIMEOUT); - if (retval < 0) - goto reset_old_alts; + if (retval < 0) { + usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL); + usb_enable_lpm(dev); + mutex_unlock(hcd->bandwidth_mutex); + return retval; + } mutex_unlock(hcd->bandwidth_mutex); /* re-init hc/hcd interface/endpoint state */ diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index 3e8efe759c3e..4ee810531098 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ -25,17 +25,23 @@ static unsigned int quirk_count; static char quirks_param[128]; -static int quirks_param_set(const char *val, const struct kernel_param *kp) +static int quirks_param_set(const char *value, const struct kernel_param *kp) { - char *p, *field; + char *val, *p, *field; u16 vid, pid; u32 flags; size_t i; int err; + val = kstrdup(value, GFP_KERNEL); + if (!val) + return -ENOMEM; + err = param_set_copystring(val, kp); - if (err) + if (err) { + kfree(val); return err; + } mutex_lock(&quirk_mutex); @@ -60,10 +66,11 @@ static int quirks_param_set(const char 
*val, const struct kernel_param *kp) if (!quirk_list) { quirk_count = 0; mutex_unlock(&quirk_mutex); + kfree(val); return -ENOMEM; } - for (i = 0, p = (char *)val; p && *p;) { + for (i = 0, p = val; p && *p;) { /* Each entry consists of VID:PID:flags */ field = strsep(&p, ":"); if (!field) @@ -144,6 +151,7 @@ static int quirks_param_set(const char *val, const struct kernel_param *kp) unlock: mutex_unlock(&quirk_mutex); + kfree(val); return 0; } @@ -218,11 +226,12 @@ static const struct usb_device_id usb_quirk_list[] = { /* Logitech HD Webcam C270 */ { USB_DEVICE(0x046d, 0x0825), .driver_info = USB_QUIRK_RESET_RESUME }, - /* Logitech HD Pro Webcams C920, C920-C, C925e and C930e */ + /* Logitech HD Pro Webcams C920, C920-C, C922, C925e and C930e */ { USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT }, { USB_DEVICE(0x046d, 0x0841), .driver_info = USB_QUIRK_DELAY_INIT }, { USB_DEVICE(0x046d, 0x0843), .driver_info = USB_QUIRK_DELAY_INIT }, { USB_DEVICE(0x046d, 0x085b), .driver_info = USB_QUIRK_DELAY_INIT }, + { USB_DEVICE(0x046d, 0x085c), .driver_info = USB_QUIRK_DELAY_INIT }, /* Logitech ConferenceCam CC3000e */ { USB_DEVICE(0x046d, 0x0847), .driver_info = USB_QUIRK_DELAY_INIT }, @@ -361,6 +370,10 @@ static const struct usb_device_id usb_quirk_list[] = { { USB_DEVICE(0x0926, 0x0202), .driver_info = USB_QUIRK_ENDPOINT_BLACKLIST }, + /* Sound Devices MixPre-D */ + { USB_DEVICE(0x0926, 0x0208), .driver_info = + USB_QUIRK_ENDPOINT_BLACKLIST }, + /* Keytouch QWERTY Panel keyboard */ { USB_DEVICE(0x0926, 0x3333), .driver_info = USB_QUIRK_CONFIG_INTF_STRINGS }, @@ -384,6 +397,10 @@ static const struct usb_device_id usb_quirk_list[] = { /* Generic RTL8153 based ethernet adapters */ { USB_DEVICE(0x0bda, 0x8153), .driver_info = USB_QUIRK_NO_LPM }, + /* SONiX USB DEVICE Touchpad */ + { USB_DEVICE(0x0c45, 0x7056), .driver_info = + USB_QUIRK_IGNORE_REMOTE_WAKEUP }, + /* Action Semiconductor flash disk */ { USB_DEVICE(0x10d6, 0x2200), .driver_info = USB_QUIRK_STRING_FETCH_255 }, @@ -456,6 +473,8 @@ static const struct usb_device_id usb_quirk_list[] = { { USB_DEVICE(0x2386, 0x3119), .driver_info = USB_QUIRK_NO_LPM }, + { USB_DEVICE(0x2386, 0x350e), .driver_info = USB_QUIRK_NO_LPM }, + /* DJI CineSSD */ { USB_DEVICE(0x2ca3, 0x0031), .driver_info = USB_QUIRK_NO_LPM }, @@ -500,6 +519,7 @@ static const struct usb_device_id usb_amd_resume_quirk_list[] = { */ static const struct usb_device_id usb_endpoint_blacklist[] = { { USB_DEVICE_INTERFACE_NUMBER(0x0926, 0x0202, 1), .driver_info = 0x85 }, + { USB_DEVICE_INTERFACE_NUMBER(0x0926, 0x0208, 1), .driver_info = 0x85 }, { } }; diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c index f19694e69f5c..2f594c88d905 100644 --- a/drivers/usb/core/sysfs.c +++ b/drivers/usb/core/sysfs.c @@ -889,7 +889,11 @@ read_descriptors(struct file *filp, struct kobject *kobj, size_t srclen, n; int cfgno; void *src; + int retval; + retval = usb_lock_device_interruptible(udev); + if (retval < 0) + return -EINTR; /* The binary attribute begins with the device descriptor. * Following that are the raw descriptor entries for all the * configurations (config plus subsidiary descriptors). 
@@ -914,6 +918,7 @@ read_descriptors(struct file *filp, struct kobject *kobj, off -= srclen; } } + usb_unlock_device(udev); return count - nleft; } diff --git a/drivers/usb/dwc2/core_intr.c b/drivers/usb/dwc2/core_intr.c index 6af6add3d4c0..6272b4ae4740 100644 --- a/drivers/usb/dwc2/core_intr.c +++ b/drivers/usb/dwc2/core_intr.c @@ -421,10 +421,13 @@ static void dwc2_handle_wakeup_detected_intr(struct dwc2_hsotg *hsotg) if (ret && (ret != -ENOTSUPP)) dev_err(hsotg->dev, "exit power_down failed\n"); + /* Change to L0 state */ + hsotg->lx_state = DWC2_L0; call_gadget(hsotg, resume); + } else { + /* Change to L0 state */ + hsotg->lx_state = DWC2_L0; } - /* Change to L0 state */ - hsotg->lx_state = DWC2_L0; } else { if (hsotg->params.power_down) return; diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c index 7fd0900a9cb0..f7528f732b2a 100644 --- a/drivers/usb/dwc2/gadget.c +++ b/drivers/usb/dwc2/gadget.c @@ -4886,12 +4886,6 @@ int dwc2_gadget_init(struct dwc2_hsotg *hsotg) epnum, 0); } - ret = usb_add_gadget_udc(dev, &hsotg->gadget); - if (ret) { - dwc2_hsotg_ep_free_request(&hsotg->eps_out[0]->ep, - hsotg->ctrl_req); - return ret; - } dwc2_hsotg_dump(hsotg); return 0; diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c index 3c6ce09a6db5..34bb6124f1e2 100644 --- a/drivers/usb/dwc2/platform.c +++ b/drivers/usb/dwc2/platform.c @@ -337,7 +337,8 @@ static void dwc2_driver_shutdown(struct platform_device *dev) { struct dwc2_hsotg *hsotg = platform_get_drvdata(dev); - disable_irq(hsotg->irq); + dwc2_disable_global_interrupts(hsotg); + synchronize_irq(hsotg->irq); } /** @@ -507,10 +508,23 @@ static int dwc2_driver_probe(struct platform_device *dev) if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL) dwc2_lowlevel_hw_disable(hsotg); +#if IS_ENABLED(CONFIG_USB_DWC2_PERIPHERAL) || \ + IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE) + /* Postponed adding a new gadget to the udc class driver list */ + if (hsotg->gadget_enabled) { + retval = usb_add_gadget_udc(hsotg->dev, &hsotg->gadget); + if (retval) { + hsotg->gadget.udc = NULL; + dwc2_hsotg_remove(hsotg); + goto error; + } + } +#endif /* CONFIG_USB_DWC2_PERIPHERAL || CONFIG_USB_DWC2_DUAL_ROLE */ return 0; error: - dwc2_lowlevel_hw_disable(hsotg); + if (hsotg->dr_mode != USB_DR_MODE_PERIPHERAL) + dwc2_lowlevel_hw_disable(hsotg); return retval; } diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c index b67372737dc9..139474c3e77b 100644 --- a/drivers/usb/dwc3/dwc3-pci.c +++ b/drivers/usb/dwc3/dwc3-pci.c @@ -38,6 +38,8 @@ #define PCI_DEVICE_ID_INTEL_ICLLP 0x34ee #define PCI_DEVICE_ID_INTEL_EHLLP 0x4b7e #define PCI_DEVICE_ID_INTEL_TGPLP 0xa0ee +#define PCI_DEVICE_ID_INTEL_TGPH 0x43ee +#define PCI_DEVICE_ID_INTEL_JSP 0x4dee #define PCI_INTEL_BXT_DSM_GUID "732b85d5-b7a7-4a1b-9ba0-4bbd00ffd511" #define PCI_INTEL_BXT_FUNC_PMU_PWR 4 @@ -206,8 +208,10 @@ static void dwc3_pci_resume_work(struct work_struct *work) int ret; ret = pm_runtime_get_sync(&dwc3->dev); - if (ret) + if (ret) { + pm_runtime_put_sync_autosuspend(&dwc3->dev); return; + } pm_runtime_mark_last_busy(&dwc3->dev); pm_runtime_put_sync_autosuspend(&dwc3->dev); @@ -356,6 +360,12 @@ static const struct pci_device_id dwc3_pci_id_table[] = { { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGPLP), (kernel_ulong_t) &dwc3_pci_intel_properties, }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGPH), + (kernel_ulong_t) &dwc3_pci_intel_properties, }, + + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_JSP), + (kernel_ulong_t) &dwc3_pci_intel_properties, }, + { PCI_VDEVICE(AMD, 
PCI_DEVICE_ID_AMD_NL_USB), (kernel_ulong_t) &dwc3_pci_amd_properties, }, { } /* Terminating Entry */ diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 0220919fa3d0..48daeda60510 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -1021,26 +1021,24 @@ static void __dwc3_prepare_one_trb(struct dwc3_ep *dep, struct dwc3_trb *trb, * dwc3_prepare_one_trb - setup one TRB from one request * @dep: endpoint for which this request is prepared * @req: dwc3_request pointer + * @trb_length: buffer size of the TRB * @chain: should this TRB be chained to the next? * @node: only for isochronous endpoints. First TRB needs different type. */ static void dwc3_prepare_one_trb(struct dwc3_ep *dep, - struct dwc3_request *req, unsigned chain, unsigned node) + struct dwc3_request *req, unsigned int trb_length, + unsigned chain, unsigned node) { struct dwc3_trb *trb; - unsigned int length; dma_addr_t dma; unsigned stream_id = req->request.stream_id; unsigned short_not_ok = req->request.short_not_ok; unsigned no_interrupt = req->request.no_interrupt; - if (req->request.num_sgs > 0) { - length = sg_dma_len(req->start_sg); + if (req->request.num_sgs > 0) dma = sg_dma_address(req->start_sg); - } else { - length = req->request.length; + else dma = req->request.dma; - } trb = &dep->trb_pool[dep->trb_enqueue]; @@ -1052,7 +1050,7 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep, req->num_trbs++; - __dwc3_prepare_one_trb(dep, trb, dma, length, chain, node, + __dwc3_prepare_one_trb(dep, trb, dma, trb_length, chain, node, stream_id, short_not_ok, no_interrupt); } @@ -1062,16 +1060,27 @@ static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep, struct scatterlist *sg = req->start_sg; struct scatterlist *s; int i; - + unsigned int length = req->request.length; unsigned int remaining = req->request.num_mapped_sgs - req->num_queued_sgs; + /* + * If we resume preparing the request, then get the remaining length of + * the request and resume where we left off. + */ + for_each_sg(req->request.sg, s, req->num_queued_sgs, i) + length -= sg_dma_len(s); + for_each_sg(sg, s, remaining, i) { - unsigned int length = req->request.length; unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc); unsigned int rem = length % maxp; + unsigned int trb_length; unsigned chain = true; + trb_length = min_t(unsigned int, length, sg_dma_len(s)); + + length -= trb_length; + /* * IOMMU driver is coalescing the list of sgs which shares a * page boundary into one and giving it to USB driver. With @@ -1079,7 +1088,7 @@ static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep, * sgs passed. So mark the chain bit to false if it isthe last * mapped sg. 
*/ - if (i == remaining - 1) + if ((i == remaining - 1) || !length) chain = false; if (rem && usb_endpoint_dir_out(dep->endpoint.desc) && !chain) { @@ -1089,7 +1098,7 @@ static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep, req->needs_extra_trb = true; /* prepare normal TRB */ - dwc3_prepare_one_trb(dep, req, true, i); + dwc3_prepare_one_trb(dep, req, trb_length, true, i); /* Now prepare one extra TRB to align transfer size */ trb = &dep->trb_pool[dep->trb_enqueue]; @@ -1099,8 +1108,37 @@ static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep, req->request.stream_id, req->request.short_not_ok, req->request.no_interrupt); + } else if (req->request.zero && req->request.length && + !usb_endpoint_xfer_isoc(dep->endpoint.desc) && + !rem && !chain) { + struct dwc3 *dwc = dep->dwc; + struct dwc3_trb *trb; + + req->needs_extra_trb = true; + + /* Prepare normal TRB */ + dwc3_prepare_one_trb(dep, req, trb_length, true, i); + + /* Prepare one extra TRB to handle ZLP */ + trb = &dep->trb_pool[dep->trb_enqueue]; + req->num_trbs++; + __dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, 0, + !req->direction, 1, + req->request.stream_id, + req->request.short_not_ok, + req->request.no_interrupt); + + /* Prepare one more TRB to handle MPS alignment */ + if (!req->direction) { + trb = &dep->trb_pool[dep->trb_enqueue]; + req->num_trbs++; + __dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, maxp, + false, 1, req->request.stream_id, + req->request.short_not_ok, + req->request.no_interrupt); + } } else { - dwc3_prepare_one_trb(dep, req, chain, i); + dwc3_prepare_one_trb(dep, req, trb_length, chain, i); } /* @@ -1115,6 +1153,16 @@ static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep, req->num_queued_sgs++; + /* + * The number of pending SG entries may not correspond to the + * number of mapped SG entries. If all the data are queued, then + * don't include unused SG entries. 
+ */ + if (length == 0) { + req->num_pending_sgs -= req->request.num_mapped_sgs - req->num_queued_sgs; + break; + } + if (!dwc3_calc_trbs_left(dep)) break; } @@ -1134,7 +1182,7 @@ static void dwc3_prepare_one_trb_linear(struct dwc3_ep *dep, req->needs_extra_trb = true; /* prepare normal TRB */ - dwc3_prepare_one_trb(dep, req, true, 0); + dwc3_prepare_one_trb(dep, req, length, true, 0); /* Now prepare one extra TRB to align transfer size */ trb = &dep->trb_pool[dep->trb_enqueue]; @@ -1144,6 +1192,7 @@ static void dwc3_prepare_one_trb_linear(struct dwc3_ep *dep, req->request.short_not_ok, req->request.no_interrupt); } else if (req->request.zero && req->request.length && + !usb_endpoint_xfer_isoc(dep->endpoint.desc) && (IS_ALIGNED(req->request.length, maxp))) { struct dwc3 *dwc = dep->dwc; struct dwc3_trb *trb; @@ -1151,17 +1200,27 @@ static void dwc3_prepare_one_trb_linear(struct dwc3_ep *dep, req->needs_extra_trb = true; /* prepare normal TRB */ - dwc3_prepare_one_trb(dep, req, true, 0); + dwc3_prepare_one_trb(dep, req, length, true, 0); - /* Now prepare one extra TRB to handle ZLP */ + /* Prepare one extra TRB to handle ZLP */ trb = &dep->trb_pool[dep->trb_enqueue]; req->num_trbs++; __dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, 0, - false, 1, req->request.stream_id, + !req->direction, 1, req->request.stream_id, req->request.short_not_ok, req->request.no_interrupt); + + /* Prepare one more TRB to handle MPS alignment for OUT */ + if (!req->direction) { + trb = &dep->trb_pool[dep->trb_enqueue]; + req->num_trbs++; + __dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, maxp, + false, 1, req->request.stream_id, + req->request.short_not_ok, + req->request.no_interrupt); + } } else { - dwc3_prepare_one_trb(dep, req, false, 0); + dwc3_prepare_one_trb(dep, req, length, false, 0); } } @@ -1221,6 +1280,8 @@ static void dwc3_prepare_trbs(struct dwc3_ep *dep) } } +static void dwc3_gadget_ep_cleanup_cancelled_requests(struct dwc3_ep *dep); + static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep) { struct dwc3_gadget_ep_cmd_params params; @@ -1260,14 +1321,20 @@ static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep) ret = dwc3_send_gadget_ep_cmd(dep, cmd, ¶ms); if (ret < 0) { - /* - * FIXME we need to iterate over the list of requests - * here and stop, unmap, free and del each of the linked - * requests instead of what we do now. - */ - if (req->trb) - memset(req->trb, 0, sizeof(struct dwc3_trb)); - dwc3_gadget_del_and_unmap_request(dep, req, ret); + struct dwc3_request *tmp; + + if (ret == -EAGAIN) + return ret; + + dwc3_stop_active_transfer(dep, true, true); + + list_for_each_entry_safe(req, tmp, &dep->started_list, list) + dwc3_gadget_move_cancelled_request(req); + + /* If ep isn't started, then there's no end transfer pending */ + if (!(dep->flags & DWC3_EP_END_TRANSFER_PENDING)) + dwc3_gadget_ep_cleanup_cancelled_requests(dep); + return ret; } @@ -1509,6 +1576,10 @@ static void dwc3_gadget_ep_skip_trbs(struct dwc3_ep *dep, struct dwc3_request *r { int i; + /* If req->trb is not set, then the request has not started */ + if (!req->trb) + return; + /* * If request was already started, this means we had to * stop the transfer. 
With that we also need to ignore @@ -1599,6 +1670,8 @@ int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol) { struct dwc3_gadget_ep_cmd_params params; struct dwc3 *dwc = dep->dwc; + struct dwc3_request *req; + struct dwc3_request *tmp; int ret; if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) { @@ -1635,13 +1708,37 @@ int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol) else dep->flags |= DWC3_EP_STALL; } else { + /* + * Don't issue CLEAR_STALL command to control endpoints. The + * controller automatically clears the STALL when it receives + * the SETUP token. + */ + if (dep->number <= 1) { + dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE); + return 0; + } ret = dwc3_send_clear_stall_ep_cmd(dep); - if (ret) + if (ret) { dev_err(dwc->dev, "failed to clear STALL on %s\n", dep->name); - else - dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE); + return ret; + } + + dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE); + + dwc3_stop_active_transfer(dep, true, true); + + list_for_each_entry_safe(req, tmp, &dep->started_list, list) + dwc3_gadget_move_cancelled_request(req); + + list_for_each_entry_safe(req, tmp, &dep->pending_list, list) + dwc3_gadget_move_cancelled_request(req); + + if (!(dep->flags & DWC3_EP_END_TRANSFER_PENDING)) { + dep->flags &= ~DWC3_EP_DELAY_START; + dwc3_gadget_ep_cleanup_cancelled_requests(dep); + } } return ret; @@ -2532,8 +2629,17 @@ static int dwc3_gadget_ep_cleanup_completed_request(struct dwc3_ep *dep, status); if (req->needs_extra_trb) { + unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc); + ret = dwc3_gadget_ep_reclaim_trb_linear(dep, req, event, status); + + /* Reclaim MPS padding TRB for ZLP */ + if (!req->direction && req->request.zero && req->request.length && + !usb_endpoint_xfer_isoc(dep->endpoint.desc) && + (IS_ALIGNED(req->request.length, maxp))) + ret = dwc3_gadget_ep_reclaim_trb_linear(dep, req, event, status); + req->needs_extra_trb = false; } diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c index d98ca1566e95..f75ff1a75dc4 100644 --- a/drivers/usb/gadget/composite.c +++ b/drivers/usb/gadget/composite.c @@ -96,40 +96,43 @@ function_descriptors(struct usb_function *f, } /** - * next_ep_desc() - advance to the next EP descriptor + * next_desc() - advance to the next desc_type descriptor * @t: currect pointer within descriptor array + * @desc_type: descriptor type * - * Return: next EP descriptor or NULL + * Return: next desc_type descriptor or NULL * - * Iterate over @t until either EP descriptor found or + * Iterate over @t until either desc_type descriptor found or * NULL (that indicates end of list) encountered */ static struct usb_descriptor_header** -next_ep_desc(struct usb_descriptor_header **t) +next_desc(struct usb_descriptor_header **t, u8 desc_type) { for (; *t; t++) { - if ((*t)->bDescriptorType == USB_DT_ENDPOINT) + if ((*t)->bDescriptorType == desc_type) return t; } return NULL; } /* - * for_each_ep_desc()- iterate over endpoint descriptors in the - * descriptors list - * @start: pointer within descriptor array. - * @ep_desc: endpoint descriptor to use as the loop cursor + * for_each_desc() - iterate over desc_type descriptors in the + * descriptors list + * @start: pointer within descriptor array. 
+ * @iter_desc: desc_type descriptor to use as the loop cursor + * @desc_type: wanted descriptor type */ -#define for_each_ep_desc(start, ep_desc) \ - for (ep_desc = next_ep_desc(start); \ - ep_desc; ep_desc = next_ep_desc(ep_desc+1)) +#define for_each_desc(start, iter_desc, desc_type) \ + for (iter_desc = next_desc(start, desc_type); \ + iter_desc; iter_desc = next_desc(iter_desc + 1, desc_type)) /** - * config_ep_by_speed() - configures the given endpoint + * config_ep_by_speed_and_alt() - configures the given endpoint * according to gadget speed. * @g: pointer to the gadget * @f: usb function * @_ep: the endpoint to configure + * @alt: alternate setting number * * Return: error code, 0 on success * @@ -142,11 +145,13 @@ next_ep_desc(struct usb_descriptor_header **t) * Note: the supplied function should hold all the descriptors * for supported speeds */ -int config_ep_by_speed(struct usb_gadget *g, - struct usb_function *f, - struct usb_ep *_ep) +int config_ep_by_speed_and_alt(struct usb_gadget *g, + struct usb_function *f, + struct usb_ep *_ep, + u8 alt) { struct usb_endpoint_descriptor *chosen_desc = NULL; + struct usb_interface_descriptor *int_desc = NULL; struct usb_descriptor_header **speed_desc = NULL; struct usb_ss_ep_comp_descriptor *comp_desc = NULL; @@ -182,8 +187,21 @@ int config_ep_by_speed(struct usb_gadget *g, default: speed_desc = f->fs_descriptors; } + + /* find correct alternate setting descriptor */ + for_each_desc(speed_desc, d_spd, USB_DT_INTERFACE) { + int_desc = (struct usb_interface_descriptor *)*d_spd; + + if (int_desc->bAlternateSetting == alt) { + speed_desc = d_spd; + goto intf_found; + } + } + return -EIO; + +intf_found: /* find descriptors */ - for_each_ep_desc(speed_desc, d_spd) { + for_each_desc(speed_desc, d_spd, USB_DT_ENDPOINT) { chosen_desc = (struct usb_endpoint_descriptor *)*d_spd; if (chosen_desc->bEndpointAddress == _ep->address) goto ep_found; @@ -237,6 +255,32 @@ ep_found: } return 0; } +EXPORT_SYMBOL_GPL(config_ep_by_speed_and_alt); + +/** + * config_ep_by_speed() - configures the given endpoint + * according to gadget speed. + * @g: pointer to the gadget + * @f: usb function + * @_ep: the endpoint to configure + * + * Return: error code, 0 on success + * + * This function chooses the right descriptors for a given + * endpoint according to gadget speed and saves it in the + * endpoint desc field. If the endpoint already has a descriptor + * assigned to it - overwrites it with currently corresponding + * descriptor. The endpoint maxpacket field is updated according + * to the chosen descriptor.
+ * Note: the supplied function should hold all the descriptors + * for supported speeds + */ +int config_ep_by_speed(struct usb_gadget *g, + struct usb_function *f, + struct usb_ep *_ep) +{ + return config_ep_by_speed_and_alt(g, f, _ep, 0); +} EXPORT_SYMBOL_GPL(config_ep_by_speed); /** diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c index 1d900081b1f0..b4206b0dede5 100644 --- a/drivers/usb/gadget/function/f_ncm.c +++ b/drivers/usb/gadget/function/f_ncm.c @@ -1181,12 +1181,15 @@ static int ncm_unwrap_ntb(struct gether *port, int ndp_index; unsigned dg_len, dg_len2; unsigned ndp_len; + unsigned block_len; struct sk_buff *skb2; int ret = -EINVAL; - unsigned max_size = le32_to_cpu(ntb_parameters.dwNtbOutMaxSize); + unsigned ntb_max = le32_to_cpu(ntb_parameters.dwNtbOutMaxSize); + unsigned frame_max = le16_to_cpu(ecm_desc.wMaxSegmentSize); const struct ndp_parser_opts *opts = ncm->parser_opts; unsigned crc_len = ncm->is_crc ? sizeof(uint32_t) : 0; int dgram_counter; + bool ndp_after_header; /* dwSignature */ if (get_unaligned_le32(tmp) != opts->nth_sign) { @@ -1205,25 +1208,37 @@ static int ncm_unwrap_ntb(struct gether *port, } tmp++; /* skip wSequence */ + block_len = get_ncm(&tmp, opts->block_length); /* (d)wBlockLength */ - if (get_ncm(&tmp, opts->block_length) > max_size) { + if (block_len > ntb_max) { INFO(port->func.config->cdev, "OUT size exceeded\n"); goto err; } ndp_index = get_ncm(&tmp, opts->ndp_index); + ndp_after_header = false; /* Run through all the NDP's in the NTB */ do { - /* NCM 3.2 */ - if (((ndp_index % 4) != 0) && - (ndp_index < opts->nth_size)) { + /* + * NCM 3.2 + * dwNdpIndex + */ + if (((ndp_index % 4) != 0) || + (ndp_index < opts->nth_size) || + (ndp_index > (block_len - + opts->ndp_size))) { INFO(port->func.config->cdev, "Bad index: %#X\n", ndp_index); goto err; } + if (ndp_index == opts->nth_size) + ndp_after_header = true; - /* walk through NDP */ + /* + * walk through NDP + * dwSignature + */ tmp = (void *)(skb->data + ndp_index); if (get_unaligned_le32(tmp) != ncm->ndp_sign) { INFO(port->func.config->cdev, "Wrong NDP SIGN\n"); @@ -1234,14 +1249,15 @@ static int ncm_unwrap_ntb(struct gether *port, ndp_len = get_unaligned_le16(tmp++); /* * NCM 3.3.1 + * wLength * entry is 2 items * item size is 16/32 bits, opts->dgram_item_len * 2 bytes * minimal: struct usb_cdc_ncm_ndpX + normal entry + zero entry * Each entry is a dgram index and a dgram length. 
*/ if ((ndp_len < opts->ndp_size - + 2 * 2 * (opts->dgram_item_len * 2)) - || (ndp_len % opts->ndplen_align != 0)) { + + 2 * 2 * (opts->dgram_item_len * 2)) || + (ndp_len % opts->ndplen_align != 0)) { INFO(port->func.config->cdev, "Bad NDP length: %#X\n", ndp_len); goto err; @@ -1258,8 +1274,21 @@ static int ncm_unwrap_ntb(struct gether *port, do { index = index2; + /* wDatagramIndex[0] */ + if ((index < opts->nth_size) || + (index > block_len - opts->dpe_size)) { + INFO(port->func.config->cdev, + "Bad index: %#X\n", index); + goto err; + } + dg_len = dg_len2; - if (dg_len < 14 + crc_len) { /* ethernet hdr + crc */ + /* + * wDatagramLength[0] + * ethernet hdr + crc or larger than max frame size + */ + if ((dg_len < 14 + crc_len) || + (dg_len > frame_max)) { INFO(port->func.config->cdev, "Bad dgram length: %#X\n", dg_len); goto err; @@ -1283,6 +1312,37 @@ static int ncm_unwrap_ntb(struct gether *port, index2 = get_ncm(&tmp, opts->dgram_item_len); dg_len2 = get_ncm(&tmp, opts->dgram_item_len); + if (index2 == 0 || dg_len2 == 0) + break; + + /* wDatagramIndex[1] */ + if (ndp_after_header) { + if (index2 < opts->nth_size + opts->ndp_size) { + INFO(port->func.config->cdev, + "Bad index: %#X\n", index2); + goto err; + } + } else { + if (index2 < opts->nth_size + opts->dpe_size) { + INFO(port->func.config->cdev, + "Bad index: %#X\n", index2); + goto err; + } + } + if (index2 > block_len - opts->dpe_size) { + INFO(port->func.config->cdev, + "Bad index: %#X\n", index2); + goto err; + } + + /* wDatagramLength[1] */ + if ((dg_len2 < 14 + crc_len) || + (dg_len2 > frame_max)) { + INFO(port->func.config->cdev, + "Bad dgram length: %#X\n", dg_len); + goto err; + } + /* * Copy the data into a new skb. * This ensures the truesize is correct @@ -1299,9 +1359,6 @@ static int ncm_unwrap_ntb(struct gether *port, ndp_len -= 2 * (opts->dgram_item_len * 2); dgram_counter++; - - if (index2 == 0 || dg_len2 == 0) - break; } while (ndp_len > 2 * (opts->dgram_item_len * 2)); } while (ndp_index); diff --git a/drivers/usb/gadget/function/f_tcm.c b/drivers/usb/gadget/function/f_tcm.c index 7f01f78b1d23..f6d203fec495 100644 --- a/drivers/usb/gadget/function/f_tcm.c +++ b/drivers/usb/gadget/function/f_tcm.c @@ -751,12 +751,13 @@ static int uasp_alloc_stream_res(struct f_uas *fu, struct uas_stream *stream) goto err_sts; return 0; + err_sts: - usb_ep_free_request(fu->ep_status, stream->req_status); - stream->req_status = NULL; -err_out: usb_ep_free_request(fu->ep_out, stream->req_out); stream->req_out = NULL; +err_out: + usb_ep_free_request(fu->ep_in, stream->req_in); + stream->req_in = NULL; out: return -ENOMEM; } diff --git a/drivers/usb/gadget/function/f_uac1_legacy.c b/drivers/usb/gadget/function/f_uac1_legacy.c index 6677ae932de0..06ee6e901808 100644 --- a/drivers/usb/gadget/function/f_uac1_legacy.c +++ b/drivers/usb/gadget/function/f_uac1_legacy.c @@ -336,7 +336,9 @@ static int f_audio_out_ep_complete(struct usb_ep *ep, struct usb_request *req) /* Copy buffer is full, add it to the play_queue */ if (audio_buf_size - copy_buf->actual < req->actual) { + spin_lock_irq(&audio->lock); list_add_tail(©_buf->list, &audio->play_queue); + spin_unlock_irq(&audio->lock); schedule_work(&audio->playback_work); copy_buf = f_audio_buffer_alloc(audio_buf_size); if (IS_ERR(copy_buf)) diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c index db2d4980cb35..3633df6d7610 100644 --- a/drivers/usb/gadget/function/f_uac2.c +++ b/drivers/usb/gadget/function/f_uac2.c @@ -215,10 +215,7 @@ static struct 
uac2_ac_header_descriptor ac_hdr_desc = { .bDescriptorSubtype = UAC_MS_HEADER, .bcdADC = cpu_to_le16(0x200), .bCategory = UAC2_FUNCTION_IO_BOX, - .wTotalLength = cpu_to_le16(sizeof in_clk_src_desc - + sizeof out_clk_src_desc + sizeof usb_out_it_desc - + sizeof io_in_it_desc + sizeof usb_in_ot_desc - + sizeof io_out_ot_desc), + /* .wTotalLength = DYNAMIC */ .bmControls = 0, }; @@ -501,7 +498,7 @@ static void setup_descriptor(struct f_uac2_opts *opts) as_in_hdr_desc.bTerminalLink = usb_in_ot_desc.bTerminalID; iad_desc.bInterfaceCount = 1; - ac_hdr_desc.wTotalLength = 0; + ac_hdr_desc.wTotalLength = cpu_to_le16(sizeof(ac_hdr_desc)); if (EPIN_EN(opts)) { u16 len = le16_to_cpu(ac_hdr_desc.wTotalLength); diff --git a/drivers/usb/gadget/u_f.h b/drivers/usb/gadget/u_f.h index eaa13fd3dc7f..e313c3b8dcb1 100644 --- a/drivers/usb/gadget/u_f.h +++ b/drivers/usb/gadget/u_f.h @@ -14,6 +14,7 @@ #define __U_F_H__ #include +#include /* Variable Length Array Macros **********************************************/ #define vla_group(groupname) size_t groupname##__next = 0 @@ -21,21 +22,36 @@ #define vla_item(groupname, type, name, n) \ size_t groupname##_##name##__offset = ({ \ - size_t align_mask = __alignof__(type) - 1; \ - size_t offset = (groupname##__next + align_mask) & ~align_mask;\ - size_t size = (n) * sizeof(type); \ - groupname##__next = offset + size; \ + size_t offset = 0; \ + if (groupname##__next != SIZE_MAX) { \ + size_t align_mask = __alignof__(type) - 1; \ + size_t size = array_size(n, sizeof(type)); \ + offset = (groupname##__next + align_mask) & \ + ~align_mask; \ + if (check_add_overflow(offset, size, \ + &groupname##__next)) { \ + groupname##__next = SIZE_MAX; \ + offset = 0; \ + } \ + } \ offset; \ }) #define vla_item_with_sz(groupname, type, name, n) \ - size_t groupname##_##name##__sz = (n) * sizeof(type); \ - size_t groupname##_##name##__offset = ({ \ - size_t align_mask = __alignof__(type) - 1; \ - size_t offset = (groupname##__next + align_mask) & ~align_mask;\ - size_t size = groupname##_##name##__sz; \ - groupname##__next = offset + size; \ - offset; \ + size_t groupname##_##name##__sz = array_size(n, sizeof(type)); \ + size_t groupname##_##name##__offset = ({ \ + size_t offset = 0; \ + if (groupname##__next != SIZE_MAX) { \ + size_t align_mask = __alignof__(type) - 1; \ + offset = (groupname##__next + align_mask) & \ + ~align_mask; \ + if (check_add_overflow(offset, groupname##_##name##__sz,\ + &groupname##__next)) { \ + groupname##__next = SIZE_MAX; \ + offset = 0; \ + } \ + } \ + offset; \ }) #define vla_ptr(ptr, groupname, name) \ diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c index 58e5b015d40e..bebe814f55e6 100644 --- a/drivers/usb/gadget/udc/atmel_usba_udc.c +++ b/drivers/usb/gadget/udc/atmel_usba_udc.c @@ -870,7 +870,7 @@ static int usba_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req) u32 status; DBG(DBG_GADGET | DBG_QUEUE, "ep_dequeue: %s, req %p\n", - ep->ep.name, req); + ep->ep.name, _req); spin_lock_irqsave(&udc->lock, flags); diff --git a/drivers/usb/gadget/udc/bdc/bdc_core.c b/drivers/usb/gadget/udc/bdc/bdc_core.c index cc4a16e253ac..3d33499db50b 100644 --- a/drivers/usb/gadget/udc/bdc/bdc_core.c +++ b/drivers/usb/gadget/udc/bdc/bdc_core.c @@ -282,6 +282,7 @@ static void bdc_mem_init(struct bdc *bdc, bool reinit) * in that case reinit is passed as 1 */ if (reinit) { + int i; /* Enable interrupts */ temp = bdc_readl(bdc->regs, BDC_BDCSC); temp |= BDC_GIE; @@ -291,6 +292,9 @@ static void bdc_mem_init(struct bdc 
*bdc, bool reinit) /* Initialize SRR to 0 */ memset(bdc->srr.sr_bds, 0, NUM_SR_ENTRIES * sizeof(struct bdc_bd)); + /* clear ep flags to avoid post disconnect stops/deconfigs */ + for (i = 1; i < bdc->num_eps; ++i) + bdc->bdc_ep_array[i]->flags = 0; } else { /* One time initiaization only */ /* Enable status report function pointers */ @@ -601,9 +605,14 @@ static int bdc_remove(struct platform_device *pdev) static int bdc_suspend(struct device *dev) { struct bdc *bdc = dev_get_drvdata(dev); + int ret; - clk_disable_unprepare(bdc->clk); - return 0; + /* Halt the controller */ + ret = bdc_stop(bdc); + if (!ret) + clk_disable_unprepare(bdc->clk); + + return ret; } static int bdc_resume(struct device *dev) diff --git a/drivers/usb/gadget/udc/bdc/bdc_ep.c b/drivers/usb/gadget/udc/bdc/bdc_ep.c index d49c6dc1082d..9ddc0b4e92c9 100644 --- a/drivers/usb/gadget/udc/bdc/bdc_ep.c +++ b/drivers/usb/gadget/udc/bdc/bdc_ep.c @@ -615,7 +615,6 @@ int bdc_ep_enable(struct bdc_ep *ep) } bdc_dbg_bd_list(bdc, ep); /* only for ep0: config ep is called for ep0 from connect event */ - ep->flags |= BDC_EP_ENABLED; if (ep->ep_num == 1) return ret; @@ -759,10 +758,13 @@ static int ep_dequeue(struct bdc_ep *ep, struct bdc_req *req) __func__, ep->name, start_bdi, end_bdi); dev_dbg(bdc->dev, "ep_dequeue ep=%p ep->desc=%p\n", ep, (void *)ep->usb_ep.desc); - /* Stop the ep to see where the HW is ? */ - ret = bdc_stop_ep(bdc, ep->ep_num); - /* if there is an issue with stopping ep, then no need to go further */ - if (ret) + /* if still connected, stop the ep to see where the HW is ? */ + if (!(bdc_readl(bdc->regs, BDC_USPC) & BDC_PST_MASK)) { + ret = bdc_stop_ep(bdc, ep->ep_num); + /* if there is an issue, then no need to go further */ + if (ret) + return 0; + } else return 0; /* @@ -1911,7 +1913,9 @@ static int bdc_gadget_ep_disable(struct usb_ep *_ep) __func__, ep->name, ep->flags); if (!(ep->flags & BDC_EP_ENABLED)) { - dev_warn(bdc->dev, "%s is already disabled\n", ep->name); + if (bdc->gadget.speed != USB_SPEED_UNKNOWN) + dev_warn(bdc->dev, "%s is already disabled\n", + ep->name); return 0; } spin_lock_irqsave(&bdc->lock, flags); diff --git a/drivers/usb/gadget/udc/gr_udc.c b/drivers/usb/gadget/udc/gr_udc.c index 116d386472ef..da73a06c20a3 100644 --- a/drivers/usb/gadget/udc/gr_udc.c +++ b/drivers/usb/gadget/udc/gr_udc.c @@ -1980,9 +1980,12 @@ static int gr_ep_init(struct gr_udc *dev, int num, int is_in, u32 maxplimit) if (num == 0) { _req = gr_alloc_request(&ep->ep, GFP_ATOMIC); + if (!_req) + return -ENOMEM; + buf = devm_kzalloc(dev->dev, PAGE_SIZE, GFP_DMA | GFP_ATOMIC); - if (!_req || !buf) { - /* possible _req freed by gr_probe via gr_remove */ + if (!buf) { + gr_free_request(&ep->ep, _req); return -ENOMEM; } diff --git a/drivers/usb/gadget/udc/lpc32xx_udc.c b/drivers/usb/gadget/udc/lpc32xx_udc.c index bf6c81e2f8cc..6d2f1f98f13d 100644 --- a/drivers/usb/gadget/udc/lpc32xx_udc.c +++ b/drivers/usb/gadget/udc/lpc32xx_udc.c @@ -1614,17 +1614,17 @@ static int lpc32xx_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc) { struct lpc32xx_ep *ep = container_of(_ep, struct lpc32xx_ep, ep); - struct lpc32xx_udc *udc = ep->udc; + struct lpc32xx_udc *udc; u16 maxpacket; u32 tmp; unsigned long flags; /* Verify EP data */ if ((!_ep) || (!ep) || (!desc) || - (desc->bDescriptorType != USB_DT_ENDPOINT)) { - dev_dbg(udc->dev, "bad ep or descriptor\n"); + (desc->bDescriptorType != USB_DT_ENDPOINT)) return -EINVAL; - } + + udc = ep->udc; maxpacket = usb_endpoint_maxp(desc); if ((maxpacket == 0) || (maxpacket > 
ep->maxpacket)) { dev_dbg(udc->dev, "bad ep descriptor's packet size\n"); @@ -1872,7 +1872,7 @@ static int lpc32xx_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req) static int lpc32xx_ep_set_halt(struct usb_ep *_ep, int value) { struct lpc32xx_ep *ep = container_of(_ep, struct lpc32xx_ep, ep); - struct lpc32xx_udc *udc = ep->udc; + struct lpc32xx_udc *udc; unsigned long flags; if ((!ep) || (ep->hwep_num <= 1)) @@ -1882,6 +1882,7 @@ static int lpc32xx_ep_set_halt(struct usb_ep *_ep, int value) if (ep->is_in) return -EAGAIN; + udc = ep->udc; spin_lock_irqsave(&udc->lock, flags); if (value == 1) { diff --git a/drivers/usb/gadget/udc/m66592-udc.c b/drivers/usb/gadget/udc/m66592-udc.c index a8288df6aadf..ea59b56e5402 100644 --- a/drivers/usb/gadget/udc/m66592-udc.c +++ b/drivers/usb/gadget/udc/m66592-udc.c @@ -1667,7 +1667,7 @@ static int m66592_probe(struct platform_device *pdev) err_add_udc: m66592_free_request(&m66592->ep[0].ep, m66592->ep0_req); - + m66592->ep0_req = NULL; clean_up3: if (m66592->pdata->on_chip) { clk_disable(m66592->clk); diff --git a/drivers/usb/gadget/udc/mv_udc_core.c b/drivers/usb/gadget/udc/mv_udc_core.c index cafde053788b..80a1b52c656e 100644 --- a/drivers/usb/gadget/udc/mv_udc_core.c +++ b/drivers/usb/gadget/udc/mv_udc_core.c @@ -2313,7 +2313,8 @@ static int mv_udc_probe(struct platform_device *pdev) return 0; err_create_workqueue: - destroy_workqueue(udc->qwork); + if (udc->qwork) + destroy_workqueue(udc->qwork); err_destroy_dma: dma_pool_destroy(udc->dtd_pool); err_free_dma: diff --git a/drivers/usb/gadget/udc/net2280.c b/drivers/usb/gadget/udc/net2280.c index 51efee21915f..7c616d7641c6 100644 --- a/drivers/usb/gadget/udc/net2280.c +++ b/drivers/usb/gadget/udc/net2280.c @@ -3782,8 +3782,10 @@ static int net2280_probe(struct pci_dev *pdev, const struct pci_device_id *id) return 0; done: - if (dev) + if (dev) { net2280_remove(pdev); + kfree(dev); + } return retval; } diff --git a/drivers/usb/gadget/udc/s3c2410_udc.c b/drivers/usb/gadget/udc/s3c2410_udc.c index f82208fbc249..5dcc0692b95c 100644 --- a/drivers/usb/gadget/udc/s3c2410_udc.c +++ b/drivers/usb/gadget/udc/s3c2410_udc.c @@ -251,10 +251,6 @@ static void s3c2410_udc_done(struct s3c2410_ep *ep, static void s3c2410_udc_nuke(struct s3c2410_udc *udc, struct s3c2410_ep *ep, int status) { - /* Sanity check */ - if (&ep->queue == NULL) - return; - while (!list_empty(&ep->queue)) { struct s3c2410_request *req; req = list_entry(ep->queue.next, struct s3c2410_request, diff --git a/drivers/usb/host/ehci-exynos.c b/drivers/usb/host/ehci-exynos.c index 01debfd03d4a..84d59a611511 100644 --- a/drivers/usb/host/ehci-exynos.c +++ b/drivers/usb/host/ehci-exynos.c @@ -203,9 +203,8 @@ static int exynos_ehci_probe(struct platform_device *pdev) hcd->rsrc_len = resource_size(res); irq = platform_get_irq(pdev, 0); - if (!irq) { - dev_err(&pdev->dev, "Failed to get IRQ\n"); - err = -ENODEV; + if (irq < 0) { + err = irq; goto fail_io; } diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c index ba8c799b5521..c31ad03d6414 100644 --- a/drivers/usb/host/ehci-hcd.c +++ b/drivers/usb/host/ehci-hcd.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c index fb4463f03b45..bfe73f38254b 100644 --- a/drivers/usb/host/ehci-hub.c +++ b/drivers/usb/host/ehci-hub.c @@ -14,7 +14,6 @@ */ /*-------------------------------------------------------------------------*/ -#include #define PORT_WAKE_BITS 
(PORT_WKOC_E|PORT_WKDISC_E|PORT_WKCONN_E) diff --git a/drivers/usb/host/ehci-mxc.c b/drivers/usb/host/ehci-mxc.c index c9f91e6c72b6..7f65c86047dd 100644 --- a/drivers/usb/host/ehci-mxc.c +++ b/drivers/usb/host/ehci-mxc.c @@ -50,6 +50,8 @@ static int ehci_mxc_drv_probe(struct platform_device *pdev) } irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; hcd = usb_create_hcd(&ehci_mxc_hc_driver, dev, dev_name(dev)); if (!hcd) diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c index b0882c13a1d1..66713c253765 100644 --- a/drivers/usb/host/ehci-pci.c +++ b/drivers/usb/host/ehci-pci.c @@ -216,6 +216,13 @@ static int ehci_pci_setup(struct usb_hcd *hcd) ehci_info(ehci, "applying MosChip frame-index workaround\n"); ehci->frame_index_bug = 1; break; + case PCI_VENDOR_ID_HUAWEI: + /* Synopsys HC bug */ + if (pdev->device == 0xa239) { + ehci_info(ehci, "applying Synopsys HC workaround\n"); + ehci->has_synopsys_hc_bug = 1; + } + break; } /* optional debug port, normally in the first BAR */ diff --git a/drivers/usb/host/ehci-platform.c b/drivers/usb/host/ehci-platform.c index 769749ca5961..e4fc3f66d43b 100644 --- a/drivers/usb/host/ehci-platform.c +++ b/drivers/usb/host/ehci-platform.c @@ -29,6 +29,8 @@ #include #include #include +#include +#include #include #include #include @@ -44,6 +46,9 @@ struct ehci_platform_priv { struct clk *clks[EHCI_MAX_CLKS]; struct reset_control *rsts; bool reset_on_resume; + bool quirk_poll; + struct timer_list poll_timer; + struct delayed_work poll_work; }; static const char hcd_name[] = "ehci-platform"; @@ -118,6 +123,111 @@ static struct usb_ehci_pdata ehci_platform_defaults = { .power_off = ehci_platform_power_off, }; +/** + * quirk_poll_check_port_status - Poll port_status if the device sticks + * @ehci: the ehci hcd pointer + * + * Since EHCI/OHCI controllers on R-Car Gen3 SoCs are possible to be getting + * stuck very rarely after a full/low usb device was disconnected. To + * detect such a situation, the controllers require a special way which poll + * the EHCI PORTSC register. + * + * Return: true if the controller's port_status indicated getting stuck + */ +static bool quirk_poll_check_port_status(struct ehci_hcd *ehci) +{ + u32 port_status = ehci_readl(ehci, &ehci->regs->port_status[0]); + + if (!(port_status & PORT_OWNER) && + (port_status & PORT_POWER) && + !(port_status & PORT_CONNECT) && + (port_status & PORT_LS_MASK)) + return true; + + return false; +} + +/** + * quirk_poll_rebind_companion - rebind comanion device to recover + * @ehci: the ehci hcd pointer + * + * Since EHCI/OHCI controllers on R-Car Gen3 SoCs are possible to be getting + * stuck very rarely after a full/low usb device was disconnected. To + * recover from such a situation, the controllers require changing the OHCI + * functional state. 
+ */ +static void quirk_poll_rebind_companion(struct ehci_hcd *ehci) +{ + struct device *companion_dev; + struct usb_hcd *hcd = ehci_to_hcd(ehci); + + companion_dev = usb_of_get_companion_dev(hcd->self.controller); + if (!companion_dev) + return; + + device_release_driver(companion_dev); + if (device_attach(companion_dev) < 0) + ehci_err(ehci, "%s: failed\n", __func__); + + put_device(companion_dev); +} + +static void quirk_poll_work(struct work_struct *work) +{ + struct ehci_platform_priv *priv = + container_of(to_delayed_work(work), struct ehci_platform_priv, + poll_work); + struct ehci_hcd *ehci = container_of((void *)priv, struct ehci_hcd, + priv); + + /* check the status twice to reduce misdetection rate */ + if (!quirk_poll_check_port_status(ehci)) + return; + udelay(10); + if (!quirk_poll_check_port_status(ehci)) + return; + + ehci_dbg(ehci, "%s: detected getting stuck. rebind now!\n", __func__); + quirk_poll_rebind_companion(ehci); +} + +static void quirk_poll_timer(struct timer_list *t) +{ + struct ehci_platform_priv *priv = from_timer(priv, t, poll_timer); + struct ehci_hcd *ehci = container_of((void *)priv, struct ehci_hcd, + priv); + + if (quirk_poll_check_port_status(ehci)) { + /* + * Now scheduling the work for testing the port more. Note that + * updating the status is possible to be delayed when + * reconnection. So, this uses delayed work with 5 ms delay + * to avoid misdetection. + */ + schedule_delayed_work(&priv->poll_work, msecs_to_jiffies(5)); + } + + mod_timer(&priv->poll_timer, jiffies + HZ); +} + +static void quirk_poll_init(struct ehci_platform_priv *priv) +{ + INIT_DELAYED_WORK(&priv->poll_work, quirk_poll_work); + timer_setup(&priv->poll_timer, quirk_poll_timer, 0); + mod_timer(&priv->poll_timer, jiffies + HZ); +} + +static void quirk_poll_end(struct ehci_platform_priv *priv) +{ + del_timer_sync(&priv->poll_timer); + cancel_delayed_work(&priv->poll_work); +} + +static const struct soc_device_attribute quirk_poll_match[] = { + { .family = "R-Car Gen3" }, + { /* sentinel*/ } +}; + static int ehci_platform_probe(struct platform_device *dev) { struct usb_hcd *hcd; @@ -176,6 +286,9 @@ static int ehci_platform_probe(struct platform_device *dev) "has-transaction-translator")) hcd->has_tt = 1; + if (soc_device_match(quirk_poll_match)) + priv->quirk_poll = true; + for (clk = 0; clk < EHCI_MAX_CLKS; clk++) { priv->clks[clk] = of_clk_get(dev->dev.of_node, clk); if (IS_ERR(priv->clks[clk])) { @@ -247,6 +360,9 @@ static int ehci_platform_probe(struct platform_device *dev) device_enable_async_suspend(hcd->self.controller); platform_set_drvdata(dev, hcd); + if (priv->quirk_poll) + quirk_poll_init(priv); + return err; err_power: @@ -273,6 +389,9 @@ static int ehci_platform_remove(struct platform_device *dev) struct ehci_platform_priv *priv = hcd_to_ehci_priv(hcd); int clk; + if (priv->quirk_poll) + quirk_poll_end(priv); + usb_remove_hcd(hcd); if (pdata->power_off) @@ -297,9 +416,13 @@ static int ehci_platform_suspend(struct device *dev) struct usb_hcd *hcd = dev_get_drvdata(dev); struct usb_ehci_pdata *pdata = dev_get_platdata(dev); struct platform_device *pdev = to_platform_device(dev); + struct ehci_platform_priv *priv = hcd_to_ehci_priv(hcd); bool do_wakeup = device_may_wakeup(dev); int ret; + if (priv->quirk_poll) + quirk_poll_end(priv); + ret = ehci_suspend(hcd, do_wakeup); if (ret) return ret; @@ -331,6 +454,10 @@ static int ehci_platform_resume(struct device *dev) } ehci_resume(hcd, priv->reset_on_resume); + + if (priv->quirk_poll) + quirk_poll_init(priv); + return 0; } 
#endif /* CONFIG_PM_SLEEP */ diff --git a/drivers/usb/host/ohci-exynos.c b/drivers/usb/host/ohci-exynos.c index d5ce98e205c7..d8b6c9f5695c 100644 --- a/drivers/usb/host/ohci-exynos.c +++ b/drivers/usb/host/ohci-exynos.c @@ -171,9 +171,8 @@ static int exynos_ohci_probe(struct platform_device *pdev) hcd->rsrc_len = resource_size(res); irq = platform_get_irq(pdev, 0); - if (!irq) { - dev_err(&pdev->dev, "Failed to get IRQ\n"); - err = -ENODEV; + if (irq < 0) { + err = irq; goto fail_io; } diff --git a/drivers/usb/host/ohci-sm501.c b/drivers/usb/host/ohci-sm501.c index c158cda9e4b9..b91d50da6127 100644 --- a/drivers/usb/host/ohci-sm501.c +++ b/drivers/usb/host/ohci-sm501.c @@ -157,9 +157,10 @@ static int ohci_hcd_sm501_drv_probe(struct platform_device *pdev) * the call to usb_hcd_setup_local_mem() below does just that. */ - if (usb_hcd_setup_local_mem(hcd, mem->start, - mem->start - mem->parent->start, - resource_size(mem)) < 0) + retval = usb_hcd_setup_local_mem(hcd, mem->start, + mem->start - mem->parent->start, + resource_size(mem)); + if (retval < 0) goto err5; retval = usb_add_hcd(hcd, irq, IRQF_SHARED); if (retval) @@ -190,6 +191,7 @@ static int ohci_hcd_sm501_drv_remove(struct platform_device *pdev) struct resource *mem; usb_remove_hcd(hcd); + iounmap(hcd->regs); release_mem_region(hcd->rsrc_start, hcd->rsrc_len); usb_put_hcd(hcd); mem = platform_get_resource(pdev, IORESOURCE_MEM, 1); diff --git a/drivers/usb/host/xhci-debugfs.c b/drivers/usb/host/xhci-debugfs.c index 76c3f29562d2..448d7b11dec4 100644 --- a/drivers/usb/host/xhci-debugfs.c +++ b/drivers/usb/host/xhci-debugfs.c @@ -273,7 +273,7 @@ static int xhci_slot_context_show(struct seq_file *s, void *unused) static int xhci_endpoint_context_show(struct seq_file *s, void *unused) { - int dci; + int ep_index; dma_addr_t dma; struct xhci_hcd *xhci; struct xhci_ep_ctx *ep_ctx; @@ -282,9 +282,9 @@ static int xhci_endpoint_context_show(struct seq_file *s, void *unused) xhci = hcd_to_xhci(bus_to_hcd(dev->udev->bus)); - for (dci = 1; dci < 32; dci++) { - ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, dci); - dma = dev->out_ctx->dma + dci * CTX_SIZE(xhci->hcc_params); + for (ep_index = 0; ep_index < 31; ep_index++) { + ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index); + dma = dev->out_ctx->dma + (ep_index + 1) * CTX_SIZE(xhci->hcc_params); seq_printf(s, "%pad: %s\n", &dma, xhci_decode_ep_context(le32_to_cpu(ep_ctx->ep_info), le32_to_cpu(ep_ctx->ep_info2), diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c index 879a8540414b..40c3425229c7 100644 --- a/drivers/usb/host/xhci-hub.c +++ b/drivers/usb/host/xhci-hub.c @@ -738,15 +738,6 @@ static void xhci_hub_report_usb3_link_state(struct xhci_hcd *xhci, { u32 pls = status_reg & PORT_PLS_MASK; - /* resume state is a xHCI internal state. - * Do not report it to usb core, instead, pretend to be U3, - * thus usb core knows it's not ready for transfer - */ - if (pls == XDEV_RESUME) { - *status |= USB_SS_PORT_LS_U3; - return; - } - /* When the CAS bit is set then warm reset * should be performed on port */ @@ -768,6 +759,16 @@ static void xhci_hub_report_usb3_link_state(struct xhci_hcd *xhci, */ pls |= USB_PORT_STAT_CONNECTION; } else { + /* + * Resume state is an xHCI internal state. Do not report it to + * usb core, instead, pretend to be U3, thus usb core knows + * it's not ready for transfer. 
+ */ + if (pls == XDEV_RESUME) { + *status |= USB_SS_PORT_LS_U3; + return; + } + /* * If CAS bit isn't set but the Port is already at * Compliance Mode, fake a connection so the USB core diff --git a/drivers/usb/host/xhci-mtk-sch.c b/drivers/usb/host/xhci-mtk-sch.c index fea555570ad4..45c54d56ecbd 100644 --- a/drivers/usb/host/xhci-mtk-sch.c +++ b/drivers/usb/host/xhci-mtk-sch.c @@ -557,6 +557,10 @@ static bool need_bw_sch(struct usb_host_endpoint *ep, if (is_fs_or_ls(speed) && !has_tt) return false; + /* skip endpoint with zero maxpkt */ + if (usb_endpoint_maxp(&ep->desc) == 0) + return false; + return true; } diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c index b18a6baef204..85f1ff0399a9 100644 --- a/drivers/usb/host/xhci-mtk.c +++ b/drivers/usb/host/xhci-mtk.c @@ -592,6 +592,9 @@ static int xhci_mtk_remove(struct platform_device *dev) struct xhci_hcd *xhci = hcd_to_xhci(hcd); struct usb_hcd *shared_hcd = xhci->shared_hcd; + pm_runtime_put_noidle(&dev->dev); + pm_runtime_disable(&dev->dev); + usb_remove_hcd(shared_hcd); xhci->shared_hcd = NULL; device_init_wakeup(&dev->dev, false); @@ -602,8 +605,6 @@ static int xhci_mtk_remove(struct platform_device *dev) xhci_mtk_sch_exit(mtk); xhci_mtk_clks_disable(mtk); xhci_mtk_ldos_disable(mtk); - pm_runtime_put_sync(&dev->dev); - pm_runtime_disable(&dev->dev); return 0; } diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index 1fddc41fa1f3..bbd616324faa 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -55,7 +55,10 @@ #define PCI_DEVICE_ID_AMD_PROMONTORYA_3 0x43ba #define PCI_DEVICE_ID_AMD_PROMONTORYA_2 0x43bb #define PCI_DEVICE_ID_AMD_PROMONTORYA_1 0x43bc +#define PCI_DEVICE_ID_ASMEDIA_1042_XHCI 0x1042 #define PCI_DEVICE_ID_ASMEDIA_1042A_XHCI 0x1142 +#define PCI_DEVICE_ID_ASMEDIA_1142_XHCI 0x1242 +#define PCI_DEVICE_ID_ASMEDIA_2142_XHCI 0x2142 static const char hcd_name[] = "xhci_hcd"; @@ -245,11 +248,15 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) xhci->quirks |= XHCI_BROKEN_STREAMS; if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && - pdev->device == 0x1042) + pdev->device == PCI_DEVICE_ID_ASMEDIA_1042_XHCI) xhci->quirks |= XHCI_BROKEN_STREAMS; if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && - pdev->device == 0x1142) + pdev->device == PCI_DEVICE_ID_ASMEDIA_1042A_XHCI) xhci->quirks |= XHCI_TRUST_TX_LENGTH; + if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && + (pdev->device == PCI_DEVICE_ID_ASMEDIA_1142_XHCI || + pdev->device == PCI_DEVICE_ID_ASMEDIA_2142_XHCI)) + xhci->quirks |= XHCI_NO_64BIT_SUPPORT; if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && pdev->device == PCI_DEVICE_ID_ASMEDIA_1042A_XHCI) diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index ff04d8e69bbb..a70a8cbb2a51 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -1430,6 +1430,7 @@ static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id, xhci->devs[slot_id]->out_ctx, ep_index); ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index); + ep_ctx->ep_info &= cpu_to_le32(~EP_STATE_MASK);/* must clear */ ep_ctx->ep_info2 &= cpu_to_le32(~MAX_PACKET_MASK); ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet_size)); @@ -3235,10 +3236,11 @@ static void xhci_endpoint_reset(struct usb_hcd *hcd, wait_for_completion(cfg_cmd->completion); - ep->ep_state &= ~EP_SOFT_CLEAR_TOGGLE; xhci_free_command(xhci, cfg_cmd); cleanup: xhci_free_command(xhci, stop_cmd); + if (ep->ep_state & EP_SOFT_CLEAR_TOGGLE) + ep->ep_state &= ~EP_SOFT_CLEAR_TOGGLE; } static int 
xhci_check_streams_endpoint(struct xhci_hcd *xhci, @@ -4390,6 +4392,9 @@ static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd, int hird, exit_latency; int ret; + if (xhci->quirks & XHCI_HW_LPM_DISABLE) + return -EPERM; + if (hcd->speed >= HCD_USB3 || !xhci->hw_lpm_support || !udev->lpm_capable) return -EPERM; @@ -4412,7 +4417,7 @@ static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd, xhci_dbg(xhci, "%s port %d USB2 hardware LPM\n", enable ? "enable" : "disable", port_num + 1); - if (enable && !(xhci->quirks & XHCI_HW_LPM_DISABLE)) { + if (enable) { /* Host supports BESL timeout instead of HIRD */ if (udev->usb2_hw_lpm_besl_capable) { /* if device doesn't have a preferred BESL value use a @@ -4471,6 +4476,9 @@ static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd, mutex_lock(hcd->bandwidth_mutex); xhci_change_max_exit_latency(xhci, udev, 0); mutex_unlock(hcd->bandwidth_mutex); + readl_poll_timeout(ports[port_num]->addr, pm_val, + (pm_val & PORT_PLS_MASK) == XDEV_U0, + 100, 10000); return 0; } } diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index a7955a94556b..c0127960298a 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -716,7 +716,7 @@ struct xhci_ep_ctx { * 4 - TRB error * 5-7 - reserved */ -#define EP_STATE_MASK (0xf) +#define EP_STATE_MASK (0x7) #define EP_STATE_DISABLED 0 #define EP_STATE_RUNNING 1 #define EP_STATE_HALTED 2 diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c index dce20301e367..103c69c692ba 100644 --- a/drivers/usb/misc/iowarrior.c +++ b/drivers/usb/misc/iowarrior.c @@ -2,8 +2,9 @@ /* * Native support for the I/O-Warrior USB devices * - * Copyright (c) 2003-2005 Code Mercenaries GmbH - * written by Christian Lucht + * Copyright (c) 2003-2005, 2020 Code Mercenaries GmbH + * written by Christian Lucht and + * Christoph Jung * * based on @@ -802,14 +803,28 @@ static int iowarrior_probe(struct usb_interface *interface, /* we have to check the report_size often, so remember it in the endianness suitable for our machine */ dev->report_size = usb_endpoint_maxp(dev->int_in_endpoint); - if ((dev->interface->cur_altsetting->desc.bInterfaceNumber == 0) && - ((dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW56) || - (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW56AM) || - (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW28) || - (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW28L) || - (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW100))) - /* IOWarrior56 has wMaxPacketSize different from report size */ - dev->report_size = 7; + + /* + * Some devices need the report size to be different than the + * endpoint size. 
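The iowarrior switch that follows picks a fixed report size per product instead of trusting the interrupt-in endpoint's wMaxPacketSize. A standalone sketch of the same selection logic; the product identifiers below are placeholders (the real USB_DEVICE_ID_CODEMERCS_* values live in the driver), only the report sizes mirror the hunk:

        #include <stdio.h>

        /* Placeholder IDs for illustration; the real values live in the driver. */
        enum iow_product {
                IOW56, IOW56AM, IOW28, IOW28L, IOW100, IOW24
        };

        /* Fall back to the interrupt-in endpoint size when not special-cased. */
        static int iow_report_size(enum iow_product id, int ep_maxp)
        {
                switch (id) {
                case IOW56:
                case IOW56AM:
                        return 7;
                case IOW28:
                case IOW28L:
                        return 4;
                case IOW100:
                        return 13;
                default:
                        return ep_maxp;
                }
        }

        int main(void)
        {
                printf("IOW56: %d bytes\n", iow_report_size(IOW56, 8));
                printf("IOW24: %d bytes\n", iow_report_size(IOW24, 8));
                return 0;
        }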
+ */ + if (dev->interface->cur_altsetting->desc.bInterfaceNumber == 0) { + switch (dev->product_id) { + case USB_DEVICE_ID_CODEMERCS_IOW56: + case USB_DEVICE_ID_CODEMERCS_IOW56AM: + dev->report_size = 7; + break; + + case USB_DEVICE_ID_CODEMERCS_IOW28: + case USB_DEVICE_ID_CODEMERCS_IOW28L: + dev->report_size = 4; + break; + + case USB_DEVICE_ID_CODEMERCS_IOW100: + dev->report_size = 13; + break; + } + } /* create the urb and buffer for reading */ dev->int_in_urb = usb_alloc_urb(0, GFP_KERNEL); diff --git a/drivers/usb/misc/lvstest.c b/drivers/usb/misc/lvstest.c index 407fe7570f3b..f8686139d6f3 100644 --- a/drivers/usb/misc/lvstest.c +++ b/drivers/usb/misc/lvstest.c @@ -426,7 +426,7 @@ static int lvs_rh_probe(struct usb_interface *intf, USB_DT_SS_HUB_SIZE, USB_CTRL_GET_TIMEOUT); if (ret < (USB_DT_HUB_NONVAR_SIZE + 2)) { dev_err(&hdev->dev, "wrong root hub descriptor read %d\n", ret); - return ret; + return ret < 0 ? ret : -EINVAL; } /* submit urb to poll interrupt endpoint */ diff --git a/drivers/usb/misc/sisusbvga/sisusb.c b/drivers/usb/misc/sisusbvga/sisusb.c index fc8a5da4a07c..0734e6dd9386 100644 --- a/drivers/usb/misc/sisusbvga/sisusb.c +++ b/drivers/usb/misc/sisusbvga/sisusb.c @@ -761,7 +761,7 @@ static int sisusb_write_mem_bulk(struct sisusb_usb_data *sisusb, u32 addr, u8 swap8, fromkern = kernbuffer ? 1 : 0; u16 swap16; u32 swap32, flag = (length >> 28) & 1; - char buf[4]; + u8 buf[4]; /* if neither kernbuffer not userbuffer are given, assume * data in obuf diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c index 98ada1a3425c..bae88893ee8e 100644 --- a/drivers/usb/misc/usbtest.c +++ b/drivers/usb/misc/usbtest.c @@ -2873,6 +2873,7 @@ static void usbtest_disconnect(struct usb_interface *intf) usb_set_intfdata(intf, NULL); dev_dbg(&intf->dev, "disconnect\n"); + kfree(dev->buf); kfree(dev); } diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c index be0505b8b5d4..785080f79073 100644 --- a/drivers/usb/misc/yurex.c +++ b/drivers/usb/misc/yurex.c @@ -492,7 +492,7 @@ static ssize_t yurex_write(struct file *file, const char __user *user_buffer, prepare_to_wait(&dev->waitq, &wait, TASK_INTERRUPTIBLE); dev_dbg(&dev->interface->dev, "%s - submit %c\n", __func__, dev->cntl_buffer[0]); - retval = usb_submit_urb(dev->cntl_urb, GFP_KERNEL); + retval = usb_submit_urb(dev->cntl_urb, GFP_ATOMIC); if (retval >= 0) timeout = schedule_timeout(YUREX_WRITE_TIMEOUT); finish_wait(&dev->waitq, &wait); diff --git a/drivers/usb/mtu3/mtu3_core.c b/drivers/usb/mtu3/mtu3_core.c index 9dd02160cca9..e3780d4d6514 100644 --- a/drivers/usb/mtu3/mtu3_core.c +++ b/drivers/usb/mtu3/mtu3_core.c @@ -131,8 +131,12 @@ static void mtu3_device_disable(struct mtu3 *mtu) mtu3_setbits(ibase, SSUSB_U2_CTRL(0), SSUSB_U2_PORT_DIS | SSUSB_U2_PORT_PDN); - if (mtu->ssusb->dr_mode == USB_DR_MODE_OTG) + if (mtu->ssusb->dr_mode == USB_DR_MODE_OTG) { mtu3_clrbits(ibase, SSUSB_U2_CTRL(0), SSUSB_U2_PORT_OTG_SEL); + if (mtu->is_u3_ip) + mtu3_clrbits(ibase, SSUSB_U3_CTRL(0), + SSUSB_U3_PORT_DUAL_MODE); + } mtu3_setbits(ibase, U3D_SSUSB_IP_PW_CTRL2, SSUSB_IP_DEV_PDN); } diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c index 86637cd066cf..05cdad13933b 100644 --- a/drivers/usb/renesas_usbhs/fifo.c +++ b/drivers/usb/renesas_usbhs/fifo.c @@ -803,7 +803,8 @@ static int __usbhsf_dma_map_ctrl(struct usbhs_pkt *pkt, int map) return info->dma_map_ctrl(chan->device->dev, pkt, map); } -static void usbhsf_dma_complete(void *arg); +static void usbhsf_dma_complete(void *arg, + const struct 
dmaengine_result *result); static void usbhsf_dma_xfer_preparing(struct usbhs_pkt *pkt) { struct usbhs_pipe *pipe = pkt->pipe; @@ -813,6 +814,7 @@ static void usbhsf_dma_xfer_preparing(struct usbhs_pkt *pkt) struct dma_chan *chan; struct device *dev = usbhs_priv_to_dev(priv); enum dma_transfer_direction dir; + dma_cookie_t cookie; fifo = usbhs_pipe_to_fifo(pipe); if (!fifo) @@ -827,11 +829,11 @@ static void usbhsf_dma_xfer_preparing(struct usbhs_pkt *pkt) if (!desc) return; - desc->callback = usbhsf_dma_complete; - desc->callback_param = pipe; + desc->callback_result = usbhsf_dma_complete; + desc->callback_param = pkt; - pkt->cookie = dmaengine_submit(desc); - if (pkt->cookie < 0) { + cookie = dmaengine_submit(desc); + if (cookie < 0) { dev_err(dev, "Failed to submit dma descriptor\n"); return; } @@ -1152,12 +1154,10 @@ static size_t usbhs_dma_calc_received_size(struct usbhs_pkt *pkt, struct dma_chan *chan, int dtln) { struct usbhs_pipe *pipe = pkt->pipe; - struct dma_tx_state state; size_t received_size; int maxp = usbhs_pipe_get_maxpacket(pipe); - dmaengine_tx_status(chan, pkt->cookie, &state); - received_size = pkt->length - state.residue; + received_size = pkt->length - pkt->dma_result->residue; if (dtln) { received_size -= USBHS_USB_DMAC_XFER_SIZE; @@ -1363,13 +1363,16 @@ static int usbhsf_irq_ready(struct usbhs_priv *priv, return 0; } -static void usbhsf_dma_complete(void *arg) +static void usbhsf_dma_complete(void *arg, + const struct dmaengine_result *result) { - struct usbhs_pipe *pipe = arg; + struct usbhs_pkt *pkt = arg; + struct usbhs_pipe *pipe = pkt->pipe; struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe); struct device *dev = usbhs_priv_to_dev(priv); int ret; + pkt->dma_result = result; ret = usbhsf_pkt_handler(pipe, USBHSF_PKT_DMA_DONE); if (ret < 0) dev_err(dev, "dma_complete run_error %d : %d\n", diff --git a/drivers/usb/renesas_usbhs/fifo.h b/drivers/usb/renesas_usbhs/fifo.h index c3d3cc35cee0..4a7dc23ce3d3 100644 --- a/drivers/usb/renesas_usbhs/fifo.h +++ b/drivers/usb/renesas_usbhs/fifo.h @@ -50,7 +50,7 @@ struct usbhs_pkt { struct usbhs_pkt *pkt); struct work_struct work; dma_addr_t dma; - dma_cookie_t cookie; + const struct dmaengine_result *dma_result; void *buf; int length; int trans; diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c index dcdd541b3291..390bc4b25045 100644 --- a/drivers/usb/serial/ch341.c +++ b/drivers/usb/serial/ch341.c @@ -81,6 +81,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x4348, 0x5523) }, + { USB_DEVICE(0x1a86, 0x7522) }, { USB_DEVICE(0x1a86, 0x7523) }, { USB_DEVICE(0x1a86, 0x5523) }, { }, diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c index f5143eedbc48..a90801ef0055 100644 --- a/drivers/usb/serial/cp210x.c +++ b/drivers/usb/serial/cp210x.c @@ -272,6 +272,8 @@ static struct usb_serial_driver cp210x_device = { .break_ctl = cp210x_break_ctl, .set_termios = cp210x_set_termios, .tx_empty = cp210x_tx_empty, + .throttle = usb_serial_generic_throttle, + .unthrottle = usb_serial_generic_unthrottle, .tiocmget = cp210x_tiocmget, .tiocmset = cp210x_tiocmset, .attach = cp210x_attach, @@ -915,6 +917,7 @@ static void cp210x_get_termios_port(struct usb_serial_port *port, u32 baud; u16 bits; u32 ctl_hs; + u32 flow_repl; cp210x_read_u32_reg(port, CP210X_GET_BAUDRATE, &baud); @@ -1015,6 +1018,22 @@ static void cp210x_get_termios_port(struct usb_serial_port *port, ctl_hs = le32_to_cpu(flow_ctl.ulControlHandshake); if (ctl_hs & CP210X_SERIAL_CTS_HANDSHAKE) { dev_dbg(dev, "%s - flow control = 
CRTSCTS\n", __func__); + /* + * When the port is closed, the CP210x hardware disables + * auto-RTS and RTS is deasserted but it leaves auto-CTS when + * in hardware flow control mode. When re-opening the port, if + * auto-CTS is enabled on the cp210x, then auto-RTS must be + * re-enabled in the driver. + */ + flow_repl = le32_to_cpu(flow_ctl.ulFlowReplace); + flow_repl &= ~CP210X_SERIAL_RTS_MASK; + flow_repl |= CP210X_SERIAL_RTS_SHIFT(CP210X_SERIAL_RTS_FLOW_CTL); + flow_ctl.ulFlowReplace = cpu_to_le32(flow_repl); + cp210x_write_reg_block(port, + CP210X_SET_FLOW, + &flow_ctl, + sizeof(flow_ctl)); + cflag |= CRTSCTS; } else { dev_dbg(dev, "%s - flow control = NONE\n", __func__); diff --git a/drivers/usb/serial/cypress_m8.c b/drivers/usb/serial/cypress_m8.c index 216edd5826ca..ecda82198798 100644 --- a/drivers/usb/serial/cypress_m8.c +++ b/drivers/usb/serial/cypress_m8.c @@ -59,6 +59,7 @@ static const struct usb_device_id id_table_earthmate[] = { static const struct usb_device_id id_table_cyphidcomrs232[] = { { USB_DEVICE(VENDOR_ID_CYPRESS, PRODUCT_ID_CYPHIDCOM) }, + { USB_DEVICE(VENDOR_ID_SAI, PRODUCT_ID_CYPHIDCOM) }, { USB_DEVICE(VENDOR_ID_POWERCOM, PRODUCT_ID_UPS) }, { USB_DEVICE(VENDOR_ID_FRWD, PRODUCT_ID_CYPHIDCOM_FRWD) }, { } /* Terminating entry */ @@ -73,6 +74,7 @@ static const struct usb_device_id id_table_combined[] = { { USB_DEVICE(VENDOR_ID_DELORME, PRODUCT_ID_EARTHMATEUSB) }, { USB_DEVICE(VENDOR_ID_DELORME, PRODUCT_ID_EARTHMATEUSB_LT20) }, { USB_DEVICE(VENDOR_ID_CYPRESS, PRODUCT_ID_CYPHIDCOM) }, + { USB_DEVICE(VENDOR_ID_SAI, PRODUCT_ID_CYPHIDCOM) }, { USB_DEVICE(VENDOR_ID_POWERCOM, PRODUCT_ID_UPS) }, { USB_DEVICE(VENDOR_ID_FRWD, PRODUCT_ID_CYPHIDCOM_FRWD) }, { USB_DEVICE(VENDOR_ID_DAZZLE, PRODUCT_ID_CA42) }, diff --git a/drivers/usb/serial/cypress_m8.h b/drivers/usb/serial/cypress_m8.h index 35e223751c0e..16b7410ad057 100644 --- a/drivers/usb/serial/cypress_m8.h +++ b/drivers/usb/serial/cypress_m8.h @@ -25,6 +25,9 @@ #define VENDOR_ID_CYPRESS 0x04b4 #define PRODUCT_ID_CYPHIDCOM 0x5500 +/* Simply Automated HID->COM UPB PIM (using Cypress PID 0x5500) */ +#define VENDOR_ID_SAI 0x17dd + /* FRWD Dongle - a GPS sports watch */ #define VENDOR_ID_FRWD 0x6737 #define PRODUCT_ID_CYPHIDCOM_FRWD 0x0001 diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 9ad44a96dfe3..ae98fe94fe91 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -713,6 +713,7 @@ static const struct usb_device_id id_table_combined[] = { { USB_DEVICE(XSENS_VID, XSENS_AWINDA_STATION_PID) }, { USB_DEVICE(XSENS_VID, XSENS_CONVERTER_PID) }, { USB_DEVICE(XSENS_VID, XSENS_MTDEVBOARD_PID) }, + { USB_DEVICE(XSENS_VID, XSENS_MTIUSBCONVERTER_PID) }, { USB_DEVICE(XSENS_VID, XSENS_MTW_PID) }, { USB_DEVICE(FTDI_VID, FTDI_OMNI1509) }, { USB_DEVICE(MOBILITY_VID, MOBILITY_USB_SERIAL_PID) }, @@ -2480,12 +2481,11 @@ static int ftdi_prepare_write_buffer(struct usb_serial_port *port, #define FTDI_RS_ERR_MASK (FTDI_RS_BI | FTDI_RS_PE | FTDI_RS_FE | FTDI_RS_OE) static int ftdi_process_packet(struct usb_serial_port *port, - struct ftdi_private *priv, char *packet, int len) + struct ftdi_private *priv, unsigned char *buf, int len) { + unsigned char status; int i; - char status; char flag; - char *ch; if (len < 2) { dev_dbg(&port->dev, "malformed packet\n"); @@ -2495,7 +2495,7 @@ static int ftdi_process_packet(struct usb_serial_port *port, /* Compare new line status to the old one, signal if different/ N.B. packet may be processed more than once, but differences are only processed once. 
*/ - status = packet[0] & FTDI_STATUS_B0_MASK; + status = buf[0] & FTDI_STATUS_B0_MASK; if (status != priv->prev_status) { char diff_status = status ^ priv->prev_status; @@ -2521,13 +2521,12 @@ static int ftdi_process_packet(struct usb_serial_port *port, } /* save if the transmitter is empty or not */ - if (packet[1] & FTDI_RS_TEMT) + if (buf[1] & FTDI_RS_TEMT) priv->transmit_empty = 1; else priv->transmit_empty = 0; - len -= 2; - if (!len) + if (len == 2) return 0; /* status only */ /* @@ -2535,40 +2534,41 @@ static int ftdi_process_packet(struct usb_serial_port *port, * data payload to avoid over-reporting. */ flag = TTY_NORMAL; - if (packet[1] & FTDI_RS_ERR_MASK) { + if (buf[1] & FTDI_RS_ERR_MASK) { /* Break takes precedence over parity, which takes precedence * over framing errors */ - if (packet[1] & FTDI_RS_BI) { + if (buf[1] & FTDI_RS_BI) { flag = TTY_BREAK; port->icount.brk++; usb_serial_handle_break(port); - } else if (packet[1] & FTDI_RS_PE) { + } else if (buf[1] & FTDI_RS_PE) { flag = TTY_PARITY; port->icount.parity++; - } else if (packet[1] & FTDI_RS_FE) { + } else if (buf[1] & FTDI_RS_FE) { flag = TTY_FRAME; port->icount.frame++; } /* Overrun is special, not associated with a char */ - if (packet[1] & FTDI_RS_OE) { + if (buf[1] & FTDI_RS_OE) { port->icount.overrun++; tty_insert_flip_char(&port->port, 0, TTY_OVERRUN); } } - port->icount.rx += len; - ch = packet + 2; + port->icount.rx += len - 2; if (port->port.console && port->sysrq) { - for (i = 0; i < len; i++, ch++) { - if (!usb_serial_handle_sysrq_char(port, *ch)) - tty_insert_flip_char(&port->port, *ch, flag); + for (i = 2; i < len; i++) { + if (usb_serial_handle_sysrq_char(port, buf[i])) + continue; + tty_insert_flip_char(&port->port, buf[i], flag); } } else { - tty_insert_flip_string_fixed_flag(&port->port, ch, flag, len); + tty_insert_flip_string_fixed_flag(&port->port, buf + 2, flag, + len - 2); } - return len; + return len - 2; } static void ftdi_process_read_urb(struct urb *urb) diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index e8373528264c..b5ca17a5967a 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h @@ -160,6 +160,7 @@ #define XSENS_AWINDA_DONGLE_PID 0x0102 #define XSENS_MTW_PID 0x0200 /* Xsens MTw */ #define XSENS_MTDEVBOARD_PID 0x0300 /* Motion Tracker Development Board */ +#define XSENS_MTIUSBCONVERTER_PID 0x0301 /* MTi USB converter */ #define XSENS_CONVERTER_PID 0xD00D /* Xsens USB-serial converter */ /* Xsens devices using FTDI VID */ diff --git a/drivers/usb/serial/iuu_phoenix.c b/drivers/usb/serial/iuu_phoenix.c index d5bff69b1769..ffbb2a8901b2 100644 --- a/drivers/usb/serial/iuu_phoenix.c +++ b/drivers/usb/serial/iuu_phoenix.c @@ -353,10 +353,11 @@ static void iuu_led_activity_on(struct urb *urb) struct usb_serial_port *port = urb->context; int result; char *buf_ptr = port->write_urb->transfer_buffer; - *buf_ptr++ = IUU_SET_LED; + if (xmas) { - get_random_bytes(buf_ptr, 6); - *(buf_ptr+7) = 1; + buf_ptr[0] = IUU_SET_LED; + get_random_bytes(buf_ptr + 1, 6); + buf_ptr[7] = 1; } else { iuu_rgbf_fill_buffer(buf_ptr, 255, 255, 0, 0, 0, 0, 255); } @@ -374,13 +375,14 @@ static void iuu_led_activity_off(struct urb *urb) struct usb_serial_port *port = urb->context; int result; char *buf_ptr = port->write_urb->transfer_buffer; + if (xmas) { iuu_rxcmd(urb); return; - } else { - *buf_ptr++ = IUU_SET_LED; - iuu_rgbf_fill_buffer(buf_ptr, 0, 0, 255, 255, 0, 0, 255); } + + iuu_rgbf_fill_buffer(buf_ptr, 0, 0, 255, 255, 0, 0, 255); + 
usb_fill_bulk_urb(port->write_urb, port->serial->dev, usb_sndbulkpipe(port->serial->dev, port->bulk_out_endpointAddress), @@ -697,14 +699,16 @@ static int iuu_uart_write(struct tty_struct *tty, struct usb_serial_port *port, struct iuu_private *priv = usb_get_serial_port_data(port); unsigned long flags; - if (count > 256) - return -ENOMEM; - spin_lock_irqsave(&priv->lock, flags); + count = min(count, 256 - priv->writelen); + if (count == 0) + goto out; + /* fill the buffer */ memcpy(priv->writebuf + priv->writelen, buf, count); priv->writelen += count; +out: spin_unlock_irqrestore(&priv->lock, flags); return count; diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 254a8bbeea67..f7a6ac05ac57 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -245,6 +245,7 @@ static void option_instat_callback(struct urb *urb); /* These Quectel products use Quectel's vendor ID */ #define QUECTEL_PRODUCT_EC21 0x0121 #define QUECTEL_PRODUCT_EC25 0x0125 +#define QUECTEL_PRODUCT_EG95 0x0195 #define QUECTEL_PRODUCT_BG96 0x0296 #define QUECTEL_PRODUCT_EP06 0x0306 #define QUECTEL_PRODUCT_EM12 0x0512 @@ -1093,12 +1094,18 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE(QUALCOMM_VENDOR_ID, UBLOX_PRODUCT_R410M), .driver_info = RSVD(1) | RSVD(3) }, /* Quectel products using Quectel vendor ID */ - { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC21), - .driver_info = RSVD(4) }, - { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC25), - .driver_info = RSVD(4) }, - { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96), - .driver_info = RSVD(4) }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC21, 0xff, 0xff, 0xff), + .driver_info = NUMEP2 }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC21, 0xff, 0, 0) }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC25, 0xff, 0xff, 0xff), + .driver_info = NUMEP2 }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC25, 0xff, 0, 0) }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG95, 0xff, 0xff, 0xff), + .driver_info = NUMEP2 }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG95, 0xff, 0, 0) }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96, 0xff, 0xff, 0xff), + .driver_info = NUMEP2 }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96, 0xff, 0, 0) }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0xff, 0xff), .driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0, 0) }, @@ -1816,6 +1823,8 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_INTERFACE_CLASS(0x1e0e, 0x9003, 0xff) }, /* Simcom SIM7500/SIM7600 MBIM mode */ { USB_DEVICE_INTERFACE_CLASS(0x1e0e, 0x9011, 0xff), /* Simcom SIM7500/SIM7600 RNDIS mode */ .driver_info = RSVD(7) }, + { USB_DEVICE_INTERFACE_CLASS(0x1e0e, 0x9205, 0xff) }, /* Simcom SIM7070/SIM7080/SIM7090 AT+ECM mode */ + { USB_DEVICE_INTERFACE_CLASS(0x1e0e, 0x9206, 0xff) }, /* Simcom SIM7070/SIM7080/SIM7090 AT-only mode */ { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S_X200), .driver_info = NCTRL(0) | NCTRL(1) | RSVD(4) }, { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X220_X500D), @@ -2028,6 +2037,9 @@ static const struct usb_device_id option_ids[] = { .driver_info = RSVD(4) | RSVD(5) }, { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0105, 0xff), /* Fibocom NL678 series */ 
.driver_info = RSVD(6) }, + { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1404, 0xff) }, /* GosunCn GM500 RNDIS */ + { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1405, 0xff) }, /* GosunCn GM500 MBIM */ + { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1406, 0xff) }, /* GosunCn GM500 ECM/NCM */ { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, option_ids); diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c index d147feae83e6..0f60363c1bbc 100644 --- a/drivers/usb/serial/qcserial.c +++ b/drivers/usb/serial/qcserial.c @@ -155,6 +155,7 @@ static const struct usb_device_id id_table[] = { {DEVICE_SWI(0x1199, 0x9056)}, /* Sierra Wireless Modem */ {DEVICE_SWI(0x1199, 0x9060)}, /* Sierra Wireless Modem */ {DEVICE_SWI(0x1199, 0x9061)}, /* Sierra Wireless Modem */ + {DEVICE_SWI(0x1199, 0x9062)}, /* Sierra Wireless EM7305 QDL */ {DEVICE_SWI(0x1199, 0x9063)}, /* Sierra Wireless EM7305 */ {DEVICE_SWI(0x1199, 0x9070)}, /* Sierra Wireless MC74xx */ {DEVICE_SWI(0x1199, 0x9071)}, /* Sierra Wireless MC74xx */ diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c index 5fcad96e0656..1ec1baa7604e 100644 --- a/drivers/usb/storage/uas.c +++ b/drivers/usb/storage/uas.c @@ -662,8 +662,7 @@ static int uas_queuecommand_lck(struct scsi_cmnd *cmnd, if (devinfo->resetting) { cmnd->result = DID_ERROR << 16; cmnd->scsi_done(cmnd); - spin_unlock_irqrestore(&devinfo->lock, flags); - return 0; + goto zombie; } /* Find a free uas-tag */ @@ -699,6 +698,16 @@ static int uas_queuecommand_lck(struct scsi_cmnd *cmnd, cmdinfo->state &= ~(SUBMIT_DATA_IN_URB | SUBMIT_DATA_OUT_URB); err = uas_submit_urbs(cmnd, devinfo); + /* + * in case of fatal errors the SCSI layer is peculiar + * a command that has finished is a success for the purpose + * of queueing, no matter how fatal the error + */ + if (err == -ENODEV) { + cmnd->result = DID_ERROR << 16; + cmnd->scsi_done(cmnd); + goto zombie; + } if (err) { /* If we did nothing, give up now */ if (cmdinfo->state & SUBMIT_STATUS_URB) { @@ -709,6 +718,7 @@ static int uas_queuecommand_lck(struct scsi_cmnd *cmnd, } devinfo->cmnd[idx] = cmnd; +zombie: spin_unlock_irqrestore(&devinfo->lock, flags); return 0; } diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index f6c3681fa2e9..88275842219e 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h @@ -2328,7 +2328,7 @@ UNUSUAL_DEV( 0x357d, 0x7788, 0x0114, 0x0114, "JMicron", "USB to ATA/ATAPI Bridge", USB_SC_DEVICE, USB_PR_DEVICE, NULL, - US_FL_BROKEN_FUA ), + US_FL_BROKEN_FUA | US_FL_IGNORE_UAS ), /* Reported by Andrey Rahmatullin */ UNUSUAL_DEV( 0x4102, 0x1020, 0x0100, 0x0100, diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h index 37157ed9a881..dcdfcdfd2ad1 100644 --- a/drivers/usb/storage/unusual_uas.h +++ b/drivers/usb/storage/unusual_uas.h @@ -28,6 +28,13 @@ * and don't forget to CC: the USB development list */ +/* Reported-by: Till Dörges */ +UNUSUAL_DEV(0x054c, 0x087d, 0x0000, 0x9999, + "Sony", + "PSZ-HA*", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_NO_REPORT_OPCODES), + /* Reported-by: Julian Groß */ UNUSUAL_DEV(0x059f, 0x105f, 0x0000, 0x9999, "LaCie", @@ -80,6 +87,13 @@ UNUSUAL_DEV(0x152d, 0x0578, 0x0000, 0x9999, USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_BROKEN_FUA), +/* Reported-by: Thinh Nguyen */ +UNUSUAL_DEV(0x154b, 0xf00d, 0x0000, 0x9999, + "PNY", + "Pro Elite SSD", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_NO_ATA_1X), + /* Reported-by: Hans de Goede */ UNUSUAL_DEV(0x2109, 0x0711, 0x0000, 0x9999, "VIA", 
diff --git a/drivers/usb/typec/tcpm/tcpci_rt1711h.c b/drivers/usb/typec/tcpm/tcpci_rt1711h.c index 017389021b96..b56a0880a044 100644 --- a/drivers/usb/typec/tcpm/tcpci_rt1711h.c +++ b/drivers/usb/typec/tcpm/tcpci_rt1711h.c @@ -179,26 +179,6 @@ out: return tcpci_irq(chip->tcpci); } -static int rt1711h_init_alert(struct rt1711h_chip *chip, - struct i2c_client *client) -{ - int ret; - - /* Disable chip interrupts before requesting irq */ - ret = rt1711h_write16(chip, TCPC_ALERT_MASK, 0); - if (ret < 0) - return ret; - - ret = devm_request_threaded_irq(chip->dev, client->irq, NULL, - rt1711h_irq, - IRQF_ONESHOT | IRQF_TRIGGER_LOW, - dev_name(chip->dev), chip); - if (ret < 0) - return ret; - enable_irq_wake(client->irq); - return 0; -} - static int rt1711h_sw_reset(struct rt1711h_chip *chip) { int ret; @@ -260,7 +240,8 @@ static int rt1711h_probe(struct i2c_client *client, if (ret < 0) return ret; - ret = rt1711h_init_alert(chip, client); + /* Disable chip interrupts before requesting irq */ + ret = rt1711h_write16(chip, TCPC_ALERT_MASK, 0); if (ret < 0) return ret; @@ -271,6 +252,14 @@ static int rt1711h_probe(struct i2c_client *client, if (IS_ERR_OR_NULL(chip->tcpci)) return PTR_ERR(chip->tcpci); + ret = devm_request_threaded_irq(chip->dev, client->irq, NULL, + rt1711h_irq, + IRQF_ONESHOT | IRQF_TRIGGER_LOW, + dev_name(chip->dev), chip); + if (ret < 0) + return ret; + enable_irq_wake(client->irq); + return 0; } diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c index ba288b964dc8..4d56408ac623 100644 --- a/drivers/usb/typec/ucsi/ucsi.c +++ b/drivers/usb/typec/ucsi/ucsi.c @@ -246,14 +246,18 @@ void ucsi_altmode_update_active(struct ucsi_connector *con) con->partner_altmode[i] == altmode); } -static u8 ucsi_altmode_next_mode(struct typec_altmode **alt, u16 svid) +static int ucsi_altmode_next_mode(struct typec_altmode **alt, u16 svid) { u8 mode = 1; int i; - for (i = 0; alt[i]; i++) + for (i = 0; alt[i]; i++) { + if (i > MODE_DISCOVERY_MAX) + return -ERANGE; + if (alt[i]->svid == svid) mode++; + } return mode; } @@ -288,8 +292,11 @@ static int ucsi_register_altmode(struct ucsi_connector *con, goto err; } - desc->mode = ucsi_altmode_next_mode(con->port_altmode, - desc->svid); + ret = ucsi_altmode_next_mode(con->port_altmode, desc->svid); + if (ret < 0) + return ret; + + desc->mode = ret; switch (desc->svid) { case USB_TYPEC_DP_SID: @@ -315,8 +322,11 @@ static int ucsi_register_altmode(struct ucsi_connector *con, goto err; } - desc->mode = ucsi_altmode_next_mode(con->partner_altmode, - desc->svid); + ret = ucsi_altmode_next_mode(con->partner_altmode, desc->svid); + if (ret < 0) + return ret; + + desc->mode = ret; alt = typec_partner_register_altmode(con->partner, desc); if (IS_ERR(alt)) { diff --git a/drivers/usb/typec/ucsi/ucsi_acpi.c b/drivers/usb/typec/ucsi/ucsi_acpi.c index a18112a83fae..dda8bd39c918 100644 --- a/drivers/usb/typec/ucsi/ucsi_acpi.c +++ b/drivers/usb/typec/ucsi/ucsi_acpi.c @@ -64,11 +64,15 @@ static void ucsi_acpi_notify(acpi_handle handle, u32 event, void *data) static int ucsi_acpi_probe(struct platform_device *pdev) { + struct acpi_device *adev = ACPI_COMPANION(&pdev->dev); struct ucsi_acpi *ua; struct resource *res; acpi_status status; int ret; + if (adev->dep_unmet) + return -EPROBE_DEFER; + ua = devm_kzalloc(&pdev->dev, sizeof(*ua), GFP_KERNEL); if (!ua) return -ENOMEM; diff --git a/drivers/vfio/mdev/mdev_sysfs.c b/drivers/vfio/mdev/mdev_sysfs.c index 7570c7602ab4..f32c582611eb 100644 --- a/drivers/vfio/mdev/mdev_sysfs.c +++ 
b/drivers/vfio/mdev/mdev_sysfs.c @@ -110,7 +110,7 @@ static struct mdev_type *add_mdev_supported_type(struct mdev_parent *parent, "%s-%s", dev_driver_string(parent->dev), group->name); if (ret) { - kfree(type); + kobject_put(&type->kobj); return ERR_PTR(ret); } diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c index 02206162eaa9..0d16f9806655 100644 --- a/drivers/vfio/pci/vfio_pci.c +++ b/drivers/vfio/pci/vfio_pci.c @@ -27,6 +27,7 @@ #include #include #include +#include #include "vfio_pci_private.h" @@ -177,6 +178,7 @@ no_mmap: static void vfio_pci_try_bus_reset(struct vfio_pci_device *vdev); static void vfio_pci_disable(struct vfio_pci_device *vdev); +static int vfio_pci_try_zap_and_vma_lock_cb(struct pci_dev *pdev, void *data); /* * INTx masking requires the ability to disable INTx signaling via PCI_COMMAND @@ -688,6 +690,12 @@ int vfio_pci_register_dev_region(struct vfio_pci_device *vdev, return 0; } +struct vfio_devices { + struct vfio_device **devices; + int cur_index; + int max_index; +}; + static long vfio_pci_ioctl(void *device_data, unsigned int cmd, unsigned long arg) { @@ -761,7 +769,7 @@ static long vfio_pci_ioctl(void *device_data, { void __iomem *io; size_t size; - u16 orig_cmd; + u16 cmd; info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index); info.flags = 0; @@ -781,10 +789,7 @@ static long vfio_pci_ioctl(void *device_data, * Is it really there? Enable memory decode for * implicit access in pci_map_rom(). */ - pci_read_config_word(pdev, PCI_COMMAND, &orig_cmd); - pci_write_config_word(pdev, PCI_COMMAND, - orig_cmd | PCI_COMMAND_MEMORY); - + cmd = vfio_pci_memory_lock_and_enable(vdev); io = pci_map_rom(pdev, &size); if (io) { info.flags = VFIO_REGION_INFO_FLAG_READ; @@ -792,8 +797,8 @@ static long vfio_pci_ioctl(void *device_data, } else { info.size = 0; } + vfio_pci_memory_unlock_and_restore(vdev, cmd); - pci_write_config_word(pdev, PCI_COMMAND, orig_cmd); break; } case VFIO_PCI_VGA_REGION_INDEX: @@ -936,8 +941,16 @@ static long vfio_pci_ioctl(void *device_data, return ret; } else if (cmd == VFIO_DEVICE_RESET) { - return vdev->reset_works ? - pci_try_reset_function(vdev->pdev) : -EINVAL; + int ret; + + if (!vdev->reset_works) + return -EINVAL; + + vfio_pci_zap_and_down_write_memory_lock(vdev); + ret = pci_try_reset_function(vdev->pdev); + up_write(&vdev->memory_lock); + + return ret; } else if (cmd == VFIO_DEVICE_GET_PCI_HOT_RESET_INFO) { struct vfio_pci_hot_reset_info hdr; @@ -1017,8 +1030,9 @@ reset_info_exit: int32_t *group_fds; struct vfio_pci_group_entry *groups; struct vfio_pci_group_info info; + struct vfio_devices devs = { .cur_index = 0 }; bool slot = false; - int i, count = 0, ret = 0; + int i, group_idx, mem_idx = 0, count = 0, ret = 0; minsz = offsetofend(struct vfio_pci_hot_reset, count); @@ -1070,9 +1084,9 @@ reset_info_exit: * user interface and store the group and iommu ID. This * ensures the group is held across the reset. 
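The hot-reset path around this hunk follows an all-or-nothing shape: every affected device must be claimed (group reference, then vma_lock, then memory_lock) before the bus reset is attempted, and anything claimed so far is released on failure. A generic, self-contained sketch of that try-all-or-unwind pattern, not the driver code itself (build with -pthread):

        #include <pthread.h>
        #include <stdio.h>

        #define NDEV 4

        static pthread_mutex_t dev_lock[NDEV] = {
                PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
                PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
        };

        /* Claim every device or none: unwind the ones already taken on failure. */
        static int claim_all(void)
        {
                int i, j;

                for (i = 0; i < NDEV; i++) {
                        if (pthread_mutex_trylock(&dev_lock[i])) {
                                for (j = i - 1; j >= 0; j--)
                                        pthread_mutex_unlock(&dev_lock[j]);
                                return -1; /* contention: caller retries or fails */
                        }
                }
                return 0;
        }

        static void release_all(void)
        {
                int i;

                for (i = NDEV - 1; i >= 0; i--)
                        pthread_mutex_unlock(&dev_lock[i]);
        }

        int main(void)
        {
                if (claim_all() == 0) {
                        printf("all devices claimed, reset would run here\n");
                        release_all();
                }
                return 0;
        }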
*/ - for (i = 0; i < hdr.count; i++) { + for (group_idx = 0; group_idx < hdr.count; group_idx++) { struct vfio_group *group; - struct fd f = fdget(group_fds[i]); + struct fd f = fdget(group_fds[group_idx]); if (!f.file) { ret = -EBADF; break; @@ -1085,8 +1099,9 @@ reset_info_exit: break; } - groups[i].group = group; - groups[i].id = vfio_external_user_iommu_id(group); + groups[group_idx].group = group; + groups[group_idx].id = + vfio_external_user_iommu_id(group); } kfree(group_fds); @@ -1105,13 +1120,63 @@ reset_info_exit: ret = vfio_pci_for_each_slot_or_bus(vdev->pdev, vfio_pci_validate_devs, &info, slot); - if (!ret) - /* User has access, do the reset */ - ret = pci_reset_bus(vdev->pdev); + if (ret) + goto hot_reset_release; + + devs.max_index = count; + devs.devices = kcalloc(count, sizeof(struct vfio_device *), + GFP_KERNEL); + if (!devs.devices) { + ret = -ENOMEM; + goto hot_reset_release; + } + + /* + * We need to get memory_lock for each device, but devices + * can share mmap_sem, therefore we need to zap and hold + * the vma_lock for each device, and only then get each + * memory_lock. + */ + ret = vfio_pci_for_each_slot_or_bus(vdev->pdev, + vfio_pci_try_zap_and_vma_lock_cb, + &devs, slot); + if (ret) + goto hot_reset_release; + + for (; mem_idx < devs.cur_index; mem_idx++) { + struct vfio_pci_device *tmp; + + tmp = vfio_device_data(devs.devices[mem_idx]); + + ret = down_write_trylock(&tmp->memory_lock); + if (!ret) { + ret = -EBUSY; + goto hot_reset_release; + } + mutex_unlock(&tmp->vma_lock); + } + + /* User has access, do the reset */ + ret = pci_reset_bus(vdev->pdev); hot_reset_release: - for (i--; i >= 0; i--) - vfio_group_put_external_user(groups[i].group); + for (i = 0; i < devs.cur_index; i++) { + struct vfio_device *device; + struct vfio_pci_device *tmp; + + device = devs.devices[i]; + tmp = vfio_device_data(device); + + if (i < mem_idx) + up_write(&tmp->memory_lock); + else + mutex_unlock(&tmp->vma_lock); + vfio_device_put(device); + } + kfree(devs.devices); + + for (group_idx--; group_idx >= 0; group_idx--) + vfio_group_put_external_user(groups[group_idx].group); kfree(groups); return ret; @@ -1192,6 +1257,202 @@ static ssize_t vfio_pci_write(void *device_data, const char __user *buf, return vfio_pci_rw(device_data, (char __user *)buf, count, ppos, true); } +/* Return 1 on zap and vma_lock acquired, 0 on contention (only with @try) */ +static int vfio_pci_zap_and_vma_lock(struct vfio_pci_device *vdev, bool try) +{ + struct vfio_pci_mmap_vma *mmap_vma, *tmp; + + /* + * Lock ordering: + * vma_lock is nested under mmap_sem for vm_ops callback paths. + * The memory_lock semaphore is used by both code paths calling + * into this function to zap vmas and the vm_ops.fault callback + * to protect the memory enable state of the device. + * + * When zapping vmas we need to maintain the mmap_sem => vma_lock + * ordering, which requires using vma_lock to walk vma_list to + * acquire an mm, then dropping vma_lock to get the mmap_sem and + * reacquiring vma_lock. This logic is derived from similar + * requirements in uverbs_user_mmap_disassociate(). + * + * mmap_sem must always be the top-level lock when it is taken. + * Therefore we can only hold the memory_lock write lock when + * vma_list is empty, as we'd need to take mmap_sem to clear + * entries. vma_list can only be guaranteed empty when holding + * vma_lock, thus memory_lock is nested under vma_lock. 
+ * + * This enables the vm_ops.fault callback to acquire vma_lock, + * followed by memory_lock read lock, while already holding + * mmap_sem without risk of deadlock. + */ + while (1) { + struct mm_struct *mm = NULL; + + if (try) { + if (!mutex_trylock(&vdev->vma_lock)) + return 0; + } else { + mutex_lock(&vdev->vma_lock); + } + while (!list_empty(&vdev->vma_list)) { + mmap_vma = list_first_entry(&vdev->vma_list, + struct vfio_pci_mmap_vma, + vma_next); + mm = mmap_vma->vma->vm_mm; + if (mmget_not_zero(mm)) + break; + + list_del(&mmap_vma->vma_next); + kfree(mmap_vma); + mm = NULL; + } + if (!mm) + return 1; + mutex_unlock(&vdev->vma_lock); + + if (try) { + if (!down_read_trylock(&mm->mmap_sem)) { + mmput(mm); + return 0; + } + } else { + down_read(&mm->mmap_sem); + } + if (mmget_still_valid(mm)) { + if (try) { + if (!mutex_trylock(&vdev->vma_lock)) { + up_read(&mm->mmap_sem); + mmput(mm); + return 0; + } + } else { + mutex_lock(&vdev->vma_lock); + } + list_for_each_entry_safe(mmap_vma, tmp, + &vdev->vma_list, vma_next) { + struct vm_area_struct *vma = mmap_vma->vma; + + if (vma->vm_mm != mm) + continue; + + list_del(&mmap_vma->vma_next); + kfree(mmap_vma); + + zap_vma_ptes(vma, vma->vm_start, + vma->vm_end - vma->vm_start); + } + mutex_unlock(&vdev->vma_lock); + } + up_read(&mm->mmap_sem); + mmput(mm); + } +} + +void vfio_pci_zap_and_down_write_memory_lock(struct vfio_pci_device *vdev) +{ + vfio_pci_zap_and_vma_lock(vdev, false); + down_write(&vdev->memory_lock); + mutex_unlock(&vdev->vma_lock); +} + +u16 vfio_pci_memory_lock_and_enable(struct vfio_pci_device *vdev) +{ + u16 cmd; + + down_write(&vdev->memory_lock); + pci_read_config_word(vdev->pdev, PCI_COMMAND, &cmd); + if (!(cmd & PCI_COMMAND_MEMORY)) + pci_write_config_word(vdev->pdev, PCI_COMMAND, + cmd | PCI_COMMAND_MEMORY); + + return cmd; +} + +void vfio_pci_memory_unlock_and_restore(struct vfio_pci_device *vdev, u16 cmd) +{ + pci_write_config_word(vdev->pdev, PCI_COMMAND, cmd); + up_write(&vdev->memory_lock); +} + +/* Caller holds vma_lock */ +static int __vfio_pci_add_vma(struct vfio_pci_device *vdev, + struct vm_area_struct *vma) +{ + struct vfio_pci_mmap_vma *mmap_vma; + + mmap_vma = kmalloc(sizeof(*mmap_vma), GFP_KERNEL); + if (!mmap_vma) + return -ENOMEM; + + mmap_vma->vma = vma; + list_add(&mmap_vma->vma_next, &vdev->vma_list); + + return 0; +} + +/* + * Zap mmaps on open so that we can fault them in on access and therefore + * our vma_list only tracks mappings accessed since last zap. 
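The vfio_pci_memory_lock_and_enable() helper above returns the original COMMAND word so that the matching unlock call can restore exactly what was there before. A small standalone illustration of that save, conditionally-set, restore pattern on a fake 16-bit command register; only the PCI_COMMAND_MEMORY bit value (0x0002) is taken from the PCI spec, everything else is made up:

        #include <stdio.h>
        #include <stdint.h>

        #define PCI_COMMAND_MEMORY 0x0002 /* memory space enable */

        static uint16_t fake_command = 0x0004; /* bus master on, memory off */

        /* Save the current value and make sure memory decode is on. */
        static uint16_t lock_and_enable(void)
        {
                uint16_t cmd = fake_command;

                if (!(cmd & PCI_COMMAND_MEMORY))
                        fake_command = cmd | PCI_COMMAND_MEMORY;

                return cmd;     /* caller restores this verbatim */
        }

        static void unlock_and_restore(uint16_t cmd)
        {
                fake_command = cmd;
        }

        int main(void)
        {
                uint16_t saved = lock_and_enable();

                printf("during: 0x%04x\n", fake_command); /* 0x0006 */
                unlock_and_restore(saved);
                printf("after : 0x%04x\n", fake_command); /* 0x0004 */
                return 0;
        }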
+ */ +static void vfio_pci_mmap_open(struct vm_area_struct *vma) +{ + zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start); +} + +static void vfio_pci_mmap_close(struct vm_area_struct *vma) +{ + struct vfio_pci_device *vdev = vma->vm_private_data; + struct vfio_pci_mmap_vma *mmap_vma; + + mutex_lock(&vdev->vma_lock); + list_for_each_entry(mmap_vma, &vdev->vma_list, vma_next) { + if (mmap_vma->vma == vma) { + list_del(&mmap_vma->vma_next); + kfree(mmap_vma); + break; + } + } + mutex_unlock(&vdev->vma_lock); +} + +static vm_fault_t vfio_pci_mmap_fault(struct vm_fault *vmf) +{ + struct vm_area_struct *vma = vmf->vma; + struct vfio_pci_device *vdev = vma->vm_private_data; + vm_fault_t ret = VM_FAULT_NOPAGE; + + mutex_lock(&vdev->vma_lock); + down_read(&vdev->memory_lock); + + if (!__vfio_pci_memory_enabled(vdev)) { + ret = VM_FAULT_SIGBUS; + mutex_unlock(&vdev->vma_lock); + goto up_out; + } + + if (__vfio_pci_add_vma(vdev, vma)) { + ret = VM_FAULT_OOM; + mutex_unlock(&vdev->vma_lock); + goto up_out; + } + + mutex_unlock(&vdev->vma_lock); + + if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, + vma->vm_end - vma->vm_start, vma->vm_page_prot)) + ret = VM_FAULT_SIGBUS; + +up_out: + up_read(&vdev->memory_lock); + return ret; +} + +static const struct vm_operations_struct vfio_pci_mmap_ops = { + .open = vfio_pci_mmap_open, + .close = vfio_pci_mmap_close, + .fault = vfio_pci_mmap_fault, +}; + static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma) { struct vfio_pci_device *vdev = device_data; @@ -1250,8 +1511,14 @@ static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma) vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); vma->vm_pgoff = (pci_resource_start(pdev, index) >> PAGE_SHIFT) + pgoff; - return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, - req_len, vma->vm_page_prot); + /* + * See remap_pfn_range(), called from vfio_pci_fault() but we can't + * change vm_flags within the fault handler. Set them now. 
+ */ + vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP; + vma->vm_ops = &vfio_pci_mmap_ops; + + return 0; } static void vfio_pci_request(void *device_data, unsigned int count) @@ -1327,6 +1594,9 @@ static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) spin_lock_init(&vdev->irqlock); mutex_init(&vdev->ioeventfds_lock); INIT_LIST_HEAD(&vdev->ioeventfds_list); + mutex_init(&vdev->vma_lock); + INIT_LIST_HEAD(&vdev->vma_list); + init_rwsem(&vdev->memory_lock); ret = vfio_add_group_dev(&pdev->dev, &vfio_pci_ops, vdev); if (ret) { @@ -1516,12 +1786,6 @@ static void vfio_pci_reflck_put(struct vfio_pci_reflck *reflck) kref_put_mutex(&reflck->kref, vfio_pci_reflck_release, &reflck_lock); } -struct vfio_devices { - struct vfio_device **devices; - int cur_index; - int max_index; -}; - static int vfio_pci_get_unused_devs(struct pci_dev *pdev, void *data) { struct vfio_devices *devs = data; @@ -1552,6 +1816,39 @@ static int vfio_pci_get_unused_devs(struct pci_dev *pdev, void *data) return 0; } +static int vfio_pci_try_zap_and_vma_lock_cb(struct pci_dev *pdev, void *data) +{ + struct vfio_devices *devs = data; + struct vfio_device *device; + struct vfio_pci_device *vdev; + + if (devs->cur_index == devs->max_index) + return -ENOSPC; + + device = vfio_device_get_from_dev(&pdev->dev); + if (!device) + return -EINVAL; + + if (pci_dev_driver(pdev) != &vfio_pci_driver) { + vfio_device_put(device); + return -EBUSY; + } + + vdev = vfio_device_data(device); + + /* + * Locking multiple devices is prone to deadlock, runaway and + * unwind if we hit contention. + */ + if (!vfio_pci_zap_and_vma_lock(vdev, true)) { + vfio_device_put(device); + return -EBUSY; + } + + devs->devices[devs->cur_index++] = device; + return 0; +} + /* * If a bus or slot reset is available for the provided device and: * - All of the devices affected by that bus or slot reset are unused diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c index f0891bd8444c..927b608461c8 100644 --- a/drivers/vfio/pci/vfio_pci_config.c +++ b/drivers/vfio/pci/vfio_pci_config.c @@ -395,6 +395,20 @@ static inline void p_setd(struct perm_bits *p, int off, u32 virt, u32 write) *(__le32 *)(&p->write[off]) = cpu_to_le32(write); } +/* Caller should hold memory_lock semaphore */ +bool __vfio_pci_memory_enabled(struct vfio_pci_device *vdev) +{ + struct pci_dev *pdev = vdev->pdev; + u16 cmd = le16_to_cpu(*(__le16 *)&vdev->vconfig[PCI_COMMAND]); + + /* + * SR-IOV VF memory enable is handled by the MSE bit in the + * PF SR-IOV capability, there's therefore no need to trigger + * faults based on the virtual value. + */ + return pdev->is_virtfn || (cmd & PCI_COMMAND_MEMORY); +} + /* * Restore the *real* BARs after we detect a FLR or backdoor reset. 
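For the __vfio_pci_memory_enabled() helper introduced above: the COMMAND register is cached in config space in little-endian byte order, so the check amounts to a byte-order-aware 16-bit read plus a single-bit test, with SR-IOV VFs always reporting enabled. A hedged userspace sketch of that logic on a fake config-space buffer:

        #include <stdio.h>
        #include <stdint.h>
        #include <stdbool.h>

        #define PCI_COMMAND             0x04    /* offset of the COMMAND register */
        #define PCI_COMMAND_MEMORY      0x0002

        /* vconfig[] mimics a cached little-endian copy of config space. */
        static bool memory_enabled(const uint8_t *vconfig, bool is_virtfn)
        {
                uint16_t cmd = vconfig[PCI_COMMAND] |
                               (uint16_t)vconfig[PCI_COMMAND + 1] << 8;

                /* VF memory enable is controlled by the PF's SR-IOV MSE bit. */
                return is_virtfn || (cmd & PCI_COMMAND_MEMORY);
        }

        int main(void)
        {
                uint8_t vconfig[64] = { 0 };

                vconfig[PCI_COMMAND] = 0x06;    /* memory + bus master */
                printf("PF, MEM set:   %d\n", memory_enabled(vconfig, false));

                vconfig[PCI_COMMAND] = 0x04;    /* bus master only */
                printf("PF, MEM clear: %d\n", memory_enabled(vconfig, false));
                printf("VF, MEM clear: %d\n", memory_enabled(vconfig, true));
                return 0;
        }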
* (backdoor = some device specific technique that we didn't catch) @@ -554,13 +568,18 @@ static int vfio_basic_config_write(struct vfio_pci_device *vdev, int pos, new_cmd = le32_to_cpu(val); + phys_io = !!(phys_cmd & PCI_COMMAND_IO); + virt_io = !!(le16_to_cpu(*virt_cmd) & PCI_COMMAND_IO); + new_io = !!(new_cmd & PCI_COMMAND_IO); + phys_mem = !!(phys_cmd & PCI_COMMAND_MEMORY); virt_mem = !!(le16_to_cpu(*virt_cmd) & PCI_COMMAND_MEMORY); new_mem = !!(new_cmd & PCI_COMMAND_MEMORY); - phys_io = !!(phys_cmd & PCI_COMMAND_IO); - virt_io = !!(le16_to_cpu(*virt_cmd) & PCI_COMMAND_IO); - new_io = !!(new_cmd & PCI_COMMAND_IO); + if (!new_mem) + vfio_pci_zap_and_down_write_memory_lock(vdev); + else + down_write(&vdev->memory_lock); /* * If the user is writing mem/io enable (new_mem/io) and we @@ -577,8 +596,11 @@ static int vfio_basic_config_write(struct vfio_pci_device *vdev, int pos, } count = vfio_default_config_write(vdev, pos, count, perm, offset, val); - if (count < 0) + if (count < 0) { + if (offset == PCI_COMMAND) + up_write(&vdev->memory_lock); return count; + } /* * Save current memory/io enable bits in vconfig to allow for @@ -589,6 +611,8 @@ static int vfio_basic_config_write(struct vfio_pci_device *vdev, int pos, *virt_cmd &= cpu_to_le16(~mask); *virt_cmd |= cpu_to_le16(new_cmd & mask); + + up_write(&vdev->memory_lock); } /* Emulate INTx disable */ @@ -826,8 +850,11 @@ static int vfio_exp_config_write(struct vfio_pci_device *vdev, int pos, pos - offset + PCI_EXP_DEVCAP, &cap); - if (!ret && (cap & PCI_EXP_DEVCAP_FLR)) + if (!ret && (cap & PCI_EXP_DEVCAP_FLR)) { + vfio_pci_zap_and_down_write_memory_lock(vdev); pci_try_reset_function(vdev->pdev); + up_write(&vdev->memory_lock); + } } /* @@ -905,8 +932,11 @@ static int vfio_af_config_write(struct vfio_pci_device *vdev, int pos, pos - offset + PCI_AF_CAP, &cap); - if (!ret && (cap & PCI_AF_CAP_FLR) && (cap & PCI_AF_CAP_TP)) + if (!ret && (cap & PCI_AF_CAP_FLR) && (cap & PCI_AF_CAP_TP)) { + vfio_pci_zap_and_down_write_memory_lock(vdev); pci_try_reset_function(vdev->pdev); + up_write(&vdev->memory_lock); + } } return count; @@ -1460,7 +1490,12 @@ static int vfio_cap_init(struct vfio_pci_device *vdev) if (ret) return ret; - if (cap <= PCI_CAP_ID_MAX) { + /* + * ID 0 is a NULL capability, conflicting with our fake + * PCI_CAP_ID_BASIC. As it has no content, consider it + * hidden for now. + */ + if (cap && cap <= PCI_CAP_ID_MAX) { len = pci_cap_length[cap]; if (len == 0xFF) { /* Variable length */ len = vfio_cap_len(vdev, cap, pos); @@ -1697,6 +1732,15 @@ int vfio_config_init(struct vfio_pci_device *vdev) vconfig[PCI_INTERRUPT_PIN]); vconfig[PCI_INTERRUPT_PIN] = 0; /* Gratuitous for good VFs */ + + /* + * VFs do no implement the memory enable bit of the COMMAND + * register therefore we'll not have it set in our initial + * copy of config space after pci_enable_device(). For + * consistency with PFs, set the virtual enable bit here. 
+ */ + *(__le16 *)&vconfig[PCI_COMMAND] |= + cpu_to_le16(PCI_COMMAND_MEMORY); } if (!IS_ENABLED(CONFIG_VFIO_PCI_INTX) || vdev->nointx) @@ -1726,8 +1770,11 @@ void vfio_config_free(struct vfio_pci_device *vdev) vdev->vconfig = NULL; kfree(vdev->pci_config_map); vdev->pci_config_map = NULL; - kfree(vdev->msi_perm); - vdev->msi_perm = NULL; + if (vdev->msi_perm) { + free_perm_bits(vdev->msi_perm); + kfree(vdev->msi_perm); + vdev->msi_perm = NULL; + } } /* diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c index 2056f3f85f59..1d9fb2592945 100644 --- a/drivers/vfio/pci/vfio_pci_intrs.c +++ b/drivers/vfio/pci/vfio_pci_intrs.c @@ -249,6 +249,7 @@ static int vfio_msi_enable(struct vfio_pci_device *vdev, int nvec, bool msix) struct pci_dev *pdev = vdev->pdev; unsigned int flag = msix ? PCI_IRQ_MSIX : PCI_IRQ_MSI; int ret; + u16 cmd; if (!is_irq_none(vdev)) return -EINVAL; @@ -258,13 +259,16 @@ static int vfio_msi_enable(struct vfio_pci_device *vdev, int nvec, bool msix) return -ENOMEM; /* return the number of supported vectors if we can't get all: */ + cmd = vfio_pci_memory_lock_and_enable(vdev); ret = pci_alloc_irq_vectors(pdev, 1, nvec, flag); if (ret < nvec) { if (ret > 0) pci_free_irq_vectors(pdev); + vfio_pci_memory_unlock_and_restore(vdev, cmd); kfree(vdev->ctx); return ret; } + vfio_pci_memory_unlock_and_restore(vdev, cmd); vdev->num_ctx = nvec; vdev->irq_type = msix ? VFIO_PCI_MSIX_IRQ_INDEX : @@ -287,6 +291,7 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev, struct pci_dev *pdev = vdev->pdev; struct eventfd_ctx *trigger; int irq, ret; + u16 cmd; if (vector < 0 || vector >= vdev->num_ctx) return -EINVAL; @@ -295,7 +300,11 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev, if (vdev->ctx[vector].trigger) { irq_bypass_unregister_producer(&vdev->ctx[vector].producer); + + cmd = vfio_pci_memory_lock_and_enable(vdev); free_irq(irq, vdev->ctx[vector].trigger); + vfio_pci_memory_unlock_and_restore(vdev, cmd); + kfree(vdev->ctx[vector].name); eventfd_ctx_put(vdev->ctx[vector].trigger); vdev->ctx[vector].trigger = NULL; @@ -323,6 +332,7 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev, * such a reset it would be unsuccessful. To avoid this, restore the * cached value of the message prior to enabling. 
*/ + cmd = vfio_pci_memory_lock_and_enable(vdev); if (msix) { struct msi_msg msg; @@ -332,6 +342,7 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev, ret = request_irq(irq, vfio_msihandler, 0, vdev->ctx[vector].name, trigger); + vfio_pci_memory_unlock_and_restore(vdev, cmd); if (ret) { kfree(vdev->ctx[vector].name); eventfd_ctx_put(trigger); @@ -376,6 +387,7 @@ static void vfio_msi_disable(struct vfio_pci_device *vdev, bool msix) { struct pci_dev *pdev = vdev->pdev; int i; + u16 cmd; for (i = 0; i < vdev->num_ctx; i++) { vfio_virqfd_disable(&vdev->ctx[i].unmask); @@ -384,7 +396,9 @@ static void vfio_msi_disable(struct vfio_pci_device *vdev, bool msix) vfio_msi_set_block(vdev, 0, vdev->num_ctx, NULL, msix); + cmd = vfio_pci_memory_lock_and_enable(vdev); pci_free_irq_vectors(pdev); + vfio_pci_memory_unlock_and_restore(vdev, cmd); /* * Both disable paths above use pci_intx_for_msi() to clear DisINTx diff --git a/drivers/vfio/pci/vfio_pci_private.h b/drivers/vfio/pci/vfio_pci_private.h index ee6ee91718a4..987b4d311fde 100644 --- a/drivers/vfio/pci/vfio_pci_private.h +++ b/drivers/vfio/pci/vfio_pci_private.h @@ -84,6 +84,11 @@ struct vfio_pci_reflck { struct mutex lock; }; +struct vfio_pci_mmap_vma { + struct vm_area_struct *vma; + struct list_head vma_next; +}; + struct vfio_pci_device { struct pci_dev *pdev; void __iomem *barmap[PCI_STD_RESOURCE_END + 1]; @@ -122,6 +127,9 @@ struct vfio_pci_device { struct list_head dummy_resources_list; struct mutex ioeventfds_lock; struct list_head ioeventfds_list; + struct mutex vma_lock; + struct list_head vma_list; + struct rw_semaphore memory_lock; }; #define is_intx(vdev) (vdev->irq_type == VFIO_PCI_INTX_IRQ_INDEX) @@ -164,6 +172,13 @@ extern int vfio_pci_register_dev_region(struct vfio_pci_device *vdev, extern int vfio_pci_set_power_state(struct vfio_pci_device *vdev, pci_power_t state); +extern bool __vfio_pci_memory_enabled(struct vfio_pci_device *vdev); +extern void vfio_pci_zap_and_down_write_memory_lock(struct vfio_pci_device + *vdev); +extern u16 vfio_pci_memory_lock_and_enable(struct vfio_pci_device *vdev); +extern void vfio_pci_memory_unlock_and_restore(struct vfio_pci_device *vdev, + u16 cmd); + #ifdef CONFIG_VFIO_PCI_IGD extern int vfio_pci_igd_init(struct vfio_pci_device *vdev); #else diff --git a/drivers/vfio/pci/vfio_pci_rdwr.c b/drivers/vfio/pci/vfio_pci_rdwr.c index 0120d8324a40..83f81d24df78 100644 --- a/drivers/vfio/pci/vfio_pci_rdwr.c +++ b/drivers/vfio/pci/vfio_pci_rdwr.c @@ -162,6 +162,7 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf, size_t x_start = 0, x_end = 0; resource_size_t end; void __iomem *io; + struct resource *res = &vdev->pdev->resource[bar]; ssize_t done; if (pci_resource_start(pdev, bar)) @@ -177,6 +178,14 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf, count = min(count, (size_t)(end - pos)); + if (res->flags & IORESOURCE_MEM) { + down_read(&vdev->memory_lock); + if (!__vfio_pci_memory_enabled(vdev)) { + up_read(&vdev->memory_lock); + return -EIO; + } + } + if (bar == PCI_ROM_RESOURCE) { /* * The ROM can fill less space than the BAR, so we start the @@ -184,13 +193,17 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf, * filling large ROM BARs much faster. 
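The vfio_pci_bar_rw() changes around this point gate MMIO access on a reader lock plus an enabled check, returning -EIO when decode is off, and route the error paths through a single label that drops the lock. A simplified, generic sketch of that shape using a pthread rwlock; the names are illustrative only and the MEM-resource condition is omitted (build with -pthread):

        #include <errno.h>
        #include <pthread.h>
        #include <stdbool.h>
        #include <stdio.h>

        static pthread_rwlock_t memory_lock = PTHREAD_RWLOCK_INITIALIZER;
        static bool memory_enabled = true;

        static long bar_read(char *buf, long count)
        {
                long done;

                pthread_rwlock_rdlock(&memory_lock);
                if (!memory_enabled) {
                        done = -EIO;    /* decode is off: no MMIO access */
                        goto out;
                }

                for (done = 0; done < count; done++)
                        buf[done] = 0;  /* stand-in for the real MMIO copy */
        out:
                pthread_rwlock_unlock(&memory_lock);
                return done;
        }

        int main(void)
        {
                char buf[16];

                printf("read: %ld\n", bar_read(buf, sizeof(buf)));
                memory_enabled = false;
                printf("read: %ld\n", bar_read(buf, sizeof(buf)));
                return 0;
        }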
*/ io = pci_map_rom(pdev, &x_start); - if (!io) - return -ENOMEM; + if (!io) { + done = -ENOMEM; + goto out; + } x_end = end; } else { int ret = vfio_pci_setup_barmap(vdev, bar); - if (ret) - return ret; + if (ret) { + done = ret; + goto out; + } io = vdev->barmap[bar]; } @@ -207,6 +220,9 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf, if (bar == PCI_ROM_RESOURCE) pci_unmap_rom(pdev, io); +out: + if (res->flags & IORESOURCE_MEM) + up_read(&vdev->memory_lock); return done; } diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c index 6cc47af1f06d..c6220f57fdf3 100644 --- a/drivers/vfio/vfio_iommu_type1.c +++ b/drivers/vfio/vfio_iommu_type1.c @@ -335,6 +335,32 @@ static int put_pfn(unsigned long pfn, int prot) return 0; } +static int follow_fault_pfn(struct vm_area_struct *vma, struct mm_struct *mm, + unsigned long vaddr, unsigned long *pfn, + bool write_fault) +{ + int ret; + + ret = follow_pfn(vma, vaddr, pfn); + if (ret) { + bool unlocked = false; + + ret = fixup_user_fault(NULL, mm, vaddr, + FAULT_FLAG_REMOTE | + (write_fault ? FAULT_FLAG_WRITE : 0), + &unlocked); + if (unlocked) + return -EAGAIN; + + if (ret) + return ret; + + ret = follow_pfn(vma, vaddr, pfn); + } + + return ret; +} + static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr, int prot, unsigned long *pfn) { @@ -377,12 +403,16 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr, vaddr = untagged_addr(vaddr); +retry: vma = find_vma_intersection(mm, vaddr, vaddr + 1); if (vma && vma->vm_flags & VM_PFNMAP) { - if (!follow_pfn(vma, vaddr, pfn) && - is_invalid_reserved_pfn(*pfn)) - ret = 0; + ret = follow_fault_pfn(vma, mm, vaddr, pfn, prot & IOMMU_WRITE); + if (ret == -EAGAIN) + goto retry; + + if (!ret && !is_invalid_reserved_pfn(*pfn)) + ret = -EFAULT; } up_read(&mm->mmap_sem); @@ -1187,13 +1217,16 @@ static int vfio_bus_type(struct device *dev, void *data) static int vfio_iommu_replay(struct vfio_iommu *iommu, struct vfio_domain *domain) { - struct vfio_domain *d; + struct vfio_domain *d = NULL; struct rb_node *n; unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; int ret; /* Arbitrarily pick the first domain in the list for lookups */ - d = list_first_entry(&iommu->domain_list, struct vfio_domain, next); + if (!list_empty(&iommu->domain_list)) + d = list_first_entry(&iommu->domain_list, + struct vfio_domain, next); + n = rb_first(&iommu->dma_list); for (; n; n = rb_next(n)) { @@ -1211,6 +1244,11 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu, phys_addr_t p; dma_addr_t i; + if (WARN_ON(!d)) { /* mapped w/o a domain?! 
*/ + ret = -EINVAL; + goto unwind; + } + phys = iommu_iova_to_phys(d->domain, iova); if (WARN_ON(!phys)) { @@ -1240,7 +1278,7 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu, if (npage <= 0) { WARN_ON(!npage); ret = (int)npage; - return ret; + goto unwind; } phys = pfn << PAGE_SHIFT; @@ -1249,14 +1287,67 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu, ret = iommu_map(domain->domain, iova, phys, size, dma->prot | domain->prot); - if (ret) - return ret; + if (ret) { + if (!dma->iommu_mapped) + vfio_unpin_pages_remote(dma, iova, + phys >> PAGE_SHIFT, + size >> PAGE_SHIFT, + true); + goto unwind; + } iova += size; } + } + + /* All dmas are now mapped, defer to second tree walk for unwind */ + for (n = rb_first(&iommu->dma_list); n; n = rb_next(n)) { + struct vfio_dma *dma = rb_entry(n, struct vfio_dma, node); + dma->iommu_mapped = true; } + return 0; + +unwind: + for (; n; n = rb_prev(n)) { + struct vfio_dma *dma = rb_entry(n, struct vfio_dma, node); + dma_addr_t iova; + + if (dma->iommu_mapped) { + iommu_unmap(domain->domain, dma->iova, dma->size); + continue; + } + + iova = dma->iova; + while (iova < dma->iova + dma->size) { + phys_addr_t phys, p; + size_t size; + dma_addr_t i; + + phys = iommu_iova_to_phys(domain->domain, iova); + if (!phys) { + iova += PAGE_SIZE; + continue; + } + + size = PAGE_SIZE; + p = phys + size; + i = iova + size; + while (i < dma->iova + dma->size && + p == iommu_iova_to_phys(domain->domain, i)) { + size += PAGE_SIZE; + p += PAGE_SIZE; + i += PAGE_SIZE; + } + + iommu_unmap(domain->domain, iova, size); + vfio_unpin_pages_remote(dma, iova, phys >> PAGE_SHIFT, + size >> PAGE_SHIFT, true); + } + } + + return ret; } /* diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c index a9caf1bc3c3e..f63f84a25725 100644 --- a/drivers/vhost/scsi.c +++ b/drivers/vhost/scsi.c @@ -1215,7 +1215,7 @@ vhost_scsi_ctl_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) continue; } - switch (v_req.type) { + switch (vhost32_to_cpu(vq, v_req.type)) { case VIRTIO_SCSI_T_TMF: vc.req = &v_req.tmf; vc.req_size = sizeof(struct virtio_scsi_ctrl_tmf_req); @@ -2290,6 +2290,7 @@ static struct configfs_attribute *vhost_scsi_wwn_attrs[] = { static const struct target_core_fabric_ops vhost_scsi_ops = { .module = THIS_MODULE, .fabric_name = "vhost", + .max_data_sg_nents = VHOST_SCSI_PREALLOC_SGLS, .tpg_get_wwn = vhost_scsi_get_fabric_wwn, .tpg_get_tag = vhost_scsi_get_tpgt, .tpg_check_demo_mode = vhost_scsi_check_true, diff --git a/drivers/video/backlight/lp855x_bl.c b/drivers/video/backlight/lp855x_bl.c index f68920131a4a..e94932c69f54 100644 --- a/drivers/video/backlight/lp855x_bl.c +++ b/drivers/video/backlight/lp855x_bl.c @@ -456,7 +456,7 @@ static int lp855x_probe(struct i2c_client *cl, const struct i2c_device_id *id) ret = regulator_enable(lp->enable); if (ret < 0) { dev_err(lp->dev, "failed to enable vddio: %d\n", ret); - return ret; + goto disable_supply; } /* @@ -471,24 +471,34 @@ static int lp855x_probe(struct i2c_client *cl, const struct i2c_device_id *id) ret = lp855x_configure(lp); if (ret) { dev_err(lp->dev, "device config err: %d", ret); - return ret; + goto disable_vddio; } ret = lp855x_backlight_register(lp); if (ret) { dev_err(lp->dev, "failed to register backlight. err: %d\n", ret); - return ret; + goto disable_vddio; } ret = sysfs_create_group(&lp->dev->kobj, &lp855x_attr_group); if (ret) { dev_err(lp->dev, "failed to register sysfs. 
err: %d\n", ret); - return ret; + goto disable_vddio; } backlight_update_status(lp->bl); + return 0; + +disable_vddio: + if (lp->enable) + regulator_disable(lp->enable); +disable_supply: + if (lp->supply) + regulator_disable(lp->supply); + + return ret; } static int lp855x_remove(struct i2c_client *cl) @@ -497,6 +507,8 @@ static int lp855x_remove(struct i2c_client *cl) lp->bl->props.brightness = 0; backlight_update_status(lp->bl); + if (lp->enable) + regulator_disable(lp->enable); if (lp->supply) regulator_disable(lp->supply); sysfs_remove_group(&lp->dev->kobj, &lp855x_attr_group); diff --git a/drivers/video/console/Kconfig b/drivers/video/console/Kconfig index c10e17fb9a9a..3b432a18b5ab 100644 --- a/drivers/video/console/Kconfig +++ b/drivers/video/console/Kconfig @@ -22,52 +22,6 @@ config VGA_CONSOLE Say Y. -config VGACON_SOFT_SCROLLBACK - bool "Enable Scrollback Buffer in System RAM" - depends on VGA_CONSOLE - default n - help - The scrollback buffer of the standard VGA console is located in - the VGA RAM. The size of this RAM is fixed and is quite small. - If you require a larger scrollback buffer, this can be placed in - System RAM which is dynamically allocated during initialization. - Placing the scrollback buffer in System RAM will slightly slow - down the console. - - If you want this feature, say 'Y' here and enter the amount of - RAM to allocate for this buffer. If unsure, say 'N'. - -config VGACON_SOFT_SCROLLBACK_SIZE - int "Scrollback Buffer Size (in KB)" - depends on VGACON_SOFT_SCROLLBACK - range 1 1024 - default "64" - help - Enter the amount of System RAM to allocate for scrollback - buffers of VGA consoles. Each 64KB will give you approximately - 16 80x25 screenfuls of scrollback buffer. - -config VGACON_SOFT_SCROLLBACK_PERSISTENT_ENABLE_BY_DEFAULT - bool "Persistent Scrollback History for each console by default" - depends on VGACON_SOFT_SCROLLBACK - default n - help - Say Y here if the scrollback history should persist by default when - switching between consoles. Otherwise, the scrollback history will be - flushed each time the console is switched. This feature can also be - enabled using the boot command line parameter - 'vgacon.scrollback_persistent=1'. - - This feature might break your tool of choice to flush the scrollback - buffer, e.g. clear(1) will work fine but Debian's clear_console(1) - will be broken, which might cause security issues. - You can use the escape sequence \e[3J instead if this feature is - activated. - - Note that a buffer of VGACON_SOFT_SCROLLBACK_SIZE is taken for each - created tty device. - So if you use a RAM-constrained system, say N here. 
- config MDA_CONSOLE depends on !M68K && !PARISC && ISA tristate "MDA text console (dual-headed)" diff --git a/drivers/video/console/newport_con.c b/drivers/video/console/newport_con.c index 00dddf6e08b0..2d2ee17052e8 100644 --- a/drivers/video/console/newport_con.c +++ b/drivers/video/console/newport_con.c @@ -32,6 +32,8 @@ #include #include +#define NEWPORT_LEN 0x10000 + #define FONT_DATA ((unsigned char *)font_vga_8x16.data) /* borrowed from fbcon.c */ @@ -43,6 +45,7 @@ static unsigned char *font_data[MAX_NR_CONSOLES]; static struct newport_regs *npregs; +static unsigned long newport_addr; static int logo_active; static int topscan; @@ -702,7 +705,6 @@ const struct consw newport_con = { static int newport_probe(struct gio_device *dev, const struct gio_device_id *id) { - unsigned long newport_addr; int err; if (!dev->resource.start) @@ -712,7 +714,7 @@ static int newport_probe(struct gio_device *dev, return -EBUSY; /* we only support one Newport as console */ newport_addr = dev->resource.start + 0xF0000; - if (!request_mem_region(newport_addr, 0x10000, "Newport")) + if (!request_mem_region(newport_addr, NEWPORT_LEN, "Newport")) return -ENODEV; npregs = (struct newport_regs *)/* ioremap cannot fail */ @@ -720,6 +722,11 @@ static int newport_probe(struct gio_device *dev, console_lock(); err = do_take_over_console(&newport_con, 0, MAX_NR_CONSOLES - 1, 1); console_unlock(); + + if (err) { + iounmap((void *)npregs); + release_mem_region(newport_addr, NEWPORT_LEN); + } return err; } @@ -727,6 +734,7 @@ static void newport_remove(struct gio_device *dev) { give_up_console(&newport_con); iounmap((void *)npregs); + release_mem_region(newport_addr, NEWPORT_LEN); } static struct gio_device_id newport_ids[] = { diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c index bfaa9ec4bc1f..55507df335bd 100644 --- a/drivers/video/console/vgacon.c +++ b/drivers/video/console/vgacon.c @@ -165,210 +165,6 @@ static inline void vga_set_mem_top(struct vc_data *c) write_vga(12, (c->vc_visible_origin - vga_vram_base) / 2); } -#ifdef CONFIG_VGACON_SOFT_SCROLLBACK -/* software scrollback */ -struct vgacon_scrollback_info { - void *data; - int tail; - int size; - int rows; - int cnt; - int cur; - int save; - int restore; -}; - -static struct vgacon_scrollback_info *vgacon_scrollback_cur; -static struct vgacon_scrollback_info vgacon_scrollbacks[MAX_NR_CONSOLES]; -static bool scrollback_persistent = \ - IS_ENABLED(CONFIG_VGACON_SOFT_SCROLLBACK_PERSISTENT_ENABLE_BY_DEFAULT); -module_param_named(scrollback_persistent, scrollback_persistent, bool, 0000); -MODULE_PARM_DESC(scrollback_persistent, "Enable persistent scrollback for all vga consoles"); - -static void vgacon_scrollback_reset(int vc_num, size_t reset_size) -{ - struct vgacon_scrollback_info *scrollback = &vgacon_scrollbacks[vc_num]; - - if (scrollback->data && reset_size > 0) - memset(scrollback->data, 0, reset_size); - - scrollback->cnt = 0; - scrollback->tail = 0; - scrollback->cur = 0; -} - -static void vgacon_scrollback_init(int vc_num) -{ - int pitch = vga_video_num_columns * 2; - size_t size = CONFIG_VGACON_SOFT_SCROLLBACK_SIZE * 1024; - int rows = size / pitch; - void *data; - - data = kmalloc_array(CONFIG_VGACON_SOFT_SCROLLBACK_SIZE, 1024, - GFP_NOWAIT); - - vgacon_scrollbacks[vc_num].data = data; - vgacon_scrollback_cur = &vgacon_scrollbacks[vc_num]; - - vgacon_scrollback_cur->rows = rows - 1; - vgacon_scrollback_cur->size = rows * pitch; - - vgacon_scrollback_reset(vc_num, size); -} - -static void vgacon_scrollback_switch(int 
vc_num) -{ - if (!scrollback_persistent) - vc_num = 0; - - if (!vgacon_scrollbacks[vc_num].data) { - vgacon_scrollback_init(vc_num); - } else { - if (scrollback_persistent) { - vgacon_scrollback_cur = &vgacon_scrollbacks[vc_num]; - } else { - size_t size = CONFIG_VGACON_SOFT_SCROLLBACK_SIZE * 1024; - - vgacon_scrollback_reset(vc_num, size); - } - } -} - -static void vgacon_scrollback_startup(void) -{ - vgacon_scrollback_cur = &vgacon_scrollbacks[0]; - vgacon_scrollback_init(0); -} - -static void vgacon_scrollback_update(struct vc_data *c, int t, int count) -{ - void *p; - - if (!vgacon_scrollback_cur->data || !vgacon_scrollback_cur->size || - c->vc_num != fg_console) - return; - - p = (void *) (c->vc_origin + t * c->vc_size_row); - - while (count--) { - scr_memcpyw(vgacon_scrollback_cur->data + - vgacon_scrollback_cur->tail, - p, c->vc_size_row); - - vgacon_scrollback_cur->cnt++; - p += c->vc_size_row; - vgacon_scrollback_cur->tail += c->vc_size_row; - - if (vgacon_scrollback_cur->tail >= vgacon_scrollback_cur->size) - vgacon_scrollback_cur->tail = 0; - - if (vgacon_scrollback_cur->cnt > vgacon_scrollback_cur->rows) - vgacon_scrollback_cur->cnt = vgacon_scrollback_cur->rows; - - vgacon_scrollback_cur->cur = vgacon_scrollback_cur->cnt; - } -} - -static void vgacon_restore_screen(struct vc_data *c) -{ - c->vc_origin = c->vc_visible_origin; - vgacon_scrollback_cur->save = 0; - - if (!vga_is_gfx && !vgacon_scrollback_cur->restore) { - scr_memcpyw((u16 *) c->vc_origin, (u16 *) c->vc_screenbuf, - c->vc_screenbuf_size > vga_vram_size ? - vga_vram_size : c->vc_screenbuf_size); - vgacon_scrollback_cur->restore = 1; - vgacon_scrollback_cur->cur = vgacon_scrollback_cur->cnt; - } -} - -static void vgacon_scrolldelta(struct vc_data *c, int lines) -{ - int start, end, count, soff; - - if (!lines) { - vgacon_restore_screen(c); - return; - } - - if (!vgacon_scrollback_cur->data) - return; - - if (!vgacon_scrollback_cur->save) { - vgacon_cursor(c, CM_ERASE); - vgacon_save_screen(c); - c->vc_origin = (unsigned long)c->vc_screenbuf; - vgacon_scrollback_cur->save = 1; - } - - vgacon_scrollback_cur->restore = 0; - start = vgacon_scrollback_cur->cur + lines; - end = start + abs(lines); - - if (start < 0) - start = 0; - - if (start > vgacon_scrollback_cur->cnt) - start = vgacon_scrollback_cur->cnt; - - if (end < 0) - end = 0; - - if (end > vgacon_scrollback_cur->cnt) - end = vgacon_scrollback_cur->cnt; - - vgacon_scrollback_cur->cur = start; - count = end - start; - soff = vgacon_scrollback_cur->tail - - ((vgacon_scrollback_cur->cnt - end) * c->vc_size_row); - soff -= count * c->vc_size_row; - - if (soff < 0) - soff += vgacon_scrollback_cur->size; - - count = vgacon_scrollback_cur->cnt - start; - - if (count > c->vc_rows) - count = c->vc_rows; - - if (count) { - int copysize; - - int diff = c->vc_rows - count; - void *d = (void *) c->vc_visible_origin; - void *s = (void *) c->vc_screenbuf; - - count *= c->vc_size_row; - /* how much memory to end of buffer left? 
*/ - copysize = min(count, vgacon_scrollback_cur->size - soff); - scr_memcpyw(d, vgacon_scrollback_cur->data + soff, copysize); - d += copysize; - count -= copysize; - - if (count) { - scr_memcpyw(d, vgacon_scrollback_cur->data, count); - d += count; - } - - if (diff) - scr_memcpyw(d, s, diff * c->vc_size_row); - } else - vgacon_cursor(c, CM_MOVE); -} - -static void vgacon_flush_scrollback(struct vc_data *c) -{ - size_t size = CONFIG_VGACON_SOFT_SCROLLBACK_SIZE * 1024; - - vgacon_scrollback_reset(c->vc_num, size); -} -#else -#define vgacon_scrollback_startup(...) do { } while (0) -#define vgacon_scrollback_init(...) do { } while (0) -#define vgacon_scrollback_update(...) do { } while (0) -#define vgacon_scrollback_switch(...) do { } while (0) - static void vgacon_restore_screen(struct vc_data *c) { if (c->vc_origin != c->vc_visible_origin) @@ -382,11 +178,6 @@ static void vgacon_scrolldelta(struct vc_data *c, int lines) vga_set_mem_top(c); } -static void vgacon_flush_scrollback(struct vc_data *c) -{ -} -#endif /* CONFIG_VGACON_SOFT_SCROLLBACK */ - static const char *vgacon_startup(void) { const char *display_desc = NULL; @@ -569,10 +360,7 @@ static const char *vgacon_startup(void) vgacon_xres = screen_info.orig_video_cols * VGA_FONTWIDTH; vgacon_yres = vga_scan_lines; - if (!vga_init_done) { - vgacon_scrollback_startup(); - vga_init_done = true; - } + vga_init_done = true; return display_desc; } @@ -863,7 +651,6 @@ static int vgacon_switch(struct vc_data *c) vgacon_doresize(c, c->vc_cols, c->vc_rows); } - vgacon_scrollback_switch(c->vc_num); return 0; /* Redrawing not needed */ } @@ -1380,7 +1167,6 @@ static bool vgacon_scroll(struct vc_data *c, unsigned int t, unsigned int b, oldo = c->vc_origin; delta = lines * c->vc_size_row; if (dir == SM_UP) { - vgacon_scrollback_update(c, t, lines); if (c->vc_scr_end + delta >= vga_vram_end) { scr_memcpyw((u16 *) vga_vram_base, (u16 *) (oldo + delta), @@ -1444,7 +1230,6 @@ const struct consw vga_con = { .con_save_screen = vgacon_save_screen, .con_build_attr = vgacon_build_attr, .con_invert_region = vgacon_invert_region, - .con_flush_scrollback = vgacon_flush_scrollback, }; EXPORT_SYMBOL(vga_con); diff --git a/drivers/video/fbdev/core/bitblit.c b/drivers/video/fbdev/core/bitblit.c index ca935c09a261..436365efae73 100644 --- a/drivers/video/fbdev/core/bitblit.c +++ b/drivers/video/fbdev/core/bitblit.c @@ -216,7 +216,7 @@ static void bit_clear_margins(struct vc_data *vc, struct fb_info *info, region.color = color; region.rop = ROP_COPY; - if (rw && !bottom_only) { + if ((int) rw > 0 && !bottom_only) { region.dx = info->var.xoffset + rs; region.dy = 0; region.width = rw; @@ -224,7 +224,7 @@ static void bit_clear_margins(struct vc_data *vc, struct fb_info *info, info->fbops->fb_fillrect(info, ®ion); } - if (bh) { + if ((int) bh > 0) { region.dx = info->var.xoffset; region.dy = info->var.yoffset + bs; region.width = rs; @@ -234,7 +234,7 @@ static void bit_clear_margins(struct vc_data *vc, struct fb_info *info, } static void bit_cursor(struct vc_data *vc, struct fb_info *info, int mode, - int softback_lines, int fg, int bg) + int fg, int bg) { struct fb_cursor cursor; struct fbcon_ops *ops = info->fbcon_par; @@ -247,15 +247,6 @@ static void bit_cursor(struct vc_data *vc, struct fb_info *info, int mode, cursor.set = 0; - if (softback_lines) { - if (y + softback_lines >= vc->vc_rows) { - mode = CM_ERASE; - ops->cursor_flash = 0; - return; - } else - y += softback_lines; - } - c = scr_readw((u16 *) vc->vc_pos); attribute = get_attribute(info, c); src = 
vc->vc_font.data + ((c & charmask) * (w * vc->vc_font.height)); diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c index 22070cfea1d0..dc7f5c4f0607 100644 --- a/drivers/video/fbdev/core/fbcon.c +++ b/drivers/video/fbdev/core/fbcon.c @@ -122,12 +122,6 @@ static int logo_lines; /* logo_shown is an index to vc_cons when >= 0; otherwise follows FBCON_LOGO enums. */ static int logo_shown = FBCON_LOGO_CANSHOW; -/* Software scrollback */ -static int fbcon_softback_size = 32768; -static unsigned long softback_buf, softback_curr; -static unsigned long softback_in; -static unsigned long softback_top, softback_end; -static int softback_lines; /* console mappings */ static int first_fb_vc; static int last_fb_vc = MAX_NR_CONSOLES - 1; @@ -167,8 +161,6 @@ static int margin_color; static const struct consw fb_con; -#define CM_SOFTBACK (8) - #define advance_row(p, delta) (unsigned short *)((unsigned long)(p) + (delta) * vc->vc_size_row) static int fbcon_set_origin(struct vc_data *); @@ -373,18 +365,6 @@ static int get_color(struct vc_data *vc, struct fb_info *info, return color; } -static void fbcon_update_softback(struct vc_data *vc) -{ - int l = fbcon_softback_size / vc->vc_size_row; - - if (l > 5) - softback_end = softback_buf + l * vc->vc_size_row; - else - /* Smaller scrollback makes no sense, and 0 would screw - the operation totally */ - softback_top = 0; -} - static void fb_flashcursor(struct work_struct *work) { struct fb_info *info = container_of(work, struct fb_info, queue); @@ -414,7 +394,7 @@ static void fb_flashcursor(struct work_struct *work) c = scr_readw((u16 *) vc->vc_pos); mode = (!ops->cursor_flash || ops->cursor_state.enable) ? CM_ERASE : CM_DRAW; - ops->cursor(vc, info, mode, softback_lines, get_color(vc, info, c, 1), + ops->cursor(vc, info, mode, get_color(vc, info, c, 1), get_color(vc, info, c, 0)); console_unlock(); } @@ -471,13 +451,7 @@ static int __init fb_console_setup(char *this_opt) } if (!strncmp(options, "scrollback:", 11)) { - options += 11; - if (*options) { - fbcon_softback_size = simple_strtoul(options, &options, 0); - if (*options == 'k' || *options == 'K') { - fbcon_softback_size *= 1024; - } - } + pr_warn("Ignoring scrollback size option\n"); continue; } @@ -1016,31 +990,6 @@ static const char *fbcon_startup(void) set_blitting_type(vc, info); - if (info->fix.type != FB_TYPE_TEXT) { - if (fbcon_softback_size) { - if (!softback_buf) { - softback_buf = - (unsigned long) - kvmalloc(fbcon_softback_size, - GFP_KERNEL); - if (!softback_buf) { - fbcon_softback_size = 0; - softback_top = 0; - } - } - } else { - if (softback_buf) { - kvfree((void *) softback_buf); - softback_buf = 0; - softback_top = 0; - } - } - if (softback_buf) - softback_in = softback_top = softback_curr = - softback_buf; - softback_lines = 0; - } - /* Setup default font */ if (!p->fontdata && !vc->vc_font.data) { if (!fontname[0] || !(font = find_font(fontname))) @@ -1214,9 +1163,6 @@ static void fbcon_init(struct vc_data *vc, int init) if (logo) fbcon_prepare_logo(vc, info, cols, rows, new_cols, new_rows); - if (vc == svc && softback_buf) - fbcon_update_softback(vc); - if (ops->rotate_font && ops->rotate_font(info, vc)) { ops->rotate = FB_ROTATE_UR; set_blitting_type(vc, info); @@ -1379,7 +1325,6 @@ static void fbcon_cursor(struct vc_data *vc, int mode) { struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]]; struct fbcon_ops *ops = info->fbcon_par; - int y; int c = scr_readw((u16 *) vc->vc_pos); ops->cur_blink_jiffies = msecs_to_jiffies(vc->vc_cur_blink_ms); @@ 
-1393,16 +1338,8 @@ static void fbcon_cursor(struct vc_data *vc, int mode) fbcon_add_cursor_timer(info); ops->cursor_flash = (mode == CM_ERASE) ? 0 : 1; - if (mode & CM_SOFTBACK) { - mode &= ~CM_SOFTBACK; - y = softback_lines; - } else { - if (softback_lines) - fbcon_set_origin(vc); - y = 0; - } - ops->cursor(vc, info, mode, y, get_color(vc, info, c, 1), + ops->cursor(vc, info, mode, get_color(vc, info, c, 1), get_color(vc, info, c, 0)); } @@ -1473,8 +1410,6 @@ static void fbcon_set_disp(struct fb_info *info, struct fb_var_screeninfo *var, if (con_is_visible(vc)) { update_screen(vc); - if (softback_buf) - fbcon_update_softback(vc); } } @@ -1612,99 +1547,6 @@ static __inline__ void ypan_down_redraw(struct vc_data *vc, int t, int count) scrollback_current = 0; } -static void fbcon_redraw_softback(struct vc_data *vc, struct fbcon_display *p, - long delta) -{ - int count = vc->vc_rows; - unsigned short *d, *s; - unsigned long n; - int line = 0; - - d = (u16 *) softback_curr; - if (d == (u16 *) softback_in) - d = (u16 *) vc->vc_origin; - n = softback_curr + delta * vc->vc_size_row; - softback_lines -= delta; - if (delta < 0) { - if (softback_curr < softback_top && n < softback_buf) { - n += softback_end - softback_buf; - if (n < softback_top) { - softback_lines -= - (softback_top - n) / vc->vc_size_row; - n = softback_top; - } - } else if (softback_curr >= softback_top - && n < softback_top) { - softback_lines -= - (softback_top - n) / vc->vc_size_row; - n = softback_top; - } - } else { - if (softback_curr > softback_in && n >= softback_end) { - n += softback_buf - softback_end; - if (n > softback_in) { - n = softback_in; - softback_lines = 0; - } - } else if (softback_curr <= softback_in && n > softback_in) { - n = softback_in; - softback_lines = 0; - } - } - if (n == softback_curr) - return; - softback_curr = n; - s = (u16 *) softback_curr; - if (s == (u16 *) softback_in) - s = (u16 *) vc->vc_origin; - while (count--) { - unsigned short *start; - unsigned short *le; - unsigned short c; - int x = 0; - unsigned short attr = 1; - - start = s; - le = advance_row(s, 1); - do { - c = scr_readw(s); - if (attr != (c & 0xff00)) { - attr = c & 0xff00; - if (s > start) { - fbcon_putcs(vc, start, s - start, - line, x); - x += s - start; - start = s; - } - } - if (c == scr_readw(d)) { - if (s > start) { - fbcon_putcs(vc, start, s - start, - line, x); - x += s - start + 1; - start = s + 1; - } else { - x++; - start++; - } - } - s++; - d++; - } while (s < le); - if (s > start) - fbcon_putcs(vc, start, s - start, line, x); - line++; - if (d == (u16 *) softback_end) - d = (u16 *) softback_buf; - if (d == (u16 *) softback_in) - d = (u16 *) vc->vc_origin; - if (s == (u16 *) softback_end) - s = (u16 *) softback_buf; - if (s == (u16 *) softback_in) - s = (u16 *) vc->vc_origin; - } -} - static void fbcon_redraw_move(struct vc_data *vc, struct fbcon_display *p, int line, int count, int dy) { @@ -1844,31 +1686,6 @@ static void fbcon_redraw(struct vc_data *vc, struct fbcon_display *p, } } -static inline void fbcon_softback_note(struct vc_data *vc, int t, - int count) -{ - unsigned short *p; - - if (vc->vc_num != fg_console) - return; - p = (unsigned short *) (vc->vc_origin + t * vc->vc_size_row); - - while (count) { - scr_memcpyw((u16 *) softback_in, p, vc->vc_size_row); - count--; - p = advance_row(p, 1); - softback_in += vc->vc_size_row; - if (softback_in == softback_end) - softback_in = softback_buf; - if (softback_in == softback_top) { - softback_top += vc->vc_size_row; - if (softback_top == softback_end) - 
softback_top = softback_buf; - } - } - softback_curr = softback_in; -} - static bool fbcon_scroll(struct vc_data *vc, unsigned int t, unsigned int b, enum con_scroll dir, unsigned int count) { @@ -1891,8 +1708,6 @@ static bool fbcon_scroll(struct vc_data *vc, unsigned int t, unsigned int b, case SM_UP: if (count > vc->vc_rows) /* Maximum realistic size */ count = vc->vc_rows; - if (softback_top) - fbcon_softback_note(vc, t, count); if (logo_shown >= 0) goto redraw_up; switch (p->scrollmode) { @@ -2185,6 +2000,9 @@ static void updatescrollmode(struct fbcon_display *p, } } +#define PITCH(w) (((w) + 7) >> 3) +#define CALC_FONTSZ(h, p, c) ((h) * (p) * (c)) /* size = height * pitch * charcount */ + static int fbcon_resize(struct vc_data *vc, unsigned int width, unsigned int height, unsigned int user) { @@ -2194,6 +2012,24 @@ static int fbcon_resize(struct vc_data *vc, unsigned int width, struct fb_var_screeninfo var = info->var; int x_diff, y_diff, virt_w, virt_h, virt_fw, virt_fh; + if (p->userfont && FNTSIZE(vc->vc_font.data)) { + int size; + int pitch = PITCH(vc->vc_font.width); + + /* + * If user font, ensure that a possible change to user font + * height or width will not allow a font data out-of-bounds access. + * NOTE: must use original charcount in calculation as font + * charcount can change and cannot be used to determine the + * font data allocated size. + */ + if (pitch <= 0) + return -EINVAL; + size = CALC_FONTSZ(vc->vc_font.height, pitch, FNTCHARCNT(vc->vc_font.data)); + if (size > FNTSIZE(vc->vc_font.data)) + return -EINVAL; + } + virt_w = FBCON_SWAP(ops->rotate, width, height); virt_h = FBCON_SWAP(ops->rotate, height, width); virt_fw = FBCON_SWAP(ops->rotate, vc->vc_font.width, @@ -2242,14 +2078,6 @@ static int fbcon_switch(struct vc_data *vc) info = registered_fb[con2fb_map[vc->vc_num]]; ops = info->fbcon_par; - if (softback_top) { - if (softback_lines) - fbcon_set_origin(vc); - softback_top = softback_curr = softback_in = softback_buf; - softback_lines = 0; - fbcon_update_softback(vc); - } - if (logo_shown >= 0) { struct vc_data *conp2 = vc_cons[logo_shown].d; @@ -2572,9 +2400,6 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h, int cnt; char *old_data = NULL; - if (con_is_visible(vc) && softback_lines) - fbcon_set_origin(vc); - resize = (w != vc->vc_font.width) || (h != vc->vc_font.height); if (p->userfont) old_data = vc->vc_font.data; @@ -2600,8 +2425,6 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h, cols /= w; rows /= h; vc_resize(vc, cols, rows); - if (con_is_visible(vc) && softback_buf) - fbcon_update_softback(vc); } else if (con_is_visible(vc) && vc->vc_mode == KD_TEXT) { fbcon_clear_margins(vc, 0); @@ -2645,7 +2468,7 @@ static int fbcon_set_font(struct vc_data *vc, struct console_font *font, int size; int i, csum; u8 *new_data, *data = font->data; - int pitch = (font->width+7) >> 3; + int pitch = PITCH(font->width); /* Is there a reason why fbconsole couldn't handle any charcount >256? 
* If not this check should be changed to charcount < 256 */ @@ -2661,7 +2484,7 @@ static int fbcon_set_font(struct vc_data *vc, struct console_font *font, if (fbcon_invalid_charcount(info, charcount)) return -EINVAL; - size = h * pitch * charcount; + size = CALC_FONTSZ(h, pitch, charcount); new_data = kmalloc(FONT_EXTRA_WORDS * sizeof(int) + size, GFP_USER); @@ -2760,19 +2583,7 @@ static void fbcon_set_palette(struct vc_data *vc, const unsigned char *table) static u16 *fbcon_screen_pos(struct vc_data *vc, int offset) { - unsigned long p; - int line; - - if (vc->vc_num != fg_console || !softback_lines) - return (u16 *) (vc->vc_origin + offset); - line = offset / vc->vc_size_row; - if (line >= softback_lines) - return (u16 *) (vc->vc_origin + offset - - softback_lines * vc->vc_size_row); - p = softback_curr + offset; - if (p >= softback_end) - p += softback_buf - softback_end; - return (u16 *) p; + return (u16 *) (vc->vc_origin + offset); } static unsigned long fbcon_getxy(struct vc_data *vc, unsigned long pos, @@ -2786,22 +2597,7 @@ static unsigned long fbcon_getxy(struct vc_data *vc, unsigned long pos, x = offset % vc->vc_cols; y = offset / vc->vc_cols; - if (vc->vc_num == fg_console) - y += softback_lines; ret = pos + (vc->vc_cols - x) * 2; - } else if (vc->vc_num == fg_console && softback_lines) { - unsigned long offset = pos - softback_curr; - - if (pos < softback_curr) - offset += softback_end - softback_buf; - offset /= 2; - x = offset % vc->vc_cols; - y = offset / vc->vc_cols; - ret = pos + (vc->vc_cols - x) * 2; - if (ret == softback_end) - ret = softback_buf; - if (ret == softback_in) - ret = vc->vc_origin; } else { /* Should not happen */ x = y = 0; @@ -2829,106 +2625,11 @@ static void fbcon_invert_region(struct vc_data *vc, u16 * p, int cnt) a = ((a) & 0x88ff) | (((a) & 0x7000) >> 4) | (((a) & 0x0700) << 4); scr_writew(a, p++); - if (p == (u16 *) softback_end) - p = (u16 *) softback_buf; - if (p == (u16 *) softback_in) - p = (u16 *) vc->vc_origin; } } -static void fbcon_scrolldelta(struct vc_data *vc, int lines) -{ - struct fb_info *info = registered_fb[con2fb_map[fg_console]]; - struct fbcon_ops *ops = info->fbcon_par; - struct fbcon_display *disp = &fb_display[fg_console]; - int offset, limit, scrollback_old; - - if (softback_top) { - if (vc->vc_num != fg_console) - return; - if (vc->vc_mode != KD_TEXT || !lines) - return; - if (logo_shown >= 0) { - struct vc_data *conp2 = vc_cons[logo_shown].d; - - if (conp2->vc_top == logo_lines - && conp2->vc_bottom == conp2->vc_rows) - conp2->vc_top = 0; - if (logo_shown == vc->vc_num) { - unsigned long p, q; - int i; - - p = softback_in; - q = vc->vc_origin + - logo_lines * vc->vc_size_row; - for (i = 0; i < logo_lines; i++) { - if (p == softback_top) - break; - if (p == softback_buf) - p = softback_end; - p -= vc->vc_size_row; - q -= vc->vc_size_row; - scr_memcpyw((u16 *) q, (u16 *) p, - vc->vc_size_row); - } - softback_in = softback_curr = p; - update_region(vc, vc->vc_origin, - logo_lines * vc->vc_cols); - } - logo_shown = FBCON_LOGO_CANSHOW; - } - fbcon_cursor(vc, CM_ERASE | CM_SOFTBACK); - fbcon_redraw_softback(vc, disp, lines); - fbcon_cursor(vc, CM_DRAW | CM_SOFTBACK); - return; - } - - if (!scrollback_phys_max) - return; - - scrollback_old = scrollback_current; - scrollback_current -= lines; - if (scrollback_current < 0) - scrollback_current = 0; - else if (scrollback_current > scrollback_max) - scrollback_current = scrollback_max; - if (scrollback_current == scrollback_old) - return; - - if (fbcon_is_inactive(vc, info)) - return; 
- - fbcon_cursor(vc, CM_ERASE); - - offset = disp->yscroll - scrollback_current; - limit = disp->vrows; - switch (disp->scrollmode) { - case SCROLL_WRAP_MOVE: - info->var.vmode |= FB_VMODE_YWRAP; - break; - case SCROLL_PAN_MOVE: - case SCROLL_PAN_REDRAW: - limit -= vc->vc_rows; - info->var.vmode &= ~FB_VMODE_YWRAP; - break; - } - if (offset < 0) - offset += limit; - else if (offset >= limit) - offset -= limit; - - ops->var.xoffset = 0; - ops->var.yoffset = offset * vc->vc_font.height; - ops->update_start(info); - - if (!scrollback_current) - fbcon_cursor(vc, CM_DRAW); -} - static int fbcon_set_origin(struct vc_data *vc) { - if (softback_lines) - fbcon_scrolldelta(vc, softback_lines); return 0; } @@ -2992,8 +2693,6 @@ static void fbcon_modechanged(struct fb_info *info) fbcon_set_palette(vc, color_table); update_screen(vc); - if (softback_buf) - fbcon_update_softback(vc); } } @@ -3404,7 +3103,6 @@ static const struct consw fb_con = { .con_font_default = fbcon_set_def_font, .con_font_copy = fbcon_copy_font, .con_set_palette = fbcon_set_palette, - .con_scrolldelta = fbcon_scrolldelta, .con_set_origin = fbcon_set_origin, .con_invert_region = fbcon_invert_region, .con_screen_pos = fbcon_screen_pos, @@ -3639,9 +3337,6 @@ static void fbcon_exit(void) } #endif - kvfree((void *)softback_buf); - softback_buf = 0UL; - for_each_registered_fb(i) { int pending = 0; diff --git a/drivers/video/fbdev/core/fbcon.h b/drivers/video/fbdev/core/fbcon.h index 20dea853765f..78bb14c03643 100644 --- a/drivers/video/fbdev/core/fbcon.h +++ b/drivers/video/fbdev/core/fbcon.h @@ -62,7 +62,7 @@ struct fbcon_ops { void (*clear_margins)(struct vc_data *vc, struct fb_info *info, int color, int bottom_only); void (*cursor)(struct vc_data *vc, struct fb_info *info, int mode, - int softback_lines, int fg, int bg); + int fg, int bg); int (*update_start)(struct fb_info *info); int (*rotate_font)(struct fb_info *info, struct vc_data *vc); struct fb_var_screeninfo var; /* copy of the current fb_var_screeninfo */ diff --git a/drivers/video/fbdev/core/fbcon_ccw.c b/drivers/video/fbdev/core/fbcon_ccw.c index dfa9a8aa4509..71ad6967a70e 100644 --- a/drivers/video/fbdev/core/fbcon_ccw.c +++ b/drivers/video/fbdev/core/fbcon_ccw.c @@ -201,7 +201,7 @@ static void ccw_clear_margins(struct vc_data *vc, struct fb_info *info, region.color = color; region.rop = ROP_COPY; - if (rw && !bottom_only) { + if ((int) rw > 0 && !bottom_only) { region.dx = 0; region.dy = info->var.yoffset; region.height = rw; @@ -209,7 +209,7 @@ static void ccw_clear_margins(struct vc_data *vc, struct fb_info *info, info->fbops->fb_fillrect(info, ®ion); } - if (bh) { + if ((int) bh > 0) { region.dx = info->var.xoffset + bs; region.dy = 0; region.height = info->var.yres_virtual; @@ -219,7 +219,7 @@ static void ccw_clear_margins(struct vc_data *vc, struct fb_info *info, } static void ccw_cursor(struct vc_data *vc, struct fb_info *info, int mode, - int softback_lines, int fg, int bg) + int fg, int bg) { struct fb_cursor cursor; struct fbcon_ops *ops = info->fbcon_par; @@ -236,15 +236,6 @@ static void ccw_cursor(struct vc_data *vc, struct fb_info *info, int mode, cursor.set = 0; - if (softback_lines) { - if (y + softback_lines >= vc->vc_rows) { - mode = CM_ERASE; - ops->cursor_flash = 0; - return; - } else - y += softback_lines; - } - c = scr_readw((u16 *) vc->vc_pos); attribute = get_attribute(info, c); src = ops->fontbuffer + ((c & charmask) * (w * vc->vc_font.width)); diff --git a/drivers/video/fbdev/core/fbcon_cw.c b/drivers/video/fbdev/core/fbcon_cw.c index 
ce08251bfd38..31fe5dd651d4 100644 --- a/drivers/video/fbdev/core/fbcon_cw.c +++ b/drivers/video/fbdev/core/fbcon_cw.c @@ -184,7 +184,7 @@ static void cw_clear_margins(struct vc_data *vc, struct fb_info *info, region.color = color; region.rop = ROP_COPY; - if (rw && !bottom_only) { + if ((int) rw > 0 && !bottom_only) { region.dx = 0; region.dy = info->var.yoffset + rs; region.height = rw; @@ -192,7 +192,7 @@ static void cw_clear_margins(struct vc_data *vc, struct fb_info *info, info->fbops->fb_fillrect(info, ®ion); } - if (bh) { + if ((int) bh > 0) { region.dx = info->var.xoffset; region.dy = info->var.yoffset; region.height = info->var.yres; @@ -202,7 +202,7 @@ static void cw_clear_margins(struct vc_data *vc, struct fb_info *info, } static void cw_cursor(struct vc_data *vc, struct fb_info *info, int mode, - int softback_lines, int fg, int bg) + int fg, int bg) { struct fb_cursor cursor; struct fbcon_ops *ops = info->fbcon_par; @@ -219,15 +219,6 @@ static void cw_cursor(struct vc_data *vc, struct fb_info *info, int mode, cursor.set = 0; - if (softback_lines) { - if (y + softback_lines >= vc->vc_rows) { - mode = CM_ERASE; - ops->cursor_flash = 0; - return; - } else - y += softback_lines; - } - c = scr_readw((u16 *) vc->vc_pos); attribute = get_attribute(info, c); src = ops->fontbuffer + ((c & charmask) * (w * vc->vc_font.width)); diff --git a/drivers/video/fbdev/core/fbcon_ud.c b/drivers/video/fbdev/core/fbcon_ud.c index 1936afc78fec..b2dd1370e39b 100644 --- a/drivers/video/fbdev/core/fbcon_ud.c +++ b/drivers/video/fbdev/core/fbcon_ud.c @@ -231,7 +231,7 @@ static void ud_clear_margins(struct vc_data *vc, struct fb_info *info, region.color = color; region.rop = ROP_COPY; - if (rw && !bottom_only) { + if ((int) rw > 0 && !bottom_only) { region.dy = 0; region.dx = info->var.xoffset; region.width = rw; @@ -239,7 +239,7 @@ static void ud_clear_margins(struct vc_data *vc, struct fb_info *info, info->fbops->fb_fillrect(info, ®ion); } - if (bh) { + if ((int) bh > 0) { region.dy = info->var.yoffset; region.dx = info->var.xoffset; region.height = bh; @@ -249,7 +249,7 @@ static void ud_clear_margins(struct vc_data *vc, struct fb_info *info, } static void ud_cursor(struct vc_data *vc, struct fb_info *info, int mode, - int softback_lines, int fg, int bg) + int fg, int bg) { struct fb_cursor cursor; struct fbcon_ops *ops = info->fbcon_par; @@ -267,15 +267,6 @@ static void ud_cursor(struct vc_data *vc, struct fb_info *info, int mode, cursor.set = 0; - if (softback_lines) { - if (y + softback_lines >= vc->vc_rows) { - mode = CM_ERASE; - ops->cursor_flash = 0; - return; - } else - y += softback_lines; - } - c = scr_readw((u16 *) vc->vc_pos); attribute = get_attribute(info, c); src = ops->fontbuffer + ((c & charmask) * (w * vc->vc_font.height)); diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c index e1937650adc2..15ce4cafb55c 100644 --- a/drivers/video/fbdev/core/fbmem.c +++ b/drivers/video/fbdev/core/fbmem.c @@ -952,7 +952,6 @@ static int fb_check_caps(struct fb_info *info, struct fb_var_screeninfo *var, int fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var) { - int flags = info->flags; int ret = 0; u32 activate; struct fb_var_screeninfo old_var; @@ -1047,9 +1046,6 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var) event.data = &mode; fb_notifier_call_chain(FB_EVENT_MODE_CHANGE, &event); - if (flags & FBINFO_MISC_USEREVENT) - fbcon_update_vcs(info, activate & FB_ACTIVATE_ALL); - return 0; } EXPORT_SYMBOL(fb_set_var); @@ -1100,9 +1096,9 @@ static 
long do_fb_ioctl(struct fb_info *info, unsigned int cmd, return -EFAULT; console_lock(); lock_fb_info(info); - info->flags |= FBINFO_MISC_USEREVENT; ret = fb_set_var(info, &var); - info->flags &= ~FBINFO_MISC_USEREVENT; + if (!ret) + fbcon_update_vcs(info, var.activate & FB_ACTIVATE_ALL); unlock_fb_info(info); console_unlock(); if (!ret && copy_to_user(argp, &var, sizeof(var))) diff --git a/drivers/video/fbdev/core/fbsysfs.c b/drivers/video/fbdev/core/fbsysfs.c index d54c88f88991..65dae05fff8e 100644 --- a/drivers/video/fbdev/core/fbsysfs.c +++ b/drivers/video/fbdev/core/fbsysfs.c @@ -91,9 +91,9 @@ static int activate(struct fb_info *fb_info, struct fb_var_screeninfo *var) var->activate |= FB_ACTIVATE_FORCE; console_lock(); - fb_info->flags |= FBINFO_MISC_USEREVENT; err = fb_set_var(fb_info, var); - fb_info->flags &= ~FBINFO_MISC_USEREVENT; + if (!err) + fbcon_update_vcs(fb_info, var->activate & FB_ACTIVATE_ALL); console_unlock(); if (err) return err; diff --git a/drivers/video/fbdev/core/tileblit.c b/drivers/video/fbdev/core/tileblit.c index 93390312957f..eb664dbf96f6 100644 --- a/drivers/video/fbdev/core/tileblit.c +++ b/drivers/video/fbdev/core/tileblit.c @@ -80,7 +80,7 @@ static void tile_clear_margins(struct vc_data *vc, struct fb_info *info, } static void tile_cursor(struct vc_data *vc, struct fb_info *info, int mode, - int softback_lines, int fg, int bg) + int fg, int bg) { struct fb_tilecursor cursor; int use_sw = (vc->vc_cursor_type & 0x10); diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c index 51d97ec4f58f..e0cbf5b3d217 100644 --- a/drivers/video/fbdev/efifb.c +++ b/drivers/video/fbdev/efifb.c @@ -453,7 +453,7 @@ static int efifb_probe(struct platform_device *dev) info->apertures->ranges[0].base = efifb_fix.smem_start; info->apertures->ranges[0].size = size_remap; - if (efi_enabled(EFI_BOOT) && + if (efi_enabled(EFI_MEMMAP) && !efi_mem_desc_lookup(efifb_fix.smem_start, &md)) { if ((efifb_fix.smem_start + efifb_fix.smem_len) > (md.phys_addr + (md.num_pages << EFI_PAGE_SHIFT))) { diff --git a/drivers/video/fbdev/neofb.c b/drivers/video/fbdev/neofb.c index b770946a0920..76464000933d 100644 --- a/drivers/video/fbdev/neofb.c +++ b/drivers/video/fbdev/neofb.c @@ -1820,6 +1820,7 @@ static int neo_scan_monitor(struct fb_info *info) #else printk(KERN_ERR "neofb: Only 640x480, 800x600/480 and 1024x768 panels are currently supported\n"); + kfree(info->monspecs.modedb); return -1; #endif default: diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dispc.c b/drivers/video/fbdev/omap2/omapfb/dss/dispc.c index 376ee5bc3ddc..34e8171856e9 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/dispc.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/dispc.c @@ -520,8 +520,11 @@ int dispc_runtime_get(void) DSSDBG("dispc_runtime_get\n"); r = pm_runtime_get_sync(&dispc.pdev->dev); - WARN_ON(r < 0); - return r < 0 ? r : 0; + if (WARN_ON(r < 0)) { + pm_runtime_put_sync(&dispc.pdev->dev); + return r; + } + return 0; } EXPORT_SYMBOL(dispc_runtime_get); diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dsi.c b/drivers/video/fbdev/omap2/omapfb/dss/dsi.c index d620376216e1..6f9c25fec994 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/dsi.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/dsi.c @@ -1137,8 +1137,11 @@ static int dsi_runtime_get(struct platform_device *dsidev) DSSDBG("dsi_runtime_get\n"); r = pm_runtime_get_sync(&dsi->pdev->dev); - WARN_ON(r < 0); - return r < 0 ? 
r : 0; + if (WARN_ON(r < 0)) { + pm_runtime_put_sync(&dsi->pdev->dev); + return r; + } + return 0; } static void dsi_runtime_put(struct platform_device *dsidev) diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dss.c b/drivers/video/fbdev/omap2/omapfb/dss/dss.c index 7252d22dd117..a6b1c1598040 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/dss.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/dss.c @@ -768,8 +768,11 @@ int dss_runtime_get(void) DSSDBG("dss_runtime_get\n"); r = pm_runtime_get_sync(&dss.pdev->dev); - WARN_ON(r < 0); - return r < 0 ? r : 0; + if (WARN_ON(r < 0)) { + pm_runtime_put_sync(&dss.pdev->dev); + return r; + } + return 0; } void dss_runtime_put(void) @@ -833,7 +836,7 @@ static const struct dss_features omap34xx_dss_feats = { }; static const struct dss_features omap3630_dss_feats = { - .fck_div_max = 32, + .fck_div_max = 31, .dss_fck_multiplier = 1, .parent_clk_name = "dpll4_ck", .dpi_select_source = &dss_dpi_select_source_omap2_omap3, diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c index 7060ae56c062..4804aab34298 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c @@ -39,9 +39,10 @@ static int hdmi_runtime_get(void) DSSDBG("hdmi_runtime_get\n"); r = pm_runtime_get_sync(&hdmi.pdev->dev); - WARN_ON(r < 0); - if (r < 0) + if (WARN_ON(r < 0)) { + pm_runtime_put_sync(&hdmi.pdev->dev); return r; + } return 0; } diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c index ac49531e4732..a06b6f1355bd 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c @@ -43,9 +43,10 @@ static int hdmi_runtime_get(void) DSSDBG("hdmi_runtime_get\n"); r = pm_runtime_get_sync(&hdmi.pdev->dev); - WARN_ON(r < 0); - if (r < 0) + if (WARN_ON(r < 0)) { + pm_runtime_put_sync(&hdmi.pdev->dev); return r; + } return 0; } diff --git a/drivers/video/fbdev/omap2/omapfb/dss/venc.c b/drivers/video/fbdev/omap2/omapfb/dss/venc.c index f81e2a46366d..3717dac3dcc8 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/venc.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/venc.c @@ -391,8 +391,11 @@ static int venc_runtime_get(void) DSSDBG("venc_runtime_get\n"); r = pm_runtime_get_sync(&venc.pdev->dev); - WARN_ON(r < 0); - return r < 0 ? 
r : 0; + if (WARN_ON(r < 0)) { + pm_runtime_put_sync(&venc.pdev->dev); + return r; + } + return 0; } static void venc_runtime_put(void) diff --git a/drivers/video/fbdev/ps3fb.c b/drivers/video/fbdev/ps3fb.c index 5ed2db39d823..ce90483c5020 100644 --- a/drivers/video/fbdev/ps3fb.c +++ b/drivers/video/fbdev/ps3fb.c @@ -29,6 +29,7 @@ #include #include #include +#include #include #include @@ -824,12 +825,12 @@ static int ps3fb_ioctl(struct fb_info *info, unsigned int cmd, var = info->var; fb_videomode_to_var(&var, vmode); console_lock(); - info->flags |= FBINFO_MISC_USEREVENT; /* Force, in case only special bits changed */ var.activate |= FB_ACTIVATE_FORCE; par->new_mode_id = val; retval = fb_set_var(info, &var); - info->flags &= ~FBINFO_MISC_USEREVENT; + if (!retval) + fbcon_update_vcs(info, var.activate & FB_ACTIVATE_ALL); console_unlock(); } break; diff --git a/drivers/video/fbdev/pxafb.c b/drivers/video/fbdev/pxafb.c index f70c9f79622e..27635926cea3 100644 --- a/drivers/video/fbdev/pxafb.c +++ b/drivers/video/fbdev/pxafb.c @@ -2425,8 +2425,8 @@ static int pxafb_remove(struct platform_device *dev) free_pages_exact(fbi->video_mem, fbi->video_mem_size); - dma_free_wc(&dev->dev, fbi->dma_buff_size, fbi->dma_buff, - fbi->dma_buff_phys); + dma_free_coherent(&dev->dev, fbi->dma_buff_size, fbi->dma_buff, + fbi->dma_buff_phys); return 0; } diff --git a/drivers/video/fbdev/savage/savagefb_driver.c b/drivers/video/fbdev/savage/savagefb_driver.c index 512789f5f884..d5d22d9c0f56 100644 --- a/drivers/video/fbdev/savage/savagefb_driver.c +++ b/drivers/video/fbdev/savage/savagefb_driver.c @@ -2158,6 +2158,8 @@ static int savage_init_fb_info(struct fb_info *info, struct pci_dev *dev, info->flags |= FBINFO_HWACCEL_COPYAREA | FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_IMAGEBLIT; + else + kfree(info->pixmap.addr); } #endif return err; diff --git a/drivers/video/fbdev/sm712fb.c b/drivers/video/fbdev/sm712fb.c index 207d0add684b..246681414577 100644 --- a/drivers/video/fbdev/sm712fb.c +++ b/drivers/video/fbdev/sm712fb.c @@ -1429,6 +1429,8 @@ static int smtc_map_smem(struct smtcfb_info *sfb, static void smtc_unmap_smem(struct smtcfb_info *sfb) { if (sfb && sfb->fb->screen_base) { + if (sfb->chip_id == 0x720) + sfb->fb->screen_base -= 0x00200000; iounmap(sfb->fb->screen_base); sfb->fb->screen_base = NULL; } diff --git a/drivers/video/fbdev/vga16fb.c b/drivers/video/fbdev/vga16fb.c index 2c6a576ed84c..4b83109202b1 100644 --- a/drivers/video/fbdev/vga16fb.c +++ b/drivers/video/fbdev/vga16fb.c @@ -1121,7 +1121,7 @@ static void vga_8planes_imageblit(struct fb_info *info, const struct fb_image *i char oldop = setop(0); char oldsr = setsr(0); char oldmask = selectmask(); - const char *cdat = image->data; + const unsigned char *cdat = image->data; u32 dx = image->dx; char __iomem *where; int y; diff --git a/drivers/virt/vboxguest/vboxguest_core.c b/drivers/virt/vboxguest/vboxguest_core.c index 2307b0329aec..95bfdb8ac8a2 100644 --- a/drivers/virt/vboxguest/vboxguest_core.c +++ b/drivers/virt/vboxguest/vboxguest_core.c @@ -1443,7 +1443,7 @@ static int vbg_ioctl_change_guest_capabilities(struct vbg_dev *gdev, or_mask = caps->u.in.or_mask; not_mask = caps->u.in.not_mask; - if ((or_mask | not_mask) & ~VMMDEV_EVENT_VALID_EVENT_MASK) + if ((or_mask | not_mask) & ~VMMDEV_GUEST_CAPABILITIES_MASK) return -EINVAL; ret = vbg_set_session_capabilities(gdev, session, or_mask, not_mask, @@ -1519,7 +1519,8 @@ int vbg_core_ioctl(struct vbg_session *session, unsigned int req, void *data) /* For VMMDEV_REQUEST hdr->type != 
VBG_IOCTL_HDR_TYPE_DEFAULT */ if (req_no_size == VBG_IOCTL_VMMDEV_REQUEST(0) || - req == VBG_IOCTL_VMMDEV_REQUEST_BIG) + req == VBG_IOCTL_VMMDEV_REQUEST_BIG || + req == VBG_IOCTL_VMMDEV_REQUEST_BIG_ALT) return vbg_ioctl_vmmrequest(gdev, session, data); if (hdr->type != VBG_IOCTL_HDR_TYPE_DEFAULT) @@ -1557,6 +1558,7 @@ int vbg_core_ioctl(struct vbg_session *session, unsigned int req, void *data) case VBG_IOCTL_HGCM_CALL(0): return vbg_ioctl_hgcm_call(gdev, session, f32bit, data); case VBG_IOCTL_LOG(0): + case VBG_IOCTL_LOG_ALT(0): return vbg_ioctl_log(data); } diff --git a/drivers/virt/vboxguest/vboxguest_core.h b/drivers/virt/vboxguest/vboxguest_core.h index 4188c12b839f..77c3a9c8255d 100644 --- a/drivers/virt/vboxguest/vboxguest_core.h +++ b/drivers/virt/vboxguest/vboxguest_core.h @@ -15,6 +15,21 @@ #include #include "vmmdev.h" +/* + * The mainline kernel version (this version) of the vboxguest module + * contained a bug where it defined VBGL_IOCTL_VMMDEV_REQUEST_BIG and + * VBGL_IOCTL_LOG using _IOC(_IOC_READ | _IOC_WRITE, 'V', ...) instead + * of _IO(V, ...) as the out of tree VirtualBox upstream version does. + * + * These _ALT definitions keep compatibility with the wrong defines the + * mainline kernel version used for a while. + * Note the VirtualBox userspace bits have always been built against + * VirtualBox upstream's headers, so this is likely not necessary. But + * we must never break our ABI so we keep these around to be 100% sure. + */ +#define VBG_IOCTL_VMMDEV_REQUEST_BIG_ALT _IOC(_IOC_READ | _IOC_WRITE, 'V', 3, 0) +#define VBG_IOCTL_LOG_ALT(s) _IOC(_IOC_READ | _IOC_WRITE, 'V', 9, s) + struct vbg_session; /** VBox guest memory balloon. */ diff --git a/drivers/virt/vboxguest/vboxguest_linux.c b/drivers/virt/vboxguest/vboxguest_linux.c index 6e8c0f1c1056..32c2c52f7e84 100644 --- a/drivers/virt/vboxguest/vboxguest_linux.c +++ b/drivers/virt/vboxguest/vboxguest_linux.c @@ -131,7 +131,8 @@ static long vbg_misc_device_ioctl(struct file *filp, unsigned int req, * the need for a bounce-buffer and another copy later on. */ is_vmmdev_req = (req & ~IOCSIZE_MASK) == VBG_IOCTL_VMMDEV_REQUEST(0) || - req == VBG_IOCTL_VMMDEV_REQUEST_BIG; + req == VBG_IOCTL_VMMDEV_REQUEST_BIG || + req == VBG_IOCTL_VMMDEV_REQUEST_BIG_ALT; if (is_vmmdev_req) buf = vbg_req_alloc(size, VBG_IOCTL_HDR_TYPE_DEFAULT, diff --git a/drivers/virt/vboxguest/vmmdev.h b/drivers/virt/vboxguest/vmmdev.h index 6337b8d75d96..21f408120e3f 100644 --- a/drivers/virt/vboxguest/vmmdev.h +++ b/drivers/virt/vboxguest/vmmdev.h @@ -206,6 +206,8 @@ VMMDEV_ASSERT_SIZE(vmmdev_mask, 24 + 8); * not. */ #define VMMDEV_GUEST_SUPPORTS_GRAPHICS BIT(2) +/* The mask of valid capabilities, for sanity checking. */ +#define VMMDEV_GUEST_CAPABILITIES_MASK 0x00000007U /** struct vmmdev_hypervisorinfo - Hypervisor info structure. */ struct vmmdev_hypervisorinfo { diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c index 7aaf150f89ba..1e444826a66e 100644 --- a/drivers/virtio/virtio_balloon.c +++ b/drivers/virtio/virtio_balloon.c @@ -529,10 +529,14 @@ static int init_vqs(struct virtio_balloon *vb) static u32 virtio_balloon_cmd_id_received(struct virtio_balloon *vb) { if (test_and_clear_bit(VIRTIO_BALLOON_CONFIG_READ_CMD_ID, - &vb->config_read_bitmap)) + &vb->config_read_bitmap)) { virtio_cread(vb->vdev, struct virtio_balloon_config, free_page_report_cmd_id, &vb->cmd_id_received_cache); + /* Legacy balloon config space is LE, unlike all other devices. 
*/ + if (!virtio_has_feature(vb->vdev, VIRTIO_F_VERSION_1)) + vb->cmd_id_received_cache = le32_to_cpu((__force __le32)vb->cmd_id_received_cache); + } return vb->cmd_id_received_cache; } diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c index 58b96baa8d48..4f7c73e6052f 100644 --- a/drivers/virtio/virtio_ring.c +++ b/drivers/virtio/virtio_ring.c @@ -1960,6 +1960,9 @@ bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx) { struct vring_virtqueue *vq = to_vvq(_vq); + if (unlikely(vq->broken)) + return false; + virtio_mb(vq->weak_barriers); return vq->packed_ring ? virtqueue_poll_packed(_vq, last_used_idx) : virtqueue_poll_split(_vq, last_used_idx); diff --git a/drivers/w1/masters/omap_hdq.c b/drivers/w1/masters/omap_hdq.c index 4164045866b3..6bac5c18cf6d 100644 --- a/drivers/w1/masters/omap_hdq.c +++ b/drivers/w1/masters/omap_hdq.c @@ -176,7 +176,7 @@ static int hdq_write_byte(struct hdq_data *hdq_data, u8 val, u8 *status) /* check irqstatus */ if (!(*status & OMAP_HDQ_INT_STATUS_TXCOMPLETE)) { dev_dbg(hdq_data->dev, "timeout waiting for" - " TXCOMPLETE/RXCOMPLETE, %x", *status); + " TXCOMPLETE/RXCOMPLETE, %x\n", *status); ret = -ETIMEDOUT; goto out; } @@ -187,7 +187,7 @@ static int hdq_write_byte(struct hdq_data *hdq_data, u8 val, u8 *status) OMAP_HDQ_FLAG_CLEAR, &tmp_status); if (ret) { dev_dbg(hdq_data->dev, "timeout waiting GO bit" - " return to zero, %x", tmp_status); + " return to zero, %x\n", tmp_status); } out: @@ -203,7 +203,7 @@ static irqreturn_t hdq_isr(int irq, void *_hdq) spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags); hdq_data->hdq_irqstatus = hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS); spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags); - dev_dbg(hdq_data->dev, "hdq_isr: %x", hdq_data->hdq_irqstatus); + dev_dbg(hdq_data->dev, "hdq_isr: %x\n", hdq_data->hdq_irqstatus); if (hdq_data->hdq_irqstatus & (OMAP_HDQ_INT_STATUS_TXCOMPLETE | OMAP_HDQ_INT_STATUS_RXCOMPLETE @@ -311,7 +311,7 @@ static int omap_hdq_break(struct hdq_data *hdq_data) tmp_status = hdq_data->hdq_irqstatus; /* check irqstatus */ if (!(tmp_status & OMAP_HDQ_INT_STATUS_TIMEOUT)) { - dev_dbg(hdq_data->dev, "timeout waiting for TIMEOUT, %x", + dev_dbg(hdq_data->dev, "timeout waiting for TIMEOUT, %x\n", tmp_status); ret = -ETIMEDOUT; goto out; @@ -338,7 +338,7 @@ static int omap_hdq_break(struct hdq_data *hdq_data) &tmp_status); if (ret) dev_dbg(hdq_data->dev, "timeout waiting INIT&GO bits" - " return to zero, %x", tmp_status); + " return to zero, %x\n", tmp_status); out: mutex_unlock(&hdq_data->hdq_mutex); diff --git a/drivers/watchdog/da9062_wdt.c b/drivers/watchdog/da9062_wdt.c index e92f38fcb7a4..1b9bcfed39e9 100644 --- a/drivers/watchdog/da9062_wdt.c +++ b/drivers/watchdog/da9062_wdt.c @@ -55,11 +55,6 @@ static int da9062_wdt_update_timeout_register(struct da9062_watchdog *wdt, unsigned int regval) { struct da9062 *chip = wdt->hw; - int ret; - - ret = da9062_reset_watchdog_timer(wdt); - if (ret) - return ret; regmap_update_bits(chip->regmap, DA9062AA_CONTROL_D, diff --git a/drivers/watchdog/f71808e_wdt.c b/drivers/watchdog/f71808e_wdt.c index e46104c2fd94..893cef70c159 100644 --- a/drivers/watchdog/f71808e_wdt.c +++ b/drivers/watchdog/f71808e_wdt.c @@ -689,9 +689,9 @@ static int __init watchdog_init(int sioaddr) * into the module have been registered yet. 
*/ watchdog.sioaddr = sioaddr; - watchdog.ident.options = WDIOC_SETTIMEOUT - | WDIOF_MAGICCLOSE - | WDIOF_KEEPALIVEPING; + watchdog.ident.options = WDIOF_MAGICCLOSE + | WDIOF_KEEPALIVEPING + | WDIOF_CARDRESET; snprintf(watchdog.ident.identity, sizeof(watchdog.ident.identity), "%s watchdog", @@ -705,6 +705,13 @@ static int __init watchdog_init(int sioaddr) wdt_conf = superio_inb(sioaddr, F71808FG_REG_WDT_CONF); watchdog.caused_reboot = wdt_conf & BIT(F71808FG_FLAG_WDTMOUT_STS); + /* + * We don't want WDTMOUT_STS to stick around till regular reboot. + * Write 1 to the bit to clear it to zero. + */ + superio_outb(sioaddr, F71808FG_REG_WDT_CONF, + wdt_conf | BIT(F71808FG_FLAG_WDTMOUT_STS)); + superio_exit(sioaddr); err = watchdog_set_timeout(timeout); diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c index c4147e93aa7d..3729f99fd8ec 100644 --- a/drivers/watchdog/watchdog_dev.c +++ b/drivers/watchdog/watchdog_dev.c @@ -974,6 +974,15 @@ static int watchdog_cdev_register(struct watchdog_device *wdd) if (IS_ERR_OR_NULL(watchdog_kworker)) return -ENODEV; + device_initialize(&wd_data->dev); + wd_data->dev.devt = MKDEV(MAJOR(watchdog_devt), wdd->id); + wd_data->dev.class = &watchdog_class; + wd_data->dev.parent = wdd->parent; + wd_data->dev.groups = wdd->groups; + wd_data->dev.release = watchdog_core_data_release; + dev_set_drvdata(&wd_data->dev, wdd); + dev_set_name(&wd_data->dev, "watchdog%d", wdd->id); + kthread_init_work(&wd_data->work, watchdog_ping_work); hrtimer_init(&wd_data->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD); wd_data->timer.function = watchdog_timer_expired; @@ -994,15 +1003,6 @@ static int watchdog_cdev_register(struct watchdog_device *wdd) } } - device_initialize(&wd_data->dev); - wd_data->dev.devt = MKDEV(MAJOR(watchdog_devt), wdd->id); - wd_data->dev.class = &watchdog_class; - wd_data->dev.parent = wdd->parent; - wd_data->dev.groups = wdd->groups; - wd_data->dev.release = watchdog_core_data_release; - dev_set_drvdata(&wd_data->dev, wdd); - dev_set_name(&wd_data->dev, "watchdog%d", wdd->id); - /* Fill in the data structures */ cdev_init(&wd_data->cdev, &watchdog_fops); diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c index bed90d612e48..ebb05517b6aa 100644 --- a/drivers/xen/balloon.c +++ b/drivers/xen/balloon.c @@ -570,11 +570,13 @@ static int add_ballooned_pages(int nr_pages) if (xen_hotplug_unpopulated) { st = reserve_additional_memory(); if (st != BP_ECANCELED) { + int rc; + mutex_unlock(&balloon_mutex); - wait_event(balloon_wq, + rc = wait_event_interruptible(balloon_wq, !list_empty(&ballooned_pages)); mutex_lock(&balloon_mutex); - return 0; + return rc ? -ENOMEM : 0; } } @@ -632,6 +634,12 @@ int alloc_xenballooned_pages(int nr_pages, struct page **pages) out_undo: mutex_unlock(&balloon_mutex); free_xenballooned_pages(pgno, pages); + /* + * NB: free_xenballooned_pages will only subtract pgno pages, but since + * target_unpopulated is incremented with nr_pages at the start we need + * to remove the remaining ones also, or accounting will be screwed. 
+ */ + balloon_stats.target_unpopulated -= nr_pages - pgno; return ret; } EXPORT_SYMBOL(alloc_xenballooned_pages); diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c index 6c8843968a52..55f2b834cf13 100644 --- a/drivers/xen/events/events_base.c +++ b/drivers/xen/events/events_base.c @@ -155,7 +155,7 @@ int get_evtchn_to_irq(unsigned evtchn) /* Get info for IRQ */ struct irq_info *info_for_irq(unsigned irq) { - return irq_get_handler_data(irq); + return irq_get_chip_data(irq); } /* Constructors for packed IRQ information. */ @@ -376,7 +376,7 @@ static void xen_irq_init(unsigned irq) info->type = IRQT_UNBOUND; info->refcnt = -1; - irq_set_handler_data(irq, info); + irq_set_chip_data(irq, info); list_add_tail(&info->list, &xen_irq_list_head); } @@ -425,14 +425,14 @@ static int __must_check xen_allocate_irq_gsi(unsigned gsi) static void xen_free_irq(unsigned irq) { - struct irq_info *info = irq_get_handler_data(irq); + struct irq_info *info = irq_get_chip_data(irq); if (WARN_ON(!info)) return; list_del(&info->list); - irq_set_handler_data(irq, NULL); + irq_set_chip_data(irq, NULL); WARN_ON(info->refcnt > 0); @@ -602,7 +602,7 @@ EXPORT_SYMBOL_GPL(xen_irq_from_gsi); static void __unbind_from_irq(unsigned int irq) { int evtchn = evtchn_from_irq(irq); - struct irq_info *info = irq_get_handler_data(irq); + struct irq_info *info = irq_get_chip_data(irq); if (info->refcnt > 0) { info->refcnt--; @@ -1106,7 +1106,7 @@ int bind_ipi_to_irqhandler(enum ipi_vector ipi, void unbind_from_irqhandler(unsigned int irq, void *dev_id) { - struct irq_info *info = irq_get_handler_data(irq); + struct irq_info *info = irq_get_chip_data(irq); if (WARN_ON(!info)) return; @@ -1140,7 +1140,7 @@ int evtchn_make_refcounted(unsigned int evtchn) if (irq == -1) return -ENOENT; - info = irq_get_handler_data(irq); + info = irq_get_chip_data(irq); if (!info) return -ENOENT; @@ -1168,7 +1168,7 @@ int evtchn_get(unsigned int evtchn) if (irq == -1) goto done; - info = irq_get_handler_data(irq); + info = irq_get_chip_data(irq); if (!info) goto done; diff --git a/drivers/xen/gntdev-dmabuf.c b/drivers/xen/gntdev-dmabuf.c index 2c4f324f8626..da799929087d 100644 --- a/drivers/xen/gntdev-dmabuf.c +++ b/drivers/xen/gntdev-dmabuf.c @@ -641,6 +641,14 @@ dmabuf_imp_to_refs(struct gntdev_dmabuf_priv *priv, struct device *dev, goto fail_detach; } + /* Check that we have zero offset. */ + if (sgt->sgl->offset) { + ret = ERR_PTR(-EINVAL); + pr_debug("DMA buffer has %d bytes offset, user-space expects 0\n", + sgt->sgl->offset); + goto fail_unmap; + } + /* Check number of pages that imported buffer has. */ if (attach->dmabuf->size != gntdev_dmabuf->nr_pages << PAGE_SHIFT) { ret = ERR_PTR(-EINVAL); diff --git a/drivers/xen/preempt.c b/drivers/xen/preempt.c index 456a164364a2..98a9d6892d98 100644 --- a/drivers/xen/preempt.c +++ b/drivers/xen/preempt.c @@ -27,7 +27,7 @@ EXPORT_SYMBOL_GPL(xen_in_preemptible_hcall); asmlinkage __visible void xen_maybe_preempt_hcall(void) { if (unlikely(__this_cpu_read(xen_in_preemptible_hcall) - && need_resched())) { + && need_resched() && !preempt_count())) { /* * Clear flag as we may be rescheduled on a different * cpu. 
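
The six omapfb runtime-PM hunks earlier in this diff (dispc, dsi, dss, hdmi4, hdmi5, venc) all converge on the same idea: pm_runtime_get_sync() increments the device usage count even when it returns an error, so a failed get has to be balanced with a put before bailing out, otherwise the count leaks and the device can never runtime-suspend again. What follows is an illustrative sketch of that pattern only, not part of the patch; the function name example_runtime_get and the bare struct device pointer are placeholders standing in for the driver-private pdev fields used in the real hunks.

#include <linux/device.h>
#include <linux/pm_runtime.h>

/*
 * Illustrative only -- mirrors the error handling the omapfb hunks
 * add around pm_runtime_get_sync().
 */
static int example_runtime_get(struct device *dev)
{
	int r;

	r = pm_runtime_get_sync(dev);
	if (WARN_ON(r < 0)) {
		/* get_sync bumped the usage count even on failure */
		pm_runtime_put_sync(dev);
		return r;
	}
	return 0;
}
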
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c index bb7a1101cff7..06346422f743 100644 --- a/drivers/xen/swiotlb-xen.c +++ b/drivers/xen/swiotlb-xen.c @@ -335,6 +335,7 @@ xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr, int order = get_order(size); phys_addr_t phys; u64 dma_mask = DMA_BIT_MASK(32); + struct page *page; if (hwdev && hwdev->coherent_dma_mask) dma_mask = hwdev->coherent_dma_mask; @@ -346,9 +347,14 @@ xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr, /* Convert the size to actually allocated. */ size = 1UL << (order + XEN_PAGE_SHIFT); - if (WARN_ON((dev_addr + size - 1 > dma_mask) || + if (is_vmalloc_addr(vaddr)) + page = vmalloc_to_page(vaddr); + else + page = virt_to_page(vaddr); + + if (!WARN_ON((dev_addr + size - 1 > dma_mask) || range_straddles_page_boundary(phys, size)) && - TestClearPageXenRemapped(virt_to_page(vaddr))) + TestClearPageXenRemapped(page)) xen_destroy_contiguous_region(phys, order); xen_free_coherent_pages(hwdev, size, vaddr, (dma_addr_t)phys, attrs); diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c index a38292ef79f6..f38bdaea0ef1 100644 --- a/drivers/xen/xenbus/xenbus_client.c +++ b/drivers/xen/xenbus/xenbus_client.c @@ -363,8 +363,14 @@ int xenbus_grant_ring(struct xenbus_device *dev, void *vaddr, int i, j; for (i = 0; i < nr_pages; i++) { - err = gnttab_grant_foreign_access(dev->otherend_id, - virt_to_gfn(vaddr), 0); + unsigned long gfn; + + if (is_vmalloc_addr(vaddr)) + gfn = pfn_to_gfn(vmalloc_to_pfn(vaddr)); + else + gfn = virt_to_gfn(vaddr); + + err = gnttab_grant_foreign_access(dev->otherend_id, gfn, 0); if (err < 0) { xenbus_dev_fatal(dev, err, "granting access to ring page"); diff --git a/fs/9p/v9fs.c b/fs/9p/v9fs.c index 15a99f9c7253..39def020a074 100644 --- a/fs/9p/v9fs.c +++ b/fs/9p/v9fs.c @@ -500,10 +500,9 @@ void v9fs_session_close(struct v9fs_session_info *v9ses) } #ifdef CONFIG_9P_FSCACHE - if (v9ses->fscache) { + if (v9ses->fscache) v9fs_cache_session_put_cookie(v9ses); - kfree(v9ses->cachetag); - } + kfree(v9ses->cachetag); #endif kfree(v9ses->uname); kfree(v9ses->aname); diff --git a/fs/affs/amigaffs.c b/fs/affs/amigaffs.c index f708c45d5f66..29f11e10a7c7 100644 --- a/fs/affs/amigaffs.c +++ b/fs/affs/amigaffs.c @@ -420,24 +420,51 @@ affs_mode_to_prot(struct inode *inode) u32 prot = AFFS_I(inode)->i_protect; umode_t mode = inode->i_mode; + /* + * First, clear all RWED bits for owner, group, other. + * Then, recalculate them afresh. + * + * We'll always clear the delete-inhibit bit for the owner, as that is + * the classic single-user mode AmigaOS protection bit and we need to + * stay compatible with all scenarios. + * + * Since multi-user AmigaOS is an extension, we'll only set the + * delete-allow bit if any of the other bits in the same user class + * (group/other) are used. + */ + prot &= ~(FIBF_NOEXECUTE | FIBF_NOREAD + | FIBF_NOWRITE | FIBF_NODELETE + | FIBF_GRP_EXECUTE | FIBF_GRP_READ + | FIBF_GRP_WRITE | FIBF_GRP_DELETE + | FIBF_OTR_EXECUTE | FIBF_OTR_READ + | FIBF_OTR_WRITE | FIBF_OTR_DELETE); + + /* Classic single-user AmigaOS flags. These are inverted. */ if (!(mode & 0100)) prot |= FIBF_NOEXECUTE; if (!(mode & 0400)) prot |= FIBF_NOREAD; if (!(mode & 0200)) prot |= FIBF_NOWRITE; + + /* Multi-user extended flags. Not inverted. 
*/ if (mode & 0010) prot |= FIBF_GRP_EXECUTE; if (mode & 0040) prot |= FIBF_GRP_READ; if (mode & 0020) prot |= FIBF_GRP_WRITE; + if (mode & 0070) + prot |= FIBF_GRP_DELETE; + if (mode & 0001) prot |= FIBF_OTR_EXECUTE; if (mode & 0004) prot |= FIBF_OTR_READ; if (mode & 0002) prot |= FIBF_OTR_WRITE; + if (mode & 0007) + prot |= FIBF_OTR_DELETE; AFFS_I(inode)->i_protect = prot; } diff --git a/fs/affs/file.c b/fs/affs/file.c index a85817f54483..ba084b0b214b 100644 --- a/fs/affs/file.c +++ b/fs/affs/file.c @@ -428,6 +428,24 @@ static int affs_write_begin(struct file *file, struct address_space *mapping, return ret; } +static int affs_write_end(struct file *file, struct address_space *mapping, + loff_t pos, unsigned int len, unsigned int copied, + struct page *page, void *fsdata) +{ + struct inode *inode = mapping->host; + int ret; + + ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata); + + /* Clear Archived bit on file writes, as AmigaOS would do */ + if (AFFS_I(inode)->i_protect & FIBF_ARCHIVED) { + AFFS_I(inode)->i_protect &= ~FIBF_ARCHIVED; + mark_inode_dirty(inode); + } + + return ret; +} + static sector_t _affs_bmap(struct address_space *mapping, sector_t block) { return generic_block_bmap(mapping,block,affs_get_block); @@ -437,7 +455,7 @@ const struct address_space_operations affs_aops = { .readpage = affs_readpage, .writepage = affs_writepage, .write_begin = affs_write_begin, - .write_end = generic_write_end, + .write_end = affs_write_end, .direct_IO = affs_direct_IO, .bmap = _affs_bmap }; @@ -794,6 +812,12 @@ done: if (tmp > inode->i_size) inode->i_size = AFFS_I(inode)->mmu_private = tmp; + /* Clear Archived bit on file writes, as AmigaOS would do */ + if (AFFS_I(inode)->i_protect & FIBF_ARCHIVED) { + AFFS_I(inode)->i_protect &= ~FIBF_ARCHIVED; + mark_inode_dirty(inode); + } + err_first_bh: unlock_page(page); put_page(page); diff --git a/fs/afs/cell.c b/fs/afs/cell.c index 78ba5f932287..296b489861a9 100644 --- a/fs/afs/cell.c +++ b/fs/afs/cell.c @@ -154,10 +154,17 @@ static struct afs_cell *afs_alloc_cell(struct afs_net *net, return ERR_PTR(-ENOMEM); } + cell->name = kmalloc(namelen + 1, GFP_KERNEL); + if (!cell->name) { + kfree(cell); + return ERR_PTR(-ENOMEM); + } + cell->net = net; cell->name_len = namelen; for (i = 0; i < namelen; i++) cell->name[i] = tolower(name[i]); + cell->name[i] = 0; atomic_set(&cell->usage, 2); INIT_WORK(&cell->manager, afs_manage_cell); @@ -203,6 +210,7 @@ parse_failed: if (ret == -EINVAL) printk(KERN_ERR "kAFS: bad VL server IP address\n"); error: + kfree(cell->name); kfree(cell); _leave(" = %d", ret); return ERR_PTR(ret); @@ -483,6 +491,7 @@ static void afs_cell_destroy(struct rcu_head *rcu) afs_put_vlserverlist(cell->net, rcu_access_pointer(cell->vl_servers)); key_put(cell->anonymous_key); + kfree(cell->name); kfree(cell); _leave(" [destroyed]"); diff --git a/fs/afs/dir.c b/fs/afs/dir.c index d1e1caa23c8b..3c486340b220 100644 --- a/fs/afs/dir.c +++ b/fs/afs/dir.c @@ -658,7 +658,8 @@ static struct inode *afs_do_lookup(struct inode *dir, struct dentry *dentry, cookie->ctx.actor = afs_lookup_filldir; cookie->name = dentry->d_name; - cookie->nr_fids = 1; /* slot 0 is saved for the fid we actually want */ + cookie->nr_fids = 2; /* slot 0 is saved for the fid we actually want + * and slot 1 for the directory */ read_seqlock_excl(&dvnode->cb_lock); dcbi = rcu_dereference_protected(dvnode->cb_interest, @@ -709,7 +710,11 @@ static struct inode *afs_do_lookup(struct inode *dir, struct dentry *dentry, if (!cookie->inodes) goto out_s; - for (i = 
1; i < cookie->nr_fids; i++) { + cookie->fids[1] = dvnode->fid; + cookie->statuses[1].cb_break = afs_calc_vnode_cb_break(dvnode); + cookie->inodes[1] = igrab(&dvnode->vfs_inode); + + for (i = 2; i < cookie->nr_fids; i++) { scb = &cookie->statuses[i]; /* Find any inodes that already exist and get their diff --git a/fs/afs/dynroot.c b/fs/afs/dynroot.c index 7503899c0a1b..f07e53ab808e 100644 --- a/fs/afs/dynroot.c +++ b/fs/afs/dynroot.c @@ -289,15 +289,17 @@ void afs_dynroot_depopulate(struct super_block *sb) net->dynroot_sb = NULL; mutex_unlock(&net->proc_cells_lock); - inode_lock(root->d_inode); + if (root) { + inode_lock(root->d_inode); - /* Remove all the pins for dirs created for manually added cells */ - list_for_each_entry_safe(subdir, tmp, &root->d_subdirs, d_child) { - if (subdir->d_fsdata) { - subdir->d_fsdata = NULL; - dput(subdir); + /* Remove all the pins for dirs created for manually added cells */ + list_for_each_entry_safe(subdir, tmp, &root->d_subdirs, d_child) { + if (subdir->d_fsdata) { + subdir->d_fsdata = NULL; + dput(subdir); + } } - } - inode_unlock(root->d_inode); + inode_unlock(root->d_inode); + } } diff --git a/fs/afs/fs_probe.c b/fs/afs/fs_probe.c index 02e976ca5732..51ee3dd79700 100644 --- a/fs/afs/fs_probe.c +++ b/fs/afs/fs_probe.c @@ -92,8 +92,8 @@ responded: } } - rtt_us = rxrpc_kernel_get_srtt(call->net->socket, call->rxcall); - if (rtt_us < server->probe.rtt) { + if (rxrpc_kernel_get_srtt(call->net->socket, call->rxcall, &rtt_us) && + rtt_us < server->probe.rtt) { server->probe.rtt = rtt_us; alist->preferred = index; have_result = true; diff --git a/fs/afs/fsclient.c b/fs/afs/fsclient.c index 0a4fed9e706b..5c2729fc07e5 100644 --- a/fs/afs/fsclient.c +++ b/fs/afs/fsclient.c @@ -56,16 +56,15 @@ static void xdr_dump_bad(const __be32 *bp) /* * decode an AFSFetchStatus block */ -static int xdr_decode_AFSFetchStatus(const __be32 **_bp, - struct afs_call *call, - struct afs_status_cb *scb) +static void xdr_decode_AFSFetchStatus(const __be32 **_bp, + struct afs_call *call, + struct afs_status_cb *scb) { const struct afs_xdr_AFSFetchStatus *xdr = (const void *)*_bp; struct afs_file_status *status = &scb->status; bool inline_error = (call->operation_ID == afs_FS_InlineBulkStatus); u64 data_version, size; u32 type, abort_code; - int ret; abort_code = ntohl(xdr->abort_code); @@ -79,7 +78,7 @@ static int xdr_decode_AFSFetchStatus(const __be32 **_bp, */ status->abort_code = abort_code; scb->have_error = true; - goto good; + goto advance; } pr_warn("Unknown AFSFetchStatus version %u\n", ntohl(xdr->if_version)); @@ -89,7 +88,7 @@ static int xdr_decode_AFSFetchStatus(const __be32 **_bp, if (abort_code != 0 && inline_error) { status->abort_code = abort_code; scb->have_error = true; - goto good; + goto advance; } type = ntohl(xdr->type); @@ -125,15 +124,13 @@ static int xdr_decode_AFSFetchStatus(const __be32 **_bp, data_version |= (u64)ntohl(xdr->data_version_hi) << 32; status->data_version = data_version; scb->have_status = true; -good: - ret = 0; advance: *_bp = (const void *)*_bp + sizeof(*xdr); - return ret; + return; bad: xdr_dump_bad(*_bp); - ret = afs_protocol_error(call, -EBADMSG, afs_eproto_bad_status); + afs_protocol_error(call, -EBADMSG, afs_eproto_bad_status); goto advance; } @@ -254,9 +251,7 @@ static int afs_deliver_fs_fetch_status_vnode(struct afs_call *call) /* unmarshall the reply once we've received all of it */ bp = call->buffer; - ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_scb); - if (ret < 0) - return ret; + xdr_decode_AFSFetchStatus(&bp, call, 
call->out_scb); xdr_decode_AFSCallBack(&bp, call, call->out_scb); xdr_decode_AFSVolSync(&bp, call->out_volsync); @@ -419,9 +414,7 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call) return ret; bp = call->buffer; - ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_scb); - if (ret < 0) - return ret; + xdr_decode_AFSFetchStatus(&bp, call, call->out_scb); xdr_decode_AFSCallBack(&bp, call, call->out_scb); xdr_decode_AFSVolSync(&bp, call->out_volsync); @@ -579,12 +572,8 @@ static int afs_deliver_fs_create_vnode(struct afs_call *call) /* unmarshall the reply once we've received all of it */ bp = call->buffer; xdr_decode_AFSFid(&bp, call->out_fid); - ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_scb); - if (ret < 0) - return ret; - ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_dir_scb); - if (ret < 0) - return ret; + xdr_decode_AFSFetchStatus(&bp, call, call->out_scb); + xdr_decode_AFSFetchStatus(&bp, call, call->out_dir_scb); xdr_decode_AFSCallBack(&bp, call, call->out_scb); xdr_decode_AFSVolSync(&bp, call->out_volsync); @@ -693,9 +682,7 @@ static int afs_deliver_fs_dir_status_and_vol(struct afs_call *call) /* unmarshall the reply once we've received all of it */ bp = call->buffer; - ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_dir_scb); - if (ret < 0) - return ret; + xdr_decode_AFSFetchStatus(&bp, call, call->out_dir_scb); xdr_decode_AFSVolSync(&bp, call->out_volsync); _leave(" = 0 [done]"); @@ -786,12 +773,8 @@ static int afs_deliver_fs_link(struct afs_call *call) /* unmarshall the reply once we've received all of it */ bp = call->buffer; - ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_scb); - if (ret < 0) - return ret; - ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_dir_scb); - if (ret < 0) - return ret; + xdr_decode_AFSFetchStatus(&bp, call, call->out_scb); + xdr_decode_AFSFetchStatus(&bp, call, call->out_dir_scb); xdr_decode_AFSVolSync(&bp, call->out_volsync); _leave(" = 0 [done]"); @@ -880,12 +863,8 @@ static int afs_deliver_fs_symlink(struct afs_call *call) /* unmarshall the reply once we've received all of it */ bp = call->buffer; xdr_decode_AFSFid(&bp, call->out_fid); - ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_scb); - if (ret < 0) - return ret; - ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_dir_scb); - if (ret < 0) - return ret; + xdr_decode_AFSFetchStatus(&bp, call, call->out_scb); + xdr_decode_AFSFetchStatus(&bp, call, call->out_dir_scb); xdr_decode_AFSVolSync(&bp, call->out_volsync); _leave(" = 0 [done]"); @@ -988,16 +967,12 @@ static int afs_deliver_fs_rename(struct afs_call *call) if (ret < 0) return ret; + bp = call->buffer; /* If the two dirs are the same, we have two copies of the same status * report, so we just decode it twice. 
*/ - bp = call->buffer; - ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_dir_scb); - if (ret < 0) - return ret; - ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_scb); - if (ret < 0) - return ret; + xdr_decode_AFSFetchStatus(&bp, call, call->out_dir_scb); + xdr_decode_AFSFetchStatus(&bp, call, call->out_scb); xdr_decode_AFSVolSync(&bp, call->out_volsync); _leave(" = 0 [done]"); @@ -1105,9 +1080,7 @@ static int afs_deliver_fs_store_data(struct afs_call *call) /* unmarshall the reply once we've received all of it */ bp = call->buffer; - ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_scb); - if (ret < 0) - return ret; + xdr_decode_AFSFetchStatus(&bp, call, call->out_scb); xdr_decode_AFSVolSync(&bp, call->out_volsync); _leave(" = 0 [done]"); @@ -1285,9 +1258,7 @@ static int afs_deliver_fs_store_status(struct afs_call *call) /* unmarshall the reply once we've received all of it */ bp = call->buffer; - ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_scb); - if (ret < 0) - return ret; + xdr_decode_AFSFetchStatus(&bp, call, call->out_scb); xdr_decode_AFSVolSync(&bp, call->out_volsync); _leave(" = 0 [done]"); @@ -1956,9 +1927,7 @@ static int afs_deliver_fs_fetch_status(struct afs_call *call) /* unmarshall the reply once we've received all of it */ bp = call->buffer; - ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_scb); - if (ret < 0) - return ret; + xdr_decode_AFSFetchStatus(&bp, call, call->out_scb); xdr_decode_AFSCallBack(&bp, call, call->out_scb); xdr_decode_AFSVolSync(&bp, call->out_volsync); @@ -2064,10 +2033,7 @@ static int afs_deliver_fs_inline_bulk_status(struct afs_call *call) bp = call->buffer; scb = &call->out_scb[call->count]; - ret = xdr_decode_AFSFetchStatus(&bp, call, scb); - if (ret < 0) - return ret; - + xdr_decode_AFSFetchStatus(&bp, call, scb); call->count++; if (call->count < call->count2) goto more_counts; @@ -2245,9 +2211,7 @@ static int afs_deliver_fs_fetch_acl(struct afs_call *call) return ret; bp = call->buffer; - ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_scb); - if (ret < 0) - return ret; + xdr_decode_AFSFetchStatus(&bp, call, call->out_scb); xdr_decode_AFSVolSync(&bp, call->out_volsync); call->unmarshall++; @@ -2328,9 +2292,7 @@ static int afs_deliver_fs_file_status_and_vol(struct afs_call *call) return ret; bp = call->buffer; - ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_scb); - if (ret < 0) - return ret; + xdr_decode_AFSFetchStatus(&bp, call, call->out_scb); xdr_decode_AFSVolSync(&bp, call->out_volsync); _leave(" = 0 [done]"); diff --git a/fs/afs/inode.c b/fs/afs/inode.c index 46d2d7cb461d..a74e8e209454 100644 --- a/fs/afs/inode.c +++ b/fs/afs/inode.c @@ -171,6 +171,7 @@ static void afs_apply_status(struct afs_fs_cursor *fc, struct timespec64 t; umode_t mode; bool data_changed = false; + bool change_size = false; BUG_ON(test_bit(AFS_VNODE_UNSET, &vnode->flags)); @@ -226,6 +227,7 @@ static void afs_apply_status(struct afs_fs_cursor *fc, } else { set_bit(AFS_VNODE_ZAP_DATA, &vnode->flags); } + change_size = true; } else if (vnode->status.type == AFS_FTYPE_DIR) { /* Expected directory change is handled elsewhere so * that we can locally edit the directory and save on a @@ -233,11 +235,19 @@ static void afs_apply_status(struct afs_fs_cursor *fc, */ if (test_bit(AFS_VNODE_DIR_VALID, &vnode->flags)) data_changed = false; + change_size = true; } if (data_changed) { inode_set_iversion_raw(&vnode->vfs_inode, status->data_version); - afs_set_i_size(vnode, status->size); + + /* Only update the size if the data version jumped. 
If the + * file is being modified locally, then we might have our own + * idea of what the size should be that's not the same as + * what's on the server. + */ + if (change_size) + afs_set_i_size(vnode, status->size); } } diff --git a/fs/afs/internal.h b/fs/afs/internal.h index 485cc3b2aaa8..7fe88d918b23 100644 --- a/fs/afs/internal.h +++ b/fs/afs/internal.h @@ -161,6 +161,7 @@ struct afs_call { bool upgrade; /* T to request service upgrade */ bool have_reply_time; /* T if have got reply_time */ bool intr; /* T if interruptible */ + bool unmarshalling_error; /* T if an unmarshalling error occurred */ u16 service_id; /* Actual service ID (after upgrade) */ unsigned int debug_id; /* Trace ID */ u32 operation_ID; /* operation ID for an incoming call */ @@ -396,7 +397,7 @@ struct afs_cell { struct afs_vlserver_list __rcu *vl_servers; u8 name_len; /* Length of name */ - char name[64 + 1]; /* Cell name, case-flattened and NUL-padded */ + char *name; /* Cell name, case-flattened and NUL-padded */ }; /* diff --git a/fs/afs/misc.c b/fs/afs/misc.c index 52b19e9c1535..5334f1bd2bca 100644 --- a/fs/afs/misc.c +++ b/fs/afs/misc.c @@ -83,6 +83,7 @@ int afs_abort_to_error(u32 abort_code) case UAENOLCK: return -ENOLCK; case UAENOTEMPTY: return -ENOTEMPTY; case UAELOOP: return -ELOOP; + case UAEOVERFLOW: return -EOVERFLOW; case UAENOMEDIUM: return -ENOMEDIUM; case UAEDQUOT: return -EDQUOT; diff --git a/fs/afs/proc.c b/fs/afs/proc.c index fba2ec3a3a9c..106b27011f6d 100644 --- a/fs/afs/proc.c +++ b/fs/afs/proc.c @@ -562,6 +562,7 @@ void afs_put_sysnames(struct afs_sysnames *sysnames) if (sysnames->subs[i] != afs_init_sysname && sysnames->subs[i] != sysnames->blank) kfree(sysnames->subs[i]); + kfree(sysnames); } } diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c index 52aa90fb4fbd..6adab30a8399 100644 --- a/fs/afs/rxrpc.c +++ b/fs/afs/rxrpc.c @@ -540,6 +540,8 @@ static void afs_deliver_to_call(struct afs_call *call) ret = call->type->deliver(call); state = READ_ONCE(call->state); + if (ret == 0 && call->unmarshalling_error) + ret = -EBADMSG; switch (ret) { case 0: afs_queue_call_work(call); @@ -963,5 +965,7 @@ noinline int afs_protocol_error(struct afs_call *call, int error, enum afs_eproto_cause cause) { trace_afs_protocol_error(call, error, cause); + if (call) + call->unmarshalling_error = true; return error; } diff --git a/fs/afs/vl_probe.c b/fs/afs/vl_probe.c index e3aa013c2177..081b7e5b13f5 100644 --- a/fs/afs/vl_probe.c +++ b/fs/afs/vl_probe.c @@ -92,8 +92,8 @@ responded: } } - rtt_us = rxrpc_kernel_get_srtt(call->net->socket, call->rxcall); - if (rtt_us < server->probe.rtt) { + if (rxrpc_kernel_get_srtt(call->net->socket, call->rxcall, &rtt_us) && + rtt_us < server->probe.rtt) { server->probe.rtt = rtt_us; alist->preferred = index; have_result = true; diff --git a/fs/afs/write.c b/fs/afs/write.c index cb76566763db..96b042af6248 100644 --- a/fs/afs/write.c +++ b/fs/afs/write.c @@ -194,11 +194,11 @@ int afs_write_end(struct file *file, struct address_space *mapping, i_size = i_size_read(&vnode->vfs_inode); if (maybe_i_size > i_size) { - spin_lock(&vnode->wb_lock); + write_seqlock(&vnode->cb_lock); i_size = i_size_read(&vnode->vfs_inode); if (maybe_i_size > i_size) i_size_write(&vnode->vfs_inode, maybe_i_size); - spin_unlock(&vnode->wb_lock); + write_sequnlock(&vnode->cb_lock); } if (!PageUptodate(page)) { @@ -811,6 +811,7 @@ vm_fault_t afs_page_mkwrite(struct vm_fault *vmf) vmf->page->index, priv); SetPagePrivate(vmf->page); set_page_private(vmf->page, priv); + file_update_time(file); 
sb_end_pagefault(inode->i_sb); return VM_FAULT_LOCKED; diff --git a/fs/afs/yfsclient.c b/fs/afs/yfsclient.c index 8af7f093305d..d21cf61d86b9 100644 --- a/fs/afs/yfsclient.c +++ b/fs/afs/yfsclient.c @@ -179,21 +179,20 @@ static void xdr_dump_bad(const __be32 *bp) /* * Decode a YFSFetchStatus block */ -static int xdr_decode_YFSFetchStatus(const __be32 **_bp, - struct afs_call *call, - struct afs_status_cb *scb) +static void xdr_decode_YFSFetchStatus(const __be32 **_bp, + struct afs_call *call, + struct afs_status_cb *scb) { const struct yfs_xdr_YFSFetchStatus *xdr = (const void *)*_bp; struct afs_file_status *status = &scb->status; u32 type; - int ret; status->abort_code = ntohl(xdr->abort_code); if (status->abort_code != 0) { if (status->abort_code == VNOVNODE) status->nlink = 0; scb->have_error = true; - goto good; + goto advance; } type = ntohl(xdr->type); @@ -221,15 +220,13 @@ static int xdr_decode_YFSFetchStatus(const __be32 **_bp, status->size = xdr_to_u64(xdr->size); status->data_version = xdr_to_u64(xdr->data_version); scb->have_status = true; -good: - ret = 0; advance: *_bp += xdr_size(xdr); - return ret; + return; bad: xdr_dump_bad(*_bp); - ret = afs_protocol_error(call, -EBADMSG, afs_eproto_bad_status); + afs_protocol_error(call, -EBADMSG, afs_eproto_bad_status); goto advance; } @@ -348,9 +345,7 @@ static int yfs_deliver_fs_status_cb_and_volsync(struct afs_call *call) /* unmarshall the reply once we've received all of it */ bp = call->buffer; - ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_scb); - if (ret < 0) - return ret; + xdr_decode_YFSFetchStatus(&bp, call, call->out_scb); xdr_decode_YFSCallBack(&bp, call, call->out_scb); xdr_decode_YFSVolSync(&bp, call->out_volsync); @@ -372,9 +367,7 @@ static int yfs_deliver_status_and_volsync(struct afs_call *call) return ret; bp = call->buffer; - ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_scb); - if (ret < 0) - return ret; + xdr_decode_YFSFetchStatus(&bp, call, call->out_scb); xdr_decode_YFSVolSync(&bp, call->out_volsync); _leave(" = 0 [done]"); @@ -534,9 +527,7 @@ static int yfs_deliver_fs_fetch_data64(struct afs_call *call) return ret; bp = call->buffer; - ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_scb); - if (ret < 0) - return ret; + xdr_decode_YFSFetchStatus(&bp, call, call->out_scb); xdr_decode_YFSCallBack(&bp, call, call->out_scb); xdr_decode_YFSVolSync(&bp, call->out_volsync); @@ -645,12 +636,8 @@ static int yfs_deliver_fs_create_vnode(struct afs_call *call) /* unmarshall the reply once we've received all of it */ bp = call->buffer; xdr_decode_YFSFid(&bp, call->out_fid); - ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_scb); - if (ret < 0) - return ret; - ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_dir_scb); - if (ret < 0) - return ret; + xdr_decode_YFSFetchStatus(&bp, call, call->out_scb); + xdr_decode_YFSFetchStatus(&bp, call, call->out_dir_scb); xdr_decode_YFSCallBack(&bp, call, call->out_scb); xdr_decode_YFSVolSync(&bp, call->out_volsync); @@ -803,14 +790,9 @@ static int yfs_deliver_fs_remove_file2(struct afs_call *call) return ret; bp = call->buffer; - ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_dir_scb); - if (ret < 0) - return ret; - + xdr_decode_YFSFetchStatus(&bp, call, call->out_dir_scb); xdr_decode_YFSFid(&bp, &fid); - ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_scb); - if (ret < 0) - return ret; + xdr_decode_YFSFetchStatus(&bp, call, call->out_scb); /* Was deleted if vnode->status.abort_code == VNOVNODE. 
*/ xdr_decode_YFSVolSync(&bp, call->out_volsync); @@ -890,10 +872,7 @@ static int yfs_deliver_fs_remove(struct afs_call *call) return ret; bp = call->buffer; - ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_dir_scb); - if (ret < 0) - return ret; - + xdr_decode_YFSFetchStatus(&bp, call, call->out_dir_scb); xdr_decode_YFSVolSync(&bp, call->out_volsync); return 0; } @@ -975,12 +954,8 @@ static int yfs_deliver_fs_link(struct afs_call *call) return ret; bp = call->buffer; - ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_scb); - if (ret < 0) - return ret; - ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_dir_scb); - if (ret < 0) - return ret; + xdr_decode_YFSFetchStatus(&bp, call, call->out_scb); + xdr_decode_YFSFetchStatus(&bp, call, call->out_dir_scb); xdr_decode_YFSVolSync(&bp, call->out_volsync); _leave(" = 0 [done]"); return 0; @@ -1062,12 +1037,8 @@ static int yfs_deliver_fs_symlink(struct afs_call *call) /* unmarshall the reply once we've received all of it */ bp = call->buffer; xdr_decode_YFSFid(&bp, call->out_fid); - ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_scb); - if (ret < 0) - return ret; - ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_dir_scb); - if (ret < 0) - return ret; + xdr_decode_YFSFetchStatus(&bp, call, call->out_scb); + xdr_decode_YFSFetchStatus(&bp, call, call->out_dir_scb); xdr_decode_YFSVolSync(&bp, call->out_volsync); _leave(" = 0 [done]"); @@ -1155,13 +1126,11 @@ static int yfs_deliver_fs_rename(struct afs_call *call) return ret; bp = call->buffer; - ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_dir_scb); - if (ret < 0) - return ret; - ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_scb); - if (ret < 0) - return ret; - + /* If the two dirs are the same, we have two copies of the same status + * report, so we just decode it twice. 
+ */ + xdr_decode_YFSFetchStatus(&bp, call, call->out_dir_scb); + xdr_decode_YFSFetchStatus(&bp, call, call->out_scb); xdr_decode_YFSVolSync(&bp, call->out_volsync); _leave(" = 0 [done]"); return 0; @@ -1846,9 +1815,7 @@ static int yfs_deliver_fs_inline_bulk_status(struct afs_call *call) bp = call->buffer; scb = &call->out_scb[call->count]; - ret = xdr_decode_YFSFetchStatus(&bp, call, scb); - if (ret < 0) - return ret; + xdr_decode_YFSFetchStatus(&bp, call, scb); call->count++; if (call->count < call->count2) @@ -2068,9 +2035,7 @@ static int yfs_deliver_fs_fetch_opaque_acl(struct afs_call *call) bp = call->buffer; yacl->inherit_flag = ntohl(*bp++); yacl->num_cleaned = ntohl(*bp++); - ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_scb); - if (ret < 0) - return ret; + xdr_decode_YFSFetchStatus(&bp, call, call->out_scb); xdr_decode_YFSVolSync(&bp, call->out_volsync); call->unmarshall++; diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c index 831a2b25ba79..196f9f64d075 100644 --- a/fs/binfmt_flat.c +++ b/fs/binfmt_flat.c @@ -571,7 +571,7 @@ static int load_flat_file(struct linux_binprm *bprm, goto err; } - len = data_len + extra; + len = data_len + extra + MAX_SHARED_LIBS * sizeof(unsigned long); len = PAGE_ALIGN(len); realdatastart = vm_mmap(NULL, 0, len, PROT_READ|PROT_WRITE|PROT_EXEC, MAP_PRIVATE, 0); @@ -585,7 +585,9 @@ static int load_flat_file(struct linux_binprm *bprm, vm_munmap(textpos, text_len); goto err; } - datapos = ALIGN(realdatastart, FLAT_DATA_ALIGN); + datapos = ALIGN(realdatastart + + MAX_SHARED_LIBS * sizeof(unsigned long), + FLAT_DATA_ALIGN); pr_debug("Allocated data+bss+stack (%u bytes): %lx\n", data_len + bss_len + stack_len, datapos); @@ -615,7 +617,7 @@ static int load_flat_file(struct linux_binprm *bprm, memp_size = len; } else { - len = text_len + data_len + extra; + len = text_len + data_len + extra + MAX_SHARED_LIBS * sizeof(u32); len = PAGE_ALIGN(len); textpos = vm_mmap(NULL, 0, len, PROT_READ | PROT_EXEC | PROT_WRITE, MAP_PRIVATE, 0); @@ -630,7 +632,9 @@ static int load_flat_file(struct linux_binprm *bprm, } realdatastart = textpos + ntohl(hdr->data_start); - datapos = ALIGN(realdatastart, FLAT_DATA_ALIGN); + datapos = ALIGN(realdatastart + + MAX_SHARED_LIBS * sizeof(u32), + FLAT_DATA_ALIGN); reloc = (__be32 __user *) (datapos + (ntohl(hdr->reloc_start) - text_len)); @@ -647,9 +651,8 @@ static int load_flat_file(struct linux_binprm *bprm, (text_len + full_data - sizeof(struct flat_hdr)), 0); - if (datapos != realdatastart) - memmove((void *)datapos, (void *)realdatastart, - full_data); + memmove((void *) datapos, (void *) realdatastart, + full_data); #else /* * This is used on MMU systems mainly for testing. 
@@ -705,7 +708,8 @@ static int load_flat_file(struct linux_binprm *bprm, if (IS_ERR_VALUE(result)) { ret = result; pr_err("Unable to read code+data+bss, errno %d\n", ret); - vm_munmap(textpos, text_len + data_len + extra); + vm_munmap(textpos, text_len + data_len + extra + + MAX_SHARED_LIBS * sizeof(u32)); goto err; } } diff --git a/fs/block_dev.c b/fs/block_dev.c index 34644ce4b502..2dc9c73a4cb2 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -1546,10 +1546,8 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part) */ if (!for_part) { ret = devcgroup_inode_permission(bdev->bd_inode, perm); - if (ret != 0) { - bdput(bdev); + if (ret != 0) return ret; - } } restart: @@ -1618,8 +1616,10 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part) goto out_clear; BUG_ON(for_part); ret = __blkdev_get(whole, mode, 1); - if (ret) + if (ret) { + bdput(whole); goto out_clear; + } bdev->bd_contains = whole; bdev->bd_part = disk_get_part(disk, partno); if (!(disk->flags & GENHD_FL_UP) || @@ -1669,7 +1669,6 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part) disk_unblock_events(disk); put_disk_and_module(disk); out: - bdput(bdev); return ret; } @@ -1736,6 +1735,9 @@ int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder) bdput(whole); } + if (res) + bdput(bdev); + return res; } EXPORT_SYMBOL(blkdev_get); diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c index e5d85311d5d5..86e280edf804 100644 --- a/fs/btrfs/backref.c +++ b/fs/btrfs/backref.c @@ -1422,6 +1422,7 @@ static int btrfs_find_all_roots_safe(struct btrfs_trans_handle *trans, if (ret < 0 && ret != -ENOENT) { ulist_free(tmp); ulist_free(*roots); + *roots = NULL; return ret; } node = ulist_next(tmp, &uiter); diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c index 2fead6c3c687..b167649f5f5d 100644 --- a/fs/btrfs/block-group.c +++ b/fs/btrfs/block-group.c @@ -910,7 +910,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans, path = btrfs_alloc_path(); if (!path) { ret = -ENOMEM; - goto out_put_group; + goto out; } /* @@ -948,7 +948,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans, ret = btrfs_orphan_add(trans, BTRFS_I(inode)); if (ret) { btrfs_add_delayed_iput(inode); - goto out_put_group; + goto out; } clear_nlink(inode); /* One for the block groups ref */ @@ -971,13 +971,13 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans, ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1); if (ret < 0) - goto out_put_group; + goto out; if (ret > 0) btrfs_release_path(path); if (ret == 0) { ret = btrfs_del_item(trans, tree_root, path); if (ret) - goto out_put_group; + goto out; btrfs_release_path(path); } @@ -986,6 +986,9 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans, &fs_info->block_group_cache_tree); RB_CLEAR_NODE(&block_group->cache_node); + /* Once for the block groups rbtree */ + btrfs_put_block_group(block_group); + if (fs_info->first_logical_byte == block_group->key.objectid) fs_info->first_logical_byte = (u64)-1; spin_unlock(&fs_info->block_group_cache_lock); @@ -1094,10 +1097,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans, ret = remove_block_group_free_space(trans, block_group); if (ret) - goto out_put_group; - - /* Once for the block groups rbtree */ - btrfs_put_block_group(block_group); + goto out; ret = btrfs_search_slot(trans, root, &key, path, -1, 1); if (ret > 0) @@ -1120,10 +1120,9 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans, 
free_extent_map(em); } -out_put_group: +out: /* Once for the lookup reference */ btrfs_put_block_group(block_group); -out: if (remove_rsv) btrfs_delayed_refs_rsv_release(fs_info, 1); btrfs_free_path(path); @@ -1167,7 +1166,7 @@ struct btrfs_trans_handle *btrfs_start_trans_remove_block_group( free_extent_map(em); return btrfs_start_transaction_fallback_global_rsv(fs_info->extent_root, - num_items, 1); + num_items); } /* @@ -2169,7 +2168,7 @@ static int cache_save_setup(struct btrfs_block_group_cache *block_group, return 0; } - if (trans->aborted) + if (TRANS_ABORTED(trans)) return 0; again: inode = lookup_free_space_inode(block_group, path); diff --git a/fs/btrfs/block-rsv.c b/fs/btrfs/block-rsv.c index d07bd41a7c1e..343400d49bd1 100644 --- a/fs/btrfs/block-rsv.c +++ b/fs/btrfs/block-rsv.c @@ -5,6 +5,7 @@ #include "block-rsv.h" #include "space-info.h" #include "transaction.h" +#include "block-group.h" static u64 block_rsv_release_bytes(struct btrfs_fs_info *fs_info, struct btrfs_block_rsv *block_rsv, @@ -313,6 +314,8 @@ void btrfs_update_global_block_rsv(struct btrfs_fs_info *fs_info) else block_rsv->full = 0; + if (block_rsv->size >= sinfo->total_bytes) + sinfo->force_alloc = CHUNK_ALLOC_FORCE; spin_unlock(&block_rsv->lock); spin_unlock(&sinfo->lock); } diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index a989105d39c8..c05127f50637 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -1339,6 +1339,8 @@ tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct btrfs_path *path, btrfs_tree_read_unlock_blocking(eb); free_extent_buffer(eb); + btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb_rewin), + eb_rewin, btrfs_header_level(eb_rewin)); btrfs_tree_read_lock(eb_rewin); __tree_mod_log_rewind(fs_info, eb_rewin, time_seq, tm); WARN_ON(btrfs_header_nritems(eb_rewin) > @@ -1412,7 +1414,6 @@ get_old_root(struct btrfs_root *root, u64 time_seq) if (!eb) return NULL; - btrfs_tree_read_lock(eb); if (old_root) { btrfs_set_header_bytenr(eb, eb->start); btrfs_set_header_backref_rev(eb, BTRFS_MIXED_BACKREF_REV); @@ -1420,6 +1421,9 @@ get_old_root(struct btrfs_root *root, u64 time_seq) btrfs_set_header_level(eb, old_root->level); btrfs_set_header_generation(eb, old_generation); } + btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb), eb, + btrfs_header_level(eb)); + btrfs_tree_read_lock(eb); if (tm) __tree_mod_log_rewind(fs_info, eb, time_seq, tm); else diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 169075550a5a..9a690c10afaa 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -940,6 +940,8 @@ enum { BTRFS_ROOT_DEAD_RELOC_TREE, /* Mark dead root stored on device whose cleanup needs to be resumed */ BTRFS_ROOT_DEAD_TREE, + /* The root has a log tree. Used only for subvolume roots. 
*/ + BTRFS_ROOT_HAS_LOG_TREE, }; /* @@ -988,8 +990,10 @@ struct btrfs_root { wait_queue_head_t log_writer_wait; wait_queue_head_t log_commit_wait[2]; struct list_head log_ctxs[2]; + /* Used only for log trees of subvolumes, not for the log root tree */ atomic_t log_writers; atomic_t log_commit[2]; + /* Used only for log trees of subvolumes, not for the log root tree */ atomic_t log_batch; int log_transid; /* No matter the commit succeeds or not*/ @@ -2411,7 +2415,7 @@ int btrfs_pin_extent_for_log_replay(struct btrfs_fs_info *fs_info, u64 bytenr, u64 num_bytes); int btrfs_exclude_logged_extents(struct extent_buffer *eb); int btrfs_cross_ref_exist(struct btrfs_root *root, - u64 objectid, u64 offset, u64 bytenr); + u64 objectid, u64 offset, u64 bytenr, bool strict); struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root, u64 parent, u64 root_objectid, @@ -2465,6 +2469,7 @@ enum btrfs_reserve_flush_enum { BTRFS_RESERVE_FLUSH_LIMIT, BTRFS_RESERVE_FLUSH_EVICT, BTRFS_RESERVE_FLUSH_ALL, + BTRFS_RESERVE_FLUSH_ALL_STEAL, }; enum btrfs_flush_state { @@ -2816,7 +2821,7 @@ struct extent_map *btrfs_get_extent_fiemap(struct btrfs_inode *inode, u64 start, u64 len); noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len, u64 *orig_start, u64 *orig_block_len, - u64 *ram_bytes); + u64 *ram_bytes, bool strict); void __btrfs_del_delalloc_inode(struct btrfs_root *root, struct btrfs_inode *inode); @@ -2960,6 +2965,8 @@ int btrfs_defrag_leaves(struct btrfs_trans_handle *trans, int btrfs_parse_options(struct btrfs_fs_info *info, char *options, unsigned long new_flags); int btrfs_sync_fs(struct super_block *sb, int wait); +char *btrfs_get_subvol_name_from_objectid(struct btrfs_fs_info *fs_info, + u64 subvol_objectid); static inline __printf(2, 3) __cold void btrfs_no_printk(const struct btrfs_fs_info *fs_info, const char *fmt, ...) 
@@ -3161,7 +3168,7 @@ do { \ /* Report first abort since mount */ \ if (!test_and_set_bit(BTRFS_FS_STATE_TRANS_ABORTED, \ &((trans)->fs_info->fs_state))) { \ - if ((errno) != -EIO) { \ + if ((errno) != -EIO && (errno) != -EROFS) { \ WARN(1, KERN_DEBUG \ "BTRFS: Transaction aborted (error %d)\n", \ (errno)); \ diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c index 5bcccfbcc7c1..a34ee9c2f315 100644 --- a/fs/btrfs/delayed-inode.c +++ b/fs/btrfs/delayed-inode.c @@ -1151,7 +1151,7 @@ static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans, int nr) int ret = 0; bool count = (nr > 0); - if (trans->aborted) + if (TRANS_ABORTED(trans)) return -EIO; path = btrfs_alloc_path(); diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 273d1ccdd45d..dd6fb2ee8040 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1475,9 +1475,16 @@ int btrfs_init_fs_root(struct btrfs_root *root) spin_lock_init(&root->ino_cache_lock); init_waitqueue_head(&root->ino_cache_wait); - ret = get_anon_bdev(&root->anon_dev); - if (ret) - goto fail; + /* + * Don't assign anonymous block device to roots that are not exposed to + * userspace, the id pool is limited to 1M + */ + if (is_fstree(root->root_key.objectid) && + btrfs_root_refs(&root->root_item) > 0) { + ret = get_anon_bdev(&root->anon_dev); + if (ret) + goto fail; + } mutex_lock(&root->objectid_mutex); ret = btrfs_find_highest_objectid(root, @@ -4470,6 +4477,7 @@ static void btrfs_cleanup_bg_io(struct btrfs_block_group_cache *cache) cache->io_ctl.inode = NULL; iput(inode); } + ASSERT(cache->io_ctl.pages == NULL); btrfs_put_block_group(cache); } diff --git a/fs/btrfs/export.c b/fs/btrfs/export.c index ddf28ecf17f9..93cceeba484c 100644 --- a/fs/btrfs/export.c +++ b/fs/btrfs/export.c @@ -57,9 +57,9 @@ static int btrfs_encode_fh(struct inode *inode, u32 *fh, int *max_len, return type; } -static struct dentry *btrfs_get_dentry(struct super_block *sb, u64 objectid, - u64 root_objectid, u32 generation, - int check_generation) +struct dentry *btrfs_get_dentry(struct super_block *sb, u64 objectid, + u64 root_objectid, u32 generation, + int check_generation) { struct btrfs_fs_info *fs_info = btrfs_sb(sb); struct btrfs_root *root; @@ -152,7 +152,7 @@ static struct dentry *btrfs_fh_to_dentry(struct super_block *sb, struct fid *fh, return btrfs_get_dentry(sb, objectid, root_objectid, generation, 1); } -static struct dentry *btrfs_get_parent(struct dentry *child) +struct dentry *btrfs_get_parent(struct dentry *child) { struct inode *dir = d_inode(child); struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb); diff --git a/fs/btrfs/export.h b/fs/btrfs/export.h index 57488ecd7d4e..f32f4113c976 100644 --- a/fs/btrfs/export.h +++ b/fs/btrfs/export.h @@ -18,4 +18,9 @@ struct btrfs_fid { u64 parent_root_objectid; } __attribute__ ((packed)); +struct dentry *btrfs_get_dentry(struct super_block *sb, u64 objectid, + u64 root_objectid, u32 generation, + int check_generation); +struct dentry *btrfs_get_parent(struct dentry *child); + #endif diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 47ecf7216b3e..31c1ed554d26 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -402,12 +402,11 @@ int btrfs_get_extent_inline_ref_type(const struct extent_buffer *eb, if (type == BTRFS_SHARED_BLOCK_REF_KEY) { ASSERT(eb->fs_info); /* - * Every shared one has parent tree - * block, which must be aligned to - * nodesize. + * Every shared one has parent tree block, + * which must be aligned to sector size. 
*/ if (offset && - IS_ALIGNED(offset, eb->fs_info->nodesize)) + IS_ALIGNED(offset, eb->fs_info->sectorsize)) return type; } } else if (is_data == BTRFS_REF_TYPE_DATA) { @@ -416,12 +415,11 @@ int btrfs_get_extent_inline_ref_type(const struct extent_buffer *eb, if (type == BTRFS_SHARED_DATA_REF_KEY) { ASSERT(eb->fs_info); /* - * Every shared one has parent tree - * block, which must be aligned to - * nodesize. + * Every shared one has parent tree block, + * which must be aligned to sector size. */ if (offset && - IS_ALIGNED(offset, eb->fs_info->nodesize)) + IS_ALIGNED(offset, eb->fs_info->sectorsize)) return type; } } else { @@ -431,8 +429,9 @@ int btrfs_get_extent_inline_ref_type(const struct extent_buffer *eb, } btrfs_print_leaf((struct extent_buffer *)eb); - btrfs_err(eb->fs_info, "eb %llu invalid extent inline ref type %d", - eb->start, type); + btrfs_err(eb->fs_info, + "eb %llu iref 0x%lx invalid extent inline ref type %d", + eb->start, (unsigned long)iref, type); WARN_ON(1); return BTRFS_REF_TYPE_INVALID; @@ -1561,7 +1560,7 @@ static int run_delayed_extent_op(struct btrfs_trans_handle *trans, int err = 0; int metadata = !extent_op->is_data; - if (trans->aborted) + if (TRANS_ABORTED(trans)) return 0; if (metadata && !btrfs_fs_incompat(fs_info, SKINNY_METADATA)) @@ -1681,7 +1680,7 @@ static int run_one_delayed_ref(struct btrfs_trans_handle *trans, { int ret = 0; - if (trans->aborted) { + if (TRANS_ABORTED(trans)) { if (insert_reserved) btrfs_pin_extent(trans->fs_info, node->bytenr, node->num_bytes, 1); @@ -2169,7 +2168,7 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, int run_all = count == (unsigned long)-1; /* We'll clean this up in btrfs_cleanup_transaction */ - if (trans->aborted) + if (TRANS_ABORTED(trans)) return 0; if (test_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags)) @@ -2320,7 +2319,8 @@ static noinline int check_delayed_ref(struct btrfs_root *root, static noinline int check_committed_ref(struct btrfs_root *root, struct btrfs_path *path, - u64 objectid, u64 offset, u64 bytenr) + u64 objectid, u64 offset, u64 bytenr, + bool strict) { struct btrfs_fs_info *fs_info = root->fs_info; struct btrfs_root *extent_root = fs_info->extent_root; @@ -2362,9 +2362,13 @@ static noinline int check_committed_ref(struct btrfs_root *root, btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY)) goto out; - /* If extent created before last snapshot => it's definitely shared */ - if (btrfs_extent_generation(leaf, ei) <= - btrfs_root_last_snapshot(&root->root_item)) + /* + * If extent created before last snapshot => it's shared unless the + * snapshot has been deleted. Use the heuristic if strict is false. 
+ */ + if (!strict && + (btrfs_extent_generation(leaf, ei) <= + btrfs_root_last_snapshot(&root->root_item))) goto out; iref = (struct btrfs_extent_inline_ref *)(ei + 1); @@ -2389,7 +2393,7 @@ out: } int btrfs_cross_ref_exist(struct btrfs_root *root, u64 objectid, u64 offset, - u64 bytenr) + u64 bytenr, bool strict) { struct btrfs_path *path; int ret; @@ -2400,7 +2404,7 @@ int btrfs_cross_ref_exist(struct btrfs_root *root, u64 objectid, u64 offset, do { ret = check_committed_ref(root, path, objectid, - offset, bytenr); + offset, bytenr, strict); if (ret && ret != -ENOENT) goto out; @@ -2892,7 +2896,7 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans) else unpin = &fs_info->freed_extents[0]; - while (!trans->aborted) { + while (!TRANS_ABORTED(trans)) { struct extent_state *cached_state = NULL; mutex_lock(&fs_info->unused_bg_unpin_mutex); @@ -2924,7 +2928,7 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans) u64 trimmed = 0; ret = -EROFS; - if (!trans->aborted) + if (!TRANS_ABORTED(trans)) ret = btrfs_discard_extent(fs_info, block_group->key.objectid, block_group->key.offset, @@ -4441,7 +4445,7 @@ btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root, return ERR_PTR(-EUCLEAN); } - btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level); + btrfs_set_buffer_lockdep_class(owner, buf, level); btrfs_tree_lock(buf); btrfs_clean_tree_block(buf); clear_bit(EXTENT_BUFFER_STALE, &buf->bflags); @@ -5221,7 +5225,14 @@ int btrfs_drop_snapshot(struct btrfs_root *root, goto out; } - trans = btrfs_start_transaction(tree_root, 0); + /* + * Use join to avoid potential EINTR from transaction start. See + * wait_reserve_ticket and the whole reservation callchain. + */ + if (for_reloc) + trans = btrfs_join_transaction(tree_root); + else + trans = btrfs_start_transaction(tree_root, 0); if (IS_ERR(trans)) { err = PTR_ERR(trans); goto out_free; diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 8aab286f2028..60c21cfb1948 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -1923,7 +1923,8 @@ static int __process_pages_contig(struct address_space *mapping, if (!PageDirty(pages[i]) || pages[i]->mapping != mapping) { unlock_page(pages[i]); - put_page(pages[i]); + for (; i < ret; i++) + put_page(pages[i]); err = -EAGAIN; goto out; } @@ -4072,7 +4073,7 @@ retry: if (!test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) { ret = flush_write_bio(&epd); } else { - ret = -EUCLEAN; + ret = -EROFS; end_write_bio(&epd, ret); } return ret; @@ -4466,20 +4467,32 @@ int try_release_extent_mapping(struct page *page, gfp_t mask) free_extent_map(em); break; } - if (!test_range_bit(tree, em->start, - extent_map_end(em) - 1, - EXTENT_LOCKED, 0, NULL)) { + if (test_range_bit(tree, em->start, + extent_map_end(em) - 1, + EXTENT_LOCKED, 0, NULL)) + goto next; + /* + * If it's not in the list of modified extents, used + * by a fast fsync, we can remove it. If it's being + * logged we can safely remove it since fsync took an + * extra reference on the em. + */ + if (list_empty(&em->list) || + test_bit(EXTENT_FLAG_LOGGING, &em->flags)) { set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &btrfs_inode->runtime_flags); remove_extent_mapping(map, em); /* once for the rb tree */ free_extent_map(em); } +next: start = extent_map_end(em); write_unlock(&map->lock); /* once for us */ free_extent_map(em); + + cond_resched(); /* Allow large-extent preemption. 
*/ } } return try_release_extent_state(tree, page, mask); @@ -5025,25 +5038,28 @@ struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info, static void check_buffer_tree_ref(struct extent_buffer *eb) { int refs; - /* the ref bit is tricky. We have to make sure it is set - * if we have the buffer dirty. Otherwise the - * code to free a buffer can end up dropping a dirty - * page + /* + * The TREE_REF bit is first set when the extent_buffer is added + * to the radix tree. It is also reset, if unset, when a new reference + * is created by find_extent_buffer. * - * Once the ref bit is set, it won't go away while the - * buffer is dirty or in writeback, and it also won't - * go away while we have the reference count on the - * eb bumped. + * It is only cleared in two cases: freeing the last non-tree + * reference to the extent_buffer when its STALE bit is set or + * calling releasepage when the tree reference is the only reference. * - * We can't just set the ref bit without bumping the - * ref on the eb because free_extent_buffer might - * see the ref bit and try to clear it. If this happens - * free_extent_buffer might end up dropping our original - * ref by mistake and freeing the page before we are able - * to add one more ref. + * In both cases, care is taken to ensure that the extent_buffer's + * pages are not under io. However, releasepage can be concurrently + * called with creating new references, which is prone to race + * conditions between the calls to check_buffer_tree_ref in those + * codepaths and clearing TREE_REF in try_release_extent_buffer. * - * So bump the ref count first, then set the bit. If someone - * beat us to it, drop the ref we added. + * The actual lifetime of the extent_buffer in the radix tree is + * adequately protected by the refcount, but the TREE_REF bit and + * its corresponding reference are not. To protect against this + * class of races, we call check_buffer_tree_ref from the codepaths + * which trigger io after they set eb->io_pages. Note that once io is + * initiated, TREE_REF can no longer be cleared, so that is the + * moment at which any such race is best fixed. */ refs = atomic_read(&eb->refs); if (refs >= 2 && test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) @@ -5493,6 +5509,11 @@ int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num) clear_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags); eb->read_mirror = 0; atomic_set(&eb->io_pages, num_reads); + /* + * It is possible for releasepage to clear the TREE_REF bit before we + * set io_pages. See check_buffer_tree_ref for a more detailed comment. 
+ */ + check_buffer_tree_ref(eb); for (i = 0; i < num_pages; i++) { page = eb->pages[i]; @@ -5586,9 +5607,9 @@ void read_extent_buffer(const struct extent_buffer *eb, void *dstv, } } -int read_extent_buffer_to_user(const struct extent_buffer *eb, - void __user *dstv, - unsigned long start, unsigned long len) +int read_extent_buffer_to_user_nofault(const struct extent_buffer *eb, + void __user *dstv, + unsigned long start, unsigned long len) { size_t cur; size_t offset; @@ -5609,7 +5630,7 @@ int read_extent_buffer_to_user(const struct extent_buffer *eb, cur = min(len, (PAGE_SIZE - offset)); kaddr = page_address(page); - if (copy_to_user(dst, kaddr + offset, cur)) { + if (probe_user_write(dst, kaddr + offset, cur)) { ret = -EFAULT; break; } diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index cf3424d58fec..bc858c8cef0a 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -457,9 +457,9 @@ int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv, void read_extent_buffer(const struct extent_buffer *eb, void *dst, unsigned long start, unsigned long len); -int read_extent_buffer_to_user(const struct extent_buffer *eb, - void __user *dst, unsigned long start, - unsigned long len); +int read_extent_buffer_to_user_nofault(const struct extent_buffer *eb, + void __user *dst, unsigned long start, + unsigned long len); void write_extent_buffer_fsid(struct extent_buffer *eb, const void *src); void write_extent_buffer_chunk_tree_uuid(struct extent_buffer *eb, const void *src); diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c index f62a179f85bb..2b8f29c07668 100644 --- a/fs/btrfs/file-item.c +++ b/fs/btrfs/file-item.c @@ -798,10 +798,12 @@ again: nritems = btrfs_header_nritems(path->nodes[0]); if (!nritems || (path->slots[0] >= nritems - 1)) { ret = btrfs_next_leaf(root, path); - if (ret == 1) + if (ret < 0) { + goto out; + } else if (ret > 0) { found_next = 1; - if (ret != 0) goto insert; + } slot = path->slots[0]; } btrfs_item_key_to_cpu(path->nodes[0], &found_key, slot); diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 3cfbccacef7f..4e4ddd5629e5 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -1568,7 +1568,7 @@ static noinline int check_can_nocow(struct btrfs_inode *inode, loff_t pos, num_bytes = lockend - lockstart + 1; ret = can_nocow_extent(&inode->vfs_inode, lockstart, &num_bytes, - NULL, NULL, NULL); + NULL, NULL, NULL, false); if (ret <= 0) { ret = 0; btrfs_end_write_no_snapshotting(root); @@ -3130,14 +3130,14 @@ reserve_space: if (ret < 0) goto out; space_reserved = true; - ret = btrfs_qgroup_reserve_data(inode, &data_reserved, - alloc_start, bytes_to_reserve); - if (ret) - goto out; ret = btrfs_punch_hole_lock_range(inode, lockstart, lockend, &cached_state); if (ret) goto out; + ret = btrfs_qgroup_reserve_data(inode, &data_reserved, + alloc_start, bytes_to_reserve); + if (ret) + goto out; ret = btrfs_prealloc_file_range(inode, mode, alloc_start, alloc_end - alloc_start, i_blocksize(inode), diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index d86ada9c3c54..6e6be922b937 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -1166,7 +1166,6 @@ static int __btrfs_wait_cache_io(struct btrfs_root *root, ret = update_cache_item(trans, root, inode, path, offset, io_ctl->entries, io_ctl->bitmaps); out: - io_ctl_free(io_ctl); if (ret) { invalidate_inode_pages2(inode->i_mapping); BTRFS_I(inode)->generation = 0; @@ -1329,6 +1328,7 @@ static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode 
*inode, * them out later */ io_ctl_drop_pages(io_ctl); + io_ctl_free(io_ctl); unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1, &cached_state); @@ -2166,7 +2166,7 @@ out: static bool try_merge_free_space(struct btrfs_free_space_ctl *ctl, struct btrfs_free_space *info, bool update_stat) { - struct btrfs_free_space *left_info; + struct btrfs_free_space *left_info = NULL; struct btrfs_free_space *right_info; bool merged = false; u64 offset = info->offset; @@ -2181,7 +2181,7 @@ static bool try_merge_free_space(struct btrfs_free_space_ctl *ctl, if (right_info && rb_prev(&right_info->offset_index)) left_info = rb_entry(rb_prev(&right_info->offset_index), struct btrfs_free_space, offset_index); - else + else if (!right_info) left_info = tree_search_offset(ctl, offset - 1, 0, 0); if (right_info && !right_info->bitmap) { diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 94b0df3fb3c8..9ac40991a640 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -49,6 +49,7 @@ #include "qgroup.h" #include "delalloc-space.h" #include "block-group.h" +#include "space-info.h" struct btrfs_iget_args { struct btrfs_key *location; @@ -640,12 +641,18 @@ cont: page_error_op | PAGE_END_WRITEBACK); - for (i = 0; i < nr_pages; i++) { - WARN_ON(pages[i]->mapping); - put_page(pages[i]); + /* + * Ensure we only free the compressed pages if we have + * them allocated, as we can still reach here with + * inode_need_compress() == false. + */ + if (pages) { + for (i = 0; i < nr_pages; i++) { + WARN_ON(pages[i]->mapping); + put_page(pages[i]); + } + kfree(pages); } - kfree(pages); - return 0; } } @@ -974,6 +981,7 @@ static noinline int cow_file_range(struct inode *inode, u64 num_bytes; unsigned long ram_size; u64 cur_alloc_size = 0; + u64 min_alloc_size; u64 blocksize = fs_info->sectorsize; struct btrfs_key ins; struct extent_map *em; @@ -1024,10 +1032,26 @@ static noinline int cow_file_range(struct inode *inode, btrfs_drop_extent_cache(BTRFS_I(inode), start, start + num_bytes - 1, 0); + /* + * Relocation relies on the relocated extents to have exactly the same + * size as the original extents. Normally writeback for relocation data + * extents follows a NOCOW path because relocation preallocates the + * extents. However, due to an operation such as scrub turning a block + * group to RO mode, it may fallback to COW mode, so we must make sure + * an extent allocated during COW has exactly the requested size and can + * not be split into smaller extents, otherwise relocation breaks and + * fails during the stage where it updates the bytenr of file extent + * items. 
+	 */
+	if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
+		min_alloc_size = num_bytes;
+	else
+		min_alloc_size = fs_info->sectorsize;
+
 	while (num_bytes > 0) {
 		cur_alloc_size = num_bytes;
 		ret = btrfs_reserve_extent(root, cur_alloc_size, cur_alloc_size,
-					   fs_info->sectorsize, 0, alloc_hint,
+					   min_alloc_size, 0, alloc_hint,
 					   &ins, 1, 1);
 		if (ret < 0)
 			goto out_unlock;
@@ -1132,7 +1156,7 @@ out_unlock:
 	 */
 	if (extent_reserved) {
 		extent_clear_unlock_delalloc(inode, start,
-					     start + cur_alloc_size,
+					     start + cur_alloc_size - 1,
 					     locked_page,
 					     clear_bits,
 					     page_ops);
@@ -1322,6 +1346,73 @@ static noinline int csum_exist_in_range(struct btrfs_fs_info *fs_info,
 	return 1;
 }
 
+static int fallback_to_cow(struct inode *inode, struct page *locked_page,
+			   const u64 start, const u64 end,
+			   int *page_started, unsigned long *nr_written)
+{
+	const bool is_space_ino = btrfs_is_free_space_inode(BTRFS_I(inode));
+	const bool is_reloc_ino = (BTRFS_I(inode)->root->root_key.objectid ==
+				   BTRFS_DATA_RELOC_TREE_OBJECTID);
+	const u64 range_bytes = end + 1 - start;
+	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
+	u64 range_start = start;
+	u64 count;
+
+	/*
+	 * If EXTENT_NORESERVE is set it means that when the buffered write was
+	 * made we had not enough available data space and therefore we did not
+	 * reserve data space for it, since we though we could do NOCOW for the
+	 * respective file range (either there is prealloc extent or the inode
+	 * has the NOCOW bit set).
+	 *
+	 * However when we need to fallback to COW mode (because for example the
+	 * block group for the corresponding extent was turned to RO mode by a
+	 * scrub or relocation) we need to do the following:
+	 *
+	 * 1) We increment the bytes_may_use counter of the data space info.
+	 *    If COW succeeds, it allocates a new data extent and after doing
+	 *    that it decrements the space info's bytes_may_use counter and
+	 *    increments its bytes_reserved counter by the same amount (we do
+	 *    this at btrfs_add_reserved_bytes()). So we need to increment the
+	 *    bytes_may_use counter to compensate (when space is reserved at
+	 *    buffered write time, the bytes_may_use counter is incremented);
+	 *
+	 * 2) We clear the EXTENT_NORESERVE bit from the range. We do this so
+	 *    that if the COW path fails for any reason, it decrements (through
+	 *    extent_clear_unlock_delalloc()) the bytes_may_use counter of the
+	 *    data space info, which we incremented in the step above.
+	 *
+	 * If we need to fallback to cow and the inode corresponds to a free
+	 * space cache inode or an inode of the data relocation tree, we must
+	 * also increment bytes_may_use of the data space_info for the same
+	 * reason. Space caches and relocated data extents always get a prealloc
+	 * extent for them, however scrub or balance may have set the block
+	 * group that contains that extent to RO mode and therefore force COW
+	 * when starting writeback.
+ */ + count = count_range_bits(io_tree, &range_start, end, range_bytes, + EXTENT_NORESERVE, 0); + if (count > 0 || is_space_ino || is_reloc_ino) { + u64 bytes = count; + struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info; + struct btrfs_space_info *sinfo = fs_info->data_sinfo; + + if (is_space_ino || is_reloc_ino) + bytes = range_bytes; + + spin_lock(&sinfo->lock); + btrfs_space_info_update_bytes_may_use(fs_info, sinfo, bytes); + spin_unlock(&sinfo->lock); + + if (count > 0) + clear_extent_bit(io_tree, start, end, EXTENT_NORESERVE, + 0, 0, NULL); + } + + return cow_file_range(inode, locked_page, start, end, page_started, + nr_written, 1); +} + /* * when nowcow writeback call back. This checks for snapshots or COW copies * of the extents that exist in the file, and COWs the file as required. @@ -1487,7 +1578,7 @@ next_slot: goto out_check; ret = btrfs_cross_ref_exist(root, ino, found_key.offset - - extent_offset, disk_bytenr); + extent_offset, disk_bytenr, false); if (ret) { /* * ret could be -EIO if the above fails to read @@ -1569,15 +1660,11 @@ out_check: * NOCOW, following one which needs to be COW'ed */ if (cow_start != (u64)-1) { - ret = cow_file_range(inode, locked_page, - cow_start, found_key.offset - 1, - page_started, nr_written, 1); - if (ret) { - if (nocow) - btrfs_dec_nocow_writers(fs_info, - disk_bytenr); + ret = fallback_to_cow(inode, locked_page, cow_start, + found_key.offset - 1, + page_started, nr_written); + if (ret) goto error; - } cow_start = (u64)-1; } @@ -1593,9 +1680,6 @@ out_check: ram_bytes, BTRFS_COMPRESS_NONE, BTRFS_ORDERED_PREALLOC); if (IS_ERR(em)) { - if (nocow) - btrfs_dec_nocow_writers(fs_info, - disk_bytenr); ret = PTR_ERR(em); goto error; } @@ -1660,8 +1744,8 @@ out_check: if (cow_start != (u64)-1) { cur_offset = end; - ret = cow_file_range(inode, locked_page, cow_start, end, - page_started, nr_written, 1); + ret = fallback_to_cow(inode, locked_page, cow_start, end, + page_started, nr_written); if (ret) goto error; } @@ -4250,7 +4334,7 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir) * 1 for the inode ref * 1 for the inode */ - return btrfs_start_transaction_fallback_global_rsv(root, 5, 5); + return btrfs_start_transaction_fallback_global_rsv(root, 5); } static int btrfs_unlink(struct inode *dir, struct dentry *dentry) @@ -4603,6 +4687,8 @@ int btrfs_delete_subvolume(struct inode *dir, struct dentry *dentry) } } + free_anon_bdev(dest->anon_dev); + dest->anon_dev = 0; out_end_trans: trans->block_rsv = NULL; trans->bytes_reserved = 0; @@ -7108,7 +7194,7 @@ struct extent_map *btrfs_get_extent(struct btrfs_inode *inode, extent_type == BTRFS_FILE_EXTENT_PREALLOC) { /* Only regular file could have regular/prealloc extent */ if (!S_ISREG(inode->vfs_inode.i_mode)) { - ret = -EUCLEAN; + err = -EUCLEAN; btrfs_crit(fs_info, "regular/prealloc extent found for non-regular inode %llu", btrfs_ino(inode)); @@ -7443,7 +7529,7 @@ static struct extent_map *btrfs_new_extent_direct(struct inode *inode, */ noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len, u64 *orig_start, u64 *orig_block_len, - u64 *ram_bytes) + u64 *ram_bytes, bool strict) { struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); struct btrfs_path *path; @@ -7521,8 +7607,9 @@ noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len, * Do the same check as in btrfs_cross_ref_exist but without the * unnecessary search. 
*/ - if (btrfs_file_extent_generation(leaf, fi) <= - btrfs_root_last_snapshot(&root->root_item)) + if (!strict && + (btrfs_file_extent_generation(leaf, fi) <= + btrfs_root_last_snapshot(&root->root_item))) goto out; backref_offset = btrfs_file_extent_offset(leaf, fi); @@ -7558,7 +7645,8 @@ noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len, */ ret = btrfs_cross_ref_exist(root, btrfs_ino(BTRFS_I(inode)), - key.offset - backref_offset, disk_bytenr); + key.offset - backref_offset, disk_bytenr, + strict); if (ret) { ret = 0; goto out; @@ -7779,7 +7867,7 @@ static int btrfs_get_blocks_direct_write(struct extent_map **map, block_start = em->block_start + (start - em->start); if (can_nocow_extent(inode, start, &len, &orig_start, - &orig_block_len, &ram_bytes) == 1 && + &orig_block_len, &ram_bytes, false) == 1 && btrfs_inc_nocow_writers(fs_info, block_start)) { struct extent_map *em2; @@ -8534,7 +8622,6 @@ static int btrfs_submit_direct_hook(struct btrfs_dio_private *dip) /* bio split */ ASSERT(geom.len <= INT_MAX); - atomic_inc(&dip->pending_bios); do { clone_len = min_t(int, submit_len, geom.len); @@ -8584,7 +8671,8 @@ submit: if (!status) return 0; - bio_put(bio); + if (bio != orig_bio) + bio_put(bio); out_err: dip->errors = 1; /* @@ -8625,7 +8713,7 @@ static void btrfs_submit_direct(struct bio *dio_bio, struct inode *inode, bio->bi_private = dip; dip->orig_bio = bio; dip->dio_bio = dio_bio; - atomic_set(&dip->pending_bios, 0); + atomic_set(&dip->pending_bios, 1); io_bio = btrfs_io_bio(bio); io_bio->logical = file_offset; @@ -8772,9 +8860,6 @@ static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter) dio_data.overwrite = 1; inode_unlock(inode); relock = true; - } else if (iocb->ki_flags & IOCB_NOWAIT) { - ret = -EAGAIN; - goto out; } ret = btrfs_delalloc_reserve_space(inode, &data_reserved, offset, count); @@ -9485,7 +9570,7 @@ void btrfs_destroy_inode(struct inode *inode) btrfs_put_ordered_extent(ordered); } } - btrfs_qgroup_check_reserved_leak(inode); + btrfs_qgroup_check_reserved_leak(BTRFS_I(inode)); inode_tree_del(inode); btrfs_drop_extent_cache(BTRFS_I(inode), 0, (u64)-1, 0); } @@ -10947,7 +11032,7 @@ static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file, free_extent_map(em); em = NULL; - ret = can_nocow_extent(inode, start, &len, NULL, NULL, NULL); + ret = can_nocow_extent(inode, start, &len, NULL, NULL, NULL, true); if (ret < 0) { goto out; } else if (ret) { diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index d88b8d8897cc..63394b450afc 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -167,8 +167,11 @@ static int btrfs_ioctl_getflags(struct file *file, void __user *arg) return 0; } -/* Check if @flags are a supported and valid set of FS_*_FL flags */ -static int check_fsflags(unsigned int flags) +/* + * Check if @flags are a supported and valid set of FS_*_FL flags and that + * the old and new flags are not conflicting + */ +static int check_fsflags(unsigned int old_flags, unsigned int flags) { if (flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL | \ FS_NOATIME_FL | FS_NODUMP_FL | \ @@ -177,9 +180,19 @@ static int check_fsflags(unsigned int flags) FS_NOCOW_FL)) return -EOPNOTSUPP; + /* COMPR and NOCOMP on new/old are valid */ if ((flags & FS_NOCOMP_FL) && (flags & FS_COMPR_FL)) return -EINVAL; + if ((flags & FS_COMPR_FL) && (flags & FS_NOCOW_FL)) + return -EINVAL; + + /* NOCOW and compression options are mutually exclusive */ + if ((old_flags & FS_NOCOW_FL) && (flags & (FS_COMPR_FL | FS_NOCOMP_FL))) + return -EINVAL; + if 
((flags & FS_NOCOW_FL) && (old_flags & (FS_COMPR_FL | FS_NOCOMP_FL))) + return -EINVAL; + return 0; } @@ -193,7 +206,7 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg) unsigned int fsflags, old_fsflags; int ret; const char *comp = NULL; - u32 binode_flags = binode->flags; + u32 binode_flags; if (!inode_owner_or_capable(inode)) return -EPERM; @@ -204,22 +217,23 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg) if (copy_from_user(&fsflags, arg, sizeof(fsflags))) return -EFAULT; - ret = check_fsflags(fsflags); - if (ret) - return ret; - ret = mnt_want_write_file(file); if (ret) return ret; inode_lock(inode); - fsflags = btrfs_mask_fsflags_for_type(inode, fsflags); old_fsflags = btrfs_inode_flags_to_fsflags(binode->flags); + ret = vfs_ioc_setflags_prepare(inode, old_fsflags, fsflags); if (ret) goto out_unlock; + ret = check_fsflags(old_fsflags, fsflags); + if (ret) + goto out_unlock; + + binode_flags = binode->flags; if (fsflags & FS_SYNC_FL) binode_flags |= BTRFS_INODE_SYNC; else @@ -2091,9 +2105,14 @@ static noinline int copy_to_sk(struct btrfs_path *path, sh.len = item_len; sh.transid = found_transid; - /* copy search result header */ - if (copy_to_user(ubuf + *sk_offset, &sh, sizeof(sh))) { - ret = -EFAULT; + /* + * Copy search result header. If we fault then loop again so we + * can fault in the pages and -EFAULT there if there's a + * problem. Otherwise we'll fault and then copy the buffer in + * properly this next time through + */ + if (probe_user_write(ubuf + *sk_offset, &sh, sizeof(sh))) { + ret = 0; goto out; } @@ -2101,10 +2120,14 @@ static noinline int copy_to_sk(struct btrfs_path *path, if (item_len) { char __user *up = ubuf + *sk_offset; - /* copy the item */ - if (read_extent_buffer_to_user(leaf, up, - item_off, item_len)) { - ret = -EFAULT; + /* + * Copy the item, same behavior as above, but reset the + * * sk_offset so we copy the full thing again. + */ + if (read_extent_buffer_to_user_nofault(leaf, up, + item_off, item_len)) { + ret = 0; + *sk_offset -= sizeof(sh); goto out; } @@ -2192,6 +2215,11 @@ static noinline int search_ioctl(struct inode *inode, key.offset = sk->min_offset; while (1) { + ret = fault_in_pages_writeable(ubuf + sk_offset, + *buf_size - sk_offset); + if (ret) + break; + ret = btrfs_search_forward(root, &key, path, sk->min_transid); if (ret != 0) { if (ret > 0) diff --git a/fs/btrfs/print-tree.c b/fs/btrfs/print-tree.c index 9cb50577d982..f4edadf1067f 100644 --- a/fs/btrfs/print-tree.c +++ b/fs/btrfs/print-tree.c @@ -95,9 +95,10 @@ static void print_extent_item(struct extent_buffer *eb, int slot, int type) * offset is supposed to be a tree block which * must be aligned to nodesize. */ - if (!IS_ALIGNED(offset, eb->fs_info->nodesize)) - pr_info("\t\t\t(parent %llu is NOT ALIGNED to nodesize %llu)\n", - offset, (unsigned long long)eb->fs_info->nodesize); + if (!IS_ALIGNED(offset, eb->fs_info->sectorsize)) + pr_info( + "\t\t\t(parent %llu not aligned to sectorsize %u)\n", + offset, eb->fs_info->sectorsize); break; case BTRFS_EXTENT_DATA_REF_KEY: dref = (struct btrfs_extent_data_ref *)(&iref->offset); @@ -112,8 +113,9 @@ static void print_extent_item(struct extent_buffer *eb, int slot, int type) * must be aligned to nodesize. 
*/ if (!IS_ALIGNED(offset, eb->fs_info->nodesize)) - pr_info("\t\t\t(parent %llu is NOT ALIGNED to nodesize %llu)\n", - offset, (unsigned long long)eb->fs_info->nodesize); + pr_info( + "\t\t\t(parent %llu not aligned to sectorsize %u)\n", + offset, eb->fs_info->sectorsize); break; default: pr_cont("(extent %llu has INVALID ref type %d)\n", diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c index 590defdf8860..04fd02e6124d 100644 --- a/fs/btrfs/qgroup.c +++ b/fs/btrfs/qgroup.c @@ -2636,6 +2636,7 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid, struct btrfs_root *quota_root; struct btrfs_qgroup *srcgroup; struct btrfs_qgroup *dstgroup; + bool need_rescan = false; u32 level_size = 0; u64 nums; @@ -2779,6 +2780,13 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid, goto unlock; } ++i_qgroups; + + /* + * If we're doing a snapshot, and adding the snapshot to a new + * qgroup, the numbers are guaranteed to be incorrect. + */ + if (srcid) + need_rescan = true; } for (i = 0; i < inherit->num_ref_copies; ++i, i_qgroups += 2) { @@ -2798,6 +2806,9 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid, dst->rfer = src->rfer - level_size; dst->rfer_cmpr = src->rfer_cmpr - level_size; + + /* Manually tweaking numbers certainly needs a rescan */ + need_rescan = true; } for (i = 0; i < inherit->num_excl_copies; ++i, i_qgroups += 2) { struct btrfs_qgroup *src; @@ -2816,6 +2827,7 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid, dst->excl = src->excl + level_size; dst->excl_cmpr = src->excl_cmpr + level_size; + need_rescan = true; } unlock: @@ -2823,6 +2835,8 @@ unlock: out: if (!committing) mutex_unlock(&fs_info->qgroup_ioctl_lock); + if (need_rescan) + fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT; return ret; } @@ -3755,7 +3769,7 @@ void btrfs_qgroup_convert_reserved_meta(struct btrfs_root *root, int num_bytes) * Check qgroup reserved space leaking, normally at destroy inode * time */ -void btrfs_qgroup_check_reserved_leak(struct inode *inode) +void btrfs_qgroup_check_reserved_leak(struct btrfs_inode *inode) { struct extent_changeset changeset; struct ulist_node *unode; @@ -3763,19 +3777,19 @@ void btrfs_qgroup_check_reserved_leak(struct inode *inode) int ret; extent_changeset_init(&changeset); - ret = clear_record_extent_bits(&BTRFS_I(inode)->io_tree, 0, (u64)-1, + ret = clear_record_extent_bits(&inode->io_tree, 0, (u64)-1, EXTENT_QGROUP_RESERVED, &changeset); WARN_ON(ret < 0); if (WARN_ON(changeset.bytes_changed)) { ULIST_ITER_INIT(&iter); while ((unode = ulist_next(&changeset.range_changed, &iter))) { - btrfs_warn(BTRFS_I(inode)->root->fs_info, - "leaking qgroup reserved space, ino: %lu, start: %llu, end: %llu", - inode->i_ino, unode->val, unode->aux); + btrfs_warn(inode->root->fs_info, + "leaking qgroup reserved space, ino: %llu, start: %llu, end: %llu", + btrfs_ino(inode), unode->val, unode->aux); } - btrfs_qgroup_free_refroot(BTRFS_I(inode)->root->fs_info, - BTRFS_I(inode)->root->root_key.objectid, + btrfs_qgroup_free_refroot(inode->root->fs_info, + inode->root->root_key.objectid, changeset.bytes_changed, BTRFS_QGROUP_RSV_DATA); } diff --git a/fs/btrfs/qgroup.h b/fs/btrfs/qgroup.h index 17e8ac992c50..b0420c4f5d0e 100644 --- a/fs/btrfs/qgroup.h +++ b/fs/btrfs/qgroup.h @@ -399,7 +399,7 @@ void btrfs_qgroup_free_meta_all_pertrans(struct btrfs_root *root); */ void btrfs_qgroup_convert_reserved_meta(struct btrfs_root *root, int num_bytes); -void btrfs_qgroup_check_reserved_leak(struct inode *inode); 
+void btrfs_qgroup_check_reserved_leak(struct btrfs_inode *inode); /* btrfs_qgroup_swapped_blocks related functions */ void btrfs_qgroup_init_swapped_blocks( diff --git a/fs/btrfs/ref-verify.c b/fs/btrfs/ref-verify.c index 454a1015d026..9a2f15f4c80e 100644 --- a/fs/btrfs/ref-verify.c +++ b/fs/btrfs/ref-verify.c @@ -286,6 +286,8 @@ static struct block_entry *add_block_entry(struct btrfs_fs_info *fs_info, exist_re = insert_root_entry(&exist->roots, re); if (exist_re) kfree(re); + } else { + kfree(re); } kfree(be); return exist; diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index e890f09e2073..af3605a0bf2e 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -2312,12 +2312,20 @@ static noinline_for_stack int merge_reloc_root(struct reloc_control *rc, btrfs_unlock_up_safe(path, 0); } - min_reserved = fs_info->nodesize * (BTRFS_MAX_LEVEL - 1) * 2; + /* + * In merge_reloc_root(), we modify the upper level pointer to swap the + * tree blocks between reloc tree and subvolume tree. Thus for tree + * block COW, we COW at most from level 1 to root level for each tree. + * + * Thus the needed metadata size is at most root_level * nodesize, + * and * 2 since we have two trees to COW. + */ + min_reserved = fs_info->nodesize * btrfs_root_level(root_item) * 2; memset(&next_key, 0, sizeof(next_key)); while (1) { ret = btrfs_block_rsv_refill(root, rc->block_rsv, min_reserved, - BTRFS_RESERVE_FLUSH_ALL); + BTRFS_RESERVE_FLUSH_LIMIT); if (ret) { err = ret; goto out; @@ -2525,12 +2533,10 @@ again: reloc_root = list_entry(reloc_roots.next, struct btrfs_root, root_list); + root = read_fs_root(fs_info, reloc_root->root_key.offset); if (btrfs_root_refs(&reloc_root->root_item) > 0) { - root = read_fs_root(fs_info, - reloc_root->root_key.offset); BUG_ON(IS_ERR(root)); BUG_ON(root->reloc_root != reloc_root); - ret = merge_reloc_root(rc, root); if (ret) { if (list_empty(&reloc_root->root_list)) @@ -2539,6 +2545,13 @@ again: goto out; } } else { + if (!IS_ERR(root)) { + if (root->reloc_root == reloc_root) + root->reloc_root = NULL; + clear_bit(BTRFS_ROOT_DEAD_RELOC_TREE, + &root->state); + } + list_del_init(&reloc_root->root_list); /* Don't forget to queue this reloc root for cleanup */ list_add_tail(&reloc_root->reloc_dirty_list, diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index a7b043fd7a57..93d7cb56e44b 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -3717,7 +3717,7 @@ static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx, struct btrfs_fs_info *fs_info = sctx->fs_info; if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) - return -EIO; + return -EROFS; /* Seed devices of a new filesystem has their own generation. 
*/ if (scrub_dev->fs_devices != fs_info->fs_devices) @@ -3742,50 +3742,84 @@ static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx, return 0; } +static void scrub_workers_put(struct btrfs_fs_info *fs_info) +{ + if (refcount_dec_and_mutex_lock(&fs_info->scrub_workers_refcnt, + &fs_info->scrub_lock)) { + struct btrfs_workqueue *scrub_workers = NULL; + struct btrfs_workqueue *scrub_wr_comp = NULL; + struct btrfs_workqueue *scrub_parity = NULL; + + scrub_workers = fs_info->scrub_workers; + scrub_wr_comp = fs_info->scrub_wr_completion_workers; + scrub_parity = fs_info->scrub_parity_workers; + + fs_info->scrub_workers = NULL; + fs_info->scrub_wr_completion_workers = NULL; + fs_info->scrub_parity_workers = NULL; + mutex_unlock(&fs_info->scrub_lock); + + btrfs_destroy_workqueue(scrub_workers); + btrfs_destroy_workqueue(scrub_wr_comp); + btrfs_destroy_workqueue(scrub_parity); + } +} + /* * get a reference count on fs_info->scrub_workers. start worker if necessary */ static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info, int is_dev_replace) { + struct btrfs_workqueue *scrub_workers = NULL; + struct btrfs_workqueue *scrub_wr_comp = NULL; + struct btrfs_workqueue *scrub_parity = NULL; unsigned int flags = WQ_FREEZABLE | WQ_UNBOUND; int max_active = fs_info->thread_pool_size; + int ret = -ENOMEM; - lockdep_assert_held(&fs_info->scrub_lock); + if (refcount_inc_not_zero(&fs_info->scrub_workers_refcnt)) + return 0; + scrub_workers = btrfs_alloc_workqueue(fs_info, "scrub", flags, + is_dev_replace ? 1 : max_active, 4); + if (!scrub_workers) + goto fail_scrub_workers; + + scrub_wr_comp = btrfs_alloc_workqueue(fs_info, "scrubwrc", flags, + max_active, 2); + if (!scrub_wr_comp) + goto fail_scrub_wr_completion_workers; + + scrub_parity = btrfs_alloc_workqueue(fs_info, "scrubparity", flags, + max_active, 2); + if (!scrub_parity) + goto fail_scrub_parity_workers; + + mutex_lock(&fs_info->scrub_lock); if (refcount_read(&fs_info->scrub_workers_refcnt) == 0) { - ASSERT(fs_info->scrub_workers == NULL); - fs_info->scrub_workers = btrfs_alloc_workqueue(fs_info, "scrub", - flags, is_dev_replace ? 
1 : max_active, 4); - if (!fs_info->scrub_workers) - goto fail_scrub_workers; - - ASSERT(fs_info->scrub_wr_completion_workers == NULL); - fs_info->scrub_wr_completion_workers = - btrfs_alloc_workqueue(fs_info, "scrubwrc", flags, - max_active, 2); - if (!fs_info->scrub_wr_completion_workers) - goto fail_scrub_wr_completion_workers; - - ASSERT(fs_info->scrub_parity_workers == NULL); - fs_info->scrub_parity_workers = - btrfs_alloc_workqueue(fs_info, "scrubparity", flags, - max_active, 2); - if (!fs_info->scrub_parity_workers) - goto fail_scrub_parity_workers; - + ASSERT(fs_info->scrub_workers == NULL && + fs_info->scrub_wr_completion_workers == NULL && + fs_info->scrub_parity_workers == NULL); + fs_info->scrub_workers = scrub_workers; + fs_info->scrub_wr_completion_workers = scrub_wr_comp; + fs_info->scrub_parity_workers = scrub_parity; refcount_set(&fs_info->scrub_workers_refcnt, 1); - } else { - refcount_inc(&fs_info->scrub_workers_refcnt); + mutex_unlock(&fs_info->scrub_lock); + return 0; } - return 0; + /* Other thread raced in and created the workers for us */ + refcount_inc(&fs_info->scrub_workers_refcnt); + mutex_unlock(&fs_info->scrub_lock); + ret = 0; + btrfs_destroy_workqueue(scrub_parity); fail_scrub_parity_workers: - btrfs_destroy_workqueue(fs_info->scrub_wr_completion_workers); + btrfs_destroy_workqueue(scrub_wr_comp); fail_scrub_wr_completion_workers: - btrfs_destroy_workqueue(fs_info->scrub_workers); + btrfs_destroy_workqueue(scrub_workers); fail_scrub_workers: - return -ENOMEM; + return ret; } int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start, @@ -3796,9 +3830,6 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start, int ret; struct btrfs_device *dev; unsigned int nofs_flag; - struct btrfs_workqueue *scrub_workers = NULL; - struct btrfs_workqueue *scrub_wr_comp = NULL; - struct btrfs_workqueue *scrub_parity = NULL; if (btrfs_fs_closing(fs_info)) return -EAGAIN; @@ -3845,13 +3876,17 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start, if (IS_ERR(sctx)) return PTR_ERR(sctx); + ret = scrub_workers_get(fs_info, is_dev_replace); + if (ret) + goto out_free_ctx; + mutex_lock(&fs_info->fs_devices->device_list_mutex); dev = btrfs_find_device(fs_info->fs_devices, devid, NULL, NULL, true); if (!dev || (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) && !is_dev_replace)) { mutex_unlock(&fs_info->fs_devices->device_list_mutex); ret = -ENODEV; - goto out_free_ctx; + goto out; } if (!is_dev_replace && !readonly && @@ -3860,7 +3895,7 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start, btrfs_err_in_rcu(fs_info, "scrub: device %s is not writable", rcu_str_deref(dev->name)); ret = -EROFS; - goto out_free_ctx; + goto out; } mutex_lock(&fs_info->scrub_lock); @@ -3869,7 +3904,7 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start, mutex_unlock(&fs_info->scrub_lock); mutex_unlock(&fs_info->fs_devices->device_list_mutex); ret = -EIO; - goto out_free_ctx; + goto out; } down_read(&fs_info->dev_replace.rwsem); @@ -3880,17 +3915,10 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start, mutex_unlock(&fs_info->scrub_lock); mutex_unlock(&fs_info->fs_devices->device_list_mutex); ret = -EINPROGRESS; - goto out_free_ctx; + goto out; } up_read(&fs_info->dev_replace.rwsem); - ret = scrub_workers_get(fs_info, is_dev_replace); - if (ret) { - mutex_unlock(&fs_info->scrub_lock); - mutex_unlock(&fs_info->fs_devices->device_list_mutex); - goto out_free_ctx; - } - sctx->readonly = 
readonly; dev->scrub_ctx = sctx; mutex_unlock(&fs_info->fs_devices->device_list_mutex); @@ -3943,24 +3971,14 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start, mutex_lock(&fs_info->scrub_lock); dev->scrub_ctx = NULL; - if (refcount_dec_and_test(&fs_info->scrub_workers_refcnt)) { - scrub_workers = fs_info->scrub_workers; - scrub_wr_comp = fs_info->scrub_wr_completion_workers; - scrub_parity = fs_info->scrub_parity_workers; - - fs_info->scrub_workers = NULL; - fs_info->scrub_wr_completion_workers = NULL; - fs_info->scrub_parity_workers = NULL; - } mutex_unlock(&fs_info->scrub_lock); - btrfs_destroy_workqueue(scrub_workers); - btrfs_destroy_workqueue(scrub_wr_comp); - btrfs_destroy_workqueue(scrub_parity); + scrub_workers_put(fs_info); scrub_put_ctx(sctx); return ret; - +out: + scrub_workers_put(fs_info); out_free_ctx: scrub_free_ctx(sctx); diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c index 3eb0fec2488a..6ad216e8178e 100644 --- a/fs/btrfs/send.c +++ b/fs/btrfs/send.c @@ -23,6 +23,7 @@ #include "btrfs_inode.h" #include "transaction.h" #include "compression.h" +#include "xattr.h" /* * Maximum number of references an extent can have in order for us to attempt to @@ -4536,6 +4537,10 @@ static int __process_new_xattr(int num, struct btrfs_key *di_key, struct fs_path *p; struct posix_acl_xattr_header dummy_acl; + /* Capabilities are emitted by finish_inode_if_needed */ + if (!strncmp(name, XATTR_NAME_CAPS, name_len)) + return 0; + p = fs_path_alloc(); if (!p) return -ENOMEM; @@ -5098,6 +5103,64 @@ static int send_extent_data(struct send_ctx *sctx, return 0; } +/* + * Search for a capability xattr related to sctx->cur_ino. If the capability is + * found, call send_set_xattr function to emit it. + * + * Return 0 if there isn't a capability, or when the capability was emitted + * successfully, or < 0 if an error occurred. + */ +static int send_capabilities(struct send_ctx *sctx) +{ + struct fs_path *fspath = NULL; + struct btrfs_path *path; + struct btrfs_dir_item *di; + struct extent_buffer *leaf; + unsigned long data_ptr; + char *buf = NULL; + int buf_len; + int ret = 0; + + path = alloc_path_for_send(); + if (!path) + return -ENOMEM; + + di = btrfs_lookup_xattr(NULL, sctx->send_root, path, sctx->cur_ino, + XATTR_NAME_CAPS, strlen(XATTR_NAME_CAPS), 0); + if (!di) { + /* There is no xattr for this inode */ + goto out; + } else if (IS_ERR(di)) { + ret = PTR_ERR(di); + goto out; + } + + leaf = path->nodes[0]; + buf_len = btrfs_dir_data_len(leaf, di); + + fspath = fs_path_alloc(); + buf = kmalloc(buf_len, GFP_KERNEL); + if (!fspath || !buf) { + ret = -ENOMEM; + goto out; + } + + ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, fspath); + if (ret < 0) + goto out; + + data_ptr = (unsigned long)(di + 1) + btrfs_dir_name_len(leaf, di); + read_extent_buffer(leaf, buf, data_ptr, buf_len); + + ret = send_set_xattr(sctx, fspath, XATTR_NAME_CAPS, + strlen(XATTR_NAME_CAPS), buf, buf_len); +out: + kfree(buf); + fs_path_free(fspath); + btrfs_free_path(path); + return ret; +} + static int clone_range(struct send_ctx *sctx, struct clone_root *clone_root, const u64 disk_byte, @@ -6001,6 +6064,10 @@ static int finish_inode_if_needed(struct send_ctx *sctx, int at_end) goto out; } + ret = send_capabilities(sctx); + if (ret < 0) + goto out; + /* * If other directory inodes depended on our current directory * inode's move/rename, now do their move/rename operations. 
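Editor's note on the fs/btrfs/send.c change above: send_capabilities() looks up the security.capability xattr of the current inode and re-emits it from finish_inode_if_needed(), i.e. after the chown/chmod commands in the send stream, since a chown on the receiving side would otherwise clear the file capability. As a rough way to check the end result from user space (not part of this patch), one might read the xattr back on a received file. In the sketch below, has_file_capability() and its error handling are invented for illustration; getxattr(2), ENODATA and the "security.capability" name (what the kernel's XATTR_NAME_CAPS expands to) are standard Linux interfaces.

/* Illustrative only: does a received file still carry its capability xattr? */
#include <stdio.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/xattr.h>

static int has_file_capability(const char *path)
{
	char buf[256];
	ssize_t len = getxattr(path, "security.capability", buf, sizeof(buf));

	if (len >= 0)
		return 1;		/* capability xattr present */
	if (errno == ENODATA)
		return 0;		/* no capability set on this file */
	return -errno;			/* lookup failed for another reason */
}

int main(int argc, char **argv)
{
	if (argc != 2) {
		fprintf(stderr, "usage: %s <file>\n", argv[0]);
		return 1;
	}
	printf("%s: security.capability %s\n", argv[1],
	       has_file_capability(argv[1]) > 0 ? "present" : "absent");
	return 0;
}

This also explains why the __process_new_xattr() hunk skips XATTR_NAME_CAPS: emitting it there, before the chown, would lose the capability again.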
diff --git a/fs/btrfs/space-info.c b/fs/btrfs/space-info.c index e8a4b0ebe97f..6f484f0d347e 100644 --- a/fs/btrfs/space-info.c +++ b/fs/btrfs/space-info.c @@ -304,8 +304,8 @@ again: cache->key.objectid, cache->key.offset, btrfs_block_group_used(&cache->item), cache->pinned, cache->reserved, cache->ro ? "[readonly]" : ""); - btrfs_dump_free_space(cache, bytes); spin_unlock(&cache->lock); + btrfs_dump_free_space(cache, bytes); } if (++index < BTRFS_NR_RAID_TYPES) goto again; @@ -462,6 +462,7 @@ static int may_commit_transaction(struct btrfs_fs_info *fs_info, struct reserve_ticket *ticket = NULL; struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_block_rsv; struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv; + struct btrfs_block_rsv *trans_rsv = &fs_info->trans_block_rsv; struct btrfs_trans_handle *trans; u64 bytes_needed; u64 reclaim_bytes = 0; @@ -524,6 +525,11 @@ static int may_commit_transaction(struct btrfs_fs_info *fs_info, spin_lock(&delayed_refs_rsv->lock); reclaim_bytes += delayed_refs_rsv->reserved; spin_unlock(&delayed_refs_rsv->lock); + + spin_lock(&trans_rsv->lock); + reclaim_bytes += trans_rsv->reserved; + spin_unlock(&trans_rsv->lock); + if (reclaim_bytes >= bytes_needed) goto commit; bytes_needed -= reclaim_bytes; @@ -683,6 +689,34 @@ static inline int need_do_async_reclaim(struct btrfs_fs_info *fs_info, !test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state)); } +static bool steal_from_global_rsv(struct btrfs_fs_info *fs_info, + struct btrfs_space_info *space_info, + struct reserve_ticket *ticket) +{ + struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv; + u64 min_bytes; + + if (global_rsv->space_info != space_info) + return false; + + spin_lock(&global_rsv->lock); + min_bytes = div_factor(global_rsv->size, 5); + if (global_rsv->reserved < min_bytes + ticket->bytes) { + spin_unlock(&global_rsv->lock); + return false; + } + global_rsv->reserved -= ticket->bytes; + ticket->bytes = 0; + list_del_init(&ticket->list); + wake_up(&ticket->wait); + space_info->tickets_id++; + if (global_rsv->reserved < global_rsv->size) + global_rsv->full = 0; + spin_unlock(&global_rsv->lock); + + return true; +} + /* * maybe_fail_all_tickets - we've exhausted our flushing, start failing tickets * @fs_info - fs_info for this fs @@ -715,6 +749,10 @@ static bool maybe_fail_all_tickets(struct btrfs_fs_info *fs_info, ticket = list_first_entry(&space_info->tickets, struct reserve_ticket, list); + if (ticket->steal && + steal_from_global_rsv(fs_info, space_info, ticket)) + return true; + /* * may_commit_transaction will avoid committing the transaction * if it doesn't feel like the space reclaimed by the commit @@ -934,6 +972,7 @@ static int handle_reserve_ticket(struct btrfs_fs_info *fs_info, switch (flush) { case BTRFS_RESERVE_FLUSH_ALL: + case BTRFS_RESERVE_FLUSH_ALL_STEAL: wait_reserve_ticket(fs_info, space_info, ticket); break; case BTRFS_RESERVE_FLUSH_LIMIT: @@ -1033,7 +1072,9 @@ static int __reserve_metadata_bytes(struct btrfs_fs_info *fs_info, ticket.bytes = orig_bytes; ticket.error = 0; init_waitqueue_head(&ticket.wait); - if (flush == BTRFS_RESERVE_FLUSH_ALL) { + ticket.steal = (flush == BTRFS_RESERVE_FLUSH_ALL_STEAL); + if (flush == BTRFS_RESERVE_FLUSH_ALL || + flush == BTRFS_RESERVE_FLUSH_ALL_STEAL) { list_add_tail(&ticket.list, &space_info->tickets); if (!space_info->flush) { space_info->flush = 1; diff --git a/fs/btrfs/space-info.h b/fs/btrfs/space-info.h index 8867e84aa33d..8b9a1d8fefcb 100644 --- a/fs/btrfs/space-info.h +++ b/fs/btrfs/space-info.h @@ 
-72,6 +72,7 @@ struct btrfs_space_info { struct reserve_ticket { u64 bytes; int error; + bool steal; struct list_head list; wait_queue_head_t wait; }; diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index aea24202cd35..6a2ae208ff80 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -241,7 +241,7 @@ void __btrfs_abort_transaction(struct btrfs_trans_handle *trans, { struct btrfs_fs_info *fs_info = trans->fs_info; - trans->aborted = errno; + WRITE_ONCE(trans->aborted, errno); /* Nothing used. The other threads that have joined this * transaction may be able to continue. */ if (!trans->dirty && list_empty(&trans->new_bgs)) { @@ -435,6 +435,7 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options, char *compress_type; bool compress_force = false; enum btrfs_compression_type saved_compress_type; + int saved_compress_level; bool saved_compress_force; int no_compress = 0; @@ -517,6 +518,7 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options, info->compress_type : BTRFS_COMPRESS_NONE; saved_compress_force = btrfs_test_opt(info, FORCE_COMPRESS); + saved_compress_level = info->compress_level; if (token == Opt_compress || token == Opt_compress_force || strncmp(args[0].from, "zlib", 4) == 0) { @@ -542,6 +544,7 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options, } else if (strncmp(args[0].from, "lzo", 3) == 0) { compress_type = "lzo"; info->compress_type = BTRFS_COMPRESS_LZO; + info->compress_level = 0; btrfs_set_opt(info->mount_opt, COMPRESS); btrfs_clear_opt(info->mount_opt, NODATACOW); btrfs_clear_opt(info->mount_opt, NODATASUM); @@ -561,6 +564,8 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options, no_compress = 0; } else if (strncmp(args[0].from, "no", 2) == 0) { compress_type = "no"; + info->compress_level = 0; + info->compress_type = 0; btrfs_clear_opt(info->mount_opt, COMPRESS); btrfs_clear_opt(info->mount_opt, FORCE_COMPRESS); compress_force = false; @@ -581,11 +586,11 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options, */ btrfs_clear_opt(info->mount_opt, FORCE_COMPRESS); } - if ((btrfs_test_opt(info, COMPRESS) && - (info->compress_type != saved_compress_type || - compress_force != saved_compress_force)) || - (!btrfs_test_opt(info, COMPRESS) && - no_compress == 1)) { + if (no_compress == 1) { + btrfs_info(info, "use no compression"); + } else if ((info->compress_type != saved_compress_type) || + (compress_force != saved_compress_force) || + (info->compress_level != saved_compress_level)) { btrfs_info(info, "%s %s compression, level %d", (compress_force) ? 
"force" : "use", compress_type, info->compress_level); @@ -1005,8 +1010,8 @@ out: return error; } -static char *get_subvol_name_from_objectid(struct btrfs_fs_info *fs_info, - u64 subvol_objectid) +char *btrfs_get_subvol_name_from_objectid(struct btrfs_fs_info *fs_info, + u64 subvol_objectid) { struct btrfs_root *root = fs_info->tree_root; struct btrfs_root *fs_root; @@ -1287,6 +1292,7 @@ static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry) { struct btrfs_fs_info *info = btrfs_sb(dentry->d_sb); const char *compress_type; + const char *subvol_name; if (btrfs_test_opt(info, DEGRADED)) seq_puts(seq, ",degraded"); @@ -1371,8 +1377,13 @@ static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry) seq_puts(seq, ",ref_verify"); seq_printf(seq, ",subvolid=%llu", BTRFS_I(d_inode(dentry))->root->root_key.objectid); - seq_puts(seq, ",subvol="); - seq_dentry(seq, dentry, " \t\n\\"); + subvol_name = btrfs_get_subvol_name_from_objectid(info, + BTRFS_I(d_inode(dentry))->root->root_key.objectid); + if (!IS_ERR(subvol_name)) { + seq_puts(seq, ",subvol="); + seq_escape(seq, subvol_name, " \t\n\\"); + kfree(subvol_name); + } return 0; } @@ -1417,8 +1428,8 @@ static struct dentry *mount_subvol(const char *subvol_name, u64 subvol_objectid, goto out; } } - subvol_name = get_subvol_name_from_objectid(btrfs_sb(mnt->mnt_sb), - subvol_objectid); + subvol_name = btrfs_get_subvol_name_from_objectid( + btrfs_sb(mnt->mnt_sb), subvol_objectid); if (IS_ERR(subvol_name)) { root = ERR_CAST(subvol_name); subvol_name = NULL; @@ -1848,6 +1859,12 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data) set_bit(BTRFS_FS_OPEN, &fs_info->flags); } out: + /* + * We need to set SB_I_VERSION here otherwise it'll get cleared by VFS, + * since the absence of the flag means it can be toggled off by remount. + */ + *flags |= SB_I_VERSION; + wake_up_process(fs_info->transaction_kthread); btrfs_remount_cleanup(fs_info, old_opts); return 0; @@ -2254,9 +2271,7 @@ static int btrfs_unfreeze(struct super_block *sb) static int btrfs_show_devname(struct seq_file *m, struct dentry *root) { struct btrfs_fs_info *fs_info = btrfs_sb(root->d_sb); - struct btrfs_fs_devices *cur_devices; struct btrfs_device *dev, *first_dev = NULL; - struct list_head *head; /* * Lightweight locking of the devices. We should not need @@ -2266,18 +2281,13 @@ static int btrfs_show_devname(struct seq_file *m, struct dentry *root) * least until the rcu_read_unlock. 
*/ rcu_read_lock(); - cur_devices = fs_info->fs_devices; - while (cur_devices) { - head = &cur_devices->devices; - list_for_each_entry_rcu(dev, head, dev_list) { - if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state)) - continue; - if (!dev->name) - continue; - if (!first_dev || dev->devid < first_dev->devid) - first_dev = dev; - } - cur_devices = cur_devices->seed; + list_for_each_entry_rcu(dev, &fs_info->fs_devices->devices, dev_list) { + if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state)) + continue; + if (!dev->name) + continue; + if (!first_dev || dev->devid < first_dev->devid) + first_dev = dev; } if (first_dev) diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c index f6d3c80f2e28..5c299e1f2297 100644 --- a/fs/btrfs/sysfs.c +++ b/fs/btrfs/sysfs.c @@ -975,7 +975,9 @@ int btrfs_sysfs_add_device_link(struct btrfs_fs_devices *fs_devices, { int error = 0; struct btrfs_device *dev; + unsigned int nofs_flag; + nofs_flag = memalloc_nofs_save(); list_for_each_entry(dev, &fs_devices->devices, dev_list) { struct hd_struct *disk; struct kobject *disk_kobj; @@ -994,6 +996,7 @@ int btrfs_sysfs_add_device_link(struct btrfs_fs_devices *fs_devices, if (error) break; } + memalloc_nofs_restore(nofs_flag); return error; } diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index cdca0f656594..c346ee7ec18d 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -21,6 +21,7 @@ #include "dev-replace.h" #include "qgroup.h" #include "block-group.h" +#include "space-info.h" #define BTRFS_ROOT_TRANS_TAG 0 @@ -173,7 +174,7 @@ loop: cur_trans = fs_info->running_transaction; if (cur_trans) { - if (cur_trans->aborted) { + if (TRANS_ABORTED(cur_trans)) { spin_unlock(&fs_info->trans_lock); return cur_trans->aborted; } @@ -389,7 +390,7 @@ static inline int is_transaction_blocked(struct btrfs_transaction *trans) { return (trans->state >= TRANS_STATE_BLOCKED && trans->state < TRANS_STATE_UNBLOCKED && - !trans->aborted); + !TRANS_ABORTED(trans)); } /* wait for commit against the current transaction to become unblocked @@ -408,7 +409,7 @@ static void wait_current_trans(struct btrfs_fs_info *fs_info) wait_event(fs_info->transaction_wait, cur_trans->state >= TRANS_STATE_UNBLOCKED || - cur_trans->aborted); + TRANS_ABORTED(cur_trans)); btrfs_put_transaction(cur_trans); } else { spin_unlock(&fs_info->trans_lock); @@ -451,6 +452,7 @@ start_transaction(struct btrfs_root *root, unsigned int num_items, u64 num_bytes = 0; u64 qgroup_reserved = 0; bool reloc_reserved = false; + bool do_chunk_alloc = false; int ret; /* Send isn't supposed to start transactions. */ @@ -491,7 +493,8 @@ start_transaction(struct btrfs_root *root, unsigned int num_items, * refill that amount for whatever is missing in the reserve. */ num_bytes = btrfs_calc_insert_metadata_size(fs_info, num_items); - if (delayed_refs_rsv->full == 0) { + if (flush == BTRFS_RESERVE_FLUSH_ALL && + delayed_refs_rsv->full == 0) { delayed_refs_bytes = num_bytes; num_bytes <<= 1; } @@ -512,6 +515,9 @@ start_transaction(struct btrfs_root *root, unsigned int num_items, delayed_refs_bytes); num_bytes -= delayed_refs_bytes; } + + if (rsv->space_info->force_alloc) + do_chunk_alloc = true; } else if (num_items == 0 && flush == BTRFS_RESERVE_FLUSH_ALL && !delayed_refs_rsv->full) { /* @@ -593,6 +599,19 @@ got_it: if (!current->journal_info) current->journal_info = h; + /* + * If the space_info is marked ALLOC_FORCE then we'll get upgraded to + * ALLOC_FORCE the first run through, and then we won't allocate for + * anybody else who races in later. 
We don't care about the return + * value here. + */ + if (do_chunk_alloc && num_bytes) { + u64 flags = h->block_rsv->space_info->flags; + + btrfs_chunk_alloc(h, btrfs_get_alloc_profile(fs_info, flags), + CHUNK_ALLOC_NO_FORCE); + } + /* * btrfs_record_root_in_trans() needs to alloc new extents, and may * call btrfs_join_transaction() while we're also starting a @@ -627,43 +646,10 @@ struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root, struct btrfs_trans_handle *btrfs_start_transaction_fallback_global_rsv( struct btrfs_root *root, - unsigned int num_items, - int min_factor) + unsigned int num_items) { - struct btrfs_fs_info *fs_info = root->fs_info; - struct btrfs_trans_handle *trans; - u64 num_bytes; - int ret; - - /* - * We have two callers: unlink and block group removal. The - * former should succeed even if we will temporarily exceed - * quota and the latter operates on the extent root so - * qgroup enforcement is ignored anyway. - */ - trans = start_transaction(root, num_items, TRANS_START, - BTRFS_RESERVE_FLUSH_ALL, false); - if (!IS_ERR(trans) || PTR_ERR(trans) != -ENOSPC) - return trans; - - trans = btrfs_start_transaction(root, 0); - if (IS_ERR(trans)) - return trans; - - num_bytes = btrfs_calc_insert_metadata_size(fs_info, num_items); - ret = btrfs_cond_migrate_bytes(fs_info, &fs_info->trans_block_rsv, - num_bytes, min_factor); - if (ret) { - btrfs_end_transaction(trans); - return ERR_PTR(ret); - } - - trans->block_rsv = &fs_info->trans_block_rsv; - trans->bytes_reserved = num_bytes; - trace_btrfs_space_reservation(fs_info, "transaction", - trans->transid, num_bytes, 1); - - return trans; + return start_transaction(root, num_items, TRANS_START, + BTRFS_RESERVE_FLUSH_ALL_STEAL, false); } struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root) @@ -884,10 +870,13 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans, if (throttle) btrfs_run_delayed_iputs(info); - if (trans->aborted || + if (TRANS_ABORTED(trans) || test_bit(BTRFS_FS_STATE_ERROR, &info->fs_state)) { wake_up_process(info->transaction_kthread); - err = -EIO; + if (TRANS_ABORTED(trans)) + err = trans->aborted; + else + err = -EROFS; } kmem_cache_free(btrfs_trans_handle_cachep, trans); @@ -1741,7 +1730,8 @@ static void wait_current_trans_commit_start(struct btrfs_fs_info *fs_info, struct btrfs_transaction *trans) { wait_event(fs_info->transaction_blocked_wait, - trans->state >= TRANS_STATE_COMMIT_START || trans->aborted); + trans->state >= TRANS_STATE_COMMIT_START || + TRANS_ABORTED(trans)); } /* @@ -1753,7 +1743,8 @@ static void wait_current_trans_commit_start_and_unblock( struct btrfs_transaction *trans) { wait_event(fs_info->transaction_wait, - trans->state >= TRANS_STATE_UNBLOCKED || trans->aborted); + trans->state >= TRANS_STATE_UNBLOCKED || + TRANS_ABORTED(trans)); } /* @@ -1971,7 +1962,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans) trans->dirty = true; /* Stop the commit early if ->aborted is set */ - if (unlikely(READ_ONCE(cur_trans->aborted))) { + if (TRANS_ABORTED(cur_trans)) { ret = cur_trans->aborted; btrfs_end_transaction(trans); return ret; @@ -2045,7 +2036,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans) wait_for_commit(cur_trans); - if (unlikely(cur_trans->aborted)) + if (TRANS_ABORTED(cur_trans)) ret = cur_trans->aborted; btrfs_put_transaction(cur_trans); @@ -2064,7 +2055,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans) spin_unlock(&fs_info->trans_lock); wait_for_commit(prev_trans); - ret = 
prev_trans->aborted; + ret = READ_ONCE(prev_trans->aborted); btrfs_put_transaction(prev_trans); if (ret) @@ -2118,8 +2109,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans) wait_event(cur_trans->writer_wait, atomic_read(&cur_trans->num_writers) == 1); - /* ->aborted might be set after the previous check, so check it */ - if (unlikely(READ_ONCE(cur_trans->aborted))) { + if (TRANS_ABORTED(cur_trans)) { ret = cur_trans->aborted; goto scrub_continue; } @@ -2237,7 +2227,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans) * The tasks which save the space cache and inode cache may also * update ->aborted, check it. */ - if (unlikely(READ_ONCE(cur_trans->aborted))) { + if (TRANS_ABORTED(cur_trans)) { ret = cur_trans->aborted; mutex_unlock(&fs_info->tree_log_mutex); mutex_unlock(&fs_info->reloc_mutex); diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h index 2c5a6f6e5bb0..7291a2a93075 100644 --- a/fs/btrfs/transaction.h +++ b/fs/btrfs/transaction.h @@ -116,6 +116,10 @@ struct btrfs_trans_handle { struct btrfs_block_rsv *orig_rsv; refcount_t use_count; unsigned int type; + /* + * Error code of transaction abort, set outside of locks and must use + * the READ_ONCE/WRITE_ONCE access + */ short aborted; bool adding_csums; bool allocating_chunk; @@ -127,6 +131,14 @@ struct btrfs_trans_handle { struct list_head new_bgs; }; +/* + * The abort status can be changed between calls and is not protected by locks. + * This accepts btrfs_transaction and btrfs_trans_handle as types. Once it's + * set to a non-zero value it does not change, so the macro should be in checks + * but is not necessary for further reads of the value. + */ +#define TRANS_ABORTED(trans) (unlikely(READ_ONCE((trans)->aborted))) + struct btrfs_pending_snapshot { struct dentry *dentry; struct inode *dir; @@ -181,8 +193,7 @@ struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root, unsigned int num_items); struct btrfs_trans_handle *btrfs_start_transaction_fallback_global_rsv( struct btrfs_root *root, - unsigned int num_items, - int min_factor); + unsigned int num_items); struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root); struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root); struct btrfs_trans_handle *btrfs_join_transaction_nostart(struct btrfs_root *root); diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c index 0e44db066641..91ea38506fbb 100644 --- a/fs/btrfs/tree-checker.c +++ b/fs/btrfs/tree-checker.c @@ -772,7 +772,7 @@ static int check_inode_item(struct extent_buffer *leaf, /* Here we use super block generation + 1 to handle log tree */ if (btrfs_inode_generation(leaf, iitem) > super_gen + 1) { inode_item_err(fs_info, leaf, slot, - "invalid inode generation: has %llu expect (0, %llu]", + "invalid inode transid: has %llu expect [0, %llu]", btrfs_inode_generation(leaf, iitem), super_gen + 1); return -EUCLEAN; diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 7d464b049507..7042b84edc89 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -167,6 +167,7 @@ static int start_log_trans(struct btrfs_trans_handle *trans, if (ret) goto out; + set_bit(BTRFS_ROOT_HAS_LOG_TREE, &root->state); clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state); root->log_start_pid = current->pid; } @@ -193,6 +194,9 @@ static int join_running_log_trans(struct btrfs_root *root) { int ret = -ENOENT; + if (!test_bit(BTRFS_ROOT_HAS_LOG_TREE, &root->state)) + return ret; + mutex_lock(&root->log_mutex); if (root->log_root) { ret = 
0; @@ -3136,29 +3140,17 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans, btrfs_init_log_ctx(&root_log_ctx, NULL); mutex_lock(&log_root_tree->log_mutex); - atomic_inc(&log_root_tree->log_batch); - atomic_inc(&log_root_tree->log_writers); index2 = log_root_tree->log_transid % 2; list_add_tail(&root_log_ctx.list, &log_root_tree->log_ctxs[index2]); root_log_ctx.log_transid = log_root_tree->log_transid; - mutex_unlock(&log_root_tree->log_mutex); - - mutex_lock(&log_root_tree->log_mutex); - /* * Now we are safe to update the log_root_tree because we're under the * log_mutex, and we're a current writer so we're holding the commit * open until we drop the log_mutex. */ ret = update_log_root(trans, log, &new_root_item); - - if (atomic_dec_and_test(&log_root_tree->log_writers)) { - /* atomic_dec_and_test implies a barrier */ - cond_wake_up_nomb(&log_root_tree->log_writer_wait); - } - if (ret) { if (!list_empty(&root_log_ctx.list)) list_del_init(&root_log_ctx.list); @@ -3204,8 +3196,6 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans, root_log_ctx.log_transid - 1); } - wait_for_writer(log_root_tree); - /* * now that we've moved on to the tree of log tree roots, * check the full commit flag again @@ -3327,6 +3317,7 @@ int btrfs_free_log(struct btrfs_trans_handle *trans, struct btrfs_root *root) if (root->log_root) { free_log_tree(trans, root->log_root); root->log_root = NULL; + clear_bit(BTRFS_ROOT_HAS_LOG_TREE, &root->state); } return 0; } @@ -3482,11 +3473,13 @@ fail: btrfs_free_path(path); out_unlock: mutex_unlock(&dir->log_mutex); - if (ret == -ENOSPC) { + if (err == -ENOSPC) { btrfs_set_log_full_commit(trans); - ret = 0; - } else if (ret < 0) - btrfs_abort_transaction(trans, ret); + err = 0; + } else if (err < 0 && err != -ENOENT) { + /* ENOENT can be returned if the entry hasn't been fsynced yet */ + btrfs_abort_transaction(trans, err); + } btrfs_end_log_trans(root); @@ -4049,11 +4042,8 @@ static noinline int copy_items(struct btrfs_trans_handle *trans, fs_info->csum_root, ds + cs, ds + cs + cl - 1, &ordered_sums, 0); - if (ret) { - btrfs_release_path(dst_path); - kfree(ins_data); - return ret; - } + if (ret) + break; } } } @@ -4066,7 +4056,6 @@ static noinline int copy_items(struct btrfs_trans_handle *trans, * we have to do this after the loop above to avoid changing the * log tree while trying to change the log tree. 
*/ - ret = 0; while (!list_empty(&ordered_sums)) { struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next, struct btrfs_ordered_sum, @@ -5007,6 +4996,138 @@ static int log_conflicting_inodes(struct btrfs_trans_handle *trans, return ret; } +static int copy_inode_items_to_log(struct btrfs_trans_handle *trans, + struct btrfs_inode *inode, + struct btrfs_key *min_key, + const struct btrfs_key *max_key, + struct btrfs_path *path, + struct btrfs_path *dst_path, + const u64 logged_isize, + const bool recursive_logging, + const int inode_only, + struct btrfs_log_ctx *ctx, + bool *need_log_inode_item) +{ + struct btrfs_root *root = inode->root; + int ins_start_slot = 0; + int ins_nr = 0; + int ret; + + while (1) { + ret = btrfs_search_forward(root, min_key, path, trans->transid); + if (ret < 0) + return ret; + if (ret > 0) { + ret = 0; + break; + } +again: + /* Note, ins_nr might be > 0 here, cleanup outside the loop */ + if (min_key->objectid != max_key->objectid) + break; + if (min_key->type > max_key->type) + break; + + if (min_key->type == BTRFS_INODE_ITEM_KEY) + *need_log_inode_item = false; + + if ((min_key->type == BTRFS_INODE_REF_KEY || + min_key->type == BTRFS_INODE_EXTREF_KEY) && + inode->generation == trans->transid && + !recursive_logging) { + u64 other_ino = 0; + u64 other_parent = 0; + + ret = btrfs_check_ref_name_override(path->nodes[0], + path->slots[0], min_key, inode, + &other_ino, &other_parent); + if (ret < 0) { + return ret; + } else if (ret > 0 && ctx && + other_ino != btrfs_ino(BTRFS_I(ctx->inode))) { + if (ins_nr > 0) { + ins_nr++; + } else { + ins_nr = 1; + ins_start_slot = path->slots[0]; + } + ret = copy_items(trans, inode, dst_path, path, + ins_start_slot, ins_nr, + inode_only, logged_isize); + if (ret < 0) + return ret; + ins_nr = 0; + + ret = log_conflicting_inodes(trans, root, path, + ctx, other_ino, other_parent); + if (ret) + return ret; + btrfs_release_path(path); + goto next_key; + } + } + + /* Skip xattrs, we log them later with btrfs_log_all_xattrs() */ + if (min_key->type == BTRFS_XATTR_ITEM_KEY) { + if (ins_nr == 0) + goto next_slot; + ret = copy_items(trans, inode, dst_path, path, + ins_start_slot, + ins_nr, inode_only, logged_isize); + if (ret < 0) + return ret; + ins_nr = 0; + goto next_slot; + } + + if (ins_nr && ins_start_slot + ins_nr == path->slots[0]) { + ins_nr++; + goto next_slot; + } else if (!ins_nr) { + ins_start_slot = path->slots[0]; + ins_nr = 1; + goto next_slot; + } + + ret = copy_items(trans, inode, dst_path, path, ins_start_slot, + ins_nr, inode_only, logged_isize); + if (ret < 0) + return ret; + ins_nr = 1; + ins_start_slot = path->slots[0]; +next_slot: + path->slots[0]++; + if (path->slots[0] < btrfs_header_nritems(path->nodes[0])) { + btrfs_item_key_to_cpu(path->nodes[0], min_key, + path->slots[0]); + goto again; + } + if (ins_nr) { + ret = copy_items(trans, inode, dst_path, path, + ins_start_slot, ins_nr, inode_only, + logged_isize); + if (ret < 0) + return ret; + ins_nr = 0; + } + btrfs_release_path(path); +next_key: + if (min_key->offset < (u64)-1) { + min_key->offset++; + } else if (min_key->type < max_key->type) { + min_key->type++; + min_key->offset = 0; + } else { + break; + } + } + if (ins_nr) + ret = copy_items(trans, inode, dst_path, path, ins_start_slot, + ins_nr, inode_only, logged_isize); + + return ret; +} + /* log a single inode in the tree log. * At least one parent directory for this inode must exist in the tree * or be logged already. 
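Editor's note: copy_inode_items_to_log() above is largely a mechanical extraction of the loop that the next hunk removes from btrfs_log_inode(). The pattern it preserves is to accumulate runs of contiguous leaf slots in ins_start_slot/ins_nr and flush each run with a single copy_items() call rather than copying slot by slot. A minimal standalone sketch of that batching idea, with invented names (flush_range(), log_items()) and plain integers standing in for leaf slots, could look like this:

/* Simplified sketch of the ins_start_slot/ins_nr batching; not kernel code. */
#include <stdio.h>

static void flush_range(const int *items, int start, int count)
{
	printf("copy slots [%d, %d)\n", start, start + count);
	(void)items;
}

static void log_items(const int *items, int nr_items)
{
	int start = 0, run = 0;
	int i;

	for (i = 0; i < nr_items; i++) {
		if (items[i] < 0) {		/* a slot we skip, like an xattr item */
			if (run)
				flush_range(items, start, run);
			run = 0;
			continue;
		}
		if (!run)
			start = i;		/* begin a new contiguous run */
		run++;
	}
	if (run)				/* trailing flush, like the final copy_items() */
		flush_range(items, start, run);
}

int main(void)
{
	const int items[] = { 10, 11, -1, 13, 14, 15 };

	log_items(items, (int)(sizeof(items) / sizeof(items[0])));
	return 0;
}

The point of the batching, in the kernel code as in the sketch, is that each flush has a fixed per-call cost, so contiguous items should be copied in as few calls as possible.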
@@ -5028,17 +5149,13 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans, const loff_t end, struct btrfs_log_ctx *ctx) { - struct btrfs_fs_info *fs_info = root->fs_info; struct btrfs_path *path; struct btrfs_path *dst_path; struct btrfs_key min_key; struct btrfs_key max_key; struct btrfs_root *log = root->log_root; int err = 0; - int ret; - int nritems; - int ins_start_slot = 0; - int ins_nr; + int ret = 0; bool fast_search = false; u64 ino = btrfs_ino(inode); struct extent_map_tree *em_tree = &inode->extent_tree; @@ -5074,15 +5191,19 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans, max_key.offset = (u64)-1; /* - * Only run delayed items if we are a dir or a new file. - * Otherwise commit the delayed inode only, which is needed in - * order for the log replay code to mark inodes for link count - * fixup (create temporary BTRFS_TREE_LOG_FIXUP_OBJECTID items). + * Only run delayed items if we are a directory. We want to make sure + * all directory indexes hit the fs/subvolume tree so we can find them + * and figure out which index ranges have to be logged. + * + * Otherwise commit the delayed inode only if the full sync flag is set, + * as we want to make sure an up to date version is in the subvolume + * tree so copy_inode_items_to_log() / copy_items() can find it and copy + * it to the log tree. For a non full sync, we always log the inode item + * based on the in-memory struct btrfs_inode which is always up to date. */ - if (S_ISDIR(inode->vfs_inode.i_mode) || - inode->generation > fs_info->last_trans_committed) + if (S_ISDIR(inode->vfs_inode.i_mode)) ret = btrfs_commit_inode_delayed_items(trans, inode); - else + else if (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags)) ret = btrfs_commit_inode_delayed_inode(inode); if (ret) { @@ -5169,139 +5290,12 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans, goto out_unlock; } - while (1) { - ins_nr = 0; - ret = btrfs_search_forward(root, &min_key, - path, trans->transid); - if (ret < 0) { - err = ret; - goto out_unlock; - } - if (ret != 0) - break; -again: - /* note, ins_nr might be > 0 here, cleanup outside the loop */ - if (min_key.objectid != ino) - break; - if (min_key.type > max_key.type) - break; - - if (min_key.type == BTRFS_INODE_ITEM_KEY) - need_log_inode_item = false; - - if ((min_key.type == BTRFS_INODE_REF_KEY || - min_key.type == BTRFS_INODE_EXTREF_KEY) && - inode->generation == trans->transid && - !recursive_logging) { - u64 other_ino = 0; - u64 other_parent = 0; - - ret = btrfs_check_ref_name_override(path->nodes[0], - path->slots[0], &min_key, inode, - &other_ino, &other_parent); - if (ret < 0) { - err = ret; - goto out_unlock; - } else if (ret > 0 && ctx && - other_ino != btrfs_ino(BTRFS_I(ctx->inode))) { - if (ins_nr > 0) { - ins_nr++; - } else { - ins_nr = 1; - ins_start_slot = path->slots[0]; - } - ret = copy_items(trans, inode, dst_path, path, - ins_start_slot, - ins_nr, inode_only, - logged_isize); - if (ret < 0) { - err = ret; - goto out_unlock; - } - ins_nr = 0; - - err = log_conflicting_inodes(trans, root, path, - ctx, other_ino, other_parent); - if (err) - goto out_unlock; - btrfs_release_path(path); - goto next_key; - } - } - - /* Skip xattrs, we log them later with btrfs_log_all_xattrs() */ - if (min_key.type == BTRFS_XATTR_ITEM_KEY) { - if (ins_nr == 0) - goto next_slot; - ret = copy_items(trans, inode, dst_path, path, - ins_start_slot, - ins_nr, inode_only, logged_isize); - if (ret < 0) { - err = ret; - goto out_unlock; - } - ins_nr = 0; - goto next_slot; - } - - if 
(ins_nr && ins_start_slot + ins_nr == path->slots[0]) { - ins_nr++; - goto next_slot; - } else if (!ins_nr) { - ins_start_slot = path->slots[0]; - ins_nr = 1; - goto next_slot; - } - - ret = copy_items(trans, inode, dst_path, path, - ins_start_slot, ins_nr, inode_only, - logged_isize); - if (ret < 0) { - err = ret; - goto out_unlock; - } - ins_nr = 1; - ins_start_slot = path->slots[0]; -next_slot: - - nritems = btrfs_header_nritems(path->nodes[0]); - path->slots[0]++; - if (path->slots[0] < nritems) { - btrfs_item_key_to_cpu(path->nodes[0], &min_key, - path->slots[0]); - goto again; - } - if (ins_nr) { - ret = copy_items(trans, inode, dst_path, path, - ins_start_slot, - ins_nr, inode_only, logged_isize); - if (ret < 0) { - err = ret; - goto out_unlock; - } - ins_nr = 0; - } - btrfs_release_path(path); -next_key: - if (min_key.offset < (u64)-1) { - min_key.offset++; - } else if (min_key.type < max_key.type) { - min_key.type++; - min_key.offset = 0; - } else { - break; - } - } - if (ins_nr) { - ret = copy_items(trans, inode, dst_path, path, - ins_start_slot, ins_nr, inode_only, - logged_isize); - if (ret < 0) { - err = ret; - goto out_unlock; - } - ins_nr = 0; - } + err = copy_inode_items_to_log(trans, inode, &min_key, &max_key, + path, dst_path, logged_isize, + recursive_logging, inode_only, ctx, + &need_log_inode_item); + if (err) + goto out_unlock; btrfs_release_path(path); btrfs_release_path(dst_path); diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 3e64f49c394b..4ecd6663dfb5 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -4,6 +4,7 @@ */ #include +#include #include #include #include @@ -219,7 +220,9 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, * * global::fs_devs - add, remove, updates to the global list * - * does not protect: manipulation of the fs_devices::devices list! + * does not protect: manipulation of the fs_devices::devices list in general + * but in mount context it could be used to exclude list modifications by eg. + * scan ioctl * * btrfs_device::name - renames (write side), read is RCU * @@ -232,6 +235,9 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, * may be used to exclude some operations from running concurrently without any * modifications to the list (see write_all_supers) * + * Is not required at mount and close times, because our device list is + * protected by the uuid_mutex at that point. + * * balance_mutex * ------------- * protects balance structures (status, state) and context accessed from @@ -778,6 +784,11 @@ static int btrfs_free_stale_devices(const char *path, return ret; } +/* + * This is only used on mount, and we are protected from competing things + * messing with our fs_devices by the uuid_mutex, thus we do not need the + * fs_devices->device_list_mutex here. + */ static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices, struct btrfs_device *device, fmode_t flags, void *holder) @@ -1223,6 +1234,8 @@ again: &device->dev_state)) { if (!test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state) && + !test_bit(BTRFS_DEV_STATE_MISSING, + &device->dev_state) && (!latest_dev || device->generation > latest_dev->generation)) { latest_dev = device; @@ -1416,8 +1429,14 @@ int btrfs_open_devices(struct btrfs_fs_devices *fs_devices, int ret; lockdep_assert_held(&uuid_mutex); + /* + * The device_list_mutex cannot be taken here in case opening the + * underlying device takes further locks like bd_mutex. 
+ * + * We also don't need the lock here as this is called during mount and + * exclusion is provided by uuid_mutex + */ - mutex_lock(&fs_devices->device_list_mutex); if (fs_devices->opened) { fs_devices->opened++; ret = 0; @@ -1425,7 +1444,6 @@ int btrfs_open_devices(struct btrfs_fs_devices *fs_devices, list_sort(NULL, &fs_devices->devices, devid_cmp); ret = open_fs_devices(fs_devices, flags, holder); } - mutex_unlock(&fs_devices->device_list_mutex); return ret; } @@ -2769,8 +2787,18 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path ret = btrfs_commit_transaction(trans); } - /* Update ctime/mtime for libblkid */ + /* + * Now that we have written a new super block to this device, check all + * other fs_devices list if device_path alienates any other scanned + * device. + * We can ignore the return value as it typically returns -EINVAL and + * only succeeds if the device was an alien. + */ + btrfs_forget_devices(device_path); + + /* Update ctime/mtime for blkid or udev */ update_dev_time(device_path); + return ret; error_sysfs: @@ -3271,7 +3299,7 @@ static int insert_balance_item(struct btrfs_fs_info *fs_info, if (!path) return -ENOMEM; - trans = btrfs_start_transaction(root, 0); + trans = btrfs_start_transaction_fallback_global_rsv(root, 0); if (IS_ERR(trans)) { btrfs_free_path(path); return PTR_ERR(trans); @@ -4234,7 +4262,22 @@ int btrfs_balance(struct btrfs_fs_info *fs_info, mutex_lock(&fs_info->balance_mutex); if (ret == -ECANCELED && atomic_read(&fs_info->balance_pause_req)) btrfs_info(fs_info, "balance: paused"); - else if (ret == -ECANCELED && atomic_read(&fs_info->balance_cancel_req)) + /* + * Balance can be canceled by: + * + * - Regular cancel request + * Then ret == -ECANCELED and balance_cancel_req > 0 + * + * - Fatal signal to "btrfs" process + * Either the signal caught by wait_reserve_ticket() and callers + * got -EINTR, or caught by btrfs_should_cancel_balance() and + * got -ECANCELED. + * Either way, in this case balance_cancel_req = 0, and + * ret == -EINTR or ret == -ECANCELED. + * + * So here we only check the return value to catch canceled balance. + */ + else if (ret == -ECANCELED || ret == -EINTR) btrfs_info(fs_info, "balance: canceled"); else btrfs_info(fs_info, "balance: ended with status: %d", ret); @@ -4526,6 +4569,7 @@ static int btrfs_uuid_scan_kthread(void *data) goto skip; } update_tree: + btrfs_release_path(path); if (!btrfs_is_empty_uuid(root_item.uuid)) { ret = btrfs_uuid_tree_add(trans, root_item.uuid, BTRFS_UUID_KEY_SUBVOL, @@ -4550,6 +4594,7 @@ update_tree: } skip: + btrfs_release_path(path); if (trans) { ret = btrfs_end_transaction(trans); trans = NULL; @@ -4557,7 +4602,6 @@ skip: break; } - btrfs_release_path(path); if (key.offset < (u64)-1) { key.offset++; } else if (key.type < BTRFS_ROOT_ITEM_KEY) { @@ -6665,8 +6709,17 @@ static struct btrfs_device *add_missing_dev(struct btrfs_fs_devices *fs_devices, u64 devid, u8 *dev_uuid) { struct btrfs_device *device; + unsigned int nofs_flag; + /* + * We call this under the chunk_mutex, so we want to use NOFS for this + * allocation, however we don't want to change btrfs_alloc_device() to + * always do NOFS because we use it in a lot of other GFP_KERNEL safe + * places. + */ + nofs_flag = memalloc_nofs_save(); device = btrfs_alloc_device(NULL, &devid, dev_uuid); + memalloc_nofs_restore(nofs_flag); if (IS_ERR(device)) return device; @@ -7255,7 +7308,14 @@ int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info) * otherwise we don't need it. 
*/ mutex_lock(&uuid_mutex); - mutex_lock(&fs_info->chunk_mutex); + + /* + * It is possible for mount and umount to race in such a way that + * we execute this code path, but open_fs_devices failed to clear + * total_rw_bytes. We certainly want it cleared before reading the + * device items, so clear it here. + */ + fs_info->fs_devices->total_rw_bytes = 0; /* * Read all device items, and then all the chunk items. All @@ -7292,7 +7352,9 @@ int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info) } else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) { struct btrfs_chunk *chunk; chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk); + mutex_lock(&fs_info->chunk_mutex); ret = read_one_chunk(&found_key, leaf, chunk); + mutex_unlock(&fs_info->chunk_mutex); if (ret) goto error; } @@ -7322,7 +7384,6 @@ int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info) } ret = 0; error: - mutex_unlock(&fs_info->chunk_mutex); mutex_unlock(&uuid_mutex); btrfs_free_path(path); diff --git a/fs/buffer.c b/fs/buffer.c index 79c9562434a8..22d8ac4a8c40 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -3170,6 +3170,15 @@ int __sync_dirty_buffer(struct buffer_head *bh, int op_flags) WARN_ON(atomic_read(&bh->b_count) < 1); lock_buffer(bh); if (test_clear_buffer_dirty(bh)) { + /* + * The bh should be mapped, but it might not be if the + * device was hot-removed. Not much we can do but fail the I/O. + */ + if (!buffer_mapped(bh)) { + unlock_buffer(bh); + return -EIO; + } + get_bh(bh); bh->b_end_io = end_buffer_write_sync; ret = submit_bh(REQ_OP_WRITE, op_flags, bh); diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c index 2e4764fd1872..3367a8194f24 100644 --- a/fs/ceph/dir.c +++ b/fs/ceph/dir.c @@ -920,6 +920,10 @@ static int ceph_symlink(struct inode *dir, struct dentry *dentry, req->r_num_caps = 2; req->r_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_AUTH_EXCL; req->r_dentry_unless = CEPH_CAP_FILE_EXCL; + if (as_ctx.pagelist) { + req->r_pagelist = as_ctx.pagelist; + as_ctx.pagelist = NULL; + } err = ceph_mdsc_do_request(mdsc, dir, req); if (!err && !req->r_reply_info.head->is_dentry) err = ceph_handle_notrace_create(dir, dentry); diff --git a/fs/ceph/export.c b/fs/ceph/export.c index 79dc06881e78..e088843a7734 100644 --- a/fs/ceph/export.c +++ b/fs/ceph/export.c @@ -172,9 +172,16 @@ struct inode *ceph_lookup_inode(struct super_block *sb, u64 ino) static struct dentry *__fh_to_dentry(struct super_block *sb, u64 ino) { struct inode *inode = __lookup_inode(sb, ino); + int err; + if (IS_ERR(inode)) return ERR_CAST(inode); - if (inode->i_nlink == 0) { + /* We need LINK caps to reliably check i_nlink */ + err = ceph_do_getattr(inode, CEPH_CAP_LINK_SHARED, false); + if (err) + return ERR_PTR(err); + /* -ESTALE if inode as been unlinked and no file is open */ + if ((inode->i_nlink == 0) && (atomic_read(&inode->i_count) == 1)) { iput(inode); return ERR_PTR(-ESTALE); } diff --git a/fs/ceph/file.c b/fs/ceph/file.c index ce54a1b12819..a10711a6337a 100644 --- a/fs/ceph/file.c +++ b/fs/ceph/file.c @@ -1260,6 +1260,7 @@ static ssize_t ceph_read_iter(struct kiocb *iocb, struct iov_iter *to) struct inode *inode = file_inode(filp); struct ceph_inode_info *ci = ceph_inode(inode); struct page *pinned_page = NULL; + bool direct_lock = iocb->ki_flags & IOCB_DIRECT; ssize_t ret; int want, got = 0; int retry_op = 0, read = 0; @@ -1268,7 +1269,7 @@ again: dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n", inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, inode); - if (iocb->ki_flags & IOCB_DIRECT) + if (direct_lock) 
ceph_start_io_direct(inode); else ceph_start_io_read(inode); @@ -1325,7 +1326,7 @@ again: } ceph_put_cap_refs(ci, got); - if (iocb->ki_flags & IOCB_DIRECT) + if (direct_lock) ceph_end_io_direct(inode); else ceph_end_io_read(inode); @@ -2197,6 +2198,7 @@ const struct file_operations ceph_file_fops = { .mmap = ceph_mmap, .fsync = ceph_fsync, .lock = ceph_lock, + .setlease = simple_nosetlease, .flock = ceph_flock, .splice_read = generic_file_splice_read, .splice_write = iter_file_splice_write, diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index b79fe6549df6..0f21073a51a1 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -3091,8 +3091,10 @@ static void handle_session(struct ceph_mds_session *session, goto bad; /* version >= 3, feature bits */ ceph_decode_32_safe(&p, end, len, bad); - ceph_decode_64_safe(&p, end, features, bad); - p += len - sizeof(features); + if (len) { + ceph_decode_64_safe(&p, end, features, bad); + p += len - sizeof(features); + } } mutex_lock(&mdsc->mutex); @@ -4066,6 +4068,9 @@ static void delayed_work(struct work_struct *work) dout("mdsc delayed_work\n"); + if (mdsc->stopping) + return; + mutex_lock(&mdsc->mutex); renew_interval = mdsc->mdsmap->m_session_timeout >> 2; renew_caps = time_after_eq(jiffies, HZ*renew_interval + @@ -4141,7 +4146,6 @@ int ceph_mdsc_init(struct ceph_fs_client *fsc) return -ENOMEM; } - fsc->mdsc = mdsc; init_completion(&mdsc->safe_umount_waiters); init_waitqueue_head(&mdsc->session_close_wq); INIT_LIST_HEAD(&mdsc->waiting_for_map); @@ -4193,6 +4197,8 @@ int ceph_mdsc_init(struct ceph_fs_client *fsc) strscpy(mdsc->nodename, utsname()->nodename, sizeof(mdsc->nodename)); + + fsc->mdsc = mdsc; return 0; } @@ -4430,7 +4436,16 @@ void ceph_mdsc_force_umount(struct ceph_mds_client *mdsc) static void ceph_mdsc_stop(struct ceph_mds_client *mdsc) { dout("stop\n"); - cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */ + /* + * Make sure the delayed work stopped before releasing + * the resources. + * + * Because the cancel_delayed_work_sync() will only + * guarantee that the work finishes executing. But the + * delayed work will re-arm itself again after that. + */ + flush_delayed_work(&mdsc->delayed_work); + if (mdsc->mdsmap) ceph_mdsmap_destroy(mdsc->mdsmap); kfree(mdsc->sessions); diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 721b2560caa7..f5df2a4195c2 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -614,26 +614,26 @@ cifs_reconnect(struct TCP_Server_Info *server) try_to_freeze(); mutex_lock(&server->srv_mutex); +#ifdef CONFIG_CIFS_DFS_UPCALL /* * Set up next DFS target server (if any) for reconnect. If DFS * feature is disabled, then we will retry last server we * connected to before. 
*/ + reconn_inval_dfs_target(server, cifs_sb, &tgt_list, &tgt_it); +#endif + rc = reconn_set_ipaddr(server); + if (rc) { + cifs_dbg(FYI, "%s: failed to resolve hostname: %d\n", + __func__, rc); + } + if (cifs_rdma_enabled(server)) rc = smbd_reconnect(server); else rc = generic_ip_connect(server); if (rc) { cifs_dbg(FYI, "reconnect error %d\n", rc); -#ifdef CONFIG_CIFS_DFS_UPCALL - reconn_inval_dfs_target(server, cifs_sb, &tgt_list, - &tgt_it); -#endif - rc = reconn_set_ipaddr(server); - if (rc) { - cifs_dbg(FYI, "%s: failed to resolve hostname: %d\n", - __func__, rc); - } mutex_unlock(&server->srv_mutex); msleep(3000); } else { @@ -5281,9 +5281,15 @@ cifs_construct_tcon(struct cifs_sb_info *cifs_sb, kuid_t fsuid) vol_info->nocase = master_tcon->nocase; vol_info->nohandlecache = master_tcon->nohandlecache; vol_info->local_lease = master_tcon->local_lease; + vol_info->no_lease = master_tcon->no_lease; + vol_info->resilient = master_tcon->use_resilient; + vol_info->persistent = master_tcon->use_persistent; + vol_info->handle_timeout = master_tcon->handle_timeout; vol_info->no_linux_ext = !master_tcon->unix_ext; + vol_info->linux_ext = master_tcon->posix_extensions; vol_info->sectype = master_tcon->ses->sectype; vol_info->sign = master_tcon->ses->sign; + vol_info->seal = master_tcon->seal; rc = cifs_set_vol_auth(vol_info, master_tcon->ses); if (rc) { @@ -5309,10 +5315,6 @@ cifs_construct_tcon(struct cifs_sb_info *cifs_sb, kuid_t fsuid) goto out; } - /* if new SMB3.11 POSIX extensions are supported do not remap / and \ */ - if (tcon->posix_extensions) - cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_POSIX_PATHS; - if (cap_unix(ses)) reset_cifs_unix_caps(0, tcon, NULL, vol_info); diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index 5e6bc8fa4e46..17df90b5f57a 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c @@ -898,6 +898,8 @@ cifs_get_inode_info(struct inode **inode, const char *full_path, if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID) { rc = cifs_acl_to_fattr(cifs_sb, &fattr, *inode, true, full_path, fid); + if (rc == -EREMOTE) + rc = 0; if (rc) { cifs_dbg(FYI, "%s: Get mode from SID failed. rc=%d\n", __func__, rc); @@ -906,6 +908,8 @@ cifs_get_inode_info(struct inode **inode, const char *full_path, } else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) { rc = cifs_acl_to_fattr(cifs_sb, &fattr, *inode, false, full_path, fid); + if (rc == -EREMOTE) + rc = 0; if (rc) { cifs_dbg(FYI, "%s: Getting ACL failed with error: %d\n", __func__, rc); @@ -2264,6 +2268,15 @@ set_size_out: if (rc == 0) { cifsInode->server_eof = attrs->ia_size; cifs_setsize(inode, attrs->ia_size); + + /* + * The man page of truncate says if the size changed, + * then the st_ctime and st_mtime fields for the file + * are updated. 
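+ * Stamp the new times here so that the rest of the setattr path
+ * copies them into the in-core inode and subsequent stat() results
+ * match local truncate(2) semantics.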
+ */ + attrs->ia_ctime = attrs->ia_mtime = current_time(inode); + attrs->ia_valid |= ATTR_CTIME | ATTR_MTIME; + cifs_truncate_page(inode->i_mapping, inode->i_size); } diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c index 14265b4bbcc0..2fc96f7923ee 100644 --- a/fs/cifs/smb2misc.c +++ b/fs/cifs/smb2misc.c @@ -509,15 +509,31 @@ cifs_ses_oplock_break(struct work_struct *work) kfree(lw); } -static bool -smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp, - struct smb2_lease_break_work *lw) +static void +smb2_queue_pending_open_break(struct tcon_link *tlink, __u8 *lease_key, + __le32 new_lease_state) +{ + struct smb2_lease_break_work *lw; + + lw = kmalloc(sizeof(struct smb2_lease_break_work), GFP_KERNEL); + if (!lw) { + cifs_put_tlink(tlink); + return; + } + + INIT_WORK(&lw->lease_break, cifs_ses_oplock_break); + lw->tlink = tlink; + lw->lease_state = new_lease_state; + memcpy(lw->lease_key, lease_key, SMB2_LEASE_KEY_SIZE); + queue_work(cifsiod_wq, &lw->lease_break); +} + +static bool +smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp) { - bool found; __u8 lease_state; struct list_head *tmp; struct cifsFileInfo *cfile; - struct cifs_pending_open *open; struct cifsInodeInfo *cinode; int ack_req = le32_to_cpu(rsp->Flags & SMB2_NOTIFY_BREAK_LEASE_FLAG_ACK_REQUIRED); @@ -556,22 +572,29 @@ smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp, &cinode->flags); cifs_queue_oplock_break(cfile); - kfree(lw); return true; } - found = false; + return false; +} + +static struct cifs_pending_open * +smb2_tcon_find_pending_open_lease(struct cifs_tcon *tcon, + struct smb2_lease_break *rsp) +{ + __u8 lease_state = le32_to_cpu(rsp->NewLeaseState); + int ack_req = le32_to_cpu(rsp->Flags & + SMB2_NOTIFY_BREAK_LEASE_FLAG_ACK_REQUIRED); + struct cifs_pending_open *open; + struct cifs_pending_open *found = NULL; + list_for_each_entry(open, &tcon->pending_opens, olist) { if (memcmp(open->lease_key, rsp->LeaseKey, SMB2_LEASE_KEY_SIZE)) continue; if (!found && ack_req) { - found = true; - memcpy(lw->lease_key, open->lease_key, - SMB2_LEASE_KEY_SIZE); - lw->tlink = cifs_get_tlink(open->tlink); - queue_work(cifsiod_wq, &lw->lease_break); + found = open; } cifs_dbg(FYI, "found in the pending open list\n"); @@ -592,14 +615,7 @@ smb2_is_valid_lease_break(char *buffer) struct TCP_Server_Info *server; struct cifs_ses *ses; struct cifs_tcon *tcon; - struct smb2_lease_break_work *lw; - - lw = kmalloc(sizeof(struct smb2_lease_break_work), GFP_KERNEL); - if (!lw) - return false; - - INIT_WORK(&lw->lease_break, cifs_ses_oplock_break); - lw->lease_state = rsp->NewLeaseState; + struct cifs_pending_open *open; cifs_dbg(FYI, "Checking for lease break\n"); @@ -617,11 +633,27 @@ smb2_is_valid_lease_break(char *buffer) spin_lock(&tcon->open_file_lock); cifs_stats_inc( &tcon->stats.cifs_stats.num_oplock_brks); - if (smb2_tcon_has_lease(tcon, rsp, lw)) { + if (smb2_tcon_has_lease(tcon, rsp)) { spin_unlock(&tcon->open_file_lock); spin_unlock(&cifs_tcp_ses_lock); return true; } + open = smb2_tcon_find_pending_open_lease(tcon, + rsp); + if (open) { + __u8 lease_key[SMB2_LEASE_KEY_SIZE]; + struct tcon_link *tlink; + + tlink = cifs_get_tlink(open->tlink); + memcpy(lease_key, open->lease_key, + SMB2_LEASE_KEY_SIZE); + spin_unlock(&tcon->open_file_lock); + spin_unlock(&cifs_tcp_ses_lock); + smb2_queue_pending_open_break(tlink, + lease_key, + rsp->NewLeaseState); + return true; + } spin_unlock(&tcon->open_file_lock); if (tcon->crfid.is_valid && @@ -639,7 +671,6 @@ 
smb2_is_valid_lease_break(char *buffer) } } spin_unlock(&cifs_tcp_ses_lock); - kfree(lw); cifs_dbg(FYI, "Can not process lease break - no lease matched\n"); return false; } diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c index 58915d882285..7ccbfc656478 100644 --- a/fs/cifs/smb2ops.c +++ b/fs/cifs/smb2ops.c @@ -736,6 +736,7 @@ int open_shroot(unsigned int xid, struct cifs_tcon *tcon, struct cifs_fid *pfid) /* close extra handle outside of crit sec */ SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid); } + rc = 0; goto oshr_free; } @@ -2969,6 +2970,11 @@ static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon, trace_smb3_zero_enter(xid, cfile->fid.persistent_fid, tcon->tid, ses->Suid, offset, len); + /* + * We zero the range through ioctl, so we need remove the page caches + * first, otherwise the data may be inconsistent with the server. + */ + truncate_pagecache_range(inode, offset, offset + len - 1); /* if file not oplocked can't be sure whether asking to extend size */ if (!CIFS_CACHE_READ(cifsi)) @@ -3035,6 +3041,12 @@ static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon, return rc; } + /* + * We implement the punch hole through ioctl, so we need remove the page + * caches first, otherwise the data may be inconsistent with the server. + */ + truncate_pagecache_range(inode, offset, offset + len - 1); + cifs_dbg(FYI, "Offset %lld len %lld\n", offset, len); fsctl_buf.FileOffset = cpu_to_le64(offset); diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c index 06b1a86d76b1..7ff05c06f2a4 100644 --- a/fs/cifs/smb2pdu.c +++ b/fs/cifs/smb2pdu.c @@ -1323,6 +1323,8 @@ SMB2_auth_kerberos(struct SMB2_sess_data *sess_data) spnego_key = cifs_get_spnego_key(ses); if (IS_ERR(spnego_key)) { rc = PTR_ERR(spnego_key); + if (rc == -ENOKEY) + cifs_dbg(VFS, "Verify user has a krb5 ticket and keyutils is installed\n"); spnego_key = NULL; goto out; } diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c index fe1552cc8a0a..eafc49de4d7f 100644 --- a/fs/cifs/transport.c +++ b/fs/cifs/transport.c @@ -528,7 +528,7 @@ wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits, const int timeout, const int flags, unsigned int *instance) { - int rc; + long rc; int *credits; int optype; long int t; diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c index b43960794922..943637298f65 100644 --- a/fs/debugfs/file.c +++ b/fs/debugfs/file.c @@ -176,7 +176,7 @@ static int open_proxy_open(struct inode *inode, struct file *filp) goto out; if (!fops_get(real_fops)) { -#ifdef MODULE +#ifdef CONFIG_MODULES if (real_fops->owner && real_fops->owner->state == MODULE_STATE_GOING) goto out; @@ -311,7 +311,7 @@ static int full_proxy_open(struct inode *inode, struct file *filp) goto out; if (!fops_get(real_fops)) { -#ifdef MODULE +#ifdef CONFIG_MODULES if (real_fops->owner && real_fops->owner->state == MODULE_STATE_GOING) goto out; diff --git a/fs/dlm/dlm_internal.h b/fs/dlm/dlm_internal.h index 416d9de35679..4311d01b02a8 100644 --- a/fs/dlm/dlm_internal.h +++ b/fs/dlm/dlm_internal.h @@ -97,7 +97,6 @@ do { \ __LINE__, __FILE__, #x, jiffies); \ {do} \ printk("\n"); \ - BUG(); \ panic("DLM: Record message above and reboot.\n"); \ } \ } diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c index afb8340918b8..c689359ca532 100644 --- a/fs/dlm/lockspace.c +++ b/fs/dlm/lockspace.c @@ -632,6 +632,9 @@ static int new_lockspace(const char *name, const char *cluster, wait_event(ls->ls_recover_lock_wait, test_bit(LSFL_RECOVER_LOCK, &ls->ls_flags)); + /* let kobject handle freeing of ls if 
there's an error */ + do_unreg = 1; + ls->ls_kobj.kset = dlm_kset; error = kobject_init_and_add(&ls->ls_kobj, &dlm_ktype, NULL, "%s", ls->ls_name); @@ -639,9 +642,6 @@ static int new_lockspace(const char *name, const char *cluster, goto out_recoverd; kobject_uevent(&ls->ls_kobj, KOBJ_ADD); - /* let kobject handle freeing of ls if there's an error */ - do_unreg = 1; - /* This uevent triggers dlm_controld in userspace to add us to the group of nodes that are members of this lockspace (managed by the cluster infrastructure.) Once it's done that, it tells us who the diff --git a/fs/erofs/inode.c b/fs/erofs/inode.c index 3350ab65d892..b36b414cd7a7 100644 --- a/fs/erofs/inode.c +++ b/fs/erofs/inode.c @@ -8,31 +8,80 @@ #include -/* no locking */ -static int erofs_read_inode(struct inode *inode, void *data) +/* + * if inode is successfully read, return its inode page (or sometimes + * the inode payload page if it's an extended inode) in order to fill + * inline data if possible. + */ +static struct page *erofs_read_inode(struct inode *inode, + unsigned int *ofs) { + struct super_block *sb = inode->i_sb; + struct erofs_sb_info *sbi = EROFS_SB(sb); struct erofs_inode *vi = EROFS_I(inode); - struct erofs_inode_compact *dic = data; - struct erofs_inode_extended *die; + const erofs_off_t inode_loc = iloc(sbi, vi->nid); - const unsigned int ifmt = le16_to_cpu(dic->i_format); - struct erofs_sb_info *sbi = EROFS_SB(inode->i_sb); - erofs_blk_t nblks = 0; + erofs_blk_t blkaddr, nblks = 0; + struct page *page; + struct erofs_inode_compact *dic; + struct erofs_inode_extended *die, *copied = NULL; + unsigned int ifmt; + int err; + + blkaddr = erofs_blknr(inode_loc); + *ofs = erofs_blkoff(inode_loc); + + erofs_dbg("%s, reading inode nid %llu at %u of blkaddr %u", + __func__, vi->nid, *ofs, blkaddr); + + page = erofs_get_meta_page(sb, blkaddr); + if (IS_ERR(page)) { + erofs_err(sb, "failed to get inode (nid: %llu) page, err %ld", + vi->nid, PTR_ERR(page)); + return page; + } + + dic = page_address(page) + *ofs; + ifmt = le16_to_cpu(dic->i_format); vi->datalayout = erofs_inode_datalayout(ifmt); - if (vi->datalayout >= EROFS_INODE_DATALAYOUT_MAX) { erofs_err(inode->i_sb, "unsupported datalayout %u of nid %llu", vi->datalayout, vi->nid); - DBG_BUGON(1); - return -EOPNOTSUPP; + err = -EOPNOTSUPP; + goto err_out; } switch (erofs_inode_version(ifmt)) { case EROFS_INODE_LAYOUT_EXTENDED: - die = data; - vi->inode_isize = sizeof(struct erofs_inode_extended); + /* check if the inode acrosses page boundary */ + if (*ofs + vi->inode_isize <= PAGE_SIZE) { + *ofs += vi->inode_isize; + die = (struct erofs_inode_extended *)dic; + } else { + const unsigned int gotten = PAGE_SIZE - *ofs; + + copied = kmalloc(vi->inode_isize, GFP_NOFS); + if (!copied) { + err = -ENOMEM; + goto err_out; + } + memcpy(copied, dic, gotten); + unlock_page(page); + put_page(page); + + page = erofs_get_meta_page(sb, blkaddr + 1); + if (IS_ERR(page)) { + erofs_err(sb, "failed to get inode payload page (nid: %llu), err %ld", + vi->nid, PTR_ERR(page)); + kfree(copied); + return page; + } + *ofs = vi->inode_isize - gotten; + memcpy((u8 *)copied + gotten, page_address(page), *ofs); + die = copied; + } vi->xattr_isize = erofs_xattr_ibody_size(die->i_xattr_icount); inode->i_mode = le16_to_cpu(die->i_mode); @@ -69,9 +118,12 @@ static int erofs_read_inode(struct inode *inode, void *data) /* total blocks for compressed files */ if (erofs_inode_is_data_compressed(vi->datalayout)) nblks = le32_to_cpu(die->i_u.compressed_blocks); + + kfree(copied); break; case 
EROFS_INODE_LAYOUT_COMPACT: vi->inode_isize = sizeof(struct erofs_inode_compact); + *ofs += vi->inode_isize; vi->xattr_isize = erofs_xattr_ibody_size(dic->i_xattr_icount); inode->i_mode = le16_to_cpu(dic->i_mode); @@ -111,8 +163,8 @@ static int erofs_read_inode(struct inode *inode, void *data) erofs_err(inode->i_sb, "unsupported on-disk inode version %u of nid %llu", erofs_inode_version(ifmt), vi->nid); - DBG_BUGON(1); - return -EOPNOTSUPP; + err = -EOPNOTSUPP; + goto err_out; } if (!nblks) @@ -120,13 +172,18 @@ static int erofs_read_inode(struct inode *inode, void *data) inode->i_blocks = roundup(inode->i_size, EROFS_BLKSIZ) >> 9; else inode->i_blocks = nblks << LOG_SECTORS_PER_BLOCK; - return 0; + return page; bogusimode: erofs_err(inode->i_sb, "bogus i_mode (%o) @ nid %llu", inode->i_mode, vi->nid); + err = -EFSCORRUPTED; +err_out: DBG_BUGON(1); - return -EFSCORRUPTED; + kfree(copied); + unlock_page(page); + put_page(page); + return ERR_PTR(err); } static int erofs_fill_symlink(struct inode *inode, void *data, @@ -146,7 +203,7 @@ static int erofs_fill_symlink(struct inode *inode, void *data, if (!lnk) return -ENOMEM; - m_pofs += vi->inode_isize + vi->xattr_isize; + m_pofs += vi->xattr_isize; /* inline symlink data shouldn't cross page boundary as well */ if (m_pofs + inode->i_size > PAGE_SIZE) { kfree(lnk); @@ -167,37 +224,17 @@ static int erofs_fill_symlink(struct inode *inode, void *data, static int erofs_fill_inode(struct inode *inode, int isdir) { - struct super_block *sb = inode->i_sb; struct erofs_inode *vi = EROFS_I(inode); struct page *page; - void *data; - int err; - erofs_blk_t blkaddr; unsigned int ofs; - erofs_off_t inode_loc; + int err = 0; trace_erofs_fill_inode(inode, isdir); - inode_loc = iloc(EROFS_SB(sb), vi->nid); - blkaddr = erofs_blknr(inode_loc); - ofs = erofs_blkoff(inode_loc); - erofs_dbg("%s, reading inode nid %llu at %u of blkaddr %u", - __func__, vi->nid, ofs, blkaddr); - - page = erofs_get_meta_page(sb, blkaddr); - - if (IS_ERR(page)) { - erofs_err(sb, "failed to get inode (nid: %llu) page, err %ld", - vi->nid, PTR_ERR(page)); + /* read inode base data from disk */ + page = erofs_read_inode(inode, &ofs); + if (IS_ERR(page)) return PTR_ERR(page); - } - - DBG_BUGON(!PageUptodate(page)); - data = page_address(page); - - err = erofs_read_inode(inode, data + ofs); - if (err) - goto out_unlock; /* setup the new inode */ switch (inode->i_mode & S_IFMT) { @@ -210,7 +247,7 @@ static int erofs_fill_inode(struct inode *inode, int isdir) inode->i_fop = &erofs_dir_fops; break; case S_IFLNK: - err = erofs_fill_symlink(inode, data, ofs); + err = erofs_fill_symlink(inode, page_address(page), ofs); if (err) goto out_unlock; inode_nohighmem(inode); diff --git a/fs/erofs/zdata.h b/fs/erofs/zdata.h index faf950189bd7..568d5a493876 100644 --- a/fs/erofs/zdata.h +++ b/fs/erofs/zdata.h @@ -148,22 +148,22 @@ static inline void z_erofs_onlinepage_init(struct page *page) static inline void z_erofs_onlinepage_fixup(struct page *page, uintptr_t index, bool down) { - unsigned long *p, o, v, id; -repeat: - p = &page_private(page); - o = READ_ONCE(*p); + union z_erofs_onlinepage_converter u = { .v = &page_private(page) }; + int orig, orig_index, val; - id = o >> Z_EROFS_ONLINEPAGE_INDEX_SHIFT; - if (id) { +repeat: + orig = atomic_read(u.o); + orig_index = orig >> Z_EROFS_ONLINEPAGE_INDEX_SHIFT; + if (orig_index) { if (!index) return; - DBG_BUGON(id != index); + DBG_BUGON(orig_index != index); } - v = (index << Z_EROFS_ONLINEPAGE_INDEX_SHIFT) | - ((o & Z_EROFS_ONLINEPAGE_COUNT_MASK) + 
(unsigned int)down); - if (cmpxchg(p, o, v) != o) + val = (index << Z_EROFS_ONLINEPAGE_INDEX_SHIFT) | + ((orig & Z_EROFS_ONLINEPAGE_COUNT_MASK) + (unsigned int)down); + if (atomic_cmpxchg(u.o, orig, val) != orig) goto repeat; } diff --git a/fs/eventpoll.c b/fs/eventpoll.c index 6307c1d883e0..ae1d32344f7a 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c @@ -1991,9 +1991,11 @@ static int ep_loop_check_proc(void *priv, void *cookie, int call_nests) * not already there, and calling reverse_path_check() * during ep_insert(). */ - if (list_empty(&epi->ffd.file->f_tfile_llink)) - list_add(&epi->ffd.file->f_tfile_llink, - &tfile_check_list); + if (list_empty(&epi->ffd.file->f_tfile_llink)) { + if (get_file_rcu(epi->ffd.file)) + list_add(&epi->ffd.file->f_tfile_llink, + &tfile_check_list); + } } } mutex_unlock(&ep->mtx); @@ -2037,6 +2039,7 @@ static void clear_tfile_check_list(void) file = list_first_entry(&tfile_check_list, struct file, f_tfile_llink); list_del_init(&file->f_tfile_llink); + fput(file); } INIT_LIST_HEAD(&tfile_check_list); } @@ -2192,13 +2195,13 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd, mutex_lock(&epmutex); if (is_file_epoll(tf.file)) { error = -ELOOP; - if (ep_loop_check(ep, tf.file) != 0) { - clear_tfile_check_list(); + if (ep_loop_check(ep, tf.file) != 0) goto error_tgt_fput; - } - } else + } else { + get_file(tf.file); list_add(&tf.file->f_tfile_llink, &tfile_check_list); + } mutex_lock_nested(&ep->mtx, 0); if (is_file_epoll(tf.file)) { tep = tf.file->private_data; @@ -2222,8 +2225,6 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd, error = ep_insert(ep, &epds, tf.file, fd, full_check); } else error = -EEXIST; - if (full_check) - clear_tfile_check_list(); break; case EPOLL_CTL_DEL: if (epi) @@ -2246,8 +2247,10 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd, mutex_unlock(&ep->mtx); error_tgt_fput: - if (full_check) + if (full_check) { + clear_tfile_check_list(); mutex_unlock(&epmutex); + } fdput(tf); error_fput: diff --git a/fs/ext2/file.c b/fs/ext2/file.c index 39c4772e96c9..d73103cdda21 100644 --- a/fs/ext2/file.c +++ b/fs/ext2/file.c @@ -93,8 +93,10 @@ static vm_fault_t ext2_dax_fault(struct vm_fault *vmf) struct inode *inode = file_inode(vmf->vma->vm_file); struct ext2_inode_info *ei = EXT2_I(inode); vm_fault_t ret; + bool write = (vmf->flags & FAULT_FLAG_WRITE) && + (vmf->vma->vm_flags & VM_SHARED); - if (vmf->flags & FAULT_FLAG_WRITE) { + if (write) { sb_start_pagefault(inode->i_sb); file_update_time(vmf->vma->vm_file); } @@ -103,7 +105,7 @@ static vm_fault_t ext2_dax_fault(struct vm_fault *vmf) ret = dax_iomap_fault(vmf, PE_SIZE_PTE, NULL, NULL, &ext2_iomap_ops); up_read(&ei->dax_sem); - if (vmf->flags & FAULT_FLAG_WRITE) + if (write) sb_end_pagefault(inode->i_sb); return ret; } diff --git a/fs/ext2/ialloc.c b/fs/ext2/ialloc.c index fda7d3f5b4be..432c3febea6d 100644 --- a/fs/ext2/ialloc.c +++ b/fs/ext2/ialloc.c @@ -80,6 +80,7 @@ static void ext2_release_inode(struct super_block *sb, int group, int dir) if (dir) le16_add_cpu(&desc->bg_used_dirs_count, -1); spin_unlock(sb_bgl_lock(EXT2_SB(sb), group)); + percpu_counter_inc(&EXT2_SB(sb)->s_freeinodes_counter); if (dir) percpu_counter_dec(&EXT2_SB(sb)->s_dirs_counter); mark_buffer_dirty(bh); @@ -528,7 +529,7 @@ got: goto fail; } - percpu_counter_add(&sbi->s_freeinodes_counter, -1); + percpu_counter_dec(&sbi->s_freeinodes_counter); if (S_ISDIR(mode)) percpu_counter_inc(&sbi->s_dirs_counter); diff --git a/fs/ext4/block_validity.c b/fs/ext4/block_validity.c index ff8e1205127e..97c56d061e61 
100644 --- a/fs/ext4/block_validity.c +++ b/fs/ext4/block_validity.c @@ -68,7 +68,7 @@ static int add_system_zone(struct ext4_system_blocks *system_blks, ext4_fsblk_t start_blk, unsigned int count) { - struct ext4_system_zone *new_entry = NULL, *entry; + struct ext4_system_zone *new_entry, *entry; struct rb_node **n = &system_blks->root.rb_node, *node; struct rb_node *parent = NULL, *new_node = NULL; @@ -79,30 +79,20 @@ static int add_system_zone(struct ext4_system_blocks *system_blks, n = &(*n)->rb_left; else if (start_blk >= (entry->start_blk + entry->count)) n = &(*n)->rb_right; - else { - if (start_blk + count > (entry->start_blk + - entry->count)) - entry->count = (start_blk + count - - entry->start_blk); - new_node = *n; - new_entry = rb_entry(new_node, struct ext4_system_zone, - node); - break; - } + else /* Unexpected overlap of system zones. */ + return -EFSCORRUPTED; } - if (!new_entry) { - new_entry = kmem_cache_alloc(ext4_system_zone_cachep, - GFP_KERNEL); - if (!new_entry) - return -ENOMEM; - new_entry->start_blk = start_blk; - new_entry->count = count; - new_node = &new_entry->node; + new_entry = kmem_cache_alloc(ext4_system_zone_cachep, + GFP_KERNEL); + if (!new_entry) + return -ENOMEM; + new_entry->start_blk = start_blk; + new_entry->count = count; + new_node = &new_entry->node; - rb_link_node(new_node, parent, n); - rb_insert_color(new_node, &system_blks->root); - } + rb_link_node(new_node, parent, n); + rb_insert_color(new_node, &system_blks->root); /* Can we merge to the left? */ node = rb_prev(new_node); @@ -260,14 +250,6 @@ int ext4_setup_system_zone(struct super_block *sb) int flex_size = ext4_flex_bg_size(sbi); int ret; - if (!test_opt(sb, BLOCK_VALIDITY)) { - if (sbi->system_blks) - ext4_release_system_zone(sb); - return 0; - } - if (sbi->system_blks) - return 0; - system_blks = kzalloc(sizeof(*system_blks), GFP_KERNEL); if (!system_blks) return -ENOMEM; diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c index 2743c6f8a457..0589e914663f 100644 --- a/fs/ext4/dir.c +++ b/fs/ext4/dir.c @@ -677,6 +677,7 @@ static int ext4_d_compare(const struct dentry *dentry, unsigned int len, struct qstr qstr = {.name = str, .len = len }; const struct dentry *parent = READ_ONCE(dentry->d_parent); const struct inode *inode = READ_ONCE(parent->d_inode); + char strbuf[DNAME_INLINE_LEN]; if (!inode || !IS_CASEFOLDED(inode) || !EXT4_SB(inode->i_sb)->s_encoding) { @@ -685,6 +686,21 @@ static int ext4_d_compare(const struct dentry *dentry, unsigned int len, return memcmp(str, name->name, len); } + /* + * If the dentry name is stored in-line, then it may be concurrently + * modified by a rename. If this happens, the VFS will eventually retry + * the lookup, so it doesn't matter what ->d_compare() returns. + * However, it's unsafe to call utf8_strncasecmp() with an unstable + * string. Therefore, we have to copy the name into a temporary buffer. 
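+ * Longer names live in a separately allocated buffer that is never
+ * modified in place and is only freed after an RCU grace period, so
+ * they are stable here and can be used directly.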
+ */ + if (len <= DNAME_INLINE_LEN - 1) { + memcpy(strbuf, str, len); + strbuf[len] = 0; + qstr.name = strbuf; + /* prevent compiler from optimizing out the temporary buffer */ + barrier(); + } + return ext4_ci_compare(inode, name, &qstr, false); } diff --git a/fs/ext4/ext4_extents.h b/fs/ext4/ext4_extents.h index 98bd0e9ee7df..ca78fd709845 100644 --- a/fs/ext4/ext4_extents.h +++ b/fs/ext4/ext4_extents.h @@ -170,10 +170,13 @@ struct partial_cluster { (EXT_FIRST_EXTENT((__hdr__)) + le16_to_cpu((__hdr__)->eh_entries) - 1) #define EXT_LAST_INDEX(__hdr__) \ (EXT_FIRST_INDEX((__hdr__)) + le16_to_cpu((__hdr__)->eh_entries) - 1) -#define EXT_MAX_EXTENT(__hdr__) \ - (EXT_FIRST_EXTENT((__hdr__)) + le16_to_cpu((__hdr__)->eh_max) - 1) +#define EXT_MAX_EXTENT(__hdr__) \ + ((le16_to_cpu((__hdr__)->eh_max)) ? \ + ((EXT_FIRST_EXTENT((__hdr__)) + le16_to_cpu((__hdr__)->eh_max) - 1)) \ + : 0) #define EXT_MAX_INDEX(__hdr__) \ - (EXT_FIRST_INDEX((__hdr__)) + le16_to_cpu((__hdr__)->eh_max) - 1) + ((le16_to_cpu((__hdr__)->eh_max)) ? \ + ((EXT_FIRST_INDEX((__hdr__)) + le16_to_cpu((__hdr__)->eh_max) - 1)) : 0) static inline struct ext4_extent_header *ext_inode_hdr(struct inode *inode) { diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index 9bd44588eb77..3193f0b4a02d 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c @@ -3010,7 +3010,7 @@ again: * in use to avoid freeing it when removing blocks. */ if (sbi->s_cluster_ratio > 1) { - pblk = ext4_ext_pblock(ex) + end - ee_block + 2; + pblk = ext4_ext_pblock(ex) + end - ee_block + 1; partial.pclu = EXT4_B2C(sbi, pblk); partial.state = nofree; } diff --git a/fs/ext4/fsync.c b/fs/ext4/fsync.c index 5508baa11bb6..8a28d47bd502 100644 --- a/fs/ext4/fsync.c +++ b/fs/ext4/fsync.c @@ -44,30 +44,28 @@ */ static int ext4_sync_parent(struct inode *inode) { - struct dentry *dentry = NULL; - struct inode *next; + struct dentry *dentry, *next; int ret = 0; if (!ext4_test_inode_state(inode, EXT4_STATE_NEWENTRY)) return 0; - inode = igrab(inode); + dentry = d_find_any_alias(inode); + if (!dentry) + return 0; while (ext4_test_inode_state(inode, EXT4_STATE_NEWENTRY)) { ext4_clear_inode_state(inode, EXT4_STATE_NEWENTRY); - dentry = d_find_any_alias(inode); - if (!dentry) - break; - next = igrab(d_inode(dentry->d_parent)); + + next = dget_parent(dentry); dput(dentry); - if (!next) - break; - iput(inode); - inode = next; + dentry = next; + inode = dentry->d_inode; + /* * The directory inode may have gone through rmdir by now. But * the inode itself and its blocks are still allocated (we hold - * a reference to the inode so it didn't go through - * ext4_evict_inode()) and so we are safe to flush metadata - * blocks and the inode. + * a reference to the inode via its dentry), so it didn't go + * through ext4_evict_inode()) and so we are safe to flush + * metadata blocks and the inode. 
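+ * dget_parent() keeps the parent dentry (and therefore its inode)
+ * pinned for the next iteration, so no igrab()/iput() dance is needed
+ * while walking up the tree.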
*/ ret = sync_mapping_buffers(inode->i_mapping); if (ret) @@ -76,7 +74,7 @@ static int ext4_sync_parent(struct inode *inode) if (ret) break; } - iput(inode); + dput(dentry); return ret; } diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 7e0c77de551b..a284d99a1ee5 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -3836,6 +3836,11 @@ static ssize_t ext4_direct_IO_read(struct kiocb *iocb, struct iov_iter *iter) struct inode *inode = mapping->host; size_t count = iov_iter_count(iter); ssize_t ret; + loff_t offset = iocb->ki_pos; + loff_t size = i_size_read(inode); + + if (offset >= size) + return 0; /* * Shared inode_lock is enough for us - it protects against concurrent diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c index a564d0289a70..36a81b57012a 100644 --- a/fs/ext4/namei.c +++ b/fs/ext4/namei.c @@ -1392,8 +1392,8 @@ int ext4_search_dir(struct buffer_head *bh, char *search_buf, int buf_size, ext4_match(dir, fname, de)) { /* found a match - just to be sure, do * a full check */ - if (ext4_check_dir_entry(dir, NULL, de, bh, bh->b_data, - bh->b_size, offset)) + if (ext4_check_dir_entry(dir, NULL, de, bh, search_buf, + buf_size, offset)) return -1; *res_dir = de; return 1; @@ -1852,7 +1852,7 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir, blocksize, hinfo, map); map -= count; dx_sort_map(map, count); - /* Split the existing block in the middle, size-wise */ + /* Ensure that neither split block is over half full */ size = 0; move = 0; for (i = count-1; i >= 0; i--) { @@ -1862,8 +1862,18 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir, size += map[i].size; move++; } - /* map index at which we will split */ - split = count - move; + /* + * map index at which we will split + * + * If the sum of active entries didn't exceed half the block size, just + * split it in half by count; each resulting block will have at least + * half the space free. 
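+ * Otherwise the size scan ran all the way to the front of the map,
+ * which can happen when most of the block is space freed by deleted
+ * entries; a size-based split point would then be degenerate, so
+ * split evenly by count instead.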
+ */ + if (i > 0) + split = count - move; + else + split = count/2; + hash2 = map[split].hash; continued = hash2 == map[split - 1].hash; dxtrace(printk(KERN_INFO "Split block %lu at %x, %i/%i\n", @@ -2462,7 +2472,7 @@ int ext4_generic_delete_entry(handle_t *handle, de = (struct ext4_dir_entry_2 *)entry_buf; while (i < buf_size - csum_size) { if (ext4_check_dir_entry(dir, NULL, de, bh, - bh->b_data, bh->b_size, i)) + entry_buf, buf_size, i)) return -EFSCORRUPTED; if (de == de_del) { if (pde) diff --git a/fs/ext4/super.c b/fs/ext4/super.c index d3500eaf900e..4aae7e3e89a1 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -66,10 +66,10 @@ static int ext4_load_journal(struct super_block *, struct ext4_super_block *, unsigned long journal_devnum); static int ext4_show_options(struct seq_file *seq, struct dentry *root); static int ext4_commit_super(struct super_block *sb, int sync); -static void ext4_mark_recovery_complete(struct super_block *sb, +static int ext4_mark_recovery_complete(struct super_block *sb, struct ext4_super_block *es); -static void ext4_clear_journal_err(struct super_block *sb, - struct ext4_super_block *es); +static int ext4_clear_journal_err(struct super_block *sb, + struct ext4_super_block *es); static int ext4_sync_fs(struct super_block *sb, int wait); static int ext4_remount(struct super_block *sb, int *flags, char *data); static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf); @@ -2034,6 +2034,16 @@ static int handle_mount_opt(struct super_block *sb, char *opt, int token, #endif } else if (token == Opt_dax) { #ifdef CONFIG_FS_DAX + if (is_remount && test_opt(sb, DAX)) { + ext4_msg(sb, KERN_ERR, "can't mount with " + "both data=journal and dax"); + return -1; + } + if (is_remount && !(sbi->s_mount_opt & EXT4_MOUNT_DAX)) { + ext4_msg(sb, KERN_ERR, "can't change " + "dax mount option while remounting"); + return -1; + } ext4_msg(sb, KERN_WARNING, "DAX enabled. 
Warning: EXPERIMENTAL, use at your own risk"); sbi->s_mount_opt |= m->mount_opt; @@ -2294,6 +2304,7 @@ static int ext4_setup_super(struct super_block *sb, struct ext4_super_block *es, ext4_msg(sb, KERN_ERR, "revision level too high, " "forcing read-only mode"); err = -EROFS; + goto done; } if (read_only) goto done; @@ -4552,11 +4563,13 @@ no_journal: ext4_set_resv_clusters(sb); - err = ext4_setup_system_zone(sb); - if (err) { - ext4_msg(sb, KERN_ERR, "failed to initialize system " - "zone (%d)", err); - goto failed_mount4a; + if (test_opt(sb, BLOCK_VALIDITY)) { + err = ext4_setup_system_zone(sb); + if (err) { + ext4_msg(sb, KERN_ERR, "failed to initialize system " + "zone (%d)", err); + goto failed_mount4a; + } } ext4_ext_init(sb); @@ -4624,7 +4637,9 @@ no_journal: EXT4_SB(sb)->s_mount_state &= ~EXT4_ORPHAN_FS; if (needs_recovery) { ext4_msg(sb, KERN_INFO, "recovery complete"); - ext4_mark_recovery_complete(sb, es); + err = ext4_mark_recovery_complete(sb, es); + if (err) + goto failed_mount8; } if (EXT4_SB(sb)->s_journal) { if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) @@ -4667,10 +4682,8 @@ cantfind_ext4: ext4_msg(sb, KERN_ERR, "VFS: Can't find ext4 filesystem"); goto failed_mount; -#ifdef CONFIG_QUOTA failed_mount8: ext4_unregister_sysfs(sb); -#endif failed_mount7: ext4_unregister_li_request(sb); failed_mount6: @@ -4809,7 +4822,8 @@ static journal_t *ext4_get_journal(struct super_block *sb, struct inode *journal_inode; journal_t *journal; - BUG_ON(!ext4_has_feature_journal(sb)); + if (WARN_ON_ONCE(!ext4_has_feature_journal(sb))) + return NULL; journal_inode = ext4_get_journal_inode(sb, journal_inum); if (!journal_inode) @@ -4839,7 +4853,8 @@ static journal_t *ext4_get_dev_journal(struct super_block *sb, struct ext4_super_block *es; struct block_device *bdev; - BUG_ON(!ext4_has_feature_journal(sb)); + if (WARN_ON_ONCE(!ext4_has_feature_journal(sb))) + return NULL; bdev = ext4_blkdev_get(j_dev, sb); if (bdev == NULL) @@ -4930,8 +4945,10 @@ static int ext4_load_journal(struct super_block *sb, dev_t journal_dev; int err = 0; int really_read_only; + int journal_dev_ro; - BUG_ON(!ext4_has_feature_journal(sb)); + if (WARN_ON_ONCE(!ext4_has_feature_journal(sb))) + return -EFSCORRUPTED; if (journal_devnum && journal_devnum != le32_to_cpu(es->s_journal_dev)) { @@ -4941,7 +4958,31 @@ static int ext4_load_journal(struct super_block *sb, } else journal_dev = new_decode_dev(le32_to_cpu(es->s_journal_dev)); - really_read_only = bdev_read_only(sb->s_bdev); + if (journal_inum && journal_dev) { + ext4_msg(sb, KERN_ERR, + "filesystem has both journal inode and journal device!"); + return -EINVAL; + } + + if (journal_inum) { + journal = ext4_get_journal(sb, journal_inum); + if (!journal) + return -EINVAL; + } else { + journal = ext4_get_dev_journal(sb, journal_dev); + if (!journal) + return -EINVAL; + } + + journal_dev_ro = bdev_read_only(journal->j_dev); + really_read_only = bdev_read_only(sb->s_bdev) | journal_dev_ro; + + if (journal_dev_ro && !sb_rdonly(sb)) { + ext4_msg(sb, KERN_ERR, + "journal device read-only, try mounting with '-o ro'"); + err = -EROFS; + goto err_out; + } /* * Are we loading a blank journal or performing recovery after a @@ -4956,27 +4997,14 @@ static int ext4_load_journal(struct super_block *sb, ext4_msg(sb, KERN_ERR, "write access " "unavailable, cannot proceed " "(try mounting with noload)"); - return -EROFS; + err = -EROFS; + goto err_out; } ext4_msg(sb, KERN_INFO, "write access will " "be enabled during recovery"); } } - if (journal_inum && journal_dev) { - 
ext4_msg(sb, KERN_ERR, "filesystem has both journal " - "and inode journals!"); - return -EINVAL; - } - - if (journal_inum) { - if (!(journal = ext4_get_journal(sb, journal_inum))) - return -EINVAL; - } else { - if (!(journal = ext4_get_dev_journal(sb, journal_dev))) - return -EINVAL; - } - if (!(journal->j_flags & JBD2_BARRIER)) ext4_msg(sb, KERN_INFO, "barriers disabled"); @@ -4996,12 +5024,16 @@ static int ext4_load_journal(struct super_block *sb, if (err) { ext4_msg(sb, KERN_ERR, "error loading journal"); - jbd2_journal_destroy(journal); - return err; + goto err_out; } EXT4_SB(sb)->s_journal = journal; - ext4_clear_journal_err(sb, es); + err = ext4_clear_journal_err(sb, es); + if (err) { + EXT4_SB(sb)->s_journal = NULL; + jbd2_journal_destroy(journal); + return err; + } if (!really_read_only && journal_devnum && journal_devnum != le32_to_cpu(es->s_journal_dev)) { @@ -5012,6 +5044,10 @@ static int ext4_load_journal(struct super_block *sb, } return 0; + +err_out: + jbd2_journal_destroy(journal); + return err; } static int ext4_commit_super(struct super_block *sb, int sync) @@ -5023,13 +5059,6 @@ static int ext4_commit_super(struct super_block *sb, int sync) if (!sbh || block_device_ejected(sb)) return error; - /* - * The superblock bh should be mapped, but it might not be if the - * device was hot-removed. Not much we can do but fail the I/O. - */ - if (!buffer_mapped(sbh)) - return error; - /* * If the file system is mounted read-only, don't update the * superblock write time. This avoids updating the superblock @@ -5097,26 +5126,32 @@ static int ext4_commit_super(struct super_block *sb, int sync) * remounting) the filesystem readonly, then we will end up with a * consistent fs on disk. Record that fact. */ -static void ext4_mark_recovery_complete(struct super_block *sb, - struct ext4_super_block *es) +static int ext4_mark_recovery_complete(struct super_block *sb, + struct ext4_super_block *es) { + int err; journal_t *journal = EXT4_SB(sb)->s_journal; if (!ext4_has_feature_journal(sb)) { - BUG_ON(journal != NULL); - return; + if (journal != NULL) { + ext4_error(sb, "Journal got removed while the fs was " + "mounted!"); + return -EFSCORRUPTED; + } + return 0; } jbd2_journal_lock_updates(journal); - if (jbd2_journal_flush(journal) < 0) + err = jbd2_journal_flush(journal); + if (err < 0) goto out; if (ext4_has_feature_journal_needs_recovery(sb) && sb_rdonly(sb)) { ext4_clear_feature_journal_needs_recovery(sb); ext4_commit_super(sb, 1); } - out: jbd2_journal_unlock_updates(journal); + return err; } /* @@ -5124,14 +5159,17 @@ out: * has recorded an error from a previous lifetime, move that error to the * main filesystem now. 
*/ -static void ext4_clear_journal_err(struct super_block *sb, +static int ext4_clear_journal_err(struct super_block *sb, struct ext4_super_block *es) { journal_t *journal; int j_errno; const char *errstr; - BUG_ON(!ext4_has_feature_journal(sb)); + if (!ext4_has_feature_journal(sb)) { + ext4_error(sb, "Journal got removed while the fs was mounted!"); + return -EFSCORRUPTED; + } journal = EXT4_SB(sb)->s_journal; @@ -5156,6 +5194,7 @@ static void ext4_clear_journal_err(struct super_block *sb, jbd2_journal_clear_err(journal); jbd2_journal_update_sb_errno(journal); } + return 0; } /* @@ -5298,7 +5337,7 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data) { struct ext4_super_block *es; struct ext4_sb_info *sbi = EXT4_SB(sb); - unsigned long old_sb_flags; + unsigned long old_sb_flags, vfs_flags; struct ext4_mount_options old_opts; int enable_quota = 0; ext4_group_t g; @@ -5341,6 +5380,14 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data) if (sbi->s_journal && sbi->s_journal->j_task->io_context) journal_ioprio = sbi->s_journal->j_task->io_context->ioprio; + /* + * Some options can be enabled by ext4 and/or by VFS mount flag + * either way we need to make sure it matches in both *flags and + * s_flags. Copy those selected flags from *flags to s_flags + */ + vfs_flags = SB_LAZYTIME | SB_I_VERSION; + sb->s_flags = (sb->s_flags & ~vfs_flags) | (*flags & vfs_flags); + if (!parse_options(data, sb, NULL, &journal_ioprio, 1)) { err = -EINVAL; goto restore_opts; @@ -5366,12 +5413,6 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data) err = -EINVAL; goto restore_opts; } - if (test_opt(sb, DAX)) { - ext4_msg(sb, KERN_ERR, "can't mount with " - "both data=journal and dax"); - err = -EINVAL; - goto restore_opts; - } } else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA) { if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) { ext4_msg(sb, KERN_ERR, "can't mount with " @@ -5387,12 +5428,6 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data) goto restore_opts; } - if ((sbi->s_mount_opt ^ old_opts.s_mount_opt) & EXT4_MOUNT_DAX) { - ext4_msg(sb, KERN_WARNING, "warning: refusing change of " - "dax flag with busy inodes while remounting"); - sbi->s_mount_opt ^= EXT4_MOUNT_DAX; - } - if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED) ext4_abort(sb, "Abort forced by user"); @@ -5406,9 +5441,6 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data) set_task_ioprio(sbi->s_journal->j_task, journal_ioprio); } - if (*flags & SB_LAZYTIME) - sb->s_flags |= SB_LAZYTIME; - if ((bool)(*flags & SB_RDONLY) != sb_rdonly(sb)) { if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED) { err = -EROFS; @@ -5438,8 +5470,13 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data) (sbi->s_mount_state & EXT4_VALID_FS)) es->s_state = cpu_to_le16(sbi->s_mount_state); - if (sbi->s_journal) + if (sbi->s_journal) { + /* + * We let remount-ro finish even if marking fs + * as clean failed... + */ ext4_mark_recovery_complete(sb, es); + } if (sbi->s_mmp_tsk) kthread_stop(sbi->s_mmp_tsk); } else { @@ -5487,8 +5524,11 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data) * been changed by e2fsck since we originally mounted * the partition.) 
*/ - if (sbi->s_journal) - ext4_clear_journal_err(sb, es); + if (sbi->s_journal) { + err = ext4_clear_journal_err(sb, es); + if (err) + goto restore_opts; + } sbi->s_mount_state = le16_to_cpu(es->s_state); err = ext4_setup_super(sb, es, 0); @@ -5518,7 +5558,17 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data) ext4_register_li_request(sb, first_not_zeroed); } - ext4_setup_system_zone(sb); + /* + * Handle creation of system zone data early because it can fail. + * Releasing of existing data is done when we are sure remount will + * succeed. + */ + if (test_opt(sb, BLOCK_VALIDITY) && !sbi->system_blks) { + err = ext4_setup_system_zone(sb); + if (err) + goto restore_opts; + } + if (sbi->s_journal == NULL && !(old_sb_flags & SB_RDONLY)) { err = ext4_commit_super(sb, 1); if (err) @@ -5539,8 +5589,16 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data) } } #endif + if (!test_opt(sb, BLOCK_VALIDITY) && sbi->system_blks) + ext4_release_system_zone(sb); + + /* + * Some options can be enabled by ext4 and/or by VFS mount flag + * either way we need to make sure it matches in both *flags and + * s_flags. Copy those selected flags from s_flags to *flags + */ + *flags = (*flags & ~vfs_flags) | (sb->s_flags & vfs_flags); - *flags = (*flags & ~SB_LAZYTIME) | (sb->s_flags & SB_LAZYTIME); ext4_msg(sb, KERN_INFO, "re-mounted. Opts: %s", orig_data); kfree(orig_data); return 0; @@ -5554,6 +5612,8 @@ restore_opts: sbi->s_commit_interval = old_opts.s_commit_interval; sbi->s_min_batch_time = old_opts.s_min_batch_time; sbi->s_max_batch_time = old_opts.s_max_batch_time; + if (!test_opt(sb, BLOCK_VALIDITY) && sbi->system_blks) + ext4_release_system_zone(sb); #ifdef CONFIG_QUOTA sbi->s_jquota_fmt = old_opts.s_jquota_fmt; for (i = 0; i < EXT4_MAXQUOTAS; i++) { diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c index 491f9ee4040e..894a61010ae9 100644 --- a/fs/ext4/xattr.c +++ b/fs/ext4/xattr.c @@ -1820,8 +1820,11 @@ ext4_xattr_block_find(struct inode *inode, struct ext4_xattr_info *i, if (EXT4_I(inode)->i_file_acl) { /* The inode already has an extended attribute block. 
*/ bs->bh = ext4_sb_bread(sb, EXT4_I(inode)->i_file_acl, REQ_PRIO); - if (IS_ERR(bs->bh)) - return PTR_ERR(bs->bh); + if (IS_ERR(bs->bh)) { + error = PTR_ERR(bs->bh); + bs->bh = NULL; + return error; + } ea_bdebug(bs->bh, "b_count=%d, refcount=%d", atomic_read(&(bs->bh->b_count)), le32_to_cpu(BHDR(bs->bh)->h_refcount)); diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c index a28ffecc0f95..bbd07fe8a492 100644 --- a/fs/f2fs/checkpoint.c +++ b/fs/f2fs/checkpoint.c @@ -892,8 +892,8 @@ int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi) int i; int err; - sbi->ckpt = f2fs_kzalloc(sbi, array_size(blk_size, cp_blks), - GFP_KERNEL); + sbi->ckpt = f2fs_kvzalloc(sbi, array_size(blk_size, cp_blks), + GFP_KERNEL); if (!sbi->ckpt) return -ENOMEM; /* diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index ec9a1f9ce2dd..68be334afc28 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c @@ -2753,6 +2753,9 @@ static int check_direct_IO(struct inode *inode, struct iov_iter *iter, unsigned long align = offset | iov_iter_alignment(iter); struct block_device *bdev = inode->i_sb->s_bdev; + if (iov_iter_rw(iter) == READ && offset >= i_size_read(inode)) + return 1; + if (align & blocksize_mask) { if (bdev) blkbits = blksize_bits(bdev_logical_block_size(bdev)); diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c index 84280ad3786c..e9af46dc06f7 100644 --- a/fs/f2fs/dir.c +++ b/fs/f2fs/dir.c @@ -107,36 +107,28 @@ static struct f2fs_dir_entry *find_in_block(struct inode *dir, /* * Test whether a case-insensitive directory entry matches the filename * being searched for. - * - * Returns: 0 if the directory entry matches, more than 0 if it - * doesn't match or less than zero on error. */ -int f2fs_ci_compare(const struct inode *parent, const struct qstr *name, - const struct qstr *entry, bool quick) +static bool f2fs_match_ci_name(const struct inode *dir, const struct qstr *name, + const struct qstr *entry, bool quick) { - const struct f2fs_sb_info *sbi = F2FS_SB(parent->i_sb); + const struct f2fs_sb_info *sbi = F2FS_SB(dir->i_sb); const struct unicode_map *um = sbi->s_encoding; - int ret; + int res; if (quick) - ret = utf8_strncasecmp_folded(um, name, entry); + res = utf8_strncasecmp_folded(um, name, entry); else - ret = utf8_strncasecmp(um, name, entry); - - if (ret < 0) { - /* Handle invalid character sequence as either an error - * or as an opaque byte sequence. + res = utf8_strncasecmp(um, name, entry); + if (res < 0) { + /* + * In strict mode, ignore invalid names. In non-strict mode, + * fall back to treating them as opaque byte sequences. 
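+ * Returning false (no match) is enough now that this helper reports
+ * a boolean result: an undecodable name simply never matches in
+ * strict mode.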
*/ - if (f2fs_has_strict_mode(sbi)) - return -EINVAL; - - if (name->len != entry->len) - return 1; - - return !!memcmp(name->name, entry->name, name->len); + if (f2fs_has_strict_mode(sbi) || name->len != entry->len) + return false; + return !memcmp(name->name, entry->name, name->len); } - - return ret; + return res == 0; } static void f2fs_fname_setup_ci_filename(struct inode *dir, @@ -188,10 +180,10 @@ static inline bool f2fs_match_name(struct f2fs_dentry_ptr *d, if (cf_str->name) { struct qstr cf = {.name = cf_str->name, .len = cf_str->len}; - return !f2fs_ci_compare(parent, &cf, &entry, true); + return f2fs_match_ci_name(parent, &cf, &entry, true); } - return !f2fs_ci_compare(parent, fname->usr_fname, &entry, - false); + return f2fs_match_ci_name(parent, fname->usr_fname, &entry, + false); } #endif if (fscrypt_match_name(fname, d->filename[bit_pos], @@ -1067,17 +1059,41 @@ const struct file_operations f2fs_dir_operations = { static int f2fs_d_compare(const struct dentry *dentry, unsigned int len, const char *str, const struct qstr *name) { - struct qstr qstr = {.name = str, .len = len }; const struct dentry *parent = READ_ONCE(dentry->d_parent); - const struct inode *inode = READ_ONCE(parent->d_inode); + const struct inode *dir = READ_ONCE(parent->d_inode); + const struct f2fs_sb_info *sbi = F2FS_SB(dentry->d_sb); + struct qstr entry = QSTR_INIT(str, len); + char strbuf[DNAME_INLINE_LEN]; + int res; - if (!inode || !IS_CASEFOLDED(inode)) { - if (len != name->len) - return -1; - return memcmp(str, name->name, len); + if (!dir || !IS_CASEFOLDED(dir)) + goto fallback; + + /* + * If the dentry name is stored in-line, then it may be concurrently + * modified by a rename. If this happens, the VFS will eventually retry + * the lookup, so it doesn't matter what ->d_compare() returns. + * However, it's unsafe to call utf8_strncasecmp() with an unstable + * string. Therefore, we have to copy the name into a temporary buffer. 
+ */ + if (len <= DNAME_INLINE_LEN - 1) { + memcpy(strbuf, str, len); + strbuf[len] = 0; + entry.name = strbuf; + /* prevent compiler from optimizing out the temporary buffer */ + barrier(); } - return f2fs_ci_compare(inode, name, &qstr, false); + res = utf8_strncasecmp(sbi->s_encoding, name, &entry); + if (res >= 0) + return res; + + if (f2fs_has_strict_mode(sbi)) + return -EINVAL; +fallback: + if (len != name->len) + return 1; + return !!memcmp(str, name->name, len); } static int f2fs_d_hash(const struct dentry *dentry, struct qstr *str) diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index 3edde3d6d089..b3b7e63394be 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -138,6 +138,7 @@ struct f2fs_mount_info { int alloc_mode; /* segment allocation policy */ int fsync_mode; /* fsync policy */ bool test_dummy_encryption; /* test dummy encryption */ + block_t unusable_cap_perc; /* percentage for cap */ block_t unusable_cap; /* Amount of space allowed to be * unusable when disabling checkpoint */ @@ -2789,18 +2790,12 @@ static inline bool f2fs_may_extent_tree(struct inode *inode) static inline void *f2fs_kmalloc(struct f2fs_sb_info *sbi, size_t size, gfp_t flags) { - void *ret; - if (time_to_inject(sbi, FAULT_KMALLOC)) { f2fs_show_injection_info(FAULT_KMALLOC); return NULL; } - ret = kmalloc(size, flags); - if (ret) - return ret; - - return kvmalloc(size, flags); + return kmalloc(size, flags); } static inline void *f2fs_kzalloc(struct f2fs_sb_info *sbi, @@ -2959,11 +2954,6 @@ int f2fs_update_extension_list(struct f2fs_sb_info *sbi, const char *name, bool hot, bool set); struct dentry *f2fs_get_parent(struct dentry *child); -extern int f2fs_ci_compare(const struct inode *parent, - const struct qstr *name, - const struct qstr *entry, - bool quick); - /* * dir.c */ @@ -3071,7 +3061,7 @@ bool f2fs_alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid); void f2fs_alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid); void f2fs_alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid); int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink); -void f2fs_recover_inline_xattr(struct inode *inode, struct page *page); +int f2fs_recover_inline_xattr(struct inode *inode, struct page *page); int f2fs_recover_xattr_data(struct inode *inode, struct page *page); int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct page *page); int f2fs_restore_node_summary(struct f2fs_sb_info *sbi, @@ -3497,7 +3487,7 @@ int f2fs_read_inline_data(struct inode *inode, struct page *page); int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page); int f2fs_convert_inline_inode(struct inode *inode); int f2fs_write_inline_data(struct inode *inode, struct page *page); -bool f2fs_recover_inline_data(struct inode *inode, struct page *npage); +int f2fs_recover_inline_data(struct inode *inode, struct page *npage); struct f2fs_dir_entry *f2fs_find_in_inline_dir(struct inode *dir, struct fscrypt_name *fname, struct page **res_page); int f2fs_make_empty_inline_dir(struct inode *inode, struct inode *parent, diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c index c3a9da79ac99..5d94abe467a4 100644 --- a/fs/f2fs/file.c +++ b/fs/f2fs/file.c @@ -2056,8 +2056,15 @@ static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg) if (in != F2FS_GOING_DOWN_FULLSYNC) { ret = mnt_want_write_file(filp); - if (ret) + if (ret) { + if (ret == -EROFS) { + ret = 0; + f2fs_stop_checkpoint(sbi, false); + set_sbi_flag(sbi, SBI_IS_SHUTDOWN); + trace_f2fs_shutdown(sbi, in, ret); + } return ret; + } } switch (in) { diff --git a/fs/f2fs/inline.c 
b/fs/f2fs/inline.c index 896db0416f0e..183388393c6a 100644 --- a/fs/f2fs/inline.c +++ b/fs/f2fs/inline.c @@ -252,7 +252,7 @@ int f2fs_write_inline_data(struct inode *inode, struct page *page) return 0; } -bool f2fs_recover_inline_data(struct inode *inode, struct page *npage) +int f2fs_recover_inline_data(struct inode *inode, struct page *npage) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); struct f2fs_inode *ri = NULL; @@ -274,7 +274,8 @@ bool f2fs_recover_inline_data(struct inode *inode, struct page *npage) ri && (ri->i_inline & F2FS_INLINE_DATA)) { process_inline: ipage = f2fs_get_node_page(sbi, inode->i_ino); - f2fs_bug_on(sbi, IS_ERR(ipage)); + if (IS_ERR(ipage)) + return PTR_ERR(ipage); f2fs_wait_on_page_writeback(ipage, NODE, true, true); @@ -287,21 +288,25 @@ process_inline: set_page_dirty(ipage); f2fs_put_page(ipage, 1); - return true; + return 1; } if (f2fs_has_inline_data(inode)) { ipage = f2fs_get_node_page(sbi, inode->i_ino); - f2fs_bug_on(sbi, IS_ERR(ipage)); + if (IS_ERR(ipage)) + return PTR_ERR(ipage); f2fs_truncate_inline_inode(inode, ipage, 0); clear_inode_flag(inode, FI_INLINE_DATA); f2fs_put_page(ipage, 1); } else if (ri && (ri->i_inline & F2FS_INLINE_DATA)) { - if (f2fs_truncate_blocks(inode, 0, false)) - return false; + int ret; + + ret = f2fs_truncate_blocks(inode, 0, false); + if (ret) + return ret; goto process_inline; } - return false; + return 0; } struct f2fs_dir_entry *f2fs_find_in_inline_dir(struct inode *dir, diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c index f14401a77d60..8a67b933ccd4 100644 --- a/fs/f2fs/node.c +++ b/fs/f2fs/node.c @@ -2315,6 +2315,9 @@ static int __f2fs_build_free_nids(struct f2fs_sb_info *sbi, if (unlikely(nid >= nm_i->max_nid)) nid = 0; + if (unlikely(nid % NAT_ENTRY_PER_BLOCK)) + nid = NAT_BLOCK_OFFSET(nid) * NAT_ENTRY_PER_BLOCK; + /* Enough entries */ if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK) return 0; @@ -2512,7 +2515,7 @@ int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink) return nr - nr_shrink; } -void f2fs_recover_inline_xattr(struct inode *inode, struct page *page) +int f2fs_recover_inline_xattr(struct inode *inode, struct page *page) { void *src_addr, *dst_addr; size_t inline_size; @@ -2520,7 +2523,8 @@ void f2fs_recover_inline_xattr(struct inode *inode, struct page *page) struct f2fs_inode *ri; ipage = f2fs_get_node_page(F2FS_I_SB(inode), inode->i_ino); - f2fs_bug_on(F2FS_I_SB(inode), IS_ERR(ipage)); + if (IS_ERR(ipage)) + return PTR_ERR(ipage); ri = F2FS_INODE(page); if (ri->i_inline & F2FS_INLINE_XATTR) { @@ -2539,6 +2543,7 @@ void f2fs_recover_inline_xattr(struct inode *inode, struct page *page) update_inode: f2fs_update_inode(inode, ipage); f2fs_put_page(ipage, 1); + return 0; } int f2fs_recover_xattr_data(struct inode *inode, struct page *page) @@ -2933,7 +2938,7 @@ static int __get_nat_bitmaps(struct f2fs_sb_info *sbi) return 0; nm_i->nat_bits_blocks = F2FS_BLK_ALIGN((nat_bits_bytes << 1) + 8); - nm_i->nat_bits = f2fs_kzalloc(sbi, + nm_i->nat_bits = f2fs_kvzalloc(sbi, nm_i->nat_bits_blocks << F2FS_BLKSIZE_BITS, GFP_KERNEL); if (!nm_i->nat_bits) return -ENOMEM; @@ -3066,9 +3071,9 @@ static int init_free_nid_cache(struct f2fs_sb_info *sbi) int i; nm_i->free_nid_bitmap = - f2fs_kzalloc(sbi, array_size(sizeof(unsigned char *), - nm_i->nat_blocks), - GFP_KERNEL); + f2fs_kvzalloc(sbi, array_size(sizeof(unsigned char *), + nm_i->nat_blocks), + GFP_KERNEL); if (!nm_i->free_nid_bitmap) return -ENOMEM; diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c index 783773e4560d..5f230e981c48 100644 --- 
a/fs/f2fs/recovery.c +++ b/fs/f2fs/recovery.c @@ -514,7 +514,9 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode, /* step 1: recover xattr */ if (IS_INODE(page)) { - f2fs_recover_inline_xattr(inode, page); + err = f2fs_recover_inline_xattr(inode, page); + if (err) + goto out; } else if (f2fs_has_xattr_block(ofs_of_node(page))) { err = f2fs_recover_xattr_data(inode, page); if (!err) @@ -523,8 +525,12 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode, } /* step 2: recover inline data */ - if (f2fs_recover_inline_data(inode, page)) + err = f2fs_recover_inline_data(inode, page); + if (err) { + if (err == 1) + err = 0; goto out; + } /* step 3: recover data indices */ start = f2fs_start_bidx_of_node(ofs_of_node(page), inode); diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c index 5e1d4d9243a9..fa461db696e7 100644 --- a/fs/f2fs/super.c +++ b/fs/f2fs/super.c @@ -277,6 +277,22 @@ static inline void limit_reserve_root(struct f2fs_sb_info *sbi) F2FS_OPTION(sbi).s_resgid)); } +static inline void adjust_unusable_cap_perc(struct f2fs_sb_info *sbi) +{ + if (!F2FS_OPTION(sbi).unusable_cap_perc) + return; + + if (F2FS_OPTION(sbi).unusable_cap_perc == 100) + F2FS_OPTION(sbi).unusable_cap = sbi->user_block_count; + else + F2FS_OPTION(sbi).unusable_cap = (sbi->user_block_count / 100) * + F2FS_OPTION(sbi).unusable_cap_perc; + + f2fs_info(sbi, "Adjust unusable cap for checkpoint=disable = %u / %u%%", + F2FS_OPTION(sbi).unusable_cap, + F2FS_OPTION(sbi).unusable_cap_perc); +} + static void init_once(void *foo) { struct f2fs_inode_info *fi = (struct f2fs_inode_info *) foo; @@ -790,12 +806,7 @@ static int parse_options(struct super_block *sb, char *options) return -EINVAL; if (arg < 0 || arg > 100) return -EINVAL; - if (arg == 100) - F2FS_OPTION(sbi).unusable_cap = - sbi->user_block_count; - else - F2FS_OPTION(sbi).unusable_cap = - (sbi->user_block_count / 100) * arg; + F2FS_OPTION(sbi).unusable_cap_perc = arg; set_opt(sbi, DISABLE_CHECKPOINT); break; case Opt_checkpoint_disable_cap: @@ -1064,6 +1075,9 @@ static void f2fs_put_super(struct super_block *sb) int i; bool dropped; + /* unregister procfs/sysfs entries in advance to avoid race case */ + f2fs_unregister_sysfs(sbi); + f2fs_quota_off_umount(sb); /* prevent remaining shrinker jobs */ @@ -1127,8 +1141,6 @@ static void f2fs_put_super(struct super_block *sb) kvfree(sbi->ckpt); - f2fs_unregister_sysfs(sbi); - sb->s_fs_info = NULL; if (sbi->s_chksum_driver) crypto_free_shash(sbi->s_chksum_driver); @@ -1219,7 +1231,8 @@ static int f2fs_statfs_project(struct super_block *sb, limit >>= sb->s_blocksize_bits; if (limit && buf->f_blocks > limit) { - curblock = dquot->dq_dqb.dqb_curspace >> sb->s_blocksize_bits; + curblock = (dquot->dq_dqb.dqb_curspace + + dquot->dq_dqb.dqb_rsvspace) >> sb->s_blocksize_bits; buf->f_blocks = limit; buf->f_bfree = buf->f_bavail = (buf->f_blocks > curblock) ? @@ -1735,6 +1748,7 @@ skip: (test_opt(sbi, POSIX_ACL) ? 
SB_POSIXACL : 0); limit_reserve_root(sbi); + adjust_unusable_cap_perc(sbi); *flags = (*flags & ~SB_LAZYTIME) | (sb->s_flags & SB_LAZYTIME); return 0; restore_gc: @@ -2888,7 +2902,7 @@ static int init_blkz_info(struct f2fs_sb_info *sbi, int devi) if (nr_sectors & (bdev_zone_sectors(bdev) - 1)) FDEV(devi).nr_blkz++; - FDEV(devi).blkz_seq = f2fs_kzalloc(sbi, + FDEV(devi).blkz_seq = f2fs_kvzalloc(sbi, BITS_TO_LONGS(FDEV(devi).nr_blkz) * sizeof(unsigned long), GFP_KERNEL); @@ -3397,6 +3411,7 @@ try_onemore: sbi->reserved_blocks = 0; sbi->current_reserved_blocks = 0; limit_reserve_root(sbi); + adjust_unusable_cap_perc(sbi); for (i = 0; i < NR_INODE_TYPE; i++) { INIT_LIST_HEAD(&sbi->inode_list[i]); diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index 76ac9c7d32ec..5f6400ba82c0 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -42,7 +42,6 @@ struct wb_writeback_work { long nr_pages; struct super_block *sb; - unsigned long *older_than_this; enum writeback_sync_modes sync_mode; unsigned int tagged_writepages:1; unsigned int for_kupdate:1; @@ -144,7 +143,9 @@ static void inode_io_list_del_locked(struct inode *inode, struct bdi_writeback *wb) { assert_spin_locked(&wb->list_lock); + assert_spin_locked(&inode->i_lock); + inode->i_state &= ~I_SYNC_QUEUED; list_del_init(&inode->i_io_list); wb_io_lists_depopulated(wb); } @@ -1123,7 +1124,9 @@ void inode_io_list_del(struct inode *inode) struct bdi_writeback *wb; wb = inode_to_wb_and_lock_list(inode); + spin_lock(&inode->i_lock); inode_io_list_del_locked(inode, wb); + spin_unlock(&inode->i_lock); spin_unlock(&wb->list_lock); } @@ -1172,8 +1175,10 @@ void sb_clear_inode_writeback(struct inode *inode) * the case then the inode must have been redirtied while it was being written * out and we don't reset its dirtied_when. */ -static void redirty_tail(struct inode *inode, struct bdi_writeback *wb) +static void redirty_tail_locked(struct inode *inode, struct bdi_writeback *wb) { + assert_spin_locked(&inode->i_lock); + if (!list_empty(&wb->b_dirty)) { struct inode *tail; @@ -1182,6 +1187,14 @@ static void redirty_tail(struct inode *inode, struct bdi_writeback *wb) inode->dirtied_when = jiffies; } inode_io_list_move_locked(inode, wb, &wb->b_dirty); + inode->i_state &= ~I_SYNC_QUEUED; +} + +static void redirty_tail(struct inode *inode, struct bdi_writeback *wb) +{ + spin_lock(&inode->i_lock); + redirty_tail_locked(inode, wb); + spin_unlock(&inode->i_lock); } /* @@ -1220,16 +1233,13 @@ static bool inode_dirtied_after(struct inode *inode, unsigned long t) #define EXPIRE_DIRTY_ATIME 0x0001 /* - * Move expired (dirtied before work->older_than_this) dirty inodes from + * Move expired (dirtied before dirtied_before) dirty inodes from * @delaying_queue to @dispatch_queue. 
*/ static int move_expired_inodes(struct list_head *delaying_queue, struct list_head *dispatch_queue, - int flags, - struct wb_writeback_work *work) + int flags, unsigned long dirtied_before) { - unsigned long *older_than_this = NULL; - unsigned long expire_time; LIST_HEAD(tmp); struct list_head *pos, *node; struct super_block *sb = NULL; @@ -1237,21 +1247,17 @@ static int move_expired_inodes(struct list_head *delaying_queue, int do_sb_sort = 0; int moved = 0; - if ((flags & EXPIRE_DIRTY_ATIME) == 0) - older_than_this = work->older_than_this; - else if (!work->for_sync) { - expire_time = jiffies - (dirtytime_expire_interval * HZ); - older_than_this = &expire_time; - } while (!list_empty(delaying_queue)) { inode = wb_inode(delaying_queue->prev); - if (older_than_this && - inode_dirtied_after(inode, *older_than_this)) + if (inode_dirtied_after(inode, dirtied_before)) break; list_move(&inode->i_io_list, &tmp); moved++; + spin_lock(&inode->i_lock); if (flags & EXPIRE_DIRTY_ATIME) - set_bit(__I_DIRTY_TIME_EXPIRED, &inode->i_state); + inode->i_state |= I_DIRTY_TIME_EXPIRED; + inode->i_state |= I_SYNC_QUEUED; + spin_unlock(&inode->i_lock); if (sb_is_blkdev_sb(inode->i_sb)) continue; if (sb && sb != inode->i_sb) @@ -1289,18 +1295,22 @@ out: * | * +--> dequeue for IO */ -static void queue_io(struct bdi_writeback *wb, struct wb_writeback_work *work) +static void queue_io(struct bdi_writeback *wb, struct wb_writeback_work *work, + unsigned long dirtied_before) { int moved; + unsigned long time_expire_jif = dirtied_before; assert_spin_locked(&wb->list_lock); list_splice_init(&wb->b_more_io, &wb->b_io); - moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, 0, work); + moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, 0, dirtied_before); + if (!work->for_sync) + time_expire_jif = jiffies - dirtytime_expire_interval * HZ; moved += move_expired_inodes(&wb->b_dirty_time, &wb->b_io, - EXPIRE_DIRTY_ATIME, work); + EXPIRE_DIRTY_ATIME, time_expire_jif); if (moved) wb_io_lists_populated(wb); - trace_writeback_queue_io(wb, work, moved); + trace_writeback_queue_io(wb, work, dirtied_before, moved); } static int write_inode(struct inode *inode, struct writeback_control *wbc) @@ -1394,7 +1404,7 @@ static void requeue_inode(struct inode *inode, struct bdi_writeback *wb, * writeback is not making progress due to locked * buffers. Skip this inode for now. */ - redirty_tail(inode, wb); + redirty_tail_locked(inode, wb); return; } @@ -1414,7 +1424,7 @@ static void requeue_inode(struct inode *inode, struct bdi_writeback *wb, * retrying writeback of the dirty page/inode * that cannot be performed immediately. */ - redirty_tail(inode, wb); + redirty_tail_locked(inode, wb); } } else if (inode->i_state & I_DIRTY) { /* @@ -1422,10 +1432,11 @@ static void requeue_inode(struct inode *inode, struct bdi_writeback *wb, * such as delayed allocation during submission or metadata * updates after data IO completion. */ - redirty_tail(inode, wb); + redirty_tail_locked(inode, wb); } else if (inode->i_state & I_DIRTY_TIME) { inode->dirtied_when = jiffies; inode_io_list_move_locked(inode, wb, &wb->b_dirty_time); + inode->i_state &= ~I_SYNC_QUEUED; } else { /* The inode is clean. Remove from writeback lists. 
*/ inode_io_list_del_locked(inode, wb); @@ -1669,8 +1680,8 @@ static long writeback_sb_inodes(struct super_block *sb, */ spin_lock(&inode->i_lock); if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) { + redirty_tail_locked(inode, wb); spin_unlock(&inode->i_lock); - redirty_tail(inode, wb); continue; } if ((inode->i_state & I_SYNC) && wbc.sync_mode != WB_SYNC_ALL) { @@ -1811,7 +1822,7 @@ static long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages, blk_start_plug(&plug); spin_lock(&wb->list_lock); if (list_empty(&wb->b_io)) - queue_io(wb, &work); + queue_io(wb, &work, jiffies); __writeback_inodes_wb(wb, &work); spin_unlock(&wb->list_lock); blk_finish_plug(&plug); @@ -1831,7 +1842,7 @@ static long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages, * takes longer than a dirty_writeback_interval interval, then leave a * one-second gap. * - * older_than_this takes precedence over nr_to_write. So we'll only write back + * dirtied_before takes precedence over nr_to_write. So we'll only write back * all dirty pages if they are all attached to "old" mappings. */ static long wb_writeback(struct bdi_writeback *wb, @@ -1839,14 +1850,11 @@ static long wb_writeback(struct bdi_writeback *wb, { unsigned long wb_start = jiffies; long nr_pages = work->nr_pages; - unsigned long oldest_jif; + unsigned long dirtied_before = jiffies; struct inode *inode; long progress; struct blk_plug plug; - oldest_jif = jiffies; - work->older_than_this = &oldest_jif; - blk_start_plug(&plug); spin_lock(&wb->list_lock); for (;;) { @@ -1880,14 +1888,14 @@ static long wb_writeback(struct bdi_writeback *wb, * safe. */ if (work->for_kupdate) { - oldest_jif = jiffies - + dirtied_before = jiffies - msecs_to_jiffies(dirty_expire_interval * 10); } else if (work->for_background) - oldest_jif = jiffies; + dirtied_before = jiffies; trace_writeback_start(wb, work); if (list_empty(&wb->b_io)) - queue_io(wb, work); + queue_io(wb, work, dirtied_before); if (work->sb) progress = writeback_sb_inodes(work->sb, wb, work); else @@ -2289,11 +2297,12 @@ void __mark_inode_dirty(struct inode *inode, int flags) inode->i_state |= flags; /* - * If the inode is being synced, just update its dirty state. - * The unlocker will place the inode on the appropriate - * superblock list, based upon its state. + * If the inode is queued for writeback by flush worker, just + * update its dirty state. Once the flush worker is done with + * the inode it will place it on the appropriate superblock + * list, based upon its state. 
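/*
 * A sketch of the "dirtied before the cutoff" test that replaces
 * work->older_than_this in the writeback hunks above. The wrap-safe
 * comparison mirrors the kernel's time_after() idiom; the jiffies and
 * interval values are made-up examples.
 */
#include <stdio.h>

#define time_after(a, b)        ((long)((b) - (a)) < 0)

int main(void)
{
        unsigned long jiffies = 1000000UL;
        unsigned long expire = 3000UL;                   /* illustrative */
        unsigned long dirtied_before = jiffies - expire; /* kupdate case */
        unsigned long dirtied_when = 998500UL;           /* inode timestamp */

        /* inode_dirtied_after(): too fresh to be queued for IO yet? */
        if (time_after(dirtied_when, dirtied_before))
                printf("keep on b_dirty\n");
        else
                printf("move to b_io for writeback\n");
        return 0;
}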
*/ - if (inode->i_state & I_SYNC) + if (inode->i_state & I_SYNC_QUEUED) goto out_unlock_inode; /* diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index 4f2e4f38feb8..06dd38e76c62 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c @@ -771,7 +771,8 @@ static int fuse_check_page(struct page *page) 1 << PG_uptodate | 1 << PG_lru | 1 << PG_active | - 1 << PG_reclaim))) { + 1 << PG_reclaim | + 1 << PG_waiters))) { pr_warn("trying to steal weird page\n"); pr_warn(" page=%p index=%li flags=%08lx, count=%i, mapcount=%i, mapping=%p\n", page, page->index, page->flags, page_count(page), page_mapcount(page), page->mapping); return 1; diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 3dd37a998ea9..f8d8a8e34b80 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -18,6 +18,7 @@ #include #include #include +#include static struct page **fuse_pages_alloc(unsigned int npages, gfp_t flags, struct fuse_page_desc **desc) @@ -712,6 +713,7 @@ static ssize_t fuse_async_req_send(struct fuse_conn *fc, spin_unlock(&io->lock); ia->ap.args.end = fuse_aio_complete_req; + ia->ap.args.may_block = io->should_dirty; err = fuse_simple_background(fc, &ia->ap.args, GFP_KERNEL); if (err) fuse_aio_complete_req(fc, &ia->ap.args, err); @@ -2147,10 +2149,8 @@ static int fuse_writepages(struct address_space *mapping, err = write_cache_pages(mapping, wbc, fuse_writepages_fill, &data); if (data.wpa) { - /* Ignore errors if we can write at least one page */ WARN_ON(!data.wpa->ia.ap.num_pages); fuse_writepages_send(&data); - err = 0; } if (data.ff) fuse_file_put(data.ff, false, false); @@ -2759,7 +2759,16 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg, struct iovec *iov = iov_page; iov->iov_base = (void __user *)arg; - iov->iov_len = _IOC_SIZE(cmd); + + switch (cmd) { + case FS_IOC_GETFLAGS: + case FS_IOC_SETFLAGS: + iov->iov_len = sizeof(int); + break; + default: + iov->iov_len = _IOC_SIZE(cmd); + break; + } if (_IOC_DIR(cmd) & _IOC_WRITE) { in_iov = iov; @@ -3279,13 +3288,11 @@ static ssize_t __fuse_copy_file_range(struct file *file_in, loff_t pos_in, if (file_inode(file_in)->i_sb != file_inode(file_out)->i_sb) return -EXDEV; - if (fc->writeback_cache) { - inode_lock(inode_in); - err = fuse_writeback_range(inode_in, pos_in, pos_in + len); - inode_unlock(inode_in); - if (err) - return err; - } + inode_lock(inode_in); + err = fuse_writeback_range(inode_in, pos_in, pos_in + len - 1); + inode_unlock(inode_in); + if (err) + return err; inode_lock(inode_out); @@ -3293,11 +3300,27 @@ static ssize_t __fuse_copy_file_range(struct file *file_in, loff_t pos_in, if (err) goto out; - if (fc->writeback_cache) { - err = fuse_writeback_range(inode_out, pos_out, pos_out + len); - if (err) - goto out; - } + /* + * Write out dirty pages in the destination file before sending the COPY + * request to userspace. After the request is completed, truncate off + * pages (including partial ones) from the cache that have been copied, + * since these contain stale data at that point. + * + * This should be mostly correct, but if the COPY writes to partial + * pages (at the start or end) and the parts not covered by the COPY are + * written through a memory map after calling fuse_writeback_range(), + * then these partial page modifications will be lost on truncation. + * + * It is unlikely that someone would rely on such mixed style + * modifications. Yet this does give less guarantees than if the + * copying was performed with write(2). 
+ * + * To fix this a i_mmap_sem style lock could be used to prevent new + * faults while the copy is ongoing. + */ + err = fuse_writeback_range(inode_out, pos_out, pos_out + len - 1); + if (err) + goto out; if (is_unstable) set_bit(FUSE_I_SIZE_UNSTABLE, &fi_out->state); @@ -3318,6 +3341,10 @@ static ssize_t __fuse_copy_file_range(struct file *file_in, loff_t pos_in, if (err) goto out; + truncate_inode_pages_range(inode_out->i_mapping, + ALIGN_DOWN(pos_out, PAGE_SIZE), + ALIGN(pos_out + outarg.size, PAGE_SIZE) - 1); + if (fc->writeback_cache) { fuse_write_update_size(inode_out, pos_out + outarg.size); file_update_time(file_out); diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h index ca344bf71404..d7cde216fc87 100644 --- a/fs/fuse/fuse_i.h +++ b/fs/fuse/fuse_i.h @@ -249,6 +249,7 @@ struct fuse_args { bool out_argvar:1; bool page_zeroing:1; bool page_replace:1; + bool may_block:1; struct fuse_in_arg in_args[3]; struct fuse_arg out_args[2]; void (*end)(struct fuse_conn *fc, struct fuse_args *args, int error); diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c index 16aec32f7f3d..5dca643a257c 100644 --- a/fs/fuse/inode.c +++ b/fs/fuse/inode.c @@ -121,10 +121,12 @@ static void fuse_evict_inode(struct inode *inode) } } -static int fuse_remount_fs(struct super_block *sb, int *flags, char *data) +static int fuse_reconfigure(struct fs_context *fc) { + struct super_block *sb = fc->root->d_sb; + sync_filesystem(sb); - if (*flags & SB_MANDLOCK) + if (fc->sb_flags & SB_MANDLOCK) return -EINVAL; return 0; @@ -473,6 +475,13 @@ static int fuse_parse_param(struct fs_context *fc, struct fs_parameter *param) struct fuse_fs_context *ctx = fc->fs_private; int opt; + /* + * Ignore options coming from mount(MS_REMOUNT) for backward + * compatibility. + */ + if (fc->purpose == FS_CONTEXT_FOR_RECONFIGURE) + return 0; + opt = fs_parse(fc, &fuse_fs_parameters, param, &result); if (opt < 0) return opt; @@ -815,7 +824,6 @@ static const struct super_operations fuse_super_operations = { .evict_inode = fuse_evict_inode, .write_inode = fuse_write_inode, .drop_inode = generic_delete_inode, - .remount_fs = fuse_remount_fs, .put_super = fuse_put_super, .umount_begin = fuse_umount_begin, .statfs = fuse_statfs, @@ -1289,6 +1297,7 @@ static int fuse_get_tree(struct fs_context *fc) static const struct fs_context_operations fuse_context_ops = { .free = fuse_free_fc, .parse_param = fuse_parse_param, + .reconfigure = fuse_reconfigure, .get_tree = fuse_get_tree, }; diff --git a/fs/fuse/virtio_fs.c b/fs/fuse/virtio_fs.c index a5c86048b96e..7505f8102762 100644 --- a/fs/fuse/virtio_fs.c +++ b/fs/fuse/virtio_fs.c @@ -55,6 +55,12 @@ struct virtio_fs_forget { struct list_head list; }; +struct virtio_fs_req_work { + struct fuse_req *req; + struct virtio_fs_vq *fsvq; + struct work_struct done_work; +}; + static int virtio_fs_enqueue_req(struct virtio_fs_vq *fsvq, struct fuse_req *req, bool in_flight); @@ -443,19 +449,67 @@ static void copy_args_from_argbuf(struct fuse_args *args, struct fuse_req *req) } /* Work function for request completion */ +static void virtio_fs_request_complete(struct fuse_req *req, + struct virtio_fs_vq *fsvq) +{ + struct fuse_pqueue *fpq = &fsvq->fud->pq; + struct fuse_conn *fc = fsvq->fud->fc; + struct fuse_args *args; + struct fuse_args_pages *ap; + unsigned int len, i, thislen; + struct page *page; + + /* + * TODO verify that server properly follows FUSE protocol + * (oh.uniq, oh.len) + */ + args = req->args; + copy_args_from_argbuf(args, req); + + if (args->out_pages && args->page_zeroing) { + len = 
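/*
 * A sketch of the page-rounded invalidation range used after the COPY
 * above: the destination cache is dropped from the page containing
 * pos_out through the page containing the last copied byte, partial
 * pages included. PAGE_SIZE 4096 and the offsets are assumptions for
 * the example; ALIGN/ALIGN_DOWN are re-expressed for a userspace build.
 */
#include <stdio.h>

#define PAGE_SIZE       4096UL
#define ALIGN_DOWN(x, a)        ((x) & ~((a) - 1))
#define ALIGN(x, a)             (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
        unsigned long pos_out = 5000, copied = 10000;
        unsigned long start = ALIGN_DOWN(pos_out, PAGE_SIZE);
        unsigned long end = ALIGN(pos_out + copied, PAGE_SIZE) - 1;

        /* bytes 4096..16383, i.e. pages 1..3, are invalidated */
        printf("truncate [%lu, %lu]\n", start, end);
        return 0;
}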
args->out_args[args->out_numargs - 1].size; + ap = container_of(args, typeof(*ap), args); + for (i = 0; i < ap->num_pages; i++) { + thislen = ap->descs[i].length; + if (len < thislen) { + WARN_ON(ap->descs[i].offset); + page = ap->pages[i]; + zero_user_segment(page, len, thislen); + len = 0; + } else { + len -= thislen; + } + } + } + + spin_lock(&fpq->lock); + clear_bit(FR_SENT, &req->flags); + spin_unlock(&fpq->lock); + + fuse_request_end(fc, req); + spin_lock(&fsvq->lock); + dec_in_flight_req(fsvq); + spin_unlock(&fsvq->lock); +} + +static void virtio_fs_complete_req_work(struct work_struct *work) +{ + struct virtio_fs_req_work *w = + container_of(work, typeof(*w), done_work); + + virtio_fs_request_complete(w->req, w->fsvq); + kfree(w); +} + static void virtio_fs_requests_done_work(struct work_struct *work) { struct virtio_fs_vq *fsvq = container_of(work, struct virtio_fs_vq, done_work); struct fuse_pqueue *fpq = &fsvq->fud->pq; - struct fuse_conn *fc = fsvq->fud->fc; struct virtqueue *vq = fsvq->vq; struct fuse_req *req; - struct fuse_args_pages *ap; struct fuse_req *next; - struct fuse_args *args; - unsigned int len, i, thislen; - struct page *page; + unsigned int len; LIST_HEAD(reqs); /* Collect completed requests off the virtqueue */ @@ -473,38 +527,20 @@ static void virtio_fs_requests_done_work(struct work_struct *work) /* End requests */ list_for_each_entry_safe(req, next, &reqs, list) { - /* - * TODO verify that server properly follows FUSE protocol - * (oh.uniq, oh.len) - */ - args = req->args; - copy_args_from_argbuf(args, req); - - if (args->out_pages && args->page_zeroing) { - len = args->out_args[args->out_numargs - 1].size; - ap = container_of(args, typeof(*ap), args); - for (i = 0; i < ap->num_pages; i++) { - thislen = ap->descs[i].length; - if (len < thislen) { - WARN_ON(ap->descs[i].offset); - page = ap->pages[i]; - zero_user_segment(page, len, thislen); - len = 0; - } else { - len -= thislen; - } - } - } - - spin_lock(&fpq->lock); - clear_bit(FR_SENT, &req->flags); list_del_init(&req->list); - spin_unlock(&fpq->lock); - fuse_request_end(fc, req); - spin_lock(&fsvq->lock); - dec_in_flight_req(fsvq); - spin_unlock(&fsvq->lock); + /* blocking async request completes in a worker context */ + if (req->args->may_block) { + struct virtio_fs_req_work *w; + + w = kzalloc(sizeof(*w), GFP_NOFS | __GFP_NOFAIL); + INIT_WORK(&w->done_work, virtio_fs_complete_req_work); + w->fsvq = fsvq; + w->req = req; + schedule_work(&w->done_work); + } else { + virtio_fs_request_complete(req, fsvq); + } } } diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c index adbb8fef2216..50fa3e08c02f 100644 --- a/fs/gfs2/bmap.c +++ b/fs/gfs2/bmap.c @@ -1350,9 +1350,15 @@ int gfs2_extent_map(struct inode *inode, u64 lblock, int *new, u64 *dblock, unsi return ret; } +/* + * NOTE: Never call gfs2_block_zero_range with an open transaction because it + * uses iomap write to perform its actions, which begin their own transactions + * (iomap_begin, page_prepare, etc.) 
+ */ static int gfs2_block_zero_range(struct inode *inode, loff_t from, unsigned int length) { + BUG_ON(current->journal_info); return iomap_zero_range(inode, from, length, NULL, &gfs2_iomap_ops); } @@ -1413,6 +1419,16 @@ static int trunc_start(struct inode *inode, u64 newsize) u64 oldsize = inode->i_size; int error; + if (!gfs2_is_stuffed(ip)) { + unsigned int blocksize = i_blocksize(inode); + unsigned int offs = newsize & (blocksize - 1); + if (offs) { + error = gfs2_block_zero_range(inode, newsize, + blocksize - offs); + if (error) + return error; + } + } if (journaled) error = gfs2_trans_begin(sdp, RES_DINODE + RES_JDATA, GFS2_JTRUNC_REVOKES); else @@ -1426,19 +1442,10 @@ static int trunc_start(struct inode *inode, u64 newsize) gfs2_trans_add_meta(ip->i_gl, dibh); - if (gfs2_is_stuffed(ip)) { + if (gfs2_is_stuffed(ip)) gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode) + newsize); - } else { - unsigned int blocksize = i_blocksize(inode); - unsigned int offs = newsize & (blocksize - 1); - if (offs) { - error = gfs2_block_zero_range(inode, newsize, - blocksize - offs); - if (error) - goto out; - } + else ip->i_diskflags |= GFS2_DIF_TRUNC_IN_PROG; - } i_size_write(inode, newsize); ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode); @@ -2442,24 +2449,13 @@ int __gfs2_punch_hole(struct file *file, loff_t offset, loff_t length) struct inode *inode = file_inode(file); struct gfs2_inode *ip = GFS2_I(inode); struct gfs2_sbd *sdp = GFS2_SB(inode); + unsigned int blocksize = i_blocksize(inode); + loff_t start, end; int error; - if (gfs2_is_jdata(ip)) - error = gfs2_trans_begin(sdp, RES_DINODE + 2 * RES_JDATA, - GFS2_JTRUNC_REVOKES); - else - error = gfs2_trans_begin(sdp, RES_DINODE, 0); - if (error) - return error; + if (!gfs2_is_stuffed(ip)) { + unsigned int start_off, end_len; - if (gfs2_is_stuffed(ip)) { - error = stuffed_zero_range(inode, offset, length); - if (error) - goto out; - } else { - unsigned int start_off, end_len, blocksize; - - blocksize = i_blocksize(inode); start_off = offset & (blocksize - 1); end_len = (offset + length) & (blocksize - 1); if (start_off) { @@ -2480,6 +2476,26 @@ int __gfs2_punch_hole(struct file *file, loff_t offset, loff_t length) } } + start = round_down(offset, blocksize); + end = round_up(offset + length, blocksize) - 1; + error = filemap_write_and_wait_range(inode->i_mapping, start, end); + if (error) + return error; + + if (gfs2_is_jdata(ip)) + error = gfs2_trans_begin(sdp, RES_DINODE + 2 * RES_JDATA, + GFS2_JTRUNC_REVOKES); + else + error = gfs2_trans_begin(sdp, RES_DINODE, 0); + if (error) + return error; + + if (gfs2_is_stuffed(ip)) { + error = stuffed_zero_range(inode, offset, length); + if (error) + goto out; + } + if (gfs2_is_jdata(ip)) { BUG_ON(!current->journal_info); gfs2_journaled_truncate_range(inode, offset, length); diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c index 83cf64da474c..d5b9274662db 100644 --- a/fs/gfs2/glops.c +++ b/fs/gfs2/glops.c @@ -87,6 +87,8 @@ static void gfs2_ail_empty_gl(struct gfs2_glock *gl) memset(&tr, 0, sizeof(tr)); INIT_LIST_HEAD(&tr.tr_buf); INIT_LIST_HEAD(&tr.tr_databuf); + INIT_LIST_HEAD(&tr.tr_ail1_list); + INIT_LIST_HEAD(&tr.tr_ail2_list); tr.tr_revokes = atomic_read(&gl->gl_ail_count); if (!tr.tr_revokes) { diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c index 110e5c4db819..00a90fc72597 100644 --- a/fs/gfs2/log.c +++ b/fs/gfs2/log.c @@ -810,8 +810,6 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, u32 flags) tr = sdp->sd_log_tr; if (tr) { sdp->sd_log_tr = NULL; - 
INIT_LIST_HEAD(&tr->tr_ail1_list); - INIT_LIST_HEAD(&tr->tr_ail2_list); tr->tr_first = sdp->sd_log_flush_head; if (unlikely (state == SFS_FROZEN)) gfs2_assert_withdraw(sdp, !tr->tr_num_buf_new && !tr->tr_num_databuf_new); @@ -881,8 +879,10 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, u32 flags) * @new: New transaction to be merged */ -static void gfs2_merge_trans(struct gfs2_trans *old, struct gfs2_trans *new) +static void gfs2_merge_trans(struct gfs2_sbd *sdp, struct gfs2_trans *new) { + struct gfs2_trans *old = sdp->sd_log_tr; + WARN_ON_ONCE(!test_bit(TR_ATTACHED, &old->tr_flags)); old->tr_num_buf_new += new->tr_num_buf_new; @@ -893,6 +893,11 @@ static void gfs2_merge_trans(struct gfs2_trans *old, struct gfs2_trans *new) list_splice_tail_init(&new->tr_databuf, &old->tr_databuf); list_splice_tail_init(&new->tr_buf, &old->tr_buf); + + spin_lock(&sdp->sd_ail_lock); + list_splice_tail_init(&new->tr_ail1_list, &old->tr_ail1_list); + list_splice_tail_init(&new->tr_ail2_list, &old->tr_ail2_list); + spin_unlock(&sdp->sd_ail_lock); } static void log_refund(struct gfs2_sbd *sdp, struct gfs2_trans *tr) @@ -904,7 +909,7 @@ static void log_refund(struct gfs2_sbd *sdp, struct gfs2_trans *tr) gfs2_log_lock(sdp); if (sdp->sd_log_tr) { - gfs2_merge_trans(sdp->sd_log_tr, tr); + gfs2_merge_trans(sdp, tr); } else if (tr->tr_num_buf_new || tr->tr_num_databuf_new) { gfs2_assert_withdraw(sdp, test_bit(TR_ALLOCED, &tr->tr_flags)); sdp->sd_log_tr = tr; diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c index 18daf494abab..e0c55765b06d 100644 --- a/fs/gfs2/ops_fstype.c +++ b/fs/gfs2/ops_fstype.c @@ -911,7 +911,7 @@ fail: } static const match_table_t nolock_tokens = { - { Opt_jid, "jid=%d\n", }, + { Opt_jid, "jid=%d", }, { Opt_err, NULL }, }; @@ -1168,7 +1168,17 @@ static int gfs2_fill_super(struct super_block *sb, struct fs_context *fc) goto fail_per_node; } - if (!sb_rdonly(sb)) { + if (sb_rdonly(sb)) { + struct gfs2_holder freeze_gh; + + error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED, + GL_EXACT, &freeze_gh); + if (error) { + fs_err(sdp, "can't make FS RO: %d\n", error); + goto fail_per_node; + } + gfs2_glock_dq_uninit(&freeze_gh); + } else { error = gfs2_make_fs_rw(sdp); if (error) { fs_err(sdp, "can't make FS RW: %d\n", error); diff --git a/fs/gfs2/trans.c b/fs/gfs2/trans.c index 9d4227330de4..2a12d30ae0de 100644 --- a/fs/gfs2/trans.c +++ b/fs/gfs2/trans.c @@ -53,6 +53,8 @@ int gfs2_trans_begin(struct gfs2_sbd *sdp, unsigned int blocks, sizeof(u64)); INIT_LIST_HEAD(&tr->tr_databuf); INIT_LIST_HEAD(&tr->tr_buf); + INIT_LIST_HEAD(&tr->tr_ail1_list); + INIT_LIST_HEAD(&tr->tr_ail2_list); sb_start_intwrite(sdp->sd_vfs); diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index 40306c1eab07..5fff7cb3582f 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c @@ -1284,6 +1284,12 @@ hugetlbfs_fill_super(struct super_block *sb, struct fs_context *fc) sb->s_magic = HUGETLBFS_MAGIC; sb->s_op = &hugetlbfs_ops; sb->s_time_gran = 1; + + /* + * Due to the special and limited functionality of hugetlbfs, it does + * not work well as a stacking filesystem. 
+ */ + sb->s_stack_depth = FILESYSTEM_MAX_STACK_DEPTH; sb->s_root = d_make_root(hugetlbfs_get_root(sb, ctx)); if (!sb->s_root) goto out_free; diff --git a/fs/io_uring.c b/fs/io_uring.c index 7fa3cd3fff4d..2a539b794f3b 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -267,6 +267,9 @@ struct io_ring_ctx { #if defined(CONFIG_UNIX) struct socket *ring_sock; #endif + + struct list_head task_list; + spinlock_t task_lock; }; struct sqe_submit { @@ -276,6 +279,7 @@ struct sqe_submit { bool has_user; bool needs_lock; bool needs_fixed_file; + u8 opcode; }; /* @@ -331,14 +335,18 @@ struct io_kiocb { #define REQ_F_ISREG 2048 /* regular file */ #define REQ_F_MUST_PUNT 4096 /* must be punted even for NONBLOCK */ #define REQ_F_TIMEOUT_NOSEQ 8192 /* no timeout sequence */ +#define REQ_F_CANCEL 16384 /* cancel request */ unsigned long fsize; u64 user_data; u32 result; u32 sequence; + struct task_struct *task; struct fs_struct *fs; struct work_struct work; + struct task_struct *work_task; + struct list_head task_list; }; #define IO_PLUG_THRESHOLD 2 @@ -425,6 +433,8 @@ static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p) INIT_LIST_HEAD(&ctx->cancel_list); INIT_LIST_HEAD(&ctx->defer_list); INIT_LIST_HEAD(&ctx->timeout_list); + INIT_LIST_HEAD(&ctx->task_list); + spin_lock_init(&ctx->task_lock); return ctx; } @@ -492,10 +502,11 @@ static void __io_commit_cqring(struct io_ring_ctx *ctx) static inline void io_queue_async_work(struct io_ring_ctx *ctx, struct io_kiocb *req) { + unsigned long flags; int rw = 0; if (req->submit.sqe) { - switch (req->submit.sqe->opcode) { + switch (req->submit.opcode) { case IORING_OP_WRITEV: case IORING_OP_WRITE_FIXED: rw = !(req->rw.ki_flags & IOCB_DIRECT); @@ -503,6 +514,13 @@ static inline void io_queue_async_work(struct io_ring_ctx *ctx, } } + req->task = current; + + spin_lock_irqsave(&ctx->task_lock, flags); + list_add(&req->task_list, &ctx->task_list); + req->work_task = NULL; + spin_unlock_irqrestore(&ctx->task_lock, flags); + queue_work(ctx->sqo_wq[rw], &req->work); } @@ -1237,23 +1255,15 @@ static int io_import_fixed(struct io_ring_ctx *ctx, int rw, } static ssize_t io_import_iovec(struct io_ring_ctx *ctx, int rw, - const struct sqe_submit *s, struct iovec **iovec, + struct io_kiocb *req, struct iovec **iovec, struct iov_iter *iter) { - const struct io_uring_sqe *sqe = s->sqe; + const struct io_uring_sqe *sqe = req->submit.sqe; void __user *buf = u64_to_user_ptr(READ_ONCE(sqe->addr)); size_t sqe_len = READ_ONCE(sqe->len); u8 opcode; - /* - * We're reading ->opcode for the second time, but the first read - * doesn't care whether it's _FIXED or not, so it doesn't matter - * whether ->opcode changes concurrently. The first read does care - * about whether it is a READ or a WRITE, so we don't trust this read - * for that purpose and instead let the caller pass in the read/write - * flag. 
- */ - opcode = READ_ONCE(sqe->opcode); + opcode = req->submit.opcode; if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) { ssize_t ret = io_import_fixed(ctx, rw, sqe, iter); @@ -1261,7 +1271,7 @@ static ssize_t io_import_iovec(struct io_ring_ctx *ctx, int rw, return ret; } - if (!s->has_user) + if (!req->submit.has_user) return -EFAULT; #ifdef CONFIG_COMPAT @@ -1408,7 +1418,7 @@ static int io_read(struct io_kiocb *req, const struct sqe_submit *s, if (unlikely(!(file->f_mode & FMODE_READ))) return -EBADF; - ret = io_import_iovec(req->ctx, READ, s, &iovec, &iter); + ret = io_import_iovec(req->ctx, READ, req, &iovec, &iter); if (ret < 0) return ret; @@ -1423,8 +1433,10 @@ static int io_read(struct io_kiocb *req, const struct sqe_submit *s, if (file->f_op->read_iter) ret2 = call_read_iter(file, kiocb, &iter); - else + else if (req->file->f_op->read) ret2 = loop_rw_iter(READ, file, kiocb, &iter); + else + ret2 = -EINVAL; /* * In case of a short read, punt to async. This can happen @@ -1473,7 +1485,7 @@ static int io_write(struct io_kiocb *req, const struct sqe_submit *s, if (unlikely(!(file->f_mode & FMODE_WRITE))) return -EBADF; - ret = io_import_iovec(req->ctx, WRITE, s, &iovec, &iter); + ret = io_import_iovec(req->ctx, WRITE, req, &iovec, &iter); if (ret < 0) return ret; @@ -1514,8 +1526,10 @@ static int io_write(struct io_kiocb *req, const struct sqe_submit *s, if (file->f_op->write_iter) ret2 = call_write_iter(file, kiocb, &iter); - else + else if (req->file->f_op->write) ret2 = loop_rw_iter(WRITE, file, kiocb, &iter); + else + ret2 = -EINVAL; if (!force_nonblock) current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY; @@ -2092,15 +2106,14 @@ static int io_req_defer(struct io_ring_ctx *ctx, struct io_kiocb *req, static int __io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req, const struct sqe_submit *s, bool force_nonblock) { - int ret, opcode; + int ret; req->user_data = READ_ONCE(s->sqe->user_data); if (unlikely(s->index >= ctx->sq_entries)) return -EINVAL; - opcode = READ_ONCE(s->sqe->opcode); - switch (opcode) { + switch (req->submit.opcode) { case IORING_OP_NOP: ret = io_nop(req, req->user_data); break; @@ -2164,10 +2177,10 @@ static int __io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req, return 0; } -static struct async_list *io_async_list_from_sqe(struct io_ring_ctx *ctx, - const struct io_uring_sqe *sqe) +static struct async_list *io_async_list_from_req(struct io_ring_ctx *ctx, + struct io_kiocb *req) { - switch (sqe->opcode) { + switch (req->submit.opcode) { case IORING_OP_READV: case IORING_OP_READ_FIXED: return &ctx->pending_async[READ]; @@ -2179,12 +2192,10 @@ static struct async_list *io_async_list_from_sqe(struct io_ring_ctx *ctx, } } -static inline bool io_sqe_needs_user(const struct io_uring_sqe *sqe) +static inline bool io_req_needs_user(struct io_kiocb *req) { - u8 opcode = READ_ONCE(sqe->opcode); - - return !(opcode == IORING_OP_READ_FIXED || - opcode == IORING_OP_WRITE_FIXED); + return !(req->submit.opcode == IORING_OP_READ_FIXED || + req->submit.opcode == IORING_OP_WRITE_FIXED); } static void io_sq_wq_submit_work(struct work_struct *work) @@ -2200,7 +2211,9 @@ static void io_sq_wq_submit_work(struct work_struct *work) int ret; old_cred = override_creds(ctx->creds); - async_list = io_async_list_from_sqe(ctx, req->submit.sqe); + async_list = io_async_list_from_req(ctx, req); + + allow_kernel_signal(SIGINT); restart: do { struct sqe_submit *s = &req->submit; @@ -2220,9 +2233,10 @@ restart: } ret = 0; - if 
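/*
 * A sketch of the "latch the opcode once" pattern the io_uring hunks above
 * switch to: the sqe sits in memory userspace can rewrite at any time, so
 * its opcode is copied into the request when the sqe is picked up and every
 * later decision uses that copy. The structs below are simplified stand-ins,
 * not the real io_uring types.
 */
#include <stdio.h>

struct shared_sqe { volatile unsigned char opcode; };
struct request    { unsigned char opcode; };

static void submit(struct request *req, struct shared_sqe *sqe)
{
        req->opcode = sqe->opcode;      /* one snapshot (READ_ONCE in-kernel) */

        switch (req->opcode) {          /* validate using the snapshot ... */
        case 1:
                printf("readv\n");
                break;
        default:
                printf("unsupported\n");
                return;
        }
        /* ... and dispatch re-uses req->opcode, never sqe->opcode again */
}

int main(void)
{
        struct shared_sqe sqe = { .opcode = 1 };
        struct request req;

        submit(&req, &sqe);
        return 0;
}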
(io_sqe_needs_user(sqe) && !cur_mm) { + if (io_req_needs_user(req) && !cur_mm) { if (!mmget_not_zero(ctx->sqo_mm)) { ret = -EFAULT; + goto end_req; } else { cur_mm = ctx->sqo_mm; use_mm(cur_mm); @@ -2232,6 +2246,12 @@ restart: } if (!ret) { + req->work_task = current; + if (req->flags & REQ_F_CANCEL) { + ret = -ECANCELED; + goto end_req; + } + s->has_user = cur_mm != NULL; s->needs_lock = true; do { @@ -2246,6 +2266,12 @@ restart: break; cond_resched(); } while (1); +end_req: + if (!list_empty(&req->task_list)) { + spin_lock_irq(&ctx->task_lock); + list_del_init(&req->task_list); + spin_unlock_irq(&ctx->task_lock); + } } /* drop submission reference */ @@ -2311,6 +2337,7 @@ restart: } out: + disallow_signal(SIGINT); if (cur_mm) { set_fs(old_fs); unuse_mm(cur_mm); @@ -2351,15 +2378,22 @@ static bool io_add_to_prev_work(struct async_list *list, struct io_kiocb *req) list_del_init(&req->list); ret = false; } + + if (ret) { + struct io_ring_ctx *ctx = req->ctx; + + spin_lock_irq(&ctx->task_lock); + list_add(&req->task_list, &ctx->task_list); + req->work_task = NULL; + spin_unlock_irq(&ctx->task_lock); + } spin_unlock(&list->lock); return ret; } -static bool io_op_needs_file(const struct io_uring_sqe *sqe) +static bool io_op_needs_file(struct io_kiocb *req) { - int op = READ_ONCE(sqe->opcode); - - switch (op) { + switch (req->submit.opcode) { case IORING_OP_NOP: case IORING_OP_POLL_REMOVE: case IORING_OP_TIMEOUT: @@ -2387,7 +2421,7 @@ static int io_req_set_file(struct io_ring_ctx *ctx, const struct sqe_submit *s, */ req->sequence = s->sequence; - if (!io_op_needs_file(s->sqe)) + if (!io_op_needs_file(req)) return 0; if (flags & IOSQE_FIXED_FILE) { @@ -2428,7 +2462,7 @@ static int __io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req, s->sqe = sqe_copy; memcpy(&req->submit, s, sizeof(*s)); - list = io_async_list_from_sqe(ctx, s->sqe); + list = io_async_list_from_req(ctx, req); if (!io_add_to_prev_work(list, req)) { if (list) atomic_inc(&list->cnt); @@ -2538,6 +2572,7 @@ static void io_submit_sqe(struct io_ring_ctx *ctx, struct sqe_submit *s, goto err; } + memcpy(&req->submit, s, sizeof(*s)); ret = io_req_set_file(ctx, s, state, req); if (unlikely(ret)) { err_req: @@ -2550,7 +2585,7 @@ err: req->user_data = s->sqe->user_data; #if defined(CONFIG_NET) - switch (READ_ONCE(s->sqe->opcode)) { + switch (req->submit.opcode) { case IORING_OP_SENDMSG: case IORING_OP_RECVMSG: spin_lock(¤t->fs->lock); @@ -2665,6 +2700,7 @@ static bool io_get_sqring(struct io_ring_ctx *ctx, struct sqe_submit *s) if (head < ctx->sq_entries) { s->index = head; s->sqe = &ctx->sq_sqes[head]; + s->opcode = READ_ONCE(s->sqe->opcode); s->sequence = ctx->cached_sq_head; ctx->cached_sq_head++; return true; @@ -3368,6 +3404,9 @@ static unsigned long rings_size(unsigned sq_entries, unsigned cq_entries, return SIZE_MAX; #endif + if (sq_offset) + *sq_offset = off; + sq_array_size = array_size(sizeof(u32), sq_entries); if (sq_array_size == SIZE_MAX) return SIZE_MAX; @@ -3375,9 +3414,6 @@ static unsigned long rings_size(unsigned sq_entries, unsigned cq_entries, if (check_add_overflow(off, sq_array_size, &off)) return SIZE_MAX; - if (sq_offset) - *sq_offset = off; - return off; } @@ -3675,12 +3711,32 @@ static int io_uring_fasync(int fd, struct file *file, int on) return fasync_helper(fd, file, on, &ctx->cq_fasync); } +static void io_cancel_async_work(struct io_ring_ctx *ctx, + struct task_struct *task) +{ + if (list_empty(&ctx->task_list)) + return; + + spin_lock_irq(&ctx->task_lock); + while (!list_empty(&ctx->task_list)) { + 
struct io_kiocb *req; + + req = list_first_entry(&ctx->task_list, struct io_kiocb, task_list); + list_del_init(&req->task_list); + req->flags |= REQ_F_CANCEL; + if (req->work_task && (!task || req->task == task)) + send_sig(SIGINT, req->work_task, 1); + } + spin_unlock_irq(&ctx->task_lock); +} + static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx) { mutex_lock(&ctx->uring_lock); percpu_ref_kill(&ctx->refs); mutex_unlock(&ctx->uring_lock); + io_cancel_async_work(ctx, NULL); io_kill_timeouts(ctx); io_poll_remove_all(ctx); io_iopoll_reap_events(ctx); @@ -3688,6 +3744,16 @@ static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx) io_ring_ctx_free(ctx); } +static int io_uring_flush(struct file *file, void *data) +{ + struct io_ring_ctx *ctx = file->private_data; + + if (fatal_signal_pending(current) || (current->flags & PF_EXITING)) + io_cancel_async_work(ctx, current); + + return 0; +} + static int io_uring_release(struct inode *inode, struct file *file) { struct io_ring_ctx *ctx = file->private_data; @@ -3792,6 +3858,7 @@ out_fput: static const struct file_operations io_uring_fops = { .release = io_uring_release, + .flush = io_uring_flush, .mmap = io_uring_mmap, .poll = io_uring_poll, .fasync = io_uring_fasync, @@ -3803,6 +3870,10 @@ static int io_allocate_scq_urings(struct io_ring_ctx *ctx, struct io_rings *rings; size_t size, sq_array_offset; + /* make sure these are sane, as we already accounted them */ + ctx->sq_entries = p->sq_entries; + ctx->cq_entries = p->cq_entries; + size = rings_size(p->sq_entries, p->cq_entries, &sq_array_offset); if (size == SIZE_MAX) return -EOVERFLOW; @@ -3819,8 +3890,6 @@ static int io_allocate_scq_urings(struct io_ring_ctx *ctx, rings->cq_ring_entries = p->cq_entries; ctx->sq_mask = rings->sq_ring_mask; ctx->cq_mask = rings->cq_ring_mask; - ctx->sq_entries = rings->sq_ring_entries; - ctx->cq_entries = rings->cq_ring_entries; size = array_size(sizeof(struct io_uring_sqe), p->sq_entries); if (size == SIZE_MAX) { diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c index c1ce2805c563..b7c5819bfc41 100644 --- a/fs/jbd2/journal.c +++ b/fs/jbd2/journal.c @@ -96,7 +96,6 @@ EXPORT_SYMBOL(jbd2_journal_release_jbd_inode); EXPORT_SYMBOL(jbd2_journal_begin_ordered_truncate); EXPORT_SYMBOL(jbd2_inode_cache); -static void __journal_abort_soft (journal_t *journal, int errno); static int jbd2_journal_create_slab(size_t slab_size); #ifdef CONFIG_JBD2_DEBUG @@ -805,7 +804,7 @@ int jbd2_journal_bmap(journal_t *journal, unsigned long blocknr, "at offset %lu on %s\n", __func__, blocknr, journal->j_devname); err = -EIO; - __journal_abort_soft(journal, err); + jbd2_journal_abort(journal, err); } } else { *retp = blocknr; /* +journal->j_blk_offset */ @@ -1349,8 +1348,10 @@ static int jbd2_write_superblock(journal_t *journal, int write_flags) int ret; /* Buffer got discarded which means block device got invalidated */ - if (!buffer_mapped(bh)) + if (!buffer_mapped(bh)) { + unlock_buffer(bh); return -EIO; + } trace_jbd2_write_superblock(journal, write_flags); if (!(journal->j_flags & JBD2_BARRIER)) @@ -2070,64 +2071,6 @@ int jbd2_journal_wipe(journal_t *journal, int write) return err; } -/* - * Journal abort has very specific semantics, which we describe - * for journal abort. - * - * Two internal functions, which provide abort to the jbd layer - * itself are here. - */ - -/* - * Quick version for internal journal use (doesn't lock the journal). 
- * Aborts hard --- we mark the abort as occurred, but do _nothing_ else, - * and don't attempt to make any other journal updates. - */ -void __jbd2_journal_abort_hard(journal_t *journal) -{ - transaction_t *transaction; - - if (journal->j_flags & JBD2_ABORT) - return; - - printk(KERN_ERR "Aborting journal on device %s.\n", - journal->j_devname); - - write_lock(&journal->j_state_lock); - journal->j_flags |= JBD2_ABORT; - transaction = journal->j_running_transaction; - if (transaction) - __jbd2_log_start_commit(journal, transaction->t_tid); - write_unlock(&journal->j_state_lock); -} - -/* Soft abort: record the abort error status in the journal superblock, - * but don't do any other IO. */ -static void __journal_abort_soft (journal_t *journal, int errno) -{ - int old_errno; - - write_lock(&journal->j_state_lock); - old_errno = journal->j_errno; - if (!journal->j_errno || errno == -ESHUTDOWN) - journal->j_errno = errno; - - if (journal->j_flags & JBD2_ABORT) { - write_unlock(&journal->j_state_lock); - if (old_errno != -ESHUTDOWN && errno == -ESHUTDOWN) - jbd2_journal_update_sb_errno(journal); - return; - } - write_unlock(&journal->j_state_lock); - - __jbd2_journal_abort_hard(journal); - - jbd2_journal_update_sb_errno(journal); - write_lock(&journal->j_state_lock); - journal->j_flags |= JBD2_REC_ERR; - write_unlock(&journal->j_state_lock); -} - /** * void jbd2_journal_abort () - Shutdown the journal immediately. * @journal: the journal to shutdown. @@ -2171,7 +2114,47 @@ static void __journal_abort_soft (journal_t *journal, int errno) void jbd2_journal_abort(journal_t *journal, int errno) { - __journal_abort_soft(journal, errno); + transaction_t *transaction; + + /* + * ESHUTDOWN always takes precedence because a file system check + * caused by any other journal abort error is not required after + * a shutdown triggered. + */ + write_lock(&journal->j_state_lock); + if (journal->j_flags & JBD2_ABORT) { + int old_errno = journal->j_errno; + + write_unlock(&journal->j_state_lock); + if (old_errno != -ESHUTDOWN && errno == -ESHUTDOWN) { + journal->j_errno = errno; + jbd2_journal_update_sb_errno(journal); + } + return; + } + + /* + * Mark the abort as occurred and start current running transaction + * to release all journaled buffer. + */ + pr_err("Aborting journal on device %s.\n", journal->j_devname); + + journal->j_flags |= JBD2_ABORT; + journal->j_errno = errno; + transaction = journal->j_running_transaction; + if (transaction) + __jbd2_log_start_commit(journal, transaction->t_tid); + write_unlock(&journal->j_state_lock); + + /* + * Record errno to the journal super block, so that fsck and jbd2 + * layer could realise that a filesystem check is needed. 
+ */ + jbd2_journal_update_sb_errno(journal); + + write_lock(&journal->j_state_lock); + journal->j_flags |= JBD2_REC_ERR; + write_unlock(&journal->j_state_lock); } /** diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c index de992a70ddfe..90453309345d 100644 --- a/fs/jbd2/transaction.c +++ b/fs/jbd2/transaction.c @@ -1983,6 +1983,9 @@ static void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh) */ static void __jbd2_journal_unfile_buffer(struct journal_head *jh) { + J_ASSERT_JH(jh, jh->b_transaction != NULL); + J_ASSERT_JH(jh, jh->b_next_transaction == NULL); + __jbd2_journal_temp_unlink_buffer(jh); jh->b_transaction = NULL; jbd2_journal_put_journal_head(jh); @@ -2074,6 +2077,7 @@ int jbd2_journal_try_to_free_buffers(journal_t *journal, { struct buffer_head *head; struct buffer_head *bh; + bool has_write_io_error = false; int ret = 0; J_ASSERT(PageLocked(page)); @@ -2098,11 +2102,26 @@ int jbd2_journal_try_to_free_buffers(journal_t *journal, jbd_unlock_bh_state(bh); if (buffer_jbd(bh)) goto busy; + + /* + * If we free a metadata buffer which has been failed to + * write out, the jbd2 checkpoint procedure will not detect + * this failure and may lead to filesystem inconsistency + * after cleanup journal tail. + */ + if (buffer_write_io_error(bh)) { + pr_err("JBD2: Error while async write back metadata bh %llu.", + (unsigned long long)bh->b_blocknr); + has_write_io_error = true; + } } while ((bh = bh->b_this_page) != head); ret = try_to_free_buffers(page); busy: + if (has_write_io_error) + jbd2_journal_abort(journal, -EIO); + return ret; } @@ -2530,6 +2549,13 @@ void __jbd2_journal_refile_buffer(struct journal_head *jh) was_dirty = test_clear_buffer_jbddirty(bh); __jbd2_journal_temp_unlink_buffer(jh); + + /* + * b_transaction must be set, otherwise the new b_transaction won't + * be holding jh reference + */ + J_ASSERT_JH(jh, jh->b_transaction != NULL); + /* * We set b_transaction here because b_next_transaction will inherit * our jh reference and thus __jbd2_journal_file_buffer() must not diff --git a/fs/jffs2/dir.c b/fs/jffs2/dir.c index f20cff1194bb..776493713153 100644 --- a/fs/jffs2/dir.c +++ b/fs/jffs2/dir.c @@ -590,10 +590,14 @@ static int jffs2_rmdir (struct inode *dir_i, struct dentry *dentry) int ret; uint32_t now = JFFS2_NOW(); + mutex_lock(&f->sem); for (fd = f->dents ; fd; fd = fd->next) { - if (fd->ino) + if (fd->ino) { + mutex_unlock(&f->sem); return -ENOTEMPTY; + } } + mutex_unlock(&f->sem); ret = jffs2_do_unlink(c, dir_f, dentry->d_name.name, dentry->d_name.len, f, now); diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c index e8c792b49616..c35bbaa19486 100644 --- a/fs/kernfs/file.c +++ b/fs/kernfs/file.c @@ -912,7 +912,7 @@ repeat: } fsnotify(inode, FS_MODIFY, inode, FSNOTIFY_EVENT_INODE, - &name, 0); + NULL, 0); iput(inode); } diff --git a/fs/minix/inode.c b/fs/minix/inode.c index 7cb5fd38eb14..7b09a9158e40 100644 --- a/fs/minix/inode.c +++ b/fs/minix/inode.c @@ -150,6 +150,25 @@ static int minix_remount (struct super_block * sb, int * flags, char * data) return 0; } +static bool minix_check_superblock(struct super_block *sb) +{ + struct minix_sb_info *sbi = minix_sb(sb); + + if (sbi->s_imap_blocks == 0 || sbi->s_zmap_blocks == 0) + return false; + + /* + * s_max_size must not exceed the block mapping limitation. This check + * is only needed for V1 filesystems, since V2/V3 support an extra level + * of indirect blocks which places the limit well above U32_MAX. 
+ */ + if (sbi->s_version == MINIX_V1 && + sb->s_maxbytes > (7 + 512 + 512*512) * BLOCK_SIZE) + return false; + + return true; +} + static int minix_fill_super(struct super_block *s, void *data, int silent) { struct buffer_head *bh; @@ -185,7 +204,7 @@ static int minix_fill_super(struct super_block *s, void *data, int silent) sbi->s_zmap_blocks = ms->s_zmap_blocks; sbi->s_firstdatazone = ms->s_firstdatazone; sbi->s_log_zone_size = ms->s_log_zone_size; - sbi->s_max_size = ms->s_max_size; + s->s_maxbytes = ms->s_max_size; s->s_magic = ms->s_magic; if (s->s_magic == MINIX_SUPER_MAGIC) { sbi->s_version = MINIX_V1; @@ -216,7 +235,7 @@ static int minix_fill_super(struct super_block *s, void *data, int silent) sbi->s_zmap_blocks = m3s->s_zmap_blocks; sbi->s_firstdatazone = m3s->s_firstdatazone; sbi->s_log_zone_size = m3s->s_log_zone_size; - sbi->s_max_size = m3s->s_max_size; + s->s_maxbytes = m3s->s_max_size; sbi->s_ninodes = m3s->s_ninodes; sbi->s_nzones = m3s->s_zones; sbi->s_dirsize = 64; @@ -228,11 +247,12 @@ static int minix_fill_super(struct super_block *s, void *data, int silent) } else goto out_no_fs; + if (!minix_check_superblock(s)) + goto out_illegal_sb; + /* * Allocate the buffer map to keep the superblock small. */ - if (sbi->s_imap_blocks == 0 || sbi->s_zmap_blocks == 0) - goto out_illegal_sb; i = (sbi->s_imap_blocks + sbi->s_zmap_blocks) * sizeof(bh); map = kzalloc(i, GFP_KERNEL); if (!map) @@ -468,6 +488,13 @@ static struct inode *V1_minix_iget(struct inode *inode) iget_failed(inode); return ERR_PTR(-EIO); } + if (raw_inode->i_nlinks == 0) { + printk("MINIX-fs: deleted inode referenced: %lu\n", + inode->i_ino); + brelse(bh); + iget_failed(inode); + return ERR_PTR(-ESTALE); + } inode->i_mode = raw_inode->i_mode; i_uid_write(inode, raw_inode->i_uid); i_gid_write(inode, raw_inode->i_gid); @@ -501,6 +528,13 @@ static struct inode *V2_minix_iget(struct inode *inode) iget_failed(inode); return ERR_PTR(-EIO); } + if (raw_inode->i_nlinks == 0) { + printk("MINIX-fs: deleted inode referenced: %lu\n", + inode->i_ino); + brelse(bh); + iget_failed(inode); + return ERR_PTR(-ESTALE); + } inode->i_mode = raw_inode->i_mode; i_uid_write(inode, raw_inode->i_uid); i_gid_write(inode, raw_inode->i_gid); diff --git a/fs/minix/itree_common.c b/fs/minix/itree_common.c index 043c3fdbc8e7..446148792f41 100644 --- a/fs/minix/itree_common.c +++ b/fs/minix/itree_common.c @@ -75,6 +75,7 @@ static int alloc_branch(struct inode *inode, int n = 0; int i; int parent = minix_new_block(inode); + int err = -ENOSPC; branch[0].key = cpu_to_block(parent); if (parent) for (n = 1; n < num; n++) { @@ -85,6 +86,11 @@ static int alloc_branch(struct inode *inode, break; branch[n].key = cpu_to_block(nr); bh = sb_getblk(inode->i_sb, parent); + if (!bh) { + minix_free_block(inode, nr); + err = -ENOMEM; + break; + } lock_buffer(bh); memset(bh->b_data, 0, bh->b_size); branch[n].bh = bh; @@ -103,7 +109,7 @@ static int alloc_branch(struct inode *inode, bforget(branch[i].bh); for (i = 0; i < n; i++) minix_free_block(inode, block_to_cpu(branch[i].key)); - return -ENOSPC; + return err; } static inline int splice_branch(struct inode *inode, diff --git a/fs/minix/itree_v1.c b/fs/minix/itree_v1.c index 046cc96ee7ad..1fed906042aa 100644 --- a/fs/minix/itree_v1.c +++ b/fs/minix/itree_v1.c @@ -29,12 +29,12 @@ static int block_to_path(struct inode * inode, long block, int offsets[DEPTH]) if (block < 0) { printk("MINIX-fs: block_to_path: block %ld < 0 on dev %pg\n", block, inode->i_sb->s_bdev); - } else if (block >= 
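/*
 * The MINIX V1 limit enforced above follows from its block map: 7 direct
 * pointers, one indirect block of 512 pointers and one double-indirect
 * block of 512*512 pointers, each addressing a 1024-byte block. A quick
 * check of the arithmetic:
 */
#include <stdio.h>

int main(void)
{
        unsigned long block_size = 1024;
        unsigned long max_blocks = 7 + 512 + 512UL * 512;

        /* 262663 blocks * 1024 bytes = 268966912 bytes (~256 MiB) */
        printf("V1 max file size: %lu bytes\n", max_blocks * block_size);
        return 0;
}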
(minix_sb(inode->i_sb)->s_max_size/BLOCK_SIZE)) { - if (printk_ratelimit()) - printk("MINIX-fs: block_to_path: " - "block %ld too big on dev %pg\n", - block, inode->i_sb->s_bdev); - } else if (block < 7) { + return 0; + } + if ((u64)block * BLOCK_SIZE >= inode->i_sb->s_maxbytes) + return 0; + + if (block < 7) { offsets[n++] = block; } else if ((block -= 7) < 512) { offsets[n++] = 7; diff --git a/fs/minix/itree_v2.c b/fs/minix/itree_v2.c index f7fc7ecccccc..9d00f31a2d9d 100644 --- a/fs/minix/itree_v2.c +++ b/fs/minix/itree_v2.c @@ -32,13 +32,12 @@ static int block_to_path(struct inode * inode, long block, int offsets[DEPTH]) if (block < 0) { printk("MINIX-fs: block_to_path: block %ld < 0 on dev %pg\n", block, sb->s_bdev); - } else if ((u64)block * (u64)sb->s_blocksize >= - minix_sb(sb)->s_max_size) { - if (printk_ratelimit()) - printk("MINIX-fs: block_to_path: " - "block %ld too big on dev %pg\n", - block, sb->s_bdev); - } else if (block < DIRCOUNT) { + return 0; + } + if ((u64)block * (u64)sb->s_blocksize >= sb->s_maxbytes) + return 0; + + if (block < DIRCOUNT) { offsets[n++] = block; } else if ((block -= DIRCOUNT) < INDIRCOUNT(sb)) { offsets[n++] = DIRCOUNT; diff --git a/fs/minix/minix.h b/fs/minix/minix.h index df081e8afcc3..168d45d3de73 100644 --- a/fs/minix/minix.h +++ b/fs/minix/minix.h @@ -32,7 +32,6 @@ struct minix_sb_info { unsigned long s_zmap_blocks; unsigned long s_firstdatazone; unsigned long s_log_zone_size; - unsigned long s_max_size; int s_dirsize; int s_namelen; struct buffer_head ** s_imap; diff --git a/fs/nfs/file.c b/fs/nfs/file.c index 95dc90570786..387a2cfa7e17 100644 --- a/fs/nfs/file.c +++ b/fs/nfs/file.c @@ -140,6 +140,7 @@ static int nfs_file_flush(struct file *file, fl_owner_t id) { struct inode *inode = file_inode(file); + errseq_t since; dprintk("NFS: flush(%pD2)\n", file); @@ -148,7 +149,9 @@ nfs_file_flush(struct file *file, fl_owner_t id) return 0; /* Flush writes to the server and return any errors */ - return nfs_wb_all(inode); + since = filemap_sample_wb_err(file->f_mapping); + nfs_wb_all(inode); + return filemap_check_wb_err(file->f_mapping, since); } ssize_t @@ -580,12 +583,14 @@ static const struct vm_operations_struct nfs_file_vm_ops = { .page_mkwrite = nfs_vm_page_mkwrite, }; -static int nfs_need_check_write(struct file *filp, struct inode *inode) +static int nfs_need_check_write(struct file *filp, struct inode *inode, + int error) { struct nfs_open_context *ctx; ctx = nfs_file_open_context(filp); - if (nfs_ctx_key_to_expire(ctx, inode)) + if (nfs_error_is_fatal_on_server(error) || + nfs_ctx_key_to_expire(ctx, inode)) return 1; return 0; } @@ -596,6 +601,8 @@ ssize_t nfs_file_write(struct kiocb *iocb, struct iov_iter *from) struct inode *inode = file_inode(file); unsigned long written = 0; ssize_t result; + errseq_t since; + int error; result = nfs_key_timeout_notify(file, inode); if (result) @@ -620,6 +627,7 @@ ssize_t nfs_file_write(struct kiocb *iocb, struct iov_iter *from) if (iocb->ki_pos > i_size_read(inode)) nfs_revalidate_mapping(inode, file->f_mapping); + since = filemap_sample_wb_err(file->f_mapping); nfs_start_io_write(inode); result = generic_write_checks(iocb, from); if (result > 0) { @@ -638,7 +646,8 @@ ssize_t nfs_file_write(struct kiocb *iocb, struct iov_iter *from) goto out; /* Return error values */ - if (nfs_need_check_write(file, inode)) { + error = filemap_check_wb_err(file->f_mapping, since); + if (nfs_need_check_write(file, inode, error)) { int err = nfs_wb_all(inode); if (err < 0) result = err; diff --git 
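/*
 * A toy model of the filemap_sample_wb_err()/filemap_check_wb_err() pairing
 * the NFS flush and write paths adopt above: remember where the writeback
 * error stream stood before waiting, then report only errors recorded after
 * that point. This is a simplification; the real errseq_t also packs the
 * errno and a "seen" flag into a single word.
 */
#include <stdio.h>

struct wb_err { unsigned int seq; int err; };

static unsigned int sample(const struct wb_err *e)
{
        return e->seq;
}

static int check(const struct wb_err *e, unsigned int since)
{
        return (e->seq != since) ? e->err : 0;
}

int main(void)
{
        struct wb_err map = { .seq = 7, .err = 0 };
        unsigned int since = sample(&map);

        /* writeback fails elsewhere: an error is recorded */
        map.err = -5;   /* -EIO */
        map.seq++;

        printf("flush returns %d\n", check(&map, since));       /* -5 */
        return 0;
}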
a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c index 5657b7f2611f..1741d902b0d8 100644 --- a/fs/nfs/flexfilelayout/flexfilelayout.c +++ b/fs/nfs/flexfilelayout/flexfilelayout.c @@ -984,9 +984,8 @@ retry: goto out_mds; /* Use a direct mapping of ds_idx to pgio mirror_idx */ - if (WARN_ON_ONCE(pgio->pg_mirror_count != - FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg))) - goto out_mds; + if (pgio->pg_mirror_count != FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg)) + goto out_eagain; for (i = 0; i < pgio->pg_mirror_count; i++) { mirror = FF_LAYOUT_COMP(pgio->pg_lseg, i); @@ -1008,7 +1007,10 @@ retry: (NFS_MOUNT_SOFT|NFS_MOUNT_SOFTERR)) pgio->pg_maxretrans = io_maxretrans; return; - +out_eagain: + pnfs_generic_pg_cleanup(pgio); + pgio->pg_error = -EAGAIN; + return; out_mds: trace_pnfs_mds_fallback_pg_init_write(pgio->pg_inode, 0, NFS4_MAX_UINT64, IOMODE_RW, @@ -1018,6 +1020,7 @@ out_mds: pgio->pg_lseg = NULL; pgio->pg_maxretrans = 0; nfs_pageio_reset_write_mds(pgio); + pgio->pg_error = -EAGAIN; } static unsigned int diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index 3802c88e8372..6de41f741280 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -826,6 +826,8 @@ int nfs_getattr(const struct path *path, struct kstat *stat, do_update |= cache_validity & NFS_INO_INVALID_ATIME; if (request_mask & (STATX_CTIME|STATX_MTIME)) do_update |= cache_validity & NFS_INO_REVAL_PAGECACHE; + if (request_mask & STATX_BLOCKS) + do_update |= cache_validity & NFS_INO_INVALID_BLOCKS; if (do_update) { /* Update the attribute cache */ if (!(server->flags & NFS_MOUNT_NOAC)) @@ -1750,7 +1752,8 @@ out_noforce: status = nfs_post_op_update_inode_locked(inode, fattr, NFS_INO_INVALID_CHANGE | NFS_INO_INVALID_CTIME - | NFS_INO_INVALID_MTIME); + | NFS_INO_INVALID_MTIME + | NFS_INO_INVALID_BLOCKS); return status; } @@ -1857,7 +1860,8 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr) nfsi->cache_validity &= ~(NFS_INO_INVALID_ATTR | NFS_INO_INVALID_ATIME | NFS_INO_REVAL_FORCED - | NFS_INO_REVAL_PAGECACHE); + | NFS_INO_REVAL_PAGECACHE + | NFS_INO_INVALID_BLOCKS); /* Do atomic weak cache consistency updates */ nfs_wcc_update_inode(inode, fattr); @@ -2019,8 +2023,12 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr) inode->i_blocks = nfs_calc_block_size(fattr->du.nfs3.used); } else if (fattr->valid & NFS_ATTR_FATTR_BLOCKS_USED) inode->i_blocks = fattr->du.nfs2.blocks; - else + else { + nfsi->cache_validity |= save_cache_validity & + (NFS_INO_INVALID_BLOCKS + | NFS_INO_REVAL_FORCED); cache_revalidated = false; + } /* Update attrtimeo value if we're out of the unstable period */ if (attr_changed) { diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c index fb55c04cdc6b..534b6fd70ffd 100644 --- a/fs/nfs/nfs4file.c +++ b/fs/nfs/nfs4file.c @@ -109,6 +109,7 @@ static int nfs4_file_flush(struct file *file, fl_owner_t id) { struct inode *inode = file_inode(file); + errseq_t since; dprintk("NFS: flush(%pD2)\n", file); @@ -124,7 +125,9 @@ nfs4_file_flush(struct file *file, fl_owner_t id) return filemap_fdatawrite(file->f_mapping); /* Flush writes to the server and return any errors */ - return nfs_wb_all(inode); + since = filemap_sample_wb_err(file->f_mapping); + nfs_wb_all(inode); + return filemap_check_wb_err(file->f_mapping, since); } #ifdef CONFIG_NFS_V4_2 diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index e257653f25ab..00435556db0c 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -774,6 +774,14 @@ static void nfs4_slot_sequence_acked(struct nfs4_slot *slot, 
slot->seq_nr_last_acked = seqnr; } +static void nfs4_probe_sequence(struct nfs_client *client, const struct cred *cred, + struct nfs4_slot *slot) +{ + struct rpc_task *task = _nfs41_proc_sequence(client, cred, slot, true); + if (!IS_ERR(task)) + rpc_put_task_async(task); +} + static int nfs41_sequence_process(struct rpc_task *task, struct nfs4_sequence_res *res) { @@ -790,6 +798,7 @@ static int nfs41_sequence_process(struct rpc_task *task, goto out; session = slot->table->session; + clp = session->clp; trace_nfs4_sequence_done(session, res); @@ -804,7 +813,6 @@ static int nfs41_sequence_process(struct rpc_task *task, nfs4_slot_sequence_acked(slot, slot->seq_nr); /* Update the slot's sequence and clientid lease timer */ slot->seq_done = 1; - clp = session->clp; do_renew_lease(clp, res->sr_timestamp); /* Check sequence flags */ nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags, @@ -852,10 +860,18 @@ static int nfs41_sequence_process(struct rpc_task *task, /* * Were one or more calls using this slot interrupted? * If the server never received the request, then our - * transmitted slot sequence number may be too high. + * transmitted slot sequence number may be too high. However, + * if the server did receive the request then it might + * accidentally give us a reply with a mismatched operation. + * We can sort this out by sending a lone sequence operation + * to the server on the same slot. */ if ((s32)(slot->seq_nr - slot->seq_nr_last_acked) > 1) { slot->seq_nr--; + if (task->tk_msg.rpc_proc != &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE]) { + nfs4_probe_sequence(clp, task->tk_msg.rpc_cred, slot); + res->sr_slot = NULL; + } goto retry_nowait; } /* @@ -3241,8 +3257,10 @@ static int _nfs4_do_setattr(struct inode *inode, /* Servers should only apply open mode checks for file size changes */ truncate = (arg->iap->ia_valid & ATTR_SIZE) ? 
true : false; - if (!truncate) + if (!truncate) { + nfs4_inode_make_writeable(inode); goto zero_stateid; + } if (nfs4_copy_delegation_stateid(inode, FMODE_WRITE, &arg->stateid, &delegation_cred)) { /* Use that stateid */ @@ -5795,8 +5813,6 @@ static int _nfs4_get_security_label(struct inode *inode, void *buf, return ret; if (!(fattr.valid & NFS_ATTR_FATTR_V4_SECURITY_LABEL)) return -ENOENT; - if (buflen < label.len) - return -ERANGE; return 0; } @@ -7218,7 +7234,12 @@ int nfs4_lock_delegation_recall(struct file_lock *fl, struct nfs4_state *state, err = nfs4_set_lock_state(state, fl); if (err != 0) return err; - err = _nfs4_do_setlk(state, F_SETLK, fl, NFS_LOCK_NEW); + do { + err = _nfs4_do_setlk(state, F_SETLK, fl, NFS_LOCK_NEW); + if (err != -NFS4ERR_DELAY) + break; + ssleep(1); + } while (err == -NFS4ERR_DELAY); return nfs4_handle_delegation_recall_error(server, state, stateid, fl, err); } @@ -7870,7 +7891,7 @@ nfs4_bind_one_conn_to_session_done(struct rpc_task *task, void *calldata) } static const struct rpc_call_ops nfs4_bind_one_conn_to_session_ops = { - .rpc_call_done = &nfs4_bind_one_conn_to_session_done, + .rpc_call_done = nfs4_bind_one_conn_to_session_done, }; /* diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index 7c0ff1a3b591..677751bc3a33 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -4169,7 +4169,11 @@ static int decode_attr_security_label(struct xdr_stream *xdr, uint32_t *bitmap, return -EIO; if (len < NFS4_MAXLABELLEN) { if (label) { - memcpy(label->label, p, len); + if (label->len) { + if (label->len < len) + return -ERANGE; + memcpy(label->label, p, len); + } label->len = len; label->pi = pi; label->lfs = lfs; diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index 443639cbb0cf..9c2b07ce57b2 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c @@ -1198,31 +1198,27 @@ out: return status; } +static bool +pnfs_layout_segments_returnable(struct pnfs_layout_hdr *lo, + enum pnfs_iomode iomode, + u32 seq) +{ + struct pnfs_layout_range recall_range = { + .length = NFS4_MAX_UINT64, + .iomode = iomode, + }; + return pnfs_mark_matching_lsegs_return(lo, &lo->plh_return_segs, + &recall_range, seq) != -EBUSY; +} + /* Return true if layoutreturn is needed */ static bool pnfs_layout_need_return(struct pnfs_layout_hdr *lo) { - struct pnfs_layout_segment *s; - enum pnfs_iomode iomode; - u32 seq; - if (!test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags)) return false; - - seq = lo->plh_return_seq; - iomode = lo->plh_return_iomode; - - /* Defer layoutreturn until all recalled lsegs are done */ - list_for_each_entry(s, &lo->plh_segs, pls_list) { - if (seq && pnfs_seqid_is_newer(s->pls_seq, seq)) - continue; - if (iomode != IOMODE_ANY && s->pls_range.iomode != iomode) - continue; - if (test_bit(NFS_LSEG_LAYOUTRETURN, &s->pls_flags)) - return false; - } - - return true; + return pnfs_layout_segments_returnable(lo, lo->plh_return_iomode, + lo->plh_return_seq); } static void pnfs_layoutreturn_before_put_layout_hdr(struct pnfs_layout_hdr *lo) @@ -2362,16 +2358,6 @@ out_forget: return ERR_PTR(-EAGAIN); } -static int -mark_lseg_invalid_or_return(struct pnfs_layout_segment *lseg, - struct list_head *tmp_list) -{ - if (!mark_lseg_invalid(lseg, tmp_list)) - return 0; - pnfs_cache_lseg_for_layoutreturn(lseg->pls_layout, lseg); - return 1; -} - /** * pnfs_mark_matching_lsegs_return - Free or return matching layout segments * @lo: pointer to layout header @@ -2408,7 +2394,7 @@ pnfs_mark_matching_lsegs_return(struct pnfs_layout_hdr *lo, lseg, lseg->pls_range.iomode, lseg->pls_range.offset, 
lseg->pls_range.length); - if (mark_lseg_invalid_or_return(lseg, tmp_list)) + if (mark_lseg_invalid(lseg, tmp_list)) continue; remaining++; set_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags); diff --git a/fs/nfsd/cache.h b/fs/nfsd/cache.h index 10ec5ecdf117..65c331f75e9c 100644 --- a/fs/nfsd/cache.h +++ b/fs/nfsd/cache.h @@ -78,6 +78,8 @@ enum { /* Checksum this amount of the request */ #define RC_CSUMLEN (256U) +int nfsd_drc_slab_create(void); +void nfsd_drc_slab_free(void); int nfsd_reply_cache_init(struct nfsd_net *); void nfsd_reply_cache_shutdown(struct nfsd_net *); int nfsd_cache_lookup(struct svc_rqst *); diff --git a/fs/nfsd/netns.h b/fs/nfsd/netns.h index 9a4ef815fb8c..ed53e206a299 100644 --- a/fs/nfsd/netns.h +++ b/fs/nfsd/netns.h @@ -139,7 +139,6 @@ struct nfsd_net { * Duplicate reply cache */ struct nfsd_drc_bucket *drc_hashtbl; - struct kmem_cache *drc_slab; /* max number of entries allowed in the cache */ unsigned int max_drc_entries; diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c index afca3287184b..efe55d101b0e 100644 --- a/fs/nfsd/nfs4callback.c +++ b/fs/nfsd/nfs4callback.c @@ -1230,6 +1230,8 @@ static void nfsd4_process_cb_update(struct nfsd4_callback *cb) err = setup_callback_client(clp, &conn, ses); if (err) { nfsd4_mark_cb_down(clp, err); + if (c) + svc_xprt_put(c->cn_xprt); return; } } diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 8650a97e2ba9..68cf11660764 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -506,6 +506,17 @@ find_any_file(struct nfs4_file *f) return ret; } +static struct nfsd_file *find_deleg_file(struct nfs4_file *f) +{ + struct nfsd_file *ret = NULL; + + spin_lock(&f->fi_lock); + if (f->fi_deleg_file) + ret = nfsd_file_get(f->fi_deleg_file); + spin_unlock(&f->fi_lock); + return ret; +} + static atomic_long_t num_delegations; unsigned long max_delegations; @@ -2378,6 +2389,8 @@ static int nfs4_show_open(struct seq_file *s, struct nfs4_stid *st) oo = ols->st_stateowner; nf = st->sc_file; file = find_any_file(nf); + if (!file) + return 0; seq_printf(s, "- 0x%16phN: { type: open, ", &st->sc_stateid); @@ -2411,6 +2424,8 @@ static int nfs4_show_lock(struct seq_file *s, struct nfs4_stid *st) oo = ols->st_stateowner; nf = st->sc_file; file = find_any_file(nf); + if (!file) + return 0; seq_printf(s, "- 0x%16phN: { type: lock, ", &st->sc_stateid); @@ -2439,7 +2454,9 @@ static int nfs4_show_deleg(struct seq_file *s, struct nfs4_stid *st) ds = delegstateid(st); nf = st->sc_file; - file = nf->fi_deleg_file; + file = find_deleg_file(nf); + if (!file) + return 0; seq_printf(s, "- 0x%16phN: { type: deleg, ", &st->sc_stateid); @@ -2451,6 +2468,7 @@ static int nfs4_show_deleg(struct seq_file *s, struct nfs4_stid *st) nfs4_show_superblock(s, file); seq_printf(s, " }\n"); + nfsd_file_put(file); return 0; } @@ -7705,9 +7723,14 @@ nfs4_state_start_net(struct net *net) struct nfsd_net *nn = net_generic(net, nfsd_net_id); int ret; - ret = nfs4_state_create_net(net); + ret = get_nfsdfs(net); if (ret) return ret; + ret = nfs4_state_create_net(net); + if (ret) { + mntput(nn->nfsd_mnt); + return ret; + } locks_start_grace(net, &nn->nfsd4_manager); nfsd4_client_tracking_init(net); if (nn->track_reclaim_completes && nn->reclaim_str_hashtbl_size == 0) @@ -7776,6 +7799,7 @@ nfs4_state_shutdown_net(struct net *net) nfsd4_client_tracking_exit(net); nfs4_state_destroy_net(net); + mntput(nn->nfsd_mnt); } void diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c index 533d0fc3c96b..d6f244559e75 100644 --- a/fs/nfsd/nfs4xdr.c +++ b/fs/nfsd/nfs4xdr.c 
@@ -3530,17 +3530,17 @@ static __be32 nfsd4_encode_readv(struct nfsd4_compoundres *resp, u32 zzz = 0; int pad; + /* + * svcrdma requires every READ payload to start somewhere + * in xdr->pages. + */ + if (xdr->iov == xdr->buf->head) { + xdr->iov = NULL; + xdr->end = xdr->p; + } + len = maxcount; v = 0; - - thislen = min_t(long, len, ((void *)xdr->end - (void *)xdr->p)); - p = xdr_reserve_space(xdr, (thislen+3)&~3); - WARN_ON_ONCE(!p); - resp->rqstp->rq_vec[v].iov_base = p; - resp->rqstp->rq_vec[v].iov_len = thislen; - v++; - len -= thislen; - while (len) { thislen = min_t(long, len, PAGE_SIZE); p = xdr_reserve_space(xdr, (thislen+3)&~3); @@ -3559,6 +3559,8 @@ static __be32 nfsd4_encode_readv(struct nfsd4_compoundres *resp, read->rd_length = maxcount; if (nfserr) return nfserr; + if (svc_encode_read_payload(resp->rqstp, starting_len + 8, maxcount)) + return nfserr_io; xdr_truncate_encode(xdr, starting_len + 8 + ((maxcount+3)&~3)); tmp = htonl(eof); diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c index 96352ab7bd81..4a258065188e 100644 --- a/fs/nfsd/nfscache.c +++ b/fs/nfsd/nfscache.c @@ -36,6 +36,8 @@ struct nfsd_drc_bucket { spinlock_t cache_lock; }; +static struct kmem_cache *drc_slab; + static int nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *vec); static unsigned long nfsd_reply_cache_count(struct shrinker *shrink, struct shrink_control *sc); @@ -95,7 +97,7 @@ nfsd_reply_cache_alloc(struct svc_rqst *rqstp, __wsum csum, { struct svc_cacherep *rp; - rp = kmem_cache_alloc(nn->drc_slab, GFP_KERNEL); + rp = kmem_cache_alloc(drc_slab, GFP_KERNEL); if (rp) { rp->c_state = RC_UNUSED; rp->c_type = RC_NOCACHE; @@ -129,7 +131,7 @@ nfsd_reply_cache_free_locked(struct nfsd_drc_bucket *b, struct svc_cacherep *rp, atomic_dec(&nn->num_drc_entries); nn->drc_mem_usage -= sizeof(*rp); } - kmem_cache_free(nn->drc_slab, rp); + kmem_cache_free(drc_slab, rp); } static void @@ -141,6 +143,18 @@ nfsd_reply_cache_free(struct nfsd_drc_bucket *b, struct svc_cacherep *rp, spin_unlock(&b->cache_lock); } +int nfsd_drc_slab_create(void) +{ + drc_slab = kmem_cache_create("nfsd_drc", + sizeof(struct svc_cacherep), 0, 0, NULL); + return drc_slab ? 
0: -ENOMEM; +} + +void nfsd_drc_slab_free(void) +{ + kmem_cache_destroy(drc_slab); +} + int nfsd_reply_cache_init(struct nfsd_net *nn) { unsigned int hashsize; @@ -159,18 +173,13 @@ int nfsd_reply_cache_init(struct nfsd_net *nn) if (status) goto out_nomem; - nn->drc_slab = kmem_cache_create("nfsd_drc", - sizeof(struct svc_cacherep), 0, 0, NULL); - if (!nn->drc_slab) - goto out_shrinker; - nn->drc_hashtbl = kcalloc(hashsize, sizeof(*nn->drc_hashtbl), GFP_KERNEL); if (!nn->drc_hashtbl) { nn->drc_hashtbl = vzalloc(array_size(hashsize, sizeof(*nn->drc_hashtbl))); if (!nn->drc_hashtbl) - goto out_slab; + goto out_shrinker; } for (i = 0; i < hashsize; i++) { @@ -180,8 +189,6 @@ int nfsd_reply_cache_init(struct nfsd_net *nn) nn->drc_hashsize = hashsize; return 0; -out_slab: - kmem_cache_destroy(nn->drc_slab); out_shrinker: unregister_shrinker(&nn->nfsd_reply_cache_shrinker); out_nomem: @@ -209,8 +216,6 @@ void nfsd_reply_cache_shutdown(struct nfsd_net *nn) nn->drc_hashtbl = NULL; nn->drc_hashsize = 0; - kmem_cache_destroy(nn->drc_slab); - nn->drc_slab = NULL; } /* @@ -464,8 +469,7 @@ found_entry: rtn = RC_REPLY; break; default: - printk(KERN_WARNING "nfsd: bad repcache type %d\n", rp->c_type); - nfsd_reply_cache_free_locked(b, rp, nn); + WARN_ONCE(1, "nfsd: bad repcache type %d\n", rp->c_type); } goto out; diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c index d77c5261c03c..be418fccc9d8 100644 --- a/fs/nfsd/nfsctl.c +++ b/fs/nfsd/nfsctl.c @@ -1335,6 +1335,7 @@ void nfsd_client_rmdir(struct dentry *dentry) WARN_ON_ONCE(ret); fsnotify_rmdir(dir, dentry); d_delete(dentry); + dput(dentry); inode_unlock(dir); } @@ -1424,6 +1425,18 @@ static struct file_system_type nfsd_fs_type = { }; MODULE_ALIAS_FS("nfsd"); +int get_nfsdfs(struct net *net) +{ + struct nfsd_net *nn = net_generic(net, nfsd_net_id); + struct vfsmount *mnt; + + mnt = vfs_kern_mount(&nfsd_fs_type, SB_KERNMOUNT, "nfsd", NULL); + if (IS_ERR(mnt)) + return PTR_ERR(mnt); + nn->nfsd_mnt = mnt; + return 0; +} + #ifdef CONFIG_PROC_FS static int create_proc_exports_entry(void) { @@ -1452,7 +1465,6 @@ unsigned int nfsd_net_id; static __net_init int nfsd_init_net(struct net *net) { int retval; - struct vfsmount *mnt; struct nfsd_net *nn = net_generic(net, nfsd_net_id); retval = nfsd_export_init(net); @@ -1479,16 +1491,8 @@ static __net_init int nfsd_init_net(struct net *net) init_waitqueue_head(&nn->ntf_wq); seqlock_init(&nn->boot_lock); - mnt = vfs_kern_mount(&nfsd_fs_type, SB_KERNMOUNT, "nfsd", NULL); - if (IS_ERR(mnt)) { - retval = PTR_ERR(mnt); - goto out_mount_err; - } - nn->nfsd_mnt = mnt; return 0; -out_mount_err: - nfsd_reply_cache_shutdown(nn); out_drc_error: nfsd_idmap_shutdown(net); out_idmap_error: @@ -1501,7 +1505,6 @@ static __net_exit void nfsd_exit_net(struct net *net) { struct nfsd_net *nn = net_generic(net, nfsd_net_id); - mntput(nn->nfsd_mnt); nfsd_reply_cache_shutdown(nn); nfsd_idmap_shutdown(net); nfsd_export_shutdown(net); @@ -1534,6 +1537,9 @@ static int __init init_nfsd(void) goto out_free_slabs; nfsd_fault_inject_init(); /* nfsd fault injection controls */ nfsd_stat_init(); /* Statistics */ + retval = nfsd_drc_slab_create(); + if (retval) + goto out_free_stat; nfsd_lockd_init(); /* lockd->nfsd callbacks */ retval = create_proc_exports_entry(); if (retval) @@ -1547,6 +1553,8 @@ out_free_all: remove_proc_entry("fs/nfs", NULL); out_free_lockd: nfsd_lockd_shutdown(); + nfsd_drc_slab_free(); +out_free_stat: nfsd_stat_shutdown(); nfsd_fault_inject_cleanup(); nfsd4_exit_pnfs(); @@ -1561,6 +1569,7 @@ out_unregister_pernet: static 
void __exit exit_nfsd(void) { + nfsd_drc_slab_free(); remove_proc_entry("fs/nfs/exports", NULL); remove_proc_entry("fs/nfs", NULL); nfsd_stat_shutdown(); diff --git a/fs/nfsd/nfsd.h b/fs/nfsd/nfsd.h index af2947551e9c..4ff0c5318a02 100644 --- a/fs/nfsd/nfsd.h +++ b/fs/nfsd/nfsd.h @@ -87,6 +87,8 @@ int nfsd_pool_stats_release(struct inode *, struct file *); void nfsd_destroy(struct net *net); +int get_nfsdfs(struct net *); + struct nfsdfs_client { struct kref cl_ref; void (*cl_release)(struct kref *kref); @@ -97,6 +99,7 @@ struct dentry *nfsd_client_mkdir(struct nfsd_net *nn, struct nfsdfs_client *ncl, u32 id, const struct tree_descr *); void nfsd_client_rmdir(struct dentry *dentry); + #if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL) #ifdef CONFIG_NFSD_V2_ACL extern const struct svc_version nfsd_acl_version2; diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c index 005d1802ab40..b6f4b552c9af 100644 --- a/fs/nfsd/vfs.c +++ b/fs/nfsd/vfs.c @@ -1184,6 +1184,9 @@ nfsd_create_locked(struct svc_rqst *rqstp, struct svc_fh *fhp, iap->ia_mode = 0; iap->ia_mode = (iap->ia_mode & S_IALLUGO) | type; + if (!IS_POSIXACL(dirp)) + iap->ia_mode &= ~current_umask(); + err = 0; host_err = 0; switch (type) { @@ -1416,6 +1419,9 @@ do_nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp, goto out; } + if (!IS_POSIXACL(dirp)) + iap->ia_mode &= ~current_umask(); + host_err = vfs_create(dirp, dchild, iap->ia_mode, true); if (host_err < 0) { fh_drop_write(fhp); diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c index 8a2e284ccfcd..50a863fc1779 100644 --- a/fs/ocfs2/dlmglue.c +++ b/fs/ocfs2/dlmglue.c @@ -689,6 +689,12 @@ static void ocfs2_nfs_sync_lock_res_init(struct ocfs2_lock_res *res, &ocfs2_nfs_sync_lops, osb); } +static void ocfs2_nfs_sync_lock_init(struct ocfs2_super *osb) +{ + ocfs2_nfs_sync_lock_res_init(&osb->osb_nfs_sync_lockres, osb); + init_rwsem(&osb->nfs_sync_rwlock); +} + void ocfs2_trim_fs_lock_res_init(struct ocfs2_super *osb) { struct ocfs2_lock_res *lockres = &osb->osb_trim_fs_lockres; @@ -2855,14 +2861,25 @@ int ocfs2_nfs_sync_lock(struct ocfs2_super *osb, int ex) if (ocfs2_is_hard_readonly(osb)) return -EROFS; + if (ex) + down_write(&osb->nfs_sync_rwlock); + else + down_read(&osb->nfs_sync_rwlock); + if (ocfs2_mount_local(osb)) return 0; status = ocfs2_cluster_lock(osb, lockres, ex ? LKM_EXMODE : LKM_PRMODE, 0, 0); - if (status < 0) + if (status < 0) { mlog(ML_ERROR, "lock on nfs sync lock failed %d\n", status); + if (ex) + up_write(&osb->nfs_sync_rwlock); + else + up_read(&osb->nfs_sync_rwlock); + } + return status; } @@ -2873,6 +2890,10 @@ void ocfs2_nfs_sync_unlock(struct ocfs2_super *osb, int ex) if (!ocfs2_mount_local(osb)) ocfs2_cluster_unlock(osb, lockres, ex ? 
LKM_EXMODE : LKM_PRMODE); + if (ex) + up_write(&osb->nfs_sync_rwlock); + else + up_read(&osb->nfs_sync_rwlock); } int ocfs2_trim_fs_lock(struct ocfs2_super *osb, @@ -3340,7 +3361,7 @@ int ocfs2_dlm_init(struct ocfs2_super *osb) local: ocfs2_super_lock_res_init(&osb->osb_super_lockres, osb); ocfs2_rename_lock_res_init(&osb->osb_rename_lockres, osb); - ocfs2_nfs_sync_lock_res_init(&osb->osb_nfs_sync_lockres, osb); + ocfs2_nfs_sync_lock_init(osb); ocfs2_orphan_scan_lock_res_init(&osb->osb_orphan_scan.os_lockres, osb); osb->cconn = conn; diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h index 9150cfa4df7d..0a8cd8e59a92 100644 --- a/fs/ocfs2/ocfs2.h +++ b/fs/ocfs2/ocfs2.h @@ -326,8 +326,8 @@ struct ocfs2_super spinlock_t osb_lock; u32 s_next_generation; unsigned long osb_flags; - s16 s_inode_steal_slot; - s16 s_meta_steal_slot; + u16 s_inode_steal_slot; + u16 s_meta_steal_slot; atomic_t s_num_inodes_stolen; atomic_t s_num_meta_stolen; @@ -394,6 +394,7 @@ struct ocfs2_super struct ocfs2_lock_res osb_super_lockres; struct ocfs2_lock_res osb_rename_lockres; struct ocfs2_lock_res osb_nfs_sync_lockres; + struct rw_semaphore nfs_sync_rwlock; struct ocfs2_lock_res osb_trim_fs_lockres; struct mutex obs_trim_fs_mutex; struct ocfs2_dlm_debug *osb_dlm_debug; diff --git a/fs/ocfs2/ocfs2_fs.h b/fs/ocfs2/ocfs2_fs.h index 0db4a7ec58a2..dcef83c8796d 100644 --- a/fs/ocfs2/ocfs2_fs.h +++ b/fs/ocfs2/ocfs2_fs.h @@ -290,7 +290,7 @@ #define OCFS2_MAX_SLOTS 255 /* Slot map indicator for an empty slot */ -#define OCFS2_INVALID_SLOT -1 +#define OCFS2_INVALID_SLOT ((u16)-1) #define OCFS2_VOL_UUID_LEN 16 #define OCFS2_MAX_VOL_LABEL_LEN 64 @@ -326,8 +326,8 @@ struct ocfs2_system_inode_info { enum { BAD_BLOCK_SYSTEM_INODE = 0, GLOBAL_INODE_ALLOC_SYSTEM_INODE, +#define OCFS2_FIRST_ONLINE_SYSTEM_INODE GLOBAL_INODE_ALLOC_SYSTEM_INODE SLOT_MAP_SYSTEM_INODE, -#define OCFS2_FIRST_ONLINE_SYSTEM_INODE SLOT_MAP_SYSTEM_INODE HEARTBEAT_SYSTEM_INODE, GLOBAL_BITMAP_SYSTEM_INODE, USER_QUOTA_SYSTEM_INODE, diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c index 69c21a3843af..5e0eaea47405 100644 --- a/fs/ocfs2/suballoc.c +++ b/fs/ocfs2/suballoc.c @@ -879,9 +879,9 @@ static void __ocfs2_set_steal_slot(struct ocfs2_super *osb, int slot, int type) { spin_lock(&osb->osb_lock); if (type == INODE_ALLOC_SYSTEM_INODE) - osb->s_inode_steal_slot = slot; + osb->s_inode_steal_slot = (u16)slot; else if (type == EXTENT_ALLOC_SYSTEM_INODE) - osb->s_meta_steal_slot = slot; + osb->s_meta_steal_slot = (u16)slot; spin_unlock(&osb->osb_lock); } @@ -2827,9 +2827,12 @@ int ocfs2_test_inode_bit(struct ocfs2_super *osb, u64 blkno, int *res) goto bail; } - inode_alloc_inode = - ocfs2_get_system_file_inode(osb, INODE_ALLOC_SYSTEM_INODE, - suballoc_slot); + if (suballoc_slot == (u16)OCFS2_INVALID_SLOT) + inode_alloc_inode = ocfs2_get_system_file_inode(osb, + GLOBAL_INODE_ALLOC_SYSTEM_INODE, suballoc_slot); + else + inode_alloc_inode = ocfs2_get_system_file_inode(osb, + INODE_ALLOC_SYSTEM_INODE, suballoc_slot); if (!inode_alloc_inode) { /* the error code could be inaccurate, but we are not able to * get the correct one. 
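A minimal sketch, not part of the patch, of the pattern the ocfs2 hunks above introduce: the cluster-wide NFS sync lock is paired with a node-local rw_semaphore, taken before the cluster lock and dropped again if the cluster lock cannot be obtained. Here my_cluster_lock()/my_cluster_unlock() are hypothetical stand-ins for the real ocfs2_cluster_lock() path.

#include <linux/types.h>
#include <linux/rwsem.h>

/* Hypothetical cluster-lock primitives standing in for ocfs2_cluster_lock(). */
int my_cluster_lock(bool exclusive);
void my_cluster_unlock(bool exclusive);

struct two_level_lock {
	struct rw_semaphore local;	/* serialises holders on this node */
};

static int two_level_lock_acquire(struct two_level_lock *l, bool exclusive)
{
	int ret;

	/* Local lock first, so shared and exclusive holders exclude each
	 * other even when the cluster lock is already cached locally. */
	if (exclusive)
		down_write(&l->local);
	else
		down_read(&l->local);

	ret = my_cluster_lock(exclusive);
	if (ret) {
		/* Never return to the caller with the local lock held. */
		if (exclusive)
			up_write(&l->local);
		else
			up_read(&l->local);
	}
	return ret;
}

static void two_level_lock_release(struct two_level_lock *l, bool exclusive)
{
	my_cluster_unlock(exclusive);
	if (exclusive)
		up_write(&l->local);
	else
		up_read(&l->local);
}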
*/ diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c index c81e86c62380..70d8857b161d 100644 --- a/fs/ocfs2/super.c +++ b/fs/ocfs2/super.c @@ -78,7 +78,7 @@ struct mount_options unsigned long commit_interval; unsigned long mount_opt; unsigned int atime_quantum; - signed short slot; + unsigned short slot; int localalloc_opt; unsigned int resv_level; int dir_resv_level; @@ -1334,7 +1334,7 @@ static int ocfs2_parse_options(struct super_block *sb, goto bail; } if (option) - mopt->slot = (s16)option; + mopt->slot = (u16)option; break; case Opt_commit: if (match_int(&args[0], &option)) { diff --git a/fs/orangefs/file.c b/fs/orangefs/file.c index a5612abc0936..bcd4fd5ad175 100644 --- a/fs/orangefs/file.c +++ b/fs/orangefs/file.c @@ -311,23 +311,8 @@ static ssize_t orangefs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter) { int ret; - struct orangefs_read_options *ro; - orangefs_stats.reads++; - /* - * Remember how they set "count" in read(2) or pread(2) or whatever - - * users can use count as a knob to control orangefs io size and later - * we can try to help them fill as many pages as possible in readpage. - */ - if (!iocb->ki_filp->private_data) { - iocb->ki_filp->private_data = kmalloc(sizeof *ro, GFP_KERNEL); - if (!iocb->ki_filp->private_data) - return(ENOMEM); - ro = iocb->ki_filp->private_data; - ro->blksiz = iter->count; - } - down_read(&file_inode(iocb->ki_filp)->i_rwsem); ret = orangefs_revalidate_mapping(file_inode(iocb->ki_filp)); if (ret) @@ -615,12 +600,6 @@ static int orangefs_lock(struct file *filp, int cmd, struct file_lock *fl) return rc; } -static int orangefs_file_open(struct inode * inode, struct file *file) -{ - file->private_data = NULL; - return generic_file_open(inode, file); -} - static int orangefs_flush(struct file *file, fl_owner_t id) { /* @@ -634,9 +613,6 @@ static int orangefs_flush(struct file *file, fl_owner_t id) struct inode *inode = file->f_mapping->host; int r; - kfree(file->private_data); - file->private_data = NULL; - if (inode->i_state & I_DIRTY_TIME) { spin_lock(&inode->i_lock); inode->i_state &= ~I_DIRTY_TIME; @@ -659,7 +635,7 @@ const struct file_operations orangefs_file_operations = { .lock = orangefs_lock, .unlocked_ioctl = orangefs_ioctl, .mmap = orangefs_file_mmap, - .open = orangefs_file_open, + .open = generic_file_open, .flush = orangefs_flush, .release = orangefs_file_release, .fsync = orangefs_fsync, diff --git a/fs/orangefs/inode.c b/fs/orangefs/inode.c index efb12197da18..636892ffec0b 100644 --- a/fs/orangefs/inode.c +++ b/fs/orangefs/inode.c @@ -259,46 +259,19 @@ static int orangefs_readpage(struct file *file, struct page *page) pgoff_t index; /* which page */ struct page *next_page; char *kaddr; - struct orangefs_read_options *ro = file->private_data; loff_t read_size; - loff_t roundedup; int buffer_index = -1; /* orangefs shared memory slot */ int slot_index; /* index into slot */ int remaining; /* - * If they set some miniscule size for "count" in read(2) - * (for example) then let's try to read a page, or the whole file - * if it is smaller than a page. Once "count" goes over a page - * then lets round up to the highest page size multiple that is - * less than or equal to "count" and do that much orangefs IO and - * try to fill as many pages as we can from it. - * - * "count" should be represented in ro->blksiz. - * - * inode->i_size = file size. + * Get up to this many bytes from Orangefs at a time and try + * to fill them into the page cache at once. 
Tests with dd made + * this seem like a reasonable static number, if there was + * interest perhaps this number could be made setable through + * sysfs... */ - if (ro) { - if (ro->blksiz < PAGE_SIZE) { - if (inode->i_size < PAGE_SIZE) - read_size = inode->i_size; - else - read_size = PAGE_SIZE; - } else { - roundedup = ((PAGE_SIZE - 1) & ro->blksiz) ? - ((ro->blksiz + PAGE_SIZE) & ~(PAGE_SIZE -1)) : - ro->blksiz; - if (roundedup > inode->i_size) - read_size = inode->i_size; - else - read_size = roundedup; - - } - } else { - read_size = PAGE_SIZE; - } - if (!read_size) - read_size = PAGE_SIZE; + read_size = 524288; if (PageDirty(page)) orangefs_launder_page(page); diff --git a/fs/orangefs/orangefs-kernel.h b/fs/orangefs/orangefs-kernel.h index 34a6c99fa29b..3003007681a0 100644 --- a/fs/orangefs/orangefs-kernel.h +++ b/fs/orangefs/orangefs-kernel.h @@ -239,10 +239,6 @@ struct orangefs_write_range { kgid_t gid; }; -struct orangefs_read_options { - ssize_t blksiz; -}; - extern struct orangefs_stats orangefs_stats; /* diff --git a/fs/overlayfs/export.c b/fs/overlayfs/export.c index 73c9775215b3..11dd8177770d 100644 --- a/fs/overlayfs/export.c +++ b/fs/overlayfs/export.c @@ -482,7 +482,7 @@ static struct dentry *ovl_lookup_real_inode(struct super_block *sb, if (IS_ERR_OR_NULL(this)) return this; - if (WARN_ON(ovl_dentry_real_at(this, layer->idx) != real)) { + if (ovl_dentry_real_at(this, layer->idx) != real) { dput(this); this = ERR_PTR(-EIO); } diff --git a/fs/overlayfs/file.c b/fs/overlayfs/file.c index 15e4fa288475..7a08a576f7b2 100644 --- a/fs/overlayfs/file.c +++ b/fs/overlayfs/file.c @@ -21,13 +21,16 @@ static char ovl_whatisit(struct inode *inode, struct inode *realinode) return 'm'; } +/* No atime modificaton nor notify on underlying */ +#define OVL_OPEN_FLAGS (O_NOATIME | FMODE_NONOTIFY) + static struct file *ovl_open_realfile(const struct file *file, struct inode *realinode) { struct inode *inode = file_inode(file); struct file *realfile; const struct cred *old_cred; - int flags = file->f_flags | O_NOATIME | FMODE_NONOTIFY; + int flags = file->f_flags | OVL_OPEN_FLAGS; old_cred = ovl_override_creds(inode->i_sb); realfile = open_with_fake_path(&file->f_path, flags, realinode, @@ -48,8 +51,7 @@ static int ovl_change_flags(struct file *file, unsigned int flags) struct inode *inode = file_inode(file); int err; - /* No atime modificaton on underlying */ - flags |= O_NOATIME | FMODE_NONOTIFY; + flags |= OVL_OPEN_FLAGS; /* If some flag changed that cannot be changed then something's amiss */ if (WARN_ON((file->f_flags ^ flags) & ~OVL_SETFL_MASK)) @@ -102,7 +104,7 @@ static int ovl_real_fdget_meta(const struct file *file, struct fd *real, } /* Did the flags change since open? */ - if (unlikely((file->f_flags ^ real->file->f_flags) & ~O_NOATIME)) + if (unlikely((file->f_flags ^ real->file->f_flags) & ~OVL_OPEN_FLAGS)) return ovl_change_flags(real->file, file->f_flags); return 0; diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c index 7621ff176d15..d6b724beb304 100644 --- a/fs/overlayfs/super.c +++ b/fs/overlayfs/super.c @@ -1258,6 +1258,18 @@ static bool ovl_lower_uuid_ok(struct ovl_fs *ofs, const uuid_t *uuid) if (!ofs->config.nfs_export && !ofs->upper_mnt) return true; + /* + * We allow using single lower with null uuid for index and nfs_export + * for example to support those features with single lower squashfs. 
+ * To avoid regressions in setups of overlay with re-formatted lower + * squashfs, do not allow decoding origin with lower null uuid unless + * user opted-in to one of the new features that require following the + * lower inode of non-dir upper. + */ + if (!ofs->config.index && !ofs->config.metacopy && !ofs->config.xino && + uuid_is_null(uuid)) + return false; + for (i = 0; i < ofs->numlowerfs; i++) { /* * We use uuid to associate an overlay lower file handle with a @@ -1344,14 +1356,23 @@ static int ovl_get_lower_layers(struct super_block *sb, struct ovl_fs *ofs, if (err < 0) goto out; + /* + * Check if lower root conflicts with this overlay layers before + * checking if it is in-use as upperdir/workdir of "another" + * mount, because we do not bother to check in ovl_is_inuse() if + * the upperdir/workdir is in fact in-use by our + * upperdir/workdir. + */ err = ovl_setup_trap(sb, stack[i].dentry, &trap, "lowerdir"); if (err) goto out; if (ovl_is_inuse(stack[i].dentry)) { err = ovl_report_in_use(ofs, "lowerdir"); - if (err) + if (err) { + iput(trap); goto out; + } } mnt = clone_private_mount(&stack[i]); diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c index cdf5b8ae2583..74a60bae2b23 100644 --- a/fs/pstore/platform.c +++ b/fs/pstore/platform.c @@ -275,6 +275,9 @@ static int pstore_compress(const void *in, void *out, { int ret; + if (!IS_ENABLED(CONFIG_PSTORE_COMPRESSION)) + return -EINVAL; + ret = crypto_comp_compress(tfm, in, inlen, out, &outlen); if (ret) { pr_err("crypto_comp_compress failed, ret = %d!\n", ret); @@ -661,7 +664,7 @@ static void decompress_record(struct pstore_record *record) int unzipped_len; char *unzipped, *workspace; - if (!record->compressed) + if (!IS_ENABLED(CONFIG_PSTORE_COMPRESSION) || !record->compressed) return; /* Only PSTORE_TYPE_DMESG support compression. 
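The pstore hunks above gate both the compress and decompress paths on IS_ENABLED(), so the crypto calls are never reached when compression is configured out. A minimal sketch of that guard pattern; CONFIG_MY_FEATURE and my_feature_do() are placeholders, not symbols from this patch.

#include <linux/kconfig.h>
#include <linux/errno.h>
#include <linux/types.h>

int my_feature_do(void *buf, size_t len);	/* hypothetical backend */

static int my_feature_run(void *buf, size_t len)
{
	/* When CONFIG_MY_FEATURE is not set this is a compile-time constant,
	 * so the early return makes the backend call below dead code. */
	if (!IS_ENABLED(CONFIG_MY_FEATURE))
		return -EINVAL;

	return my_feature_do(buf, len);
}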
*/ diff --git a/fs/romfs/storage.c b/fs/romfs/storage.c index 6b2b4362089e..b57b3ffcbc32 100644 --- a/fs/romfs/storage.c +++ b/fs/romfs/storage.c @@ -217,10 +217,8 @@ int romfs_dev_read(struct super_block *sb, unsigned long pos, size_t limit; limit = romfs_maxsize(sb); - if (pos >= limit) + if (pos >= limit || buflen > limit - pos) return -EIO; - if (buflen > limit - pos) - buflen = limit - pos; #ifdef CONFIG_ROMFS_ON_MTD if (sb->s_mtd) diff --git a/fs/signalfd.c b/fs/signalfd.c index 44b6845b071c..5b78719be445 100644 --- a/fs/signalfd.c +++ b/fs/signalfd.c @@ -314,9 +314,10 @@ SYSCALL_DEFINE4(signalfd4, int, ufd, sigset_t __user *, user_mask, { sigset_t mask; - if (sizemask != sizeof(sigset_t) || - copy_from_user(&mask, user_mask, sizeof(mask))) + if (sizemask != sizeof(sigset_t)) return -EINVAL; + if (copy_from_user(&mask, user_mask, sizeof(mask))) + return -EFAULT; return do_signalfd4(ufd, &mask, flags); } @@ -325,9 +326,10 @@ SYSCALL_DEFINE3(signalfd, int, ufd, sigset_t __user *, user_mask, { sigset_t mask; - if (sizemask != sizeof(sigset_t) || - copy_from_user(&mask, user_mask, sizeof(mask))) + if (sizemask != sizeof(sigset_t)) return -EINVAL; + if (copy_from_user(&mask, user_mask, sizeof(mask))) + return -EFAULT; return do_signalfd4(ufd, &mask, 0); } diff --git a/fs/ubifs/journal.c b/fs/ubifs/journal.c index 826dad0243dc..a6ae2428e4c9 100644 --- a/fs/ubifs/journal.c +++ b/fs/ubifs/journal.c @@ -539,7 +539,7 @@ int ubifs_jnl_update(struct ubifs_info *c, const struct inode *dir, const struct fscrypt_name *nm, const struct inode *inode, int deletion, int xent) { - int err, dlen, ilen, len, lnum, ino_offs, dent_offs; + int err, dlen, ilen, len, lnum, ino_offs, dent_offs, orphan_added = 0; int aligned_dlen, aligned_ilen, sync = IS_DIRSYNC(dir); int last_reference = !!(deletion && inode->i_nlink == 0); struct ubifs_inode *ui = ubifs_inode(inode); @@ -630,6 +630,7 @@ int ubifs_jnl_update(struct ubifs_info *c, const struct inode *dir, goto out_finish; } ui->del_cmtno = c->cmt_no; + orphan_added = 1; } err = write_head(c, BASEHD, dent, len, &lnum, &dent_offs, sync); @@ -702,7 +703,7 @@ out_release: kfree(dent); out_ro: ubifs_ro_mode(c, err); - if (last_reference) + if (orphan_added) ubifs_delete_orphan(c, inode->i_ino); finish_reservation(c); return err; @@ -1217,7 +1218,7 @@ int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir, void *p; union ubifs_key key; struct ubifs_dent_node *dent, *dent2; - int err, dlen1, dlen2, ilen, lnum, offs, len; + int err, dlen1, dlen2, ilen, lnum, offs, len, orphan_added = 0; int aligned_dlen1, aligned_dlen2, plen = UBIFS_INO_NODE_SZ; int last_reference = !!(new_inode && new_inode->i_nlink == 0); int move = (old_dir != new_dir); @@ -1333,6 +1334,7 @@ int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir, goto out_finish; } new_ui->del_cmtno = c->cmt_no; + orphan_added = 1; } err = write_head(c, BASEHD, dent, len, &lnum, &offs, sync); @@ -1414,7 +1416,7 @@ out_release: release_head(c, BASEHD); out_ro: ubifs_ro_mode(c, err); - if (last_reference) + if (orphan_added) ubifs_delete_orphan(c, new_inode->i_ino); out_finish: finish_reservation(c); diff --git a/fs/ufs/super.c b/fs/ufs/super.c index 1da0be667409..e3b69fb280e8 100644 --- a/fs/ufs/super.c +++ b/fs/ufs/super.c @@ -101,7 +101,7 @@ static struct inode *ufs_nfs_get_inode(struct super_block *sb, u64 ino, u32 gene struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi; struct inode *inode; - if (ino < UFS_ROOTINO || ino > uspi->s_ncg * uspi->s_ipg) + if (ino < UFS_ROOTINO || ino 
> (u64)uspi->s_ncg * uspi->s_ipg) return ERR_PTR(-ESTALE); inode = ufs_iget(sb, ino); diff --git a/fs/xattr.c b/fs/xattr.c index 90dd78f0eb27..f2854570d411 100644 --- a/fs/xattr.c +++ b/fs/xattr.c @@ -204,10 +204,22 @@ int __vfs_setxattr_noperm(struct dentry *dentry, const char *name, return error; } - +/** + * __vfs_setxattr_locked: set an extended attribute while holding the inode + * lock + * + * @dentry - object to perform setxattr on + * @name - xattr name to set + * @value - value to set @name to + * @size - size of @value + * @flags - flags to pass into filesystem operations + * @delegated_inode - on return, will contain an inode pointer that + * a delegation was broken on, NULL if none. + */ int -vfs_setxattr(struct dentry *dentry, const char *name, const void *value, - size_t size, int flags) +__vfs_setxattr_locked(struct dentry *dentry, const char *name, + const void *value, size_t size, int flags, + struct inode **delegated_inode) { struct inode *inode = dentry->d_inode; int error; @@ -216,15 +228,40 @@ vfs_setxattr(struct dentry *dentry, const char *name, const void *value, if (error) return error; - inode_lock(inode); error = security_inode_setxattr(dentry, name, value, size, flags); if (error) goto out; + error = try_break_deleg(inode, delegated_inode); + if (error) + goto out; + error = __vfs_setxattr_noperm(dentry, name, value, size, flags); out: + return error; +} +EXPORT_SYMBOL_GPL(__vfs_setxattr_locked); + +int +vfs_setxattr(struct dentry *dentry, const char *name, const void *value, + size_t size, int flags) +{ + struct inode *inode = dentry->d_inode; + struct inode *delegated_inode = NULL; + int error; + +retry_deleg: + inode_lock(inode); + error = __vfs_setxattr_locked(dentry, name, value, size, flags, + &delegated_inode); inode_unlock(inode); + + if (delegated_inode) { + error = break_deleg_wait(&delegated_inode); + if (!error) + goto retry_deleg; + } return error; } EXPORT_SYMBOL_GPL(vfs_setxattr); @@ -378,8 +415,18 @@ __vfs_removexattr(struct dentry *dentry, const char *name) } EXPORT_SYMBOL(__vfs_removexattr); +/** + * __vfs_removexattr_locked: set an extended attribute while holding the inode + * lock + * + * @dentry - object to perform setxattr on + * @name - name of xattr to remove + * @delegated_inode - on return, will contain an inode pointer that + * a delegation was broken on, NULL if none. 
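The fs/xattr.c rework above splits each operation into a *_locked helper that reports a broken delegation through @delegated_inode, while the unlocked wrapper drops the inode lock, waits for the delegation to be returned, and retries. A sketch of that retry loop, with my_op_locked() standing in for __vfs_setxattr_locked()/__vfs_removexattr_locked().

#include <linux/fs.h>
#include <linux/dcache.h>

/* Hypothetical locked helper that may report a broken delegation. */
int my_op_locked(struct dentry *dentry, struct inode **delegated_inode);

static int my_op(struct dentry *dentry)
{
	struct inode *inode = d_inode(dentry);
	struct inode *delegated_inode = NULL;
	int error;

retry_deleg:
	inode_lock(inode);
	error = my_op_locked(dentry, &delegated_inode);
	inode_unlock(inode);

	if (delegated_inode) {
		/* Wait for the delegation holder to return it, then retry. */
		error = break_deleg_wait(&delegated_inode);
		if (!error)
			goto retry_deleg;
	}
	return error;
}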
+ */ int -vfs_removexattr(struct dentry *dentry, const char *name) +__vfs_removexattr_locked(struct dentry *dentry, const char *name, + struct inode **delegated_inode) { struct inode *inode = dentry->d_inode; int error; @@ -388,11 +435,14 @@ vfs_removexattr(struct dentry *dentry, const char *name) if (error) return error; - inode_lock(inode); error = security_inode_removexattr(dentry, name); if (error) goto out; + error = try_break_deleg(inode, delegated_inode); + if (error) + goto out; + error = __vfs_removexattr(dentry, name); if (!error) { @@ -401,12 +451,32 @@ vfs_removexattr(struct dentry *dentry, const char *name) } out: + return error; +} +EXPORT_SYMBOL_GPL(__vfs_removexattr_locked); + +int +vfs_removexattr(struct dentry *dentry, const char *name) +{ + struct inode *inode = dentry->d_inode; + struct inode *delegated_inode = NULL; + int error; + +retry_deleg: + inode_lock(inode); + error = __vfs_removexattr_locked(dentry, name, &delegated_inode); inode_unlock(inode); + + if (delegated_inode) { + error = break_deleg_wait(&delegated_inode); + if (!error) + goto retry_deleg; + } + return error; } EXPORT_SYMBOL_GPL(vfs_removexattr); - /* * Extended attribute SET operations */ diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c index 533b04aaf6f6..0a36f532cf86 100644 --- a/fs/xfs/libxfs/xfs_alloc.c +++ b/fs/xfs/libxfs/xfs_alloc.c @@ -2598,6 +2598,13 @@ xfs_agf_verify( be32_to_cpu(agf->agf_flcount) <= xfs_agfl_size(mp))) return __this_address; + if (be32_to_cpu(agf->agf_length) > mp->m_sb.sb_dblocks) + return __this_address; + + if (be32_to_cpu(agf->agf_freeblks) < be32_to_cpu(agf->agf_longest) || + be32_to_cpu(agf->agf_freeblks) > be32_to_cpu(agf->agf_length)) + return __this_address; + if (be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]) < 1 || be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]) < 1 || be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]) > XFS_BTREE_MAXLEVELS || @@ -2609,6 +2616,10 @@ xfs_agf_verify( be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]) > XFS_BTREE_MAXLEVELS)) return __this_address; + if (xfs_sb_version_hasrmapbt(&mp->m_sb) && + be32_to_cpu(agf->agf_rmap_blocks) > be32_to_cpu(agf->agf_length)) + return __this_address; + /* * during growfs operations, the perag is not fully initialised, * so we can't use it for any useful checking. growfs ensures we can't @@ -2622,6 +2633,11 @@ xfs_agf_verify( be32_to_cpu(agf->agf_btreeblks) > be32_to_cpu(agf->agf_length)) return __this_address; + if (xfs_sb_version_hasreflink(&mp->m_sb) && + be32_to_cpu(agf->agf_refcount_blocks) > + be32_to_cpu(agf->agf_length)) + return __this_address; + if (xfs_sb_version_hasreflink(&mp->m_sb) && (be32_to_cpu(agf->agf_refcount_level) < 1 || be32_to_cpu(agf->agf_refcount_level) > XFS_BTREE_MAXLEVELS)) diff --git a/fs/xfs/libxfs/xfs_attr_leaf.c b/fs/xfs/libxfs/xfs_attr_leaf.c index f0089e862216..5472ed3ce694 100644 --- a/fs/xfs/libxfs/xfs_attr_leaf.c +++ b/fs/xfs/libxfs/xfs_attr_leaf.c @@ -583,8 +583,8 @@ xfs_attr_shortform_create(xfs_da_args_t *args) ASSERT(ifp->if_flags & XFS_IFINLINE); } xfs_idata_realloc(dp, sizeof(*hdr), XFS_ATTR_FORK); - hdr = (xfs_attr_sf_hdr_t *)ifp->if_u1.if_data; - hdr->count = 0; + hdr = (struct xfs_attr_sf_hdr *)ifp->if_u1.if_data; + memset(hdr, 0, sizeof(*hdr)); hdr->totsize = cpu_to_be16(sizeof(*hdr)); xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE | XFS_ILOG_ADATA); } @@ -946,8 +946,10 @@ xfs_attr_shortform_verify( * struct xfs_attr_sf_entry has a variable length. * Check the fixed-offset parts of the structure are * within the data buffer. 
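The xfs_agf_verify() additions above reject an AGF whose length exceeds the filesystem size or whose derived counters (freeblks, longest, per-btree block counts) fall outside that length. An illustrative verifier in the same spirit; struct my_ondisk_hdr and its fields are hypothetical, not XFS structures.

#include <linux/types.h>
#include <asm/byteorder.h>

struct my_ondisk_hdr {
	__be32 length;		/* blocks covered by this group */
	__be32 freeblks;	/* free blocks within the group */
	__be32 longest;		/* longest contiguous free extent */
};

static bool my_hdr_verify(const struct my_ondisk_hdr *hdr, u32 fs_blocks)
{
	u32 length = be32_to_cpu(hdr->length);
	u32 freeblks = be32_to_cpu(hdr->freeblks);

	/* The group cannot extend past the end of the filesystem. */
	if (length > fs_blocks)
		return false;

	/* Derived counters must stay inside the region they describe. */
	if (freeblks > length || be32_to_cpu(hdr->longest) > freeblks)
		return false;

	return true;
}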
+ * xfs_attr_sf_entry is defined with a 1-byte variable + * array at the end, so we must subtract that off. */ - if (((char *)sfep + sizeof(*sfep)) >= endp) + if (((char *)sfep + sizeof(*sfep) - 1) >= endp) return __this_address; /* Don't allow names with known bad length. */ diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c index 3f76da11197c..19a600443b9e 100644 --- a/fs/xfs/libxfs/xfs_bmap.c +++ b/fs/xfs/libxfs/xfs_bmap.c @@ -6179,7 +6179,7 @@ xfs_bmap_validate_extent( isrt = XFS_IS_REALTIME_INODE(ip); endfsb = irec->br_startblock + irec->br_blockcount - 1; - if (isrt) { + if (isrt && whichfork == XFS_DATA_FORK) { if (!xfs_verify_rtbno(mp, irec->br_startblock)) return __this_address; if (!xfs_verify_rtbno(mp, endfsb)) diff --git a/fs/xfs/libxfs/xfs_ialloc.c b/fs/xfs/libxfs/xfs_ialloc.c index 588d44613094..443cf33f6666 100644 --- a/fs/xfs/libxfs/xfs_ialloc.c +++ b/fs/xfs/libxfs/xfs_ialloc.c @@ -679,7 +679,7 @@ xfs_ialloc_ag_alloc( args.minalignslop = igeo->cluster_align - 1; /* Allow space for the inode btree to split. */ - args.minleft = igeo->inobt_maxlevels - 1; + args.minleft = igeo->inobt_maxlevels; if ((error = xfs_alloc_vextent(&args))) return error; @@ -727,7 +727,7 @@ xfs_ialloc_ag_alloc( /* * Allow space for the inode btree to split. */ - args.minleft = igeo->inobt_maxlevels - 1; + args.minleft = igeo->inobt_maxlevels; if ((error = xfs_alloc_vextent(&args))) return error; } diff --git a/fs/xfs/libxfs/xfs_trans_inode.c b/fs/xfs/libxfs/xfs_trans_inode.c index a9ad90926b87..6c7354abd0ae 100644 --- a/fs/xfs/libxfs/xfs_trans_inode.c +++ b/fs/xfs/libxfs/xfs_trans_inode.c @@ -36,6 +36,7 @@ xfs_trans_ijoin( ASSERT(iip->ili_lock_flags == 0); iip->ili_lock_flags = lock_flags; + ASSERT(!xfs_iflags_test(ip, XFS_ISTALE)); /* * Get a log_item_desc to point at the new item. @@ -91,6 +92,7 @@ xfs_trans_log_inode( ASSERT(ip->i_itemp != NULL); ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); + ASSERT(!xfs_iflags_test(ip, XFS_ISTALE)); /* * Don't bother with i_lock for the I_DIRTY_TIME check here, as races diff --git a/fs/xfs/libxfs/xfs_trans_space.h b/fs/xfs/libxfs/xfs_trans_space.h index 88221c7a04cc..7ad3659c5d2a 100644 --- a/fs/xfs/libxfs/xfs_trans_space.h +++ b/fs/xfs/libxfs/xfs_trans_space.h @@ -57,8 +57,8 @@ XFS_DAREMOVE_SPACE_RES(mp, XFS_DATA_FORK) #define XFS_IALLOC_SPACE_RES(mp) \ (M_IGEO(mp)->ialloc_blks + \ - (xfs_sb_version_hasfinobt(&mp->m_sb) ? 2 : 1 * \ - (M_IGEO(mp)->inobt_maxlevels - 1))) + ((xfs_sb_version_hasfinobt(&mp->m_sb) ? 2 : 1) * \ + M_IGEO(mp)->inobt_maxlevels)) /* * Space reservation values for various transactions. diff --git a/fs/xfs/scrub/bmap.c b/fs/xfs/scrub/bmap.c index fa6ea6407992..392fb4df5c12 100644 --- a/fs/xfs/scrub/bmap.c +++ b/fs/xfs/scrub/bmap.c @@ -45,9 +45,27 @@ xchk_setup_inode_bmap( */ if (S_ISREG(VFS_I(sc->ip)->i_mode) && sc->sm->sm_type == XFS_SCRUB_TYPE_BMBTD) { + struct address_space *mapping = VFS_I(sc->ip)->i_mapping; + inode_dio_wait(VFS_I(sc->ip)); - error = filemap_write_and_wait(VFS_I(sc->ip)->i_mapping); - if (error) + + /* + * Try to flush all incore state to disk before we examine the + * space mappings for the data fork. Leave accumulated errors + * in the mapping for the writer threads to consume. + * + * On ENOSPC or EIO writeback errors, we continue into the + * extent mapping checks because write failures do not + * necessarily imply anything about the correctness of the file + * metadata. 
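The scrub comment above, together with the filemap_fdatawrite()/filemap_fdatawait_keep_errors() calls that follow it, flushes dirty pagecache before the data fork is examined while deliberately leaving any writeback error in the mapping for the writer to consume, and tolerating -ENOSPC/-EIO. A condensed sketch of that sequence; my_examine_metadata() is a placeholder for the actual scrub work.

#include <linux/fs.h>
#include <linux/errno.h>

int my_examine_metadata(struct inode *inode);	/* hypothetical */

static int my_flush_then_examine(struct inode *inode)
{
	struct address_space *mapping = inode->i_mapping;
	int error;

	/* Start writeback, then wait without clearing the mapping's error
	 * state, so a later fsync() by the writer still sees the failure. */
	error = filemap_fdatawrite(mapping);
	if (!error)
		error = filemap_fdatawait_keep_errors(mapping);

	/* Write failures do not imply broken metadata; only bail out on
	 * unexpected errors. */
	if (error && error != -ENOSPC && error != -EIO)
		return error;

	return my_examine_metadata(inode);
}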
The metadata and the file data could be on + * completely separate devices; a media failure might only + * affect a subset of the disk, etc. We can handle delalloc + * extents in the scrubber, so leaving them in memory is fine. + */ + error = filemap_fdatawrite(mapping); + if (!error) + error = filemap_fdatawait_keep_errors(mapping); + if (error && (error != -ENOSPC && error != -EIO)) goto out; } diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c index 4f443703065e..0c71acc1b831 100644 --- a/fs/xfs/xfs_bmap_util.c +++ b/fs/xfs/xfs_bmap_util.c @@ -1760,7 +1760,7 @@ xfs_swap_extents( if (xfs_inode_has_cow_data(tip)) { error = xfs_reflink_cancel_cow_range(tip, 0, NULLFILEOFF, true); if (error) - return error; + goto out_unlock; } /* diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c index 0abba171aa89..1264ac63e4e5 100644 --- a/fs/xfs/xfs_buf.c +++ b/fs/xfs/xfs_buf.c @@ -1162,8 +1162,10 @@ xfs_buf_ioend( bp->b_ops->verify_read(bp); } - if (!bp->b_error) + if (!bp->b_error) { + bp->b_flags &= ~XBF_WRITE_FAIL; bp->b_flags |= XBF_DONE; + } if (bp->b_iodone) (*(bp->b_iodone))(bp); @@ -1223,7 +1225,7 @@ xfs_bwrite( bp->b_flags |= XBF_WRITE; bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q | - XBF_WRITE_FAIL | XBF_DONE); + XBF_DONE); error = xfs_buf_submit(bp); if (error) @@ -1929,7 +1931,7 @@ xfs_buf_delwri_submit_buffers( * synchronously. Otherwise, drop the buffer from the delwri * queue and submit async. */ - bp->b_flags &= ~(_XBF_DELWRI_Q | XBF_WRITE_FAIL); + bp->b_flags &= ~_XBF_DELWRI_Q; bp->b_flags |= XBF_WRITE; if (wait_list) { bp->b_flags &= ~XBF_ASYNC; diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c index aeb95e7391c1..3cbf248af51f 100644 --- a/fs/xfs/xfs_dquot.c +++ b/fs/xfs/xfs_dquot.c @@ -1116,13 +1116,12 @@ xfs_qm_dqflush( dqb = bp->b_addr + dqp->q_bufoffset; ddqp = &dqb->dd_diskdq; - /* - * A simple sanity check in case we got a corrupted dquot. - */ - fa = xfs_dqblk_verify(mp, dqb, be32_to_cpu(ddqp->d_id), 0); + /* sanity check the in-core structure before we flush */ + fa = xfs_dquot_verify(mp, &dqp->q_core, be32_to_cpu(dqp->q_core.d_id), + 0); if (fa) { xfs_alert(mp, "corrupt dquot ID 0x%x in memory at %pS", - be32_to_cpu(ddqp->d_id), fa); + be32_to_cpu(dqp->q_core.d_id), fa); xfs_buf_relse(bp); xfs_dqfunlock(dqp); xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE); diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c index 1ffb179f35d2..1e2176190c86 100644 --- a/fs/xfs/xfs_file.c +++ b/fs/xfs/xfs_file.c @@ -1172,6 +1172,14 @@ __xfs_filemap_fault( return ret; } +static inline bool +xfs_is_write_fault( + struct vm_fault *vmf) +{ + return (vmf->flags & FAULT_FLAG_WRITE) && + (vmf->vma->vm_flags & VM_SHARED); +} + static vm_fault_t xfs_filemap_fault( struct vm_fault *vmf) @@ -1179,7 +1187,7 @@ xfs_filemap_fault( /* DAX can shortcut the normal fault path on write faults! */ return __xfs_filemap_fault(vmf, PE_SIZE_PTE, IS_DAX(file_inode(vmf->vma->vm_file)) && - (vmf->flags & FAULT_FLAG_WRITE)); + xfs_is_write_fault(vmf)); } static vm_fault_t @@ -1192,7 +1200,7 @@ xfs_filemap_huge_fault( /* DAX can shortcut the normal fault path on write faults! 
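xfs_is_write_fault(), added a few hunks above and used in the fault handlers just below, narrows the DAX write-fault shortcut to faults that can actually write through to the file: the fault must be a write and the mapping must be shared, since private (copy-on-write) mappings never dirty the file itself. A minimal restatement of that predicate, under a hypothetical name.

#include <linux/mm.h>

static inline bool fault_writes_through_to_file(struct vm_fault *vmf)
{
	/* A write fault only reaches the file for MAP_SHARED mappings;
	 * MAP_PRIVATE faults are served by a COW page instead. */
	return (vmf->flags & FAULT_FLAG_WRITE) &&
	       (vmf->vma->vm_flags & VM_SHARED);
}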
*/ return __xfs_filemap_fault(vmf, pe_size, - (vmf->flags & FAULT_FLAG_WRITE)); + xfs_is_write_fault(vmf)); } static vm_fault_t diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c index d95dc9b0f0bb..a1135b86e79f 100644 --- a/fs/xfs/xfs_icache.c +++ b/fs/xfs/xfs_icache.c @@ -1132,7 +1132,7 @@ restart: goto out_ifunlock; xfs_iunpin_wait(ip); } - if (xfs_iflags_test(ip, XFS_ISTALE) || xfs_inode_clean(ip)) { + if (xfs_inode_clean(ip)) { xfs_ifunlock(ip); goto reclaim; } @@ -1219,6 +1219,7 @@ reclaim: xfs_ilock(ip, XFS_ILOCK_EXCL); xfs_qm_dqdetach(ip); xfs_iunlock(ip, XFS_ILOCK_EXCL); + ASSERT(xfs_inode_clean(ip)); __xfs_inode_free(ip); return error; diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index 18f4b262e61c..b339ff93df99 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c @@ -1761,10 +1761,31 @@ xfs_inactive_ifree( return error; } + /* + * We do not hold the inode locked across the entire rolling transaction + * here. We only need to hold it for the first transaction that + * xfs_ifree() builds, which may mark the inode XFS_ISTALE if the + * underlying cluster buffer is freed. Relogging an XFS_ISTALE inode + * here breaks the relationship between cluster buffer invalidation and + * stale inode invalidation on cluster buffer item journal commit + * completion, and can result in leaving dirty stale inodes hanging + * around in memory. + * + * We have no need for serialising this inode operation against other + * operations - we freed the inode and hence reallocation is required + * and that will serialise on reallocating the space the deferops need + * to free. Hence we can unlock the inode on the first commit of + * the transaction rather than roll it right through the deferops. This + * avoids relogging the XFS_ISTALE inode. + * + * We check that xfs_ifree() hasn't grown an internal transaction roll + * by asserting that the inode is still locked when it returns. + */ xfs_ilock(ip, XFS_ILOCK_EXCL); - xfs_trans_ijoin(tp, ip, 0); + xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); error = xfs_ifree(tp, ip); + ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); if (error) { /* * If we fail to free the inode, shut down. The cancel @@ -1777,7 +1798,6 @@ xfs_inactive_ifree( xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR); } xfs_trans_cancel(tp); - xfs_iunlock(ip, XFS_ILOCK_EXCL); return error; } @@ -1795,7 +1815,6 @@ xfs_inactive_ifree( xfs_notice(mp, "%s: xfs_trans_commit returned error %d", __func__, error); - xfs_iunlock(ip, XFS_ILOCK_EXCL); return 0; } diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c index 6a4fd1738b08..904d8285c226 100644 --- a/fs/xfs/xfs_reflink.c +++ b/fs/xfs/xfs_reflink.c @@ -1005,6 +1005,7 @@ xfs_reflink_remap_extent( xfs_filblks_t rlen; xfs_filblks_t unmap_len; xfs_off_t newlen; + int64_t qres; int error; unmap_len = irec->br_startoff + irec->br_blockcount - destoff; @@ -1027,13 +1028,19 @@ xfs_reflink_remap_extent( xfs_ilock(ip, XFS_ILOCK_EXCL); xfs_trans_ijoin(tp, ip, 0); - /* If we're not just clearing space, then do we have enough quota? */ - if (real_extent) { - error = xfs_trans_reserve_quota_nblks(tp, ip, - irec->br_blockcount, 0, XFS_QMOPT_RES_REGBLKS); - if (error) - goto out_cancel; - } + /* + * Reserve quota for this operation. We don't know if the first unmap + * in the dest file will cause a bmap btree split, so we always reserve + * at least enough blocks for that split. If the extent being mapped + * in is written, we need to reserve quota for that too. 
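The comment above describes the reservation sizing that the qres computation just below implements: always budget for a possible bmap btree split caused by the unmap in the destination file, and add the mapped blocks only when a real (written) extent is coming in. A back-of-the-envelope helper expressing the same arithmetic; split_reservation is passed in rather than derived from the real XFS_EXTENTADD_SPACE_RES() macro.

#include <linux/types.h>

static int64_t remap_quota_reservation(bool real_extent,
				       int64_t extent_blocks,
				       int64_t split_reservation)
{
	/* Worst case: the unmap splits the destination bmap btree. */
	int64_t qres = split_reservation;

	/* A written extent also charges its blocks to the new owner. */
	if (real_extent)
		qres += extent_blocks;

	return qres;
}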
+ */ + qres = XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK); + if (real_extent) + qres += irec->br_blockcount; + error = xfs_trans_reserve_quota_nblks(tp, ip, qres, 0, + XFS_QMOPT_RES_REGBLKS); + if (error) + goto out_cancel; trace_xfs_reflink_remap(ip, irec->br_startoff, irec->br_blockcount, irec->br_startblock); diff --git a/fs/xfs/xfs_sysfs.h b/fs/xfs/xfs_sysfs.h index e9f810fc6731..43585850f154 100644 --- a/fs/xfs/xfs_sysfs.h +++ b/fs/xfs/xfs_sysfs.h @@ -32,9 +32,11 @@ xfs_sysfs_init( struct xfs_kobj *parent_kobj, const char *name) { + struct kobject *parent; + + parent = parent_kobj ? &parent_kobj->kobject : NULL; init_completion(&kobj->complete); - return kobject_init_and_add(&kobj->kobject, ktype, - &parent_kobj->kobject, "%s", name); + return kobject_init_and_add(&kobj->kobject, ktype, parent, "%s", name); } static inline void diff --git a/fs/xfs/xfs_trans_dquot.c b/fs/xfs/xfs_trans_dquot.c index 16457465833b..904780dd74aa 100644 --- a/fs/xfs/xfs_trans_dquot.c +++ b/fs/xfs/xfs_trans_dquot.c @@ -646,7 +646,7 @@ xfs_trans_dqresv( } } if (ninos > 0) { - total_count = be64_to_cpu(dqp->q_core.d_icount) + ninos; + total_count = dqp->q_res_icount + ninos; timer = be32_to_cpu(dqp->q_core.d_itimer); warns = be16_to_cpu(dqp->q_core.d_iwarns); warnlimit = dqp->q_mount->m_quotainfo->qi_iwarnlimit; diff --git a/include/asm-generic/mmiowb.h b/include/asm-generic/mmiowb.h index 9439ff037b2d..5698fca3bf56 100644 --- a/include/asm-generic/mmiowb.h +++ b/include/asm-generic/mmiowb.h @@ -27,7 +27,7 @@ #include DECLARE_PER_CPU(struct mmiowb_state, __mmiowb_state); -#define __mmiowb_state() this_cpu_ptr(&__mmiowb_state) +#define __mmiowb_state() raw_cpu_ptr(&__mmiowb_state) #else #define __mmiowb_state() arch_mmiowb_state() #endif /* arch_mmiowb_state */ @@ -35,7 +35,9 @@ DECLARE_PER_CPU(struct mmiowb_state, __mmiowb_state); static inline void mmiowb_set_pending(void) { struct mmiowb_state *ms = __mmiowb_state(); - ms->mmiowb_pending = ms->nesting_count; + + if (likely(ms->nesting_count)) + ms->mmiowb_pending = ms->nesting_count; } static inline void mmiowb_spin_lock(void) diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index b6d7347ccda7..f050039ca2c0 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -306,7 +306,8 @@ #define PAGE_ALIGNED_DATA(page_align) \ . = ALIGN(page_align); \ - *(.data..page_aligned) + *(.data..page_aligned) \ + . = ALIGN(page_align); #define READ_MOSTLY_DATA(align) \ . = ALIGN(align); \ @@ -339,6 +340,7 @@ */ #ifndef RO_AFTER_INIT_DATA #define RO_AFTER_INIT_DATA \ + . = ALIGN(8); \ __start_ro_after_init = .; \ *(.data..ro_after_init) \ JUMP_TABLE_DATA \ @@ -695,7 +697,9 @@ . = ALIGN(bss_align); \ .bss : AT(ADDR(.bss) - LOAD_OFFSET) { \ BSS_FIRST_SECTIONS \ + . = ALIGN(PAGE_SIZE); \ *(.bss..page_aligned) \ + . = ALIGN(PAGE_SIZE); \ *(.dynbss) \ *(BSS_MAIN) \ *(COMMON) \ diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h index 24cfa96f98ea..c1a8d4a41bb1 100644 --- a/include/crypto/if_alg.h +++ b/include/crypto/if_alg.h @@ -29,8 +29,8 @@ struct alg_sock { struct sock *parent; - unsigned int refcnt; - unsigned int nokey_refcnt; + atomic_t refcnt; + atomic_t nokey_refcnt; const struct af_alg_type *type; void *private; @@ -135,6 +135,7 @@ struct af_alg_async_req { * SG? * @enc: Cryptographic operation to be performed when * recvmsg is invoked. + * @init: True if metadata has been sent. * @len: Length of memory allocated for this data structure. 
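The if_alg.h hunk above converts refcnt and nokey_refcnt from plain unsigned int to atomic_t so they can be manipulated without holding the socket lock. A generic sketch of that conversion; struct my_obj is a placeholder, not the af_alg structure.

#include <linux/atomic.h>
#include <linux/slab.h>

struct my_obj {
	atomic_t refcnt;	/* updated without an external lock */
};

static void my_obj_get(struct my_obj *o)
{
	atomic_inc(&o->refcnt);
}

static void my_obj_put(struct my_obj *o)
{
	/* atomic_dec_and_test() makes the "last reference frees" decision
	 * race-free between concurrent put() callers. */
	if (atomic_dec_and_test(&o->refcnt))
		kfree(o);
}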
*/ struct af_alg_ctx { @@ -151,6 +152,7 @@ struct af_alg_ctx { bool more; bool merge; bool enc; + bool init; unsigned int len; }; @@ -226,7 +228,7 @@ unsigned int af_alg_count_tsgl(struct sock *sk, size_t bytes, size_t offset); void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst, size_t dst_offset); void af_alg_wmem_wakeup(struct sock *sk); -int af_alg_wait_for_data(struct sock *sk, unsigned flags); +int af_alg_wait_for_data(struct sock *sk, unsigned flags, unsigned min); int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size, unsigned int ivsize); ssize_t af_alg_sendpage(struct socket *sock, struct page *page, diff --git a/include/drm/drm_mode_config.h b/include/drm/drm_mode_config.h index 3bcbe30339f0..198b9d060008 100644 --- a/include/drm/drm_mode_config.h +++ b/include/drm/drm_mode_config.h @@ -865,6 +865,18 @@ struct drm_mode_config { */ bool prefer_shadow_fbdev; + /** + * @fbdev_use_iomem: + * + * Set to true if framebuffer reside in iomem. + * When set to true memcpy_toio() is used when copying the framebuffer in + * drm_fb_helper.drm_fb_helper_dirty_blit_real(). + * + * FIXME: This should be replaced with a per-mapping is_iomem + * flag (like ttm does), and then used everywhere in fbdev code. + */ + bool fbdev_use_iomem; + /** * @quirk_addfb_prefer_xbgr_30bpp: * diff --git a/include/linux/bitfield.h b/include/linux/bitfield.h index 4bbb5f1c8b5b..4c0224ff0a14 100644 --- a/include/linux/bitfield.h +++ b/include/linux/bitfield.h @@ -64,7 +64,7 @@ */ #define FIELD_FIT(_mask, _val) \ ({ \ - __BF_FIELD_CHECK(_mask, 0ULL, _val, "FIELD_FIT: "); \ + __BF_FIELD_CHECK(_mask, 0ULL, 0ULL, "FIELD_FIT: "); \ !((((typeof(_mask))_val) << __bf_shf(_mask)) & ~(_mask)); \ }) diff --git a/include/linux/bitops.h b/include/linux/bitops.h index c94a9ff9f082..4f0e62cbf2ff 100644 --- a/include/linux/bitops.h +++ b/include/linux/bitops.h @@ -57,7 +57,7 @@ static inline int get_bitmask_order(unsigned int count) static __always_inline unsigned long hweight_long(unsigned long w) { - return sizeof(w) == 4 ? hweight32(w) : hweight64(w); + return sizeof(w) == 4 ? 
hweight32(w) : hweight64((__u64)w); } /** diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index bff1def62eed..d5338b9ee550 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -592,6 +592,7 @@ struct request_queue { u64 write_hints[BLK_MAX_WRITE_HINTS]; }; +/* Keep blk_queue_flag_name[] in sync with the definitions below */ #define QUEUE_FLAG_STOPPED 0 /* queue is stopped */ #define QUEUE_FLAG_DYING 1 /* queue being torn down */ #define QUEUE_FLAG_NOMERGES 3 /* disable merge attempts */ diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 3bf3835d0e86..7aa0d8b5aaf0 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -956,11 +956,14 @@ static inline void bpf_map_offload_map_free(struct bpf_map *map) #endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */ #if defined(CONFIG_BPF_STREAM_PARSER) -int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog, u32 which); +int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog, + struct bpf_prog *old, u32 which); int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog); +int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype); #else static inline int sock_map_prog_update(struct bpf_map *map, - struct bpf_prog *prog, u32 which) + struct bpf_prog *prog, + struct bpf_prog *old, u32 which) { return -EOPNOTSUPP; } @@ -970,6 +973,12 @@ static inline int sock_map_get_from_fd(const union bpf_attr *attr, { return -EINVAL; } + +static inline int sock_map_prog_detach(const union bpf_attr *attr, + enum bpf_prog_type ptype) +{ + return -EOPNOTSUPP; +} #endif #if defined(CONFIG_XDP_SOCKETS) diff --git a/include/linux/bvec.h b/include/linux/bvec.h index a032f01e928c..d7a628e066ee 100644 --- a/include/linux/bvec.h +++ b/include/linux/bvec.h @@ -110,11 +110,18 @@ static inline bool bvec_iter_advance(const struct bio_vec *bv, return true; } +static inline void bvec_iter_skip_zero_bvec(struct bvec_iter *iter) +{ + iter->bi_bvec_done = 0; + iter->bi_idx++; +} + #define for_each_bvec(bvl, bio_vec, iter, start) \ for (iter = (start); \ (iter).bi_size && \ ((bvl = bvec_iter_bvec((bio_vec), (iter))), 1); \ - bvec_iter_advance((bio_vec), &(iter), (bvl).bv_len)) + (bvl).bv_len ? 
(void)bvec_iter_advance((bio_vec), &(iter), \ + (bvl).bv_len) : bvec_iter_skip_zero_bvec(&(iter))) /* for iterating one bio from start to end */ #define BVEC_ITER_ALL_INIT (struct bvec_iter) \ diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h index 430e219e3aba..1ccfa3779e18 100644 --- a/include/linux/cgroup-defs.h +++ b/include/linux/cgroup-defs.h @@ -797,7 +797,9 @@ struct sock_cgroup_data { union { #ifdef __LITTLE_ENDIAN struct { - u8 is_data; + u8 is_data : 1; + u8 no_refcnt : 1; + u8 unused : 6; u8 padding; u16 prioidx; u32 classid; @@ -807,7 +809,9 @@ struct sock_cgroup_data { u32 classid; u16 prioidx; u8 padding; - u8 is_data; + u8 unused : 6; + u8 no_refcnt : 1; + u8 is_data : 1; } __packed; #endif u64 val; diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index 57577075d204..202852383ae9 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -822,6 +822,7 @@ extern spinlock_t cgroup_sk_update_lock; void cgroup_sk_alloc_disable(void); void cgroup_sk_alloc(struct sock_cgroup_data *skcd); +void cgroup_sk_clone(struct sock_cgroup_data *skcd); void cgroup_sk_free(struct sock_cgroup_data *skcd); static inline struct cgroup *sock_cgroup_ptr(struct sock_cgroup_data *skcd) @@ -835,7 +836,7 @@ static inline struct cgroup *sock_cgroup_ptr(struct sock_cgroup_data *skcd) */ v = READ_ONCE(skcd->val); - if (v & 1) + if (v & 3) return &cgrp_dfl_root.cgrp; return (struct cgroup *)(unsigned long)v ?: &cgrp_dfl_root.cgrp; @@ -847,6 +848,7 @@ static inline struct cgroup *sock_cgroup_ptr(struct sock_cgroup_data *skcd) #else /* CONFIG_CGROUP_DATA */ static inline void cgroup_sk_alloc(struct sock_cgroup_data *skcd) {} +static inline void cgroup_sk_clone(struct sock_cgroup_data *skcd) {} static inline void cgroup_sk_free(struct sock_cgroup_data *skcd) {} #endif /* CONFIG_CGROUP_DATA */ diff --git a/include/linux/dax.h b/include/linux/dax.h index 9bd8528bd305..72a7f03a59f4 100644 --- a/include/linux/dax.h +++ b/include/linux/dax.h @@ -56,6 +56,8 @@ static inline void set_dax_synchronous(struct dax_device *dax_dev) { __set_dax_synchronous(dax_dev); } +bool dax_supported(struct dax_device *dax_dev, struct block_device *bdev, + int blocksize, sector_t start, sector_t len); /* * Check if given mapping is supported by the file / underlying device. 
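The sock_cgroup_data hunks above carve a second flag (no_refcnt) out of what used to be a whole is_data byte, which is why the pointer check becomes (v & 3): if either low flag bit is set, the value is not a plain cgroup pointer. The same idea expressed as an explicit tagged pointer rather than a union of bitfields; the my_tag_* names are hypothetical and assume at least 4-byte object alignment.

#include <linux/types.h>

#define MY_TAG_DATA	0x1UL
#define MY_TAG_NOREF	0x2UL
#define MY_TAG_MASK	(MY_TAG_DATA | MY_TAG_NOREF)

static inline unsigned long my_tag_encode(void *ptr, unsigned long tags)
{
	/* Low bits are free because the object is at least 4-byte aligned. */
	return (unsigned long)ptr | (tags & MY_TAG_MASK);
}

static inline void *my_tag_decode(unsigned long v)
{
	/* Any tag bit set means the word does not hold a usable pointer. */
	if (v & MY_TAG_MASK)
		return NULL;
	return (void *)v;
}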
*/ @@ -102,6 +104,12 @@ static inline bool dax_synchronous(struct dax_device *dax_dev) static inline void set_dax_synchronous(struct dax_device *dax_dev) { } +static inline bool dax_supported(struct dax_device *dax_dev, + struct block_device *bdev, int blocksize, sector_t start, + sector_t len) +{ + return false; +} static inline bool daxdev_mapping_supported(struct vm_area_struct *vma, struct dax_device *dax_dev) { @@ -197,14 +205,23 @@ static inline void dax_unlock_page(struct page *page, dax_entry_t cookie) } #endif +#if IS_ENABLED(CONFIG_DAX) int dax_read_lock(void); void dax_read_unlock(int id); +#else +static inline int dax_read_lock(void) +{ + return 0; +} + +static inline void dax_read_unlock(int id) +{ +} +#endif /* CONFIG_DAX */ bool dax_alive(struct dax_device *dax_dev); void *dax_get_private(struct dax_device *dax_dev); long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages, void **kaddr, pfn_t *pfn); -bool dax_supported(struct dax_device *dax_dev, struct block_device *bdev, - int blocksize, sector_t start, sector_t len); size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr, size_t bytes, struct iov_iter *i); size_t dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr, diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index 399ad8632356..e4e1f5c1f492 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h @@ -420,6 +420,7 @@ const char *dm_device_name(struct mapped_device *md); int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid); struct gendisk *dm_disk(struct mapped_device *md); int dm_suspended(struct dm_target *ti); +int dm_post_suspending(struct dm_target *ti); int dm_noflush_suspending(struct dm_target *ti); void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors); void dm_remap_zone_report(struct dm_target *ti, sector_t start, diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h index ec212cb27fdc..12eac4293af6 100644 --- a/include/linux/dma-buf.h +++ b/include/linux/dma-buf.h @@ -303,6 +303,7 @@ struct dma_buf { void *vmap_ptr; const char *exp_name; const char *name; + spinlock_t name_lock; /* spinlock to protect name access */ struct module *owner; struct list_head list_node; void *priv; diff --git a/include/linux/efi.h b/include/linux/efi.h index d87acf62958e..13ed2c6b13f8 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h @@ -1039,7 +1039,11 @@ extern void *efi_get_pal_addr (void); extern void efi_map_pal_code (void); extern void efi_memmap_walk (efi_freemem_callback_t callback, void *arg); extern void efi_gettimeofday (struct timespec64 *ts); +#ifdef CONFIG_EFI extern void efi_enter_virtual_mode (void); /* switch EFI to virtual mode, if possible */ +#else +static inline void efi_enter_virtual_mode (void) {} +#endif #ifdef CONFIG_X86 extern efi_status_t efi_query_variable_store(u32 attributes, unsigned long size, diff --git a/include/linux/fb.h b/include/linux/fb.h index c453ed48f2c2..71a62c29aa61 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h @@ -412,8 +412,6 @@ struct fb_tile_ops { #define FBINFO_HWACCEL_YPAN 0x2000 /* optional */ #define FBINFO_HWACCEL_YWRAP 0x4000 /* optional */ -#define FBINFO_MISC_USEREVENT 0x10000 /* event request - from userspace */ #define FBINFO_MISC_TILEBLITTING 0x20000 /* use tile blitting */ /* A driver may set this flag to indicate that it does want a set_par to be diff --git a/include/linux/filter.h b/include/linux/filter.h index 3bbc72dbc69e..79830bc9e45c 100644 
--- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -853,12 +853,12 @@ void bpf_jit_compile(struct bpf_prog *prog); bool bpf_jit_needs_zext(void); bool bpf_helper_changes_pkt_data(void *func); -static inline bool bpf_dump_raw_ok(void) +static inline bool bpf_dump_raw_ok(const struct cred *cred) { /* Reconstruction of call-sites is dependent on kallsyms, * thus make dump the same restriction. */ - return kallsyms_show_value() == 1; + return kallsyms_show_value(cred); } struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off, diff --git a/include/linux/fs.h b/include/linux/fs.h index 5bd384dbdca5..4c82683e034a 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -2140,6 +2140,10 @@ static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp) * * I_CREATING New object's inode in the middle of setting up. * + * I_SYNC_QUEUED Inode is queued in b_io or b_more_io writeback lists. + * Used to detect that mark_inode_dirty() should not move + * inode between dirty lists. + * * Q: What is the difference between I_WILL_FREE and I_FREEING? */ #define I_DIRTY_SYNC (1 << 0) @@ -2157,11 +2161,11 @@ static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp) #define I_DIO_WAKEUP (1 << __I_DIO_WAKEUP) #define I_LINKABLE (1 << 10) #define I_DIRTY_TIME (1 << 11) -#define __I_DIRTY_TIME_EXPIRED 12 -#define I_DIRTY_TIME_EXPIRED (1 << __I_DIRTY_TIME_EXPIRED) +#define I_DIRTY_TIME_EXPIRED (1 << 12) #define I_WB_SWITCH (1 << 13) #define I_OVL_INUSE (1 << 14) #define I_CREATING (1 << 15) +#define I_SYNC_QUEUED (1 << 17) #define I_DIRTY_INODE (I_DIRTY_SYNC | I_DIRTY_DATASYNC) #define I_DIRTY (I_DIRTY_INODE | I_DIRTY_PAGES) diff --git a/include/linux/genhd.h b/include/linux/genhd.h index 8b5330dd5ac0..62a2ec9f17df 100644 --- a/include/linux/genhd.h +++ b/include/linux/genhd.h @@ -750,9 +750,11 @@ static inline sector_t part_nr_sects_read(struct hd_struct *part) static inline void part_nr_sects_write(struct hd_struct *part, sector_t size) { #if BITS_PER_LONG==32 && defined(CONFIG_SMP) + preempt_disable(); write_seqcount_begin(&part->nr_sects_seq); part->nr_sects = size; write_seqcount_end(&part->nr_sects_seq); + preempt_enable(); #elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPT) preempt_disable(); part->nr_sects = size; diff --git a/include/linux/hid.h b/include/linux/hid.h index 875f71132b14..c7044a14200e 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h @@ -959,34 +959,49 @@ static inline void hid_device_io_stop(struct hid_device *hid) { * @max: maximal valid usage->code to consider later (out parameter) * @type: input event type (EV_KEY, EV_REL, ...) * @c: code which corresponds to this usage and type + * + * The value pointed to by @bit will be set to NULL if either @type is + * an unhandled event type, or if @c is out of range for @type. This + * can be used as an error condition. 
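 *
 * An illustrative, hypothetical caller-side sketch of that check (not part
 * of this patch), with hi, usage, bit and max standing for the usual
 * arguments passed down from hid-input:
 *
 *	hid_map_usage(hi, usage, bit, max, EV_KEY, KEY_VOLUMEUP);
 *	if (*bit == NULL)
 *		return -1;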
*/ static inline void hid_map_usage(struct hid_input *hidinput, struct hid_usage *usage, unsigned long **bit, int *max, - __u8 type, __u16 c) + __u8 type, unsigned int c) { struct input_dev *input = hidinput->input; - - usage->type = type; - usage->code = c; + unsigned long *bmap = NULL; + unsigned int limit = 0; switch (type) { case EV_ABS: - *bit = input->absbit; - *max = ABS_MAX; + bmap = input->absbit; + limit = ABS_MAX; break; case EV_REL: - *bit = input->relbit; - *max = REL_MAX; + bmap = input->relbit; + limit = REL_MAX; break; case EV_KEY: - *bit = input->keybit; - *max = KEY_MAX; + bmap = input->keybit; + limit = KEY_MAX; break; case EV_LED: - *bit = input->ledbit; - *max = LED_MAX; + bmap = input->ledbit; + limit = LED_MAX; break; } + + if (unlikely(c > limit || !bmap)) { + pr_warn_ratelimited("%s: Invalid code %d type %d\n", + input->name, c, type); + *bit = NULL; + return; + } + + usage->type = type; + usage->code = c; + *max = limit; + *bit = bmap; } /** @@ -1000,7 +1015,8 @@ static inline void hid_map_usage_clear(struct hid_input *hidinput, __u8 type, __u16 c) { hid_map_usage(hidinput, usage, bit, max, type, c); - clear_bit(c, *bit); + if (*bit) + clear_bit(usage->code, *bit); } /** diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h index b4a017093b69..67d9b5a37460 100644 --- a/include/linux/hyperv.h +++ b/include/linux/hyperv.h @@ -423,6 +423,8 @@ enum vmbus_channel_message_type { CHANNELMSG_19 = 19, CHANNELMSG_20 = 20, CHANNELMSG_TL_CONNECT_REQUEST = 21, + CHANNELMSG_22 = 22, + CHANNELMSG_TL_CONNECT_RESULT = 23, CHANNELMSG_COUNT }; diff --git a/include/linux/i2c-algo-pca.h b/include/linux/i2c-algo-pca.h index d03071732db4..7c522fdd9ea7 100644 --- a/include/linux/i2c-algo-pca.h +++ b/include/linux/i2c-algo-pca.h @@ -53,6 +53,20 @@ #define I2C_PCA_CON_SI 0x08 /* Serial Interrupt */ #define I2C_PCA_CON_CR 0x07 /* Clock Rate (MASK) */ +/** + * struct pca_i2c_bus_settings - The configured PCA i2c bus settings + * @mode: Configured i2c bus mode + * @tlow: Configured SCL LOW period + * @thi: Configured SCL HIGH period + * @clock_freq: The configured clock frequency + */ +struct pca_i2c_bus_settings { + int mode; + int tlow; + int thi; + int clock_freq; +}; + struct i2c_algo_pca_data { void *data; /* private low level data */ void (*write_byte) (void *data, int reg, int val); @@ -64,6 +78,7 @@ struct i2c_algo_pca_data { * For PCA9665, use the frequency you want here. */ unsigned int i2c_clock; unsigned int chip; + struct pca_i2c_bus_settings bus_settings; }; int i2c_pca_add_bus(struct i2c_adapter *); diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h index b05e855f1ddd..41a518336673 100644 --- a/include/linux/if_vlan.h +++ b/include/linux/if_vlan.h @@ -25,6 +25,8 @@ #define VLAN_ETH_DATA_LEN 1500 /* Max. octets in payload */ #define VLAN_ETH_FRAME_LEN 1518 /* Max. octets in frame sans FCS */ +#define VLAN_MAX_DEPTH 8 /* Max. number of nested VLAN tags parsed */ + /* * struct vlan_hdr - vlan header * @h_vlan_TCI: priority and VLAN ID @@ -577,10 +579,10 @@ static inline int vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci) * Returns the EtherType of the packet, regardless of whether it is * vlan encapsulated (normal or hardware accelerated) or not. 
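 *
 * An illustrative sketch (not part of this patch): with the skb_protocol()
 * helper added below, call sites that previously open-coded the VLAN
 * check can simply do
 *
 *	__be16 proto = skb_protocol(skb, true);
 *
 *	if (proto == htons(ETH_P_IP))
 *		handle_ipv4(skb);
 *
 * where handle_ipv4() is a hypothetical consumer and "true" asks for the
 * EtherType behind any VLAN tags.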
*/ -static inline __be16 __vlan_get_protocol(struct sk_buff *skb, __be16 type, +static inline __be16 __vlan_get_protocol(const struct sk_buff *skb, __be16 type, int *depth) { - unsigned int vlan_depth = skb->mac_len; + unsigned int vlan_depth = skb->mac_len, parse_depth = VLAN_MAX_DEPTH; /* if type is 802.1Q/AD then the header should already be * present at mac_len - VLAN_HLEN (if mac_len > 0), or at @@ -595,13 +597,12 @@ static inline __be16 __vlan_get_protocol(struct sk_buff *skb, __be16 type, vlan_depth = ETH_HLEN; } do { - struct vlan_hdr *vh; + struct vlan_hdr vhdr, *vh; - if (unlikely(!pskb_may_pull(skb, - vlan_depth + VLAN_HLEN))) + vh = skb_header_pointer(skb, vlan_depth, sizeof(vhdr), &vhdr); + if (unlikely(!vh || !--parse_depth)) return 0; - vh = (struct vlan_hdr *)(skb->data + vlan_depth); type = vh->h_vlan_encapsulated_proto; vlan_depth += VLAN_HLEN; } while (eth_type_vlan(type)); @@ -620,11 +621,25 @@ static inline __be16 __vlan_get_protocol(struct sk_buff *skb, __be16 type, * Returns the EtherType of the packet, regardless of whether it is * vlan encapsulated (normal or hardware accelerated) or not. */ -static inline __be16 vlan_get_protocol(struct sk_buff *skb) +static inline __be16 vlan_get_protocol(const struct sk_buff *skb) { return __vlan_get_protocol(skb, skb->protocol, NULL); } +/* A getter for the SKB protocol field which will handle VLAN tags consistently + * whether VLAN acceleration is enabled or not. + */ +static inline __be16 skb_protocol(const struct sk_buff *skb, bool skip_vlan) +{ + if (!skip_vlan) + /* VLAN acceleration strips the VLAN header from the skb and + * moves it to skb->vlan_proto + */ + return skb_vlan_tag_present(skb) ? skb->vlan_proto : skb->protocol; + + return vlan_get_protocol(skb); +} + static inline void vlan_set_encap_proto(struct sk_buff *skb, struct vlan_hdr *vhdr) { diff --git a/include/linux/input/elan-i2c-ids.h b/include/linux/input/elan-i2c-ids.h index 1ecb6b45812c..520858d12680 100644 --- a/include/linux/input/elan-i2c-ids.h +++ b/include/linux/input/elan-i2c-ids.h @@ -67,8 +67,15 @@ static const struct acpi_device_id elan_acpi_id[] = { { "ELAN062B", 0 }, { "ELAN062C", 0 }, { "ELAN062D", 0 }, + { "ELAN062E", 0 }, /* Lenovo V340 Whiskey Lake U */ + { "ELAN062F", 0 }, /* Lenovo V340 Comet Lake U */ { "ELAN0631", 0 }, { "ELAN0632", 0 }, + { "ELAN0633", 0 }, /* Lenovo S145 */ + { "ELAN0634", 0 }, /* Lenovo V340 Ice lake */ + { "ELAN0635", 0 }, /* Lenovo V1415-IIL */ + { "ELAN0636", 0 }, /* Lenovo V1415-Dali */ + { "ELAN0637", 0 }, /* Lenovo V1415-IGLR */ { "ELAN1000", 0 }, { } }; diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h index 1e5dad8b8e59..ed870da78326 100644 --- a/include/linux/intel-iommu.h +++ b/include/linux/intel-iommu.h @@ -359,8 +359,8 @@ enum { #define QI_DEV_EIOTLB_ADDR(a) ((u64)(a) & VTD_PAGE_MASK) #define QI_DEV_EIOTLB_SIZE (((u64)1) << 11) -#define QI_DEV_EIOTLB_GLOB(g) ((u64)g) -#define QI_DEV_EIOTLB_PASID(p) (((u64)p) << 32) +#define QI_DEV_EIOTLB_GLOB(g) ((u64)(g) & 0x1) +#define QI_DEV_EIOTLB_PASID(p) ((u64)((p) & 0xfffff) << 32) #define QI_DEV_EIOTLB_SID(sid) ((u64)((sid) & 0xffff) << 16) #define QI_DEV_EIOTLB_QDEP(qd) ((u64)((qd) & 0x1f) << 4) #define QI_DEV_EIOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | \ diff --git a/include/linux/io-mapping.h b/include/linux/io-mapping.h index 6e125e9b4187..b9c91d321240 100644 --- a/include/linux/io-mapping.h +++ b/include/linux/io-mapping.h @@ -108,9 +108,12 @@ io_mapping_init_wc(struct io_mapping *iomap, resource_size_t base, unsigned long size) { + 
iomap->iomem = ioremap_wc(base, size); + if (!iomap->iomem) + return NULL; + iomap->base = base; iomap->size = size; - iomap->iomem = ioremap_wc(base, size); #if defined(pgprot_noncached_wc) /* archs can't agree on a name ... */ iomap->prot = pgprot_noncached_wc(PAGE_KERNEL); #elif defined(pgprot_writecombine) diff --git a/include/linux/ioport.h b/include/linux/ioport.h index 7bddddfc76d6..fdc201d61460 100644 --- a/include/linux/ioport.h +++ b/include/linux/ioport.h @@ -300,5 +300,11 @@ struct resource *devm_request_free_mem_region(struct device *dev, struct resource *request_free_mem_region(struct resource *base, unsigned long size, const char *name); +#ifdef CONFIG_IO_STRICT_DEVMEM +void revoke_devmem(struct resource *res); +#else +static inline void revoke_devmem(struct resource *res) { }; +#endif + #endif /* __ASSEMBLY__ */ #endif /* _LINUX_IOPORT_H */ diff --git a/include/linux/irq.h b/include/linux/irq.h index f8755e5fcd74..e9e69c511ea9 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -211,6 +211,8 @@ struct irq_data { * IRQD_CAN_RESERVE - Can use reservation mode * IRQD_MSI_NOMASK_QUIRK - Non-maskable MSI quirk for affinity change * required + * IRQD_AFFINITY_ON_ACTIVATE - Affinity is set on activation. Don't call + * irq_chip::irq_set_affinity() when deactivated. */ enum { IRQD_TRIGGER_MASK = 0xf, @@ -234,6 +236,7 @@ enum { IRQD_DEFAULT_TRIGGER_SET = (1 << 25), IRQD_CAN_RESERVE = (1 << 26), IRQD_MSI_NOMASK_QUIRK = (1 << 27), + IRQD_AFFINITY_ON_ACTIVATE = (1 << 29), }; #define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors) @@ -408,6 +411,16 @@ static inline bool irqd_msi_nomask_quirk(struct irq_data *d) return __irqd_to_state(d) & IRQD_MSI_NOMASK_QUIRK; } +static inline void irqd_set_affinity_on_activate(struct irq_data *d) +{ + __irqd_to_state(d) |= IRQD_AFFINITY_ON_ACTIVATE; +} + +static inline bool irqd_affinity_on_activate(struct irq_data *d) +{ + return __irqd_to_state(d) & IRQD_AFFINITY_ON_ACTIVATE; +} + #undef __irqd_to_state static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d) diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h index 10e6049c0ba9..b0e97e5de8ca 100644 --- a/include/linux/jbd2.h +++ b/include/linux/jbd2.h @@ -1402,7 +1402,6 @@ extern int jbd2_journal_skip_recovery (journal_t *); extern void jbd2_journal_update_sb_errno(journal_t *); extern int jbd2_journal_update_sb_log_tail (journal_t *, tid_t, unsigned long, int); -extern void __jbd2_journal_abort_hard (journal_t *); extern void jbd2_journal_abort (journal_t *, int); extern int jbd2_journal_errno (journal_t *); extern void jbd2_journal_ack_err (journal_t *); diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h index 657a83b943f0..1f96ce2b47df 100644 --- a/include/linux/kallsyms.h +++ b/include/linux/kallsyms.h @@ -18,6 +18,7 @@ #define KSYM_SYMBOL_LEN (sizeof("%s+%#lx/%#lx [%s]") + (KSYM_NAME_LEN - 1) + \ 2*(BITS_PER_LONG*3/10) + (MODULE_NAME_LEN - 1) + 1) +struct cred; struct module; static inline int is_kernel_inittext(unsigned long addr) @@ -98,7 +99,7 @@ int lookup_symbol_name(unsigned long addr, char *symname); int lookup_symbol_attrs(unsigned long addr, unsigned long *size, unsigned long *offset, char *modname, char *name); /* How and when do we show kallsyms values? 
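 *
 * An illustrative sketch (not part of this patch): with the credential-aware
 * signature below, interfaces that print symbol addresses are expected to
 * check the credentials of whoever opened the file, along the lines of
 *
 *	if (!kallsyms_show_value(file->f_cred))
 *		value = 0;
 *
 * where file is assumed to be the procfs/debugfs file being read.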
*/ -extern int kallsyms_show_value(void); +extern bool kallsyms_show_value(const struct cred *cred); #else /* !CONFIG_KALLSYMS */ @@ -158,7 +159,7 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u return -ERANGE; } -static inline int kallsyms_show_value(void) +static inline bool kallsyms_show_value(const struct cred *cred) { return false; } diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h index b072aeb1fd78..4d6fe87fd38f 100644 --- a/include/linux/kgdb.h +++ b/include/linux/kgdb.h @@ -323,7 +323,7 @@ extern void gdbstub_exit(int status); extern int kgdb_single_step; extern atomic_t kgdb_active; #define in_dbg_master() \ - (raw_smp_processor_id() == atomic_read(&kgdb_active)) + (irqs_disabled() && (smp_processor_id() == atomic_read(&kgdb_active))) extern bool dbg_is_early; extern void __init dbg_late_init(void); extern void kgdb_panic(const char *msg); diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h index 04bdaf01112c..645fd401c856 100644 --- a/include/linux/kprobes.h +++ b/include/linux/kprobes.h @@ -350,6 +350,10 @@ static inline struct kprobe_ctlblk *get_kprobe_ctlblk(void) return this_cpu_ptr(&kprobe_ctlblk); } +extern struct kprobe kprobe_busy; +void kprobe_busy_begin(void); +void kprobe_busy_end(void); + kprobe_opcode_t *kprobe_lookup_name(const char *name, unsigned int offset); int register_kprobe(struct kprobe *p); void unregister_kprobe(struct kprobe *p); diff --git a/include/linux/libata.h b/include/linux/libata.h index c44e4cfbcb16..e752368ea351 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -22,6 +22,7 @@ #include #include #include +#include /* * Define if arch has non-standard setup. This is a _PCI_ standard @@ -421,6 +422,7 @@ enum { ATA_HORKAGE_NO_DMA_LOG = (1 << 23), /* don't use DMA for log read */ ATA_HORKAGE_NOTRIM = (1 << 24), /* don't use TRIM */ ATA_HORKAGE_MAX_SEC_1024 = (1 << 25), /* Limit max sects to 1024 */ + ATA_HORKAGE_MAX_TRIM_128M = (1 << 26), /* Limit max trim size to 128M */ /* DMA mask for user DMA control: User visible values; DO NOT renumber */ @@ -870,6 +872,8 @@ struct ata_port { struct timer_list fastdrain_timer; unsigned long fastdrain_cnt; + async_cookie_t cookie; + int em_message_type; void *private_data; diff --git a/include/linux/log2.h b/include/linux/log2.h index 83a4a3ca3e8a..c619ec6eff4a 100644 --- a/include/linux/log2.h +++ b/include/linux/log2.h @@ -173,7 +173,7 @@ unsigned long __rounddown_pow_of_two(unsigned long n) #define roundup_pow_of_two(n) \ ( \ __builtin_constant_p(n) ? ( \ - (n == 1) ? 1 : \ + ((n) == 1) ? 
1 : \ (1UL << (ilog2((n) - 1) + 1)) \ ) : \ __roundup_pow_of_two(n) \ diff --git a/include/linux/mfd/stmfx.h b/include/linux/mfd/stmfx.h index 3c67983678ec..744dce63946e 100644 --- a/include/linux/mfd/stmfx.h +++ b/include/linux/mfd/stmfx.h @@ -109,6 +109,7 @@ struct stmfx { struct device *dev; struct regmap *map; struct regulator *vdd; + int irq; struct irq_domain *irq_domain; struct mutex lock; /* IRQ bus lock */ u8 irq_src; diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index acd859ea09d4..aba56077cfda 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -4177,6 +4177,7 @@ struct mlx5_ifc_query_vport_state_out_bits { enum { MLX5_VPORT_STATE_OP_MOD_VNIC_VPORT = 0x0, MLX5_VPORT_STATE_OP_MOD_ESW_VPORT = 0x1, + MLX5_VPORT_STATE_OP_MOD_UPLINK = 0x2, }; struct mlx5_ifc_arm_monitor_counter_in_bits { diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 8b5f758942a2..85804ba62215 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -709,6 +709,8 @@ typedef struct pglist_data { /* * Must be held any time you expect node_start_pfn, * node_present_pages, node_spanned_pages or nr_zones to stay constant. + * Also synchronizes pgdat->first_deferred_pfn during deferred page + * init. * * pgdat_resize_lock() and pgdat_resize_unlock() are provided to * manipulate node_size_lock without checking for CONFIG_MEMORY_HOTPLUG diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h index 953d7ca01eb6..4c56404e53a7 100644 --- a/include/linux/mod_devicetable.h +++ b/include/linux/mod_devicetable.h @@ -318,7 +318,7 @@ struct pcmcia_device_id { #define INPUT_DEVICE_ID_LED_MAX 0x0f #define INPUT_DEVICE_ID_SND_MAX 0x07 #define INPUT_DEVICE_ID_FF_MAX 0x7f -#define INPUT_DEVICE_ID_SW_MAX 0x0f +#define INPUT_DEVICE_ID_SW_MAX 0x10 #define INPUT_DEVICE_ID_PROP_MAX 0x1f #define INPUT_DEVICE_ID_MATCH_BUS 1 diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index b580a35f50ea..ec3081ab04c0 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -3043,7 +3043,7 @@ static inline int dev_recursion_level(void) return this_cpu_read(softnet_data.xmit.recursion); } -#define XMIT_RECURSION_LIMIT 10 +#define XMIT_RECURSION_LIMIT 8 static inline bool dev_xmit_recursion(void) { return unlikely(__this_cpu_read(softnet_data.xmit.recursion) > diff --git a/include/linux/netfilter/nf_conntrack_sctp.h b/include/linux/netfilter/nf_conntrack_sctp.h index 9a33f171aa82..625f491b95de 100644 --- a/include/linux/netfilter/nf_conntrack_sctp.h +++ b/include/linux/netfilter/nf_conntrack_sctp.h @@ -9,6 +9,8 @@ struct ip_ct_sctp { enum sctp_conntrack state; __be32 vtag[IP_CT_DIR_MAX]; + u8 last_dir; + u8 flags; }; #endif /* _NF_CONNTRACK_SCTP_H */ diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h index 851425c3178f..89016d08f6a2 100644 --- a/include/linux/netfilter/nfnetlink.h +++ b/include/linux/netfilter/nfnetlink.h @@ -43,8 +43,7 @@ int nfnetlink_has_listeners(struct net *net, unsigned int group); int nfnetlink_send(struct sk_buff *skb, struct net *net, u32 portid, unsigned int group, int echo, gfp_t flags); int nfnetlink_set_err(struct net *net, u32 portid, u32 group, int error); -int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u32 portid, - int flags); +int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u32 portid); static inline u16 nfnl_msg_type(u8 subsys, u8 msg_type) { diff --git a/include/linux/netfilter_ipv6.h b/include/linux/netfilter_ipv6.h 
index aac42c28fe62..9b67394471e1 100644 --- a/include/linux/netfilter_ipv6.h +++ b/include/linux/netfilter_ipv6.h @@ -58,7 +58,6 @@ struct nf_ipv6_ops { int (*output)(struct net *, struct sock *, struct sk_buff *)); int (*reroute)(struct sk_buff *skb, const struct nf_queue_entry *entry); #if IS_MODULE(CONFIG_IPV6) - int (*br_defrag)(struct net *net, struct sk_buff *skb, u32 user); int (*br_fragment)(struct net *net, struct sock *sk, struct sk_buff *skb, struct nf_bridge_frag_data *data, @@ -117,23 +116,6 @@ static inline int nf_ip6_route(struct net *net, struct dst_entry **dst, #include -static inline int nf_ipv6_br_defrag(struct net *net, struct sk_buff *skb, - u32 user) -{ -#if IS_MODULE(CONFIG_IPV6) - const struct nf_ipv6_ops *v6_ops = nf_get_ipv6_ops(); - - if (!v6_ops) - return 1; - - return v6_ops->br_defrag(net, skb, user); -#elif IS_BUILTIN(CONFIG_IPV6) - return nf_ct_frag6_gather(net, skb, user); -#else - return 1; -#endif -} - int br_ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb, struct nf_bridge_frag_data *data, int (*output)(struct net *, struct sock *sk, diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index 570a60c2f4f4..ad09c0cc5464 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -225,6 +225,7 @@ struct nfs4_copy_state { #define NFS_INO_INVALID_OTHER BIT(12) /* other attrs are invalid */ #define NFS_INO_DATA_INVAL_DEFER \ BIT(13) /* Deferred cache invalidation */ +#define NFS_INO_INVALID_BLOCKS BIT(14) /* cached blocks are invalid */ #define NFS_INO_INVALID_ATTR (NFS_INO_INVALID_CHANGE \ | NFS_INO_INVALID_CTIME \ diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index ec88f3d46143..d8c12dae05c6 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -148,6 +148,8 @@ /* Vendors and devices. Sort key: vendor first, device next. 
*/ +#define PCI_VENDOR_ID_LOONGSON 0x0014 + #define PCI_VENDOR_ID_TTTECH 0x0357 #define PCI_DEVICE_ID_TTTECH_MC322 0x000a @@ -548,7 +550,9 @@ #define PCI_DEVICE_ID_AMD_17H_DF_F3 0x1463 #define PCI_DEVICE_ID_AMD_17H_M10H_DF_F3 0x15eb #define PCI_DEVICE_ID_AMD_17H_M30H_DF_F3 0x1493 +#define PCI_DEVICE_ID_AMD_17H_M60H_DF_F3 0x144b #define PCI_DEVICE_ID_AMD_17H_M70H_DF_F3 0x1443 +#define PCI_DEVICE_ID_AMD_19H_DF_F3 0x1653 #define PCI_DEVICE_ID_AMD_CNB17H_F3 0x1703 #define PCI_DEVICE_ID_AMD_LANCE 0x2000 #define PCI_DEVICE_ID_AMD_LANCE_HOME 0x2001 @@ -1830,6 +1834,12 @@ #define PCI_VENDOR_ID_NVIDIA_SGS 0x12d2 #define PCI_DEVICE_ID_NVIDIA_SGS_RIVA128 0x0018 +#define PCI_VENDOR_ID_PERICOM 0x12D8 +#define PCI_DEVICE_ID_PERICOM_PI7C9X7951 0x7951 +#define PCI_DEVICE_ID_PERICOM_PI7C9X7952 0x7952 +#define PCI_DEVICE_ID_PERICOM_PI7C9X7954 0x7954 +#define PCI_DEVICE_ID_PERICOM_PI7C9X7958 0x7958 + #define PCI_SUBVENDOR_ID_CHASE_PCIFAST 0x12E0 #define PCI_SUBDEVICE_ID_CHASE_PCIFAST4 0x0031 #define PCI_SUBDEVICE_ID_CHASE_PCIFAST8 0x0021 @@ -3009,6 +3019,7 @@ #define PCI_DEVICE_ID_INTEL_84460GX 0x84ea #define PCI_DEVICE_ID_INTEL_IXP4XX 0x8500 #define PCI_DEVICE_ID_INTEL_IXP2800 0x9004 +#define PCI_DEVICE_ID_INTEL_VMD_9A0B 0x9a0b #define PCI_DEVICE_ID_INTEL_S21152BB 0xb152 #define PCI_VENDOR_ID_SCALEMP 0x8686 diff --git a/include/linux/platform_data/ti-sysc.h b/include/linux/platform_data/ti-sysc.h index 2cbde6542849..5fcc9bc9e751 100644 --- a/include/linux/platform_data/ti-sysc.h +++ b/include/linux/platform_data/ti-sysc.h @@ -49,6 +49,7 @@ struct sysc_regbits { s8 emufree_shift; }; +#define SYSC_MODULE_QUIRK_RTC_UNLOCK BIT(22) #define SYSC_QUIRK_CLKDM_NOAUTO BIT(21) #define SYSC_QUIRK_FORCE_MSTANDBY BIT(20) #define SYSC_MODULE_QUIRK_AESS BIT(19) diff --git a/include/linux/prandom.h b/include/linux/prandom.h new file mode 100644 index 000000000000..aa16e6468f91 --- /dev/null +++ b/include/linux/prandom.h @@ -0,0 +1,78 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * include/linux/prandom.h + * + * Include file for the fast pseudo-random 32-bit + * generation. + */ +#ifndef _LINUX_PRANDOM_H +#define _LINUX_PRANDOM_H + +#include +#include + +u32 prandom_u32(void); +void prandom_bytes(void *buf, size_t nbytes); +void prandom_seed(u32 seed); +void prandom_reseed_late(void); + +struct rnd_state { + __u32 s1, s2, s3, s4; +}; + +DECLARE_PER_CPU(struct rnd_state, net_rand_state); + +u32 prandom_u32_state(struct rnd_state *state); +void prandom_bytes_state(struct rnd_state *state, void *buf, size_t nbytes); +void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state); + +#define prandom_init_once(pcpu_state) \ + DO_ONCE(prandom_seed_full_state, (pcpu_state)) + +/** + * prandom_u32_max - returns a pseudo-random number in interval [0, ep_ro) + * @ep_ro: right open interval endpoint + * + * Returns a pseudo-random number that is in interval [0, ep_ro). Note + * that the result depends on PRNG being well distributed in [0, ~0U] + * u32 space. Here we use maximally equidistributed combined Tausworthe + * generator, that is, prandom_u32(). This is useful when requesting a + * random index of an array containing ep_ro elements, for example. + * + * Returns: pseudo-random number in interval [0, ep_ro) + */ +static inline u32 prandom_u32_max(u32 ep_ro) +{ + return (u32)(((u64) prandom_u32() * ep_ro) >> 32); +} + +/* + * Handle minimum values for seeds + */ +static inline u32 __seed(u32 x, u32 m) +{ + return (x < m) ? x + m : x; +} + +/** + * prandom_seed_state - set seed for prandom_u32_state(). 
+ * @state: pointer to state structure to receive the seed. + * @seed: arbitrary 64-bit value to use as a seed. + */ +static inline void prandom_seed_state(struct rnd_state *state, u64 seed) +{ + u32 i = (seed >> 32) ^ (seed << 10) ^ seed; + + state->s1 = __seed(i, 2U); + state->s2 = __seed(i, 8U); + state->s3 = __seed(i, 16U); + state->s4 = __seed(i, 128U); +} + +/* Pseudo random number generator from numerical recipes. */ +static inline u32 next_pseudo_random32(u32 seed) +{ + return seed * 1664525 + 1013904223; +} + +#endif diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h index 733fad7dfbed..6d15040c642c 100644 --- a/include/linux/qed/qed_chain.h +++ b/include/linux/qed/qed_chain.h @@ -207,28 +207,34 @@ static inline u32 qed_chain_get_cons_idx_u32(struct qed_chain *p_chain) static inline u16 qed_chain_get_elem_left(struct qed_chain *p_chain) { + u16 elem_per_page = p_chain->elem_per_page; + u32 prod = p_chain->u.chain16.prod_idx; + u32 cons = p_chain->u.chain16.cons_idx; u16 used; - used = (u16) (((u32)0x10000 + - (u32)p_chain->u.chain16.prod_idx) - - (u32)p_chain->u.chain16.cons_idx); + if (prod < cons) + prod += (u32)U16_MAX + 1; + + used = (u16)(prod - cons); if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR) - used -= p_chain->u.chain16.prod_idx / p_chain->elem_per_page - - p_chain->u.chain16.cons_idx / p_chain->elem_per_page; + used -= prod / elem_per_page - cons / elem_per_page; return (u16)(p_chain->capacity - used); } static inline u32 qed_chain_get_elem_left_u32(struct qed_chain *p_chain) { + u16 elem_per_page = p_chain->elem_per_page; + u64 prod = p_chain->u.chain32.prod_idx; + u64 cons = p_chain->u.chain32.cons_idx; u32 used; - used = (u32) (((u64)0x100000000ULL + - (u64)p_chain->u.chain32.prod_idx) - - (u64)p_chain->u.chain32.cons_idx); + if (prod < cons) + prod += (u64)U32_MAX + 1; + + used = (u32)(prod - cons); if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR) - used -= p_chain->u.chain32.prod_idx / p_chain->elem_per_page - - p_chain->u.chain32.cons_idx / p_chain->elem_per_page; + used -= (u32)(prod / elem_per_page - cons / elem_per_page); return p_chain->capacity - used; } diff --git a/include/linux/random.h b/include/linux/random.h index f189c927fdea..5b3ec7d2791f 100644 --- a/include/linux/random.h +++ b/include/linux/random.h @@ -108,61 +108,12 @@ declare_get_random_var_wait(long) unsigned long randomize_page(unsigned long start, unsigned long range); -u32 prandom_u32(void); -void prandom_bytes(void *buf, size_t nbytes); -void prandom_seed(u32 seed); -void prandom_reseed_late(void); - -struct rnd_state { - __u32 s1, s2, s3, s4; -}; - -u32 prandom_u32_state(struct rnd_state *state); -void prandom_bytes_state(struct rnd_state *state, void *buf, size_t nbytes); -void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state); - -#define prandom_init_once(pcpu_state) \ - DO_ONCE(prandom_seed_full_state, (pcpu_state)) - -/** - * prandom_u32_max - returns a pseudo-random number in interval [0, ep_ro) - * @ep_ro: right open interval endpoint - * - * Returns a pseudo-random number that is in interval [0, ep_ro). Note - * that the result depends on PRNG being well distributed in [0, ~0U] - * u32 space. Here we use maximally equidistributed combined Tausworthe - * generator, that is, prandom_u32(). This is useful when requesting a - * random index of an array containing ep_ro elements, for example. 
- * - * Returns: pseudo-random number in interval [0, ep_ro) - */ -static inline u32 prandom_u32_max(u32 ep_ro) -{ - return (u32)(((u64) prandom_u32() * ep_ro) >> 32); -} - /* - * Handle minimum values for seeds + * This is designed to be standalone for just prandom + * users, but for now we include it from + * for legacy reasons. */ -static inline u32 __seed(u32 x, u32 m) -{ - return (x < m) ? x + m : x; -} - -/** - * prandom_seed_state - set seed for prandom_u32_state(). - * @state: pointer to state structure to receive the seed. - * @seed: arbitrary 64-bit value to use as a seed. - */ -static inline void prandom_seed_state(struct rnd_state *state, u64 seed) -{ - u32 i = (seed >> 32) ^ (seed << 10) ^ seed; - - state->s1 = __seed(i, 2U); - state->s2 = __seed(i, 8U); - state->s3 = __seed(i, 16U); - state->s4 = __seed(i, 128U); -} +#include #ifdef CONFIG_ARCH_RANDOM # include @@ -193,10 +144,4 @@ static inline bool arch_has_random_seed(void) } #endif -/* Pseudo random number generator from numerical recipes. */ -static inline u32 next_pseudo_random32(u32 seed) -{ - return seed * 1664525 + 1013904223; -} - #endif /* _LINUX_RANDOM_H */ diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h index beb9a9da1699..c5bf21261bb1 100644 --- a/include/linux/rhashtable.h +++ b/include/linux/rhashtable.h @@ -349,11 +349,11 @@ static inline void rht_unlock(struct bucket_table *tbl, local_bh_enable(); } -static inline struct rhash_head __rcu *__rht_ptr( - struct rhash_lock_head *const *bkt) +static inline struct rhash_head *__rht_ptr( + struct rhash_lock_head *p, struct rhash_lock_head __rcu *const *bkt) { - return (struct rhash_head __rcu *) - ((unsigned long)*bkt & ~BIT(0) ?: + return (struct rhash_head *) + ((unsigned long)p & ~BIT(0) ?: (unsigned long)RHT_NULLS_MARKER(bkt)); } @@ -365,25 +365,26 @@ static inline struct rhash_head __rcu *__rht_ptr( * access is guaranteed, such as when destroying the table. 
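 *
 * An illustrative sketch (not part of this patch): a locked bucket walk
 * would typically start with
 *
 *	struct rhash_head *head = rht_ptr(bkt, tbl, hash);
 *
 *	if (rht_is_a_nulls(head))
 *		return NULL;
 *
 * where bkt, tbl and hash are assumed to be held by the caller; an empty
 * bucket is reported as a nulls marker rather than a plain NULL.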
*/ static inline struct rhash_head *rht_ptr_rcu( - struct rhash_lock_head *const *bkt) + struct rhash_lock_head *const *p) { - struct rhash_head __rcu *p = __rht_ptr(bkt); - - return rcu_dereference(p); + struct rhash_lock_head __rcu *const *bkt = (void *)p; + return __rht_ptr(rcu_dereference(*bkt), bkt); } static inline struct rhash_head *rht_ptr( - struct rhash_lock_head *const *bkt, + struct rhash_lock_head *const *p, struct bucket_table *tbl, unsigned int hash) { - return rht_dereference_bucket(__rht_ptr(bkt), tbl, hash); + struct rhash_lock_head __rcu *const *bkt = (void *)p; + return __rht_ptr(rht_dereference_bucket(*bkt, tbl, hash), bkt); } static inline struct rhash_head *rht_ptr_exclusive( - struct rhash_lock_head *const *bkt) + struct rhash_lock_head *const *p) { - return rcu_dereference_protected(__rht_ptr(bkt), 1); + struct rhash_lock_head __rcu *const *bkt = (void *)p; + return __rht_ptr(rcu_dereference_protected(*bkt, 1), bkt); } static inline void rht_assign_locked(struct rhash_lock_head **bkt, diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h index c49257a3b510..a132d875d351 100644 --- a/include/linux/sched/mm.h +++ b/include/linux/sched/mm.h @@ -49,6 +49,8 @@ static inline void mmdrop(struct mm_struct *mm) __mmdrop(mm); } +void mmdrop(struct mm_struct *mm); + /* * This has to be called after a get_task_mm()/mmget_not_zero() * followed by taking the mmap_sem for writing before modifying the diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h index a3adbe593505..20f3550b0b11 100644 --- a/include/linux/skmsg.h +++ b/include/linux/skmsg.h @@ -450,6 +450,19 @@ static inline void psock_set_prog(struct bpf_prog **pprog, bpf_prog_put(prog); } +static inline int psock_replace_prog(struct bpf_prog **pprog, + struct bpf_prog *prog, + struct bpf_prog *old) +{ + if (cmpxchg(pprog, old, prog) != old) + return -ENOENT; + + if (old) + bpf_prog_put(old); + + return 0; +} + static inline void psock_progs_drop(struct sk_psock_progs *progs) { psock_set_prog(&progs->msg_parser, NULL); @@ -457,4 +470,12 @@ static inline void psock_progs_drop(struct sk_psock_progs *progs) psock_set_prog(&progs->skb_verdict, NULL); } +int sk_psock_tls_strp_read(struct sk_psock *psock, struct sk_buff *skb); + +static inline bool sk_psock_strp_enabled(struct sk_psock *psock) +{ + if (!psock) + return false; + return psock->parser.enabled; +} #endif /* _LINUX_SKMSG_H */ diff --git a/include/linux/string.h b/include/linux/string.h index b6ccdc2c7f02..b2264355272d 100644 --- a/include/linux/string.h +++ b/include/linux/string.h @@ -269,6 +269,31 @@ void __read_overflow3(void) __compiletime_error("detected read beyond size of ob void __write_overflow(void) __compiletime_error("detected write beyond size of object passed as 1st parameter"); #if !defined(__NO_FORTIFY) && defined(__OPTIMIZE__) && defined(CONFIG_FORTIFY_SOURCE) + +#ifdef CONFIG_KASAN +extern void *__underlying_memchr(const void *p, int c, __kernel_size_t size) __RENAME(memchr); +extern int __underlying_memcmp(const void *p, const void *q, __kernel_size_t size) __RENAME(memcmp); +extern void *__underlying_memcpy(void *p, const void *q, __kernel_size_t size) __RENAME(memcpy); +extern void *__underlying_memmove(void *p, const void *q, __kernel_size_t size) __RENAME(memmove); +extern void *__underlying_memset(void *p, int c, __kernel_size_t size) __RENAME(memset); +extern char *__underlying_strcat(char *p, const char *q) __RENAME(strcat); +extern char *__underlying_strcpy(char *p, const char *q) __RENAME(strcpy); +extern __kernel_size_t 
__underlying_strlen(const char *p) __RENAME(strlen); +extern char *__underlying_strncat(char *p, const char *q, __kernel_size_t count) __RENAME(strncat); +extern char *__underlying_strncpy(char *p, const char *q, __kernel_size_t size) __RENAME(strncpy); +#else +#define __underlying_memchr __builtin_memchr +#define __underlying_memcmp __builtin_memcmp +#define __underlying_memcpy __builtin_memcpy +#define __underlying_memmove __builtin_memmove +#define __underlying_memset __builtin_memset +#define __underlying_strcat __builtin_strcat +#define __underlying_strcpy __builtin_strcpy +#define __underlying_strlen __builtin_strlen +#define __underlying_strncat __builtin_strncat +#define __underlying_strncpy __builtin_strncpy +#endif + __FORTIFY_INLINE char *strncpy(char *p, const char *q, __kernel_size_t size) { size_t p_size = __builtin_object_size(p, 0); @@ -276,14 +301,14 @@ __FORTIFY_INLINE char *strncpy(char *p, const char *q, __kernel_size_t size) __write_overflow(); if (p_size < size) fortify_panic(__func__); - return __builtin_strncpy(p, q, size); + return __underlying_strncpy(p, q, size); } __FORTIFY_INLINE char *strcat(char *p, const char *q) { size_t p_size = __builtin_object_size(p, 0); if (p_size == (size_t)-1) - return __builtin_strcat(p, q); + return __underlying_strcat(p, q); if (strlcat(p, q, p_size) >= p_size) fortify_panic(__func__); return p; @@ -297,7 +322,7 @@ __FORTIFY_INLINE __kernel_size_t strlen(const char *p) /* Work around gcc excess stack consumption issue */ if (p_size == (size_t)-1 || (__builtin_constant_p(p[p_size - 1]) && p[p_size - 1] == '\0')) - return __builtin_strlen(p); + return __underlying_strlen(p); ret = strnlen(p, p_size); if (p_size <= ret) fortify_panic(__func__); @@ -330,7 +355,7 @@ __FORTIFY_INLINE size_t strlcpy(char *p, const char *q, size_t size) __write_overflow(); if (len >= p_size) fortify_panic(__func__); - __builtin_memcpy(p, q, len); + __underlying_memcpy(p, q, len); p[len] = '\0'; } return ret; @@ -343,12 +368,12 @@ __FORTIFY_INLINE char *strncat(char *p, const char *q, __kernel_size_t count) size_t p_size = __builtin_object_size(p, 0); size_t q_size = __builtin_object_size(q, 0); if (p_size == (size_t)-1 && q_size == (size_t)-1) - return __builtin_strncat(p, q, count); + return __underlying_strncat(p, q, count); p_len = strlen(p); copy_len = strnlen(q, count); if (p_size < p_len + copy_len + 1) fortify_panic(__func__); - __builtin_memcpy(p + p_len, q, copy_len); + __underlying_memcpy(p + p_len, q, copy_len); p[p_len + copy_len] = '\0'; return p; } @@ -360,7 +385,7 @@ __FORTIFY_INLINE void *memset(void *p, int c, __kernel_size_t size) __write_overflow(); if (p_size < size) fortify_panic(__func__); - return __builtin_memset(p, c, size); + return __underlying_memset(p, c, size); } __FORTIFY_INLINE void *memcpy(void *p, const void *q, __kernel_size_t size) @@ -375,7 +400,7 @@ __FORTIFY_INLINE void *memcpy(void *p, const void *q, __kernel_size_t size) } if (p_size < size || q_size < size) fortify_panic(__func__); - return __builtin_memcpy(p, q, size); + return __underlying_memcpy(p, q, size); } __FORTIFY_INLINE void *memmove(void *p, const void *q, __kernel_size_t size) @@ -390,7 +415,7 @@ __FORTIFY_INLINE void *memmove(void *p, const void *q, __kernel_size_t size) } if (p_size < size || q_size < size) fortify_panic(__func__); - return __builtin_memmove(p, q, size); + return __underlying_memmove(p, q, size); } extern void *__real_memscan(void *, int, __kernel_size_t) __RENAME(memscan); @@ -416,7 +441,7 @@ __FORTIFY_INLINE int memcmp(const void 
*p, const void *q, __kernel_size_t size) } if (p_size < size || q_size < size) fortify_panic(__func__); - return __builtin_memcmp(p, q, size); + return __underlying_memcmp(p, q, size); } __FORTIFY_INLINE void *memchr(const void *p, int c, __kernel_size_t size) @@ -426,7 +451,7 @@ __FORTIFY_INLINE void *memchr(const void *p, int c, __kernel_size_t size) __read_overflow(); if (p_size < size) fortify_panic(__func__); - return __builtin_memchr(p, c, size); + return __underlying_memchr(p, c, size); } void *__real_memchr_inv(const void *s, int c, size_t n) __RENAME(memchr_inv); @@ -457,11 +482,22 @@ __FORTIFY_INLINE char *strcpy(char *p, const char *q) size_t p_size = __builtin_object_size(p, 0); size_t q_size = __builtin_object_size(q, 0); if (p_size == (size_t)-1 && q_size == (size_t)-1) - return __builtin_strcpy(p, q); + return __underlying_strcpy(p, q); memcpy(p, q, strlen(q) + 1); return p; } +/* Don't use these outside the FORITFY_SOURCE implementation */ +#undef __underlying_memchr +#undef __underlying_memcmp +#undef __underlying_memcpy +#undef __underlying_memmove +#undef __underlying_memset +#undef __underlying_strcat +#undef __underlying_strcpy +#undef __underlying_strlen +#undef __underlying_strncat +#undef __underlying_strncpy #endif /** diff --git a/include/linux/sunrpc/gss_api.h b/include/linux/sunrpc/gss_api.h index d4326d6662a4..b5a4eb14f809 100644 --- a/include/linux/sunrpc/gss_api.h +++ b/include/linux/sunrpc/gss_api.h @@ -85,6 +85,7 @@ struct pf_desc { u32 service; char *name; char *auth_domain_name; + struct auth_domain *domain; bool datatouch; }; diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h index 1afe38eb33f7..82665ff360fd 100644 --- a/include/linux/sunrpc/svc.h +++ b/include/linux/sunrpc/svc.h @@ -517,6 +517,9 @@ void svc_wake_up(struct svc_serv *); void svc_reserve(struct svc_rqst *rqstp, int space); struct svc_pool * svc_pool_for_cpu(struct svc_serv *serv, int cpu); char * svc_print_addr(struct svc_rqst *, char *, size_t); +int svc_encode_read_payload(struct svc_rqst *rqstp, + unsigned int offset, + unsigned int length); unsigned int svc_fill_write_vector(struct svc_rqst *rqstp, struct page **pages, struct kvec *first, size_t total); diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h index fddad9f5b390..26f282e5e082 100644 --- a/include/linux/sunrpc/svc_rdma.h +++ b/include/linux/sunrpc/svc_rdma.h @@ -137,6 +137,8 @@ struct svc_rdma_recv_ctxt { unsigned int rc_page_count; unsigned int rc_hdr_count; u32 rc_inv_rkey; + unsigned int rc_read_payload_offset; + unsigned int rc_read_payload_length; struct page *rc_pages[RPCSVC_MAXPAGES]; }; @@ -171,7 +173,9 @@ extern int svc_rdma_recv_read_chunk(struct svcxprt_rdma *rdma, struct svc_rqst *rqstp, struct svc_rdma_recv_ctxt *head, __be32 *p); extern int svc_rdma_send_write_chunk(struct svcxprt_rdma *rdma, - __be32 *wr_ch, struct xdr_buf *xdr); + __be32 *wr_ch, struct xdr_buf *xdr, + unsigned int offset, + unsigned long length); extern int svc_rdma_send_reply_chunk(struct svcxprt_rdma *rdma, __be32 *rp_ch, bool writelist, struct xdr_buf *xdr); @@ -190,6 +194,8 @@ extern int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma, struct svc_rdma_send_ctxt *ctxt, struct xdr_buf *xdr, __be32 *wr_lst); extern int svc_rdma_sendto(struct svc_rqst *); +extern int svc_rdma_read_payload(struct svc_rqst *rqstp, unsigned int offset, + unsigned int length); /* svc_rdma_transport.c */ extern int svc_rdma_create_listen(struct svc_serv *, int, struct sockaddr *); diff --git 
a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h index ea6f46be9cb7..9e1e046de176 100644 --- a/include/linux/sunrpc/svc_xprt.h +++ b/include/linux/sunrpc/svc_xprt.h @@ -21,6 +21,8 @@ struct svc_xprt_ops { int (*xpo_has_wspace)(struct svc_xprt *); int (*xpo_recvfrom)(struct svc_rqst *); int (*xpo_sendto)(struct svc_rqst *); + int (*xpo_read_payload)(struct svc_rqst *, unsigned int, + unsigned int); void (*xpo_release_rqst)(struct svc_rqst *); void (*xpo_detach)(struct svc_xprt *); void (*xpo_free)(struct svc_xprt *); diff --git a/include/linux/sunrpc/svcauth_gss.h b/include/linux/sunrpc/svcauth_gss.h index a4528b26c8aa..d229d27ab19e 100644 --- a/include/linux/sunrpc/svcauth_gss.h +++ b/include/linux/sunrpc/svcauth_gss.h @@ -21,7 +21,8 @@ int gss_svc_init(void); void gss_svc_shutdown(void); int gss_svc_init_net(struct net *net); void gss_svc_shutdown_net(struct net *net); -int svcauth_gss_register_pseudoflavor(u32 pseudoflavor, char * name); +struct auth_domain *svcauth_gss_register_pseudoflavor(u32 pseudoflavor, + char *name); u32 svcauth_gss_flavor(struct auth_domain *dom); #endif /* __KERNEL__ */ diff --git a/include/linux/tcp.h b/include/linux/tcp.h index 668e25a76d69..358deb4ff830 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -216,6 +216,8 @@ struct tcp_sock { } rack; u16 advmss; /* Advertised MSS */ u8 compressed_ack; + u8 tlp_retrans:1, /* TLP is a retransmission */ + unused_1:7; u32 chrono_start; /* Start time in jiffies of a TCP chrono */ u32 chrono_stat[3]; /* Time in jiffies for chrono_stat stats */ u8 chrono_type:2, /* current chronograph type */ @@ -238,7 +240,7 @@ struct tcp_sock { save_syn:1, /* Save headers of SYN packet */ is_cwnd_limited:1,/* forward progress limited by snd_cwnd? */ syn_smc:1; /* SYN includes SMC */ - u32 tlp_high_seq; /* snd_nxt at the time of TLP retransmit. */ + u32 tlp_high_seq; /* snd_nxt at the time of TLP */ u32 tcp_tx_delay; /* delay (in usec) added to TX packets */ u64 tcp_wstamp_ns; /* departure time for next sent data packet */ diff --git a/include/linux/tpm.h b/include/linux/tpm.h index 53c0ea9ec9df..77fdc988c610 100644 --- a/include/linux/tpm.h +++ b/include/linux/tpm.h @@ -93,6 +93,7 @@ struct tpm_space { u8 *context_buf; u32 session_tbl[3]; u8 *session_buf; + u32 buf_size; }; struct tpm_bios_log { diff --git a/include/linux/tpm_eventlog.h b/include/linux/tpm_eventlog.h index 131ea1bad458..f3caeeb7a0d0 100644 --- a/include/linux/tpm_eventlog.h +++ b/include/linux/tpm_eventlog.h @@ -81,6 +81,8 @@ struct tcg_efi_specid_event_algs { u16 digest_size; } __packed; +#define TCG_SPECID_SIG "Spec ID Event03" + struct tcg_efi_specid_event_head { u8 signature[16]; u32 platform_class; @@ -171,6 +173,7 @@ static inline int __calc_tpm2_event_size(struct tcg_pcr_event2_head *event, int i; int j; u32 count, event_type; + const u8 zero_digest[sizeof(event_header->digest)] = {0}; marker = event; marker_start = marker; @@ -198,10 +201,26 @@ static inline int __calc_tpm2_event_size(struct tcg_pcr_event2_head *event, count = READ_ONCE(event->count); event_type = READ_ONCE(event->event_type); + /* Verify that it's the log header */ + if (event_header->pcr_idx != 0 || + event_header->event_type != NO_ACTION || + memcmp(event_header->digest, zero_digest, sizeof(zero_digest))) { + size = 0; + goto out; + } + efispecid = (struct tcg_efi_specid_event_head *)event_header->event; - /* Check if event is malformed. */ - if (count > efispecid->num_algs) { + /* + * Perform validation of the event in order to identify malformed + * events. 
This function may be asked to parse arbitrary byte sequences + * immediately following a valid event log. The caller expects this + * function to recognize that the byte sequence is not a valid event + * and to return an event size of 0. + */ + if (memcmp(efispecid->signature, TCG_SPECID_SIG, + sizeof(TCG_SPECID_SIG)) || + !efispecid->num_algs || count != efispecid->num_algs) { size = 0; goto out; } diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h index 1fb11daa5c53..57ce5af258a3 100644 --- a/include/linux/tracepoint.h +++ b/include/linux/tracepoint.h @@ -362,7 +362,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p) static const char *___tp_str __tracepoint_string = str; \ ___tp_str; \ }) -#define __tracepoint_string __attribute__((section("__tracepoint_str"))) +#define __tracepoint_string __attribute__((section("__tracepoint_str"), used)) #else /* * tracepoint_string() is used to save the string address for userspace diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h index 8675e145ea8b..2040696d75b6 100644 --- a/include/linux/usb/composite.h +++ b/include/linux/usb/composite.h @@ -249,6 +249,9 @@ int usb_function_activate(struct usb_function *); int usb_interface_id(struct usb_configuration *, struct usb_function *); +int config_ep_by_speed_and_alt(struct usb_gadget *g, struct usb_function *f, + struct usb_ep *_ep, u8 alt); + int config_ep_by_speed(struct usb_gadget *g, struct usb_function *f, struct usb_ep *_ep); diff --git a/include/linux/usb/ehci_def.h b/include/linux/usb/ehci_def.h index a15ce99dfc2d..78e006355557 100644 --- a/include/linux/usb/ehci_def.h +++ b/include/linux/usb/ehci_def.h @@ -151,7 +151,7 @@ struct ehci_regs { #define PORT_OWNER (1<<13) /* true: companion hc owns this port */ #define PORT_POWER (1<<12) /* true: has power (see PPC) */ #define PORT_USB11(x) (((x)&(3<<10)) == (1<<10)) /* USB 1.1 device */ -/* 11:10 for detecting lowspeed devices (reset vs release ownership) */ +#define PORT_LS_MASK (3<<10) /* Link status (SE0, K or J */ /* 9 reserved */ #define PORT_LPM (1<<9) /* LPM transaction */ #define PORT_RESET (1<<8) /* reset port */ diff --git a/include/linux/xattr.h b/include/linux/xattr.h index 6dad031be3c2..3a71ad716da5 100644 --- a/include/linux/xattr.h +++ b/include/linux/xattr.h @@ -51,8 +51,10 @@ ssize_t vfs_getxattr(struct dentry *, const char *, void *, size_t); ssize_t vfs_listxattr(struct dentry *d, char *list, size_t size); int __vfs_setxattr(struct dentry *, struct inode *, const char *, const void *, size_t, int); int __vfs_setxattr_noperm(struct dentry *, const char *, const void *, size_t, int); +int __vfs_setxattr_locked(struct dentry *, const char *, const void *, size_t, int, struct inode **); int vfs_setxattr(struct dentry *, const char *, const void *, size_t, int); int __vfs_removexattr(struct dentry *, const char *); +int __vfs_removexattr_locked(struct dentry *, const char *, struct inode **); int vfs_removexattr(struct dentry *, const char *); ssize_t generic_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size); diff --git a/include/net/addrconf.h b/include/net/addrconf.h index 3f62b347b04a..ab8b3eb53d4b 100644 --- a/include/net/addrconf.h +++ b/include/net/addrconf.h @@ -273,6 +273,7 @@ int ipv6_sock_ac_join(struct sock *sk, int ifindex, const struct in6_addr *addr); int ipv6_sock_ac_drop(struct sock *sk, int ifindex, const struct in6_addr *addr); +void __ipv6_sock_ac_close(struct sock *sk); void ipv6_sock_ac_close(struct sock *sk); int 
__ipv6_dev_ac_inc(struct inet6_dev *idev, const struct in6_addr *addr); diff --git a/include/net/af_rxrpc.h b/include/net/af_rxrpc.h index ab988940bf04..55b980b21f4b 100644 --- a/include/net/af_rxrpc.h +++ b/include/net/af_rxrpc.h @@ -59,7 +59,7 @@ bool rxrpc_kernel_abort_call(struct socket *, struct rxrpc_call *, void rxrpc_kernel_end_call(struct socket *, struct rxrpc_call *); void rxrpc_kernel_get_peer(struct socket *, struct rxrpc_call *, struct sockaddr_rxrpc *); -u32 rxrpc_kernel_get_srtt(struct socket *, struct rxrpc_call *); +bool rxrpc_kernel_get_srtt(struct socket *, struct rxrpc_call *, u32 *); int rxrpc_kernel_charge_accept(struct socket *, rxrpc_notify_rx_t, rxrpc_user_attach_call_t, unsigned long, gfp_t, unsigned int); diff --git a/include/net/dst.h b/include/net/dst.h index 3448cf865ede..433f7c1ce8a9 100644 --- a/include/net/dst.h +++ b/include/net/dst.h @@ -401,7 +401,15 @@ static inline struct neighbour *dst_neigh_lookup(const struct dst_entry *dst, co static inline struct neighbour *dst_neigh_lookup_skb(const struct dst_entry *dst, struct sk_buff *skb) { - struct neighbour *n = dst->ops->neigh_lookup(dst, skb, NULL); + struct neighbour *n = NULL; + + /* The packets from tunnel devices (eg bareudp) may have only + * metadata in the dst pointer of skb. Hence a pointer check of + * neigh_lookup is needed. + */ + if (dst->ops->neigh_lookup) + n = dst->ops->neigh_lookup(dst, skb, NULL); + return IS_ERR(n) ? NULL : n; } diff --git a/include/net/genetlink.h b/include/net/genetlink.h index 9292f1c588b7..2d9e67a69cbe 100644 --- a/include/net/genetlink.h +++ b/include/net/genetlink.h @@ -35,12 +35,6 @@ struct genl_info; * do additional, common, filtering and return an error * @post_doit: called after an operation's doit callback, it may * undo operations done by pre_doit, for example release locks - * @mcast_bind: a socket bound to the given multicast group (which - * is given as the offset into the groups array) - * @mcast_unbind: a socket was unbound from the given multicast group. - * Note that unbind() will not be called symmetrically if the - * generic netlink family is removed while there are still open - * sockets. 
* @attrbuf: buffer to store parsed attributes (private) * @mcgrps: multicast groups used by this family * @n_mcgrps: number of multicast groups @@ -64,8 +58,6 @@ struct genl_family { void (*post_doit)(const struct genl_ops *ops, struct sk_buff *skb, struct genl_info *info); - int (*mcast_bind)(struct net *net, int group); - void (*mcast_unbind)(struct net *net, int group); struct nlattr ** attrbuf; /* private */ const struct genl_ops * ops; const struct genl_multicast_group *mcgrps; diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h index 895546058a20..c71eb294da95 100644 --- a/include/net/inet_connection_sock.h +++ b/include/net/inet_connection_sock.h @@ -309,6 +309,10 @@ int inet_csk_compat_getsockopt(struct sock *sk, int level, int optname, int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen); +/* update the fast reuse flag when adding a socket */ +void inet_csk_update_fastreuse(struct inet_bind_bucket *tb, + struct sock *sk); + struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu); #define TCP_PINGPONG_THRESH 3 diff --git a/include/net/inet_ecn.h b/include/net/inet_ecn.h index 0f0d1efe06dd..e1eaf1780288 100644 --- a/include/net/inet_ecn.h +++ b/include/net/inet_ecn.h @@ -4,6 +4,7 @@ #include #include +#include #include #include @@ -172,7 +173,7 @@ static inline void ipv6_copy_dscp(unsigned int dscp, struct ipv6hdr *inner) static inline int INET_ECN_set_ce(struct sk_buff *skb) { - switch (skb->protocol) { + switch (skb_protocol(skb, true)) { case cpu_to_be16(ETH_P_IP): if (skb_network_header(skb) + sizeof(struct iphdr) <= skb_tail_pointer(skb)) @@ -191,7 +192,7 @@ static inline int INET_ECN_set_ce(struct sk_buff *skb) static inline int INET_ECN_set_ect1(struct sk_buff *skb) { - switch (skb->protocol) { + switch (skb_protocol(skb, true)) { case cpu_to_be16(ETH_P_IP): if (skb_network_header(skb) + sizeof(struct iphdr) <= skb_tail_pointer(skb)) @@ -272,12 +273,16 @@ static inline int IP_ECN_decapsulate(const struct iphdr *oiph, { __u8 inner; - if (skb->protocol == htons(ETH_P_IP)) + switch (skb_protocol(skb, true)) { + case htons(ETH_P_IP): inner = ip_hdr(skb)->tos; - else if (skb->protocol == htons(ETH_P_IPV6)) + break; + case htons(ETH_P_IPV6): inner = ipv6_get_dsfield(ipv6_hdr(skb)); - else + break; + default: return 0; + } return INET_ECN_decapsulate(skb, oiph->tos, inner); } @@ -287,12 +292,16 @@ static inline int IP6_ECN_decapsulate(const struct ipv6hdr *oipv6h, { __u8 inner; - if (skb->protocol == htons(ETH_P_IP)) + switch (skb_protocol(skb, true)) { + case htons(ETH_P_IP): inner = ip_hdr(skb)->tos; - else if (skb->protocol == htons(ETH_P_IPV6)) + break; + case htons(ETH_P_IPV6): inner = ipv6_get_dsfield(ipv6_hdr(skb)); - else + break; + default: return 0; + } return INET_ECN_decapsulate(skb, ipv6_get_dsfield(oipv6h), inner); } diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h index 078887c8c586..7c37e3c3b1c7 100644 --- a/include/net/ip_vs.h +++ b/include/net/ip_vs.h @@ -1624,18 +1624,16 @@ static inline void ip_vs_conn_drop_conntrack(struct ip_vs_conn *cp) } #endif /* CONFIG_IP_VS_NFCT */ -/* Really using conntrack? */ -static inline bool ip_vs_conn_uses_conntrack(struct ip_vs_conn *cp, - struct sk_buff *skb) +/* Using old conntrack that can not be redirected to another real server? 
*/ +static inline bool ip_vs_conn_uses_old_conntrack(struct ip_vs_conn *cp, + struct sk_buff *skb) { #ifdef CONFIG_IP_VS_NFCT enum ip_conntrack_info ctinfo; struct nf_conn *ct; - if (!(cp->flags & IP_VS_CONN_F_NFCT)) - return false; ct = nf_ct_get(skb, &ctinfo); - if (ct) + if (ct && nf_ct_is_confirmed(ct)) return true; #endif return false; diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index 2d0275f13bbf..bc2c73f54962 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h @@ -143,6 +143,8 @@ static inline u64 nft_reg_load64(u32 *sreg) static inline void nft_data_copy(u32 *dst, const struct nft_data *src, unsigned int len) { + if (len % NFT_REG32_SIZE) + dst[len / NFT_REG32_SIZE] = 0; memcpy(dst, src, len); } diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h index 6a70845bd9ab..cee1c084e9f4 100644 --- a/include/net/pkt_sched.h +++ b/include/net/pkt_sched.h @@ -128,17 +128,6 @@ static inline void qdisc_run(struct Qdisc *q) } } -static inline __be16 tc_skb_protocol(const struct sk_buff *skb) -{ - /* We need to take extra care in case the skb came via - * vlan accelerated path. In that case, use skb->vlan_proto - * as the original vlan header was already stripped. - */ - if (skb_vlan_tag_present(skb)) - return skb->vlan_proto; - return skb->protocol; -} - /* Calculate maximal size of packet seen by hard_start_xmit routine of this device. */ diff --git a/include/net/sctp/constants.h b/include/net/sctp/constants.h index 823afc42a3aa..06e1deeef464 100644 --- a/include/net/sctp/constants.h +++ b/include/net/sctp/constants.h @@ -341,11 +341,13 @@ enum { ipv4_is_anycast_6to4(a)) /* Flags used for the bind address copy functions. */ -#define SCTP_ADDR6_ALLOWED 0x00000001 /* IPv6 address is allowed by +#define SCTP_ADDR4_ALLOWED 0x00000001 /* IPv4 address is allowed by local sock family */ -#define SCTP_ADDR4_PEERSUPP 0x00000002 /* IPv4 address is supported by +#define SCTP_ADDR6_ALLOWED 0x00000002 /* IPv6 address is allowed by + local sock family */ +#define SCTP_ADDR4_PEERSUPP 0x00000004 /* IPv4 address is supported by peer */ -#define SCTP_ADDR6_PEERSUPP 0x00000004 /* IPv6 address is supported by +#define SCTP_ADDR6_PEERSUPP 0x00000008 /* IPv6 address is supported by peer */ /* Reasons to retransmit. 
*/ diff --git a/include/net/sock.h b/include/net/sock.h index 6c5a3809483e..6d9c1131fe5c 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -849,6 +849,8 @@ static inline int sk_memalloc_socks(void) { return static_branch_unlikely(&memalloc_socks_key); } + +void __receive_sock(struct file *file); #else static inline int sk_memalloc_socks(void) @@ -856,6 +858,8 @@ static inline int sk_memalloc_socks(void) return 0; } +static inline void __receive_sock(struct file *file) +{ } #endif static inline gfp_t sk_gfp_mask(const struct sock *sk, gfp_t gfp_mask) @@ -1803,7 +1807,6 @@ static inline int sk_rx_queue_get(const struct sock *sk) static inline void sk_set_socket(struct sock *sk, struct socket *sock) { - sk_tx_queue_clear(sk); sk->sk_socket = sock; } diff --git a/include/net/tcp.h b/include/net/tcp.h index 7cf1b4972c66..377179283c46 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -1650,6 +1650,8 @@ void tcp_fastopen_destroy_cipher(struct sock *sk); void tcp_fastopen_ctx_destroy(struct net *net); int tcp_fastopen_reset_cipher(struct net *net, struct sock *sk, void *primary_key, void *backup_key); +int tcp_fastopen_get_cipher(struct net *net, struct inet_connection_sock *icsk, + u64 *key); void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb); struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb, struct request_sock *req, diff --git a/include/net/tls.h b/include/net/tls.h index db26e3ec918f..0a065bdffa39 100644 --- a/include/net/tls.h +++ b/include/net/tls.h @@ -590,6 +590,15 @@ static inline bool tls_sw_has_ctx_tx(const struct sock *sk) return !!tls_sw_ctx_tx(ctx); } +static inline bool tls_sw_has_ctx_rx(const struct sock *sk) +{ + struct tls_context *ctx = tls_get_ctx(sk); + + if (!ctx) + return false; + return !!tls_sw_ctx_rx(ctx); +} + void tls_sw_write_space(struct sock *sk, struct tls_context *ctx); void tls_device_write_space(struct sock *sk, struct tls_context *ctx); diff --git a/include/net/xfrm.h b/include/net/xfrm.h index aa08a7a5f6ac..12aa6e15e43f 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -945,7 +945,7 @@ struct xfrm_dst { static inline struct dst_entry *xfrm_dst_path(const struct dst_entry *dst) { #ifdef CONFIG_XFRM - if (dst->xfrm) { + if (dst->xfrm || (dst->flags & DST_XFRM_QUEUE)) { const struct xfrm_dst *xdst = (const struct xfrm_dst *) dst; return xdst->path; @@ -957,7 +957,7 @@ static inline struct dst_entry *xfrm_dst_path(const struct dst_entry *dst) static inline struct dst_entry *xfrm_dst_child(const struct dst_entry *dst) { #ifdef CONFIG_XFRM - if (dst->xfrm) { + if (dst->xfrm || (dst->flags & DST_XFRM_QUEUE)) { struct xfrm_dst *xdst = (struct xfrm_dst *) dst; return xdst->child; } @@ -1012,6 +1012,7 @@ struct xfrm_offload { #define XFRM_GRO 32 #define XFRM_ESP_NO_TRAILER 64 #define XFRM_DEV_RESUME 128 +#define XFRM_XMIT 256 __u32 status; #define CRYPTO_SUCCESS 1 @@ -1635,13 +1636,16 @@ int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk, void *); void xfrm_policy_walk_done(struct xfrm_policy_walk *walk, struct net *net); int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl); -struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u32 if_id, - u8 type, int dir, +struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, + const struct xfrm_mark *mark, + u32 if_id, u8 type, int dir, struct xfrm_selector *sel, struct xfrm_sec_ctx *ctx, int delete, int *err); -struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u32 if_id, u8, - int dir, u32 id, int delete, 
int *err); +struct xfrm_policy *xfrm_policy_byid(struct net *net, + const struct xfrm_mark *mark, u32 if_id, + u8 type, int dir, u32 id, int delete, + int *err); int xfrm_policy_flush(struct net *net, u8 type, bool task_valid); void xfrm_policy_hash_rebuild(struct net *net); u32 xfrm_get_acqseq(void); diff --git a/include/rdma/rdmavt_qp.h b/include/rdma/rdmavt_qp.h index b550ae89bf85..6dd3b5284fd1 100644 --- a/include/rdma/rdmavt_qp.h +++ b/include/rdma/rdmavt_qp.h @@ -278,6 +278,25 @@ struct rvt_rq { spinlock_t lock ____cacheline_aligned_in_smp; }; +/** + * rvt_get_rq_count - count numbers of request work queue entries + * in circular buffer + * @rq: data structure for request queue entry + * @head: head indices of the circular buffer + * @tail: tail indices of the circular buffer + * + * Return - total number of entries in the Receive Queue + */ + +static inline u32 rvt_get_rq_count(struct rvt_rq *rq, u32 head, u32 tail) +{ + u32 count = head - tail; + + if ((s32)count < 0) + count += rq->size; + return count; +} + /* * This structure holds the information that the send tasklet needs * to send a RDMA read response or atomic operation. diff --git a/include/soc/nps/common.h b/include/soc/nps/common.h index 9b1d43d671a3..8c18dc6d3fde 100644 --- a/include/soc/nps/common.h +++ b/include/soc/nps/common.h @@ -45,6 +45,12 @@ #define CTOP_INST_MOV2B_FLIP_R3_B1_B2_INST 0x5B60 #define CTOP_INST_MOV2B_FLIP_R3_B1_B2_LIMM 0x00010422 +#ifndef AUX_IENABLE +#define AUX_IENABLE 0x40c +#endif + +#define CTOP_AUX_IACK (0xFFFFF800 + 0x088) + #ifndef __ASSEMBLY__ /* In order to increase compilation test coverage */ diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h index bc88d6f964da..006f01922439 100644 --- a/include/sound/compress_driver.h +++ b/include/sound/compress_driver.h @@ -59,6 +59,7 @@ struct snd_compr_runtime { * @direction: stream direction, playback/recording * @metadata_set: metadata set flag, true when set * @next_track: has userspace signal next track transition, true when set + * @partial_drain: undergoing partial_drain for stream, true when set * @private_data: pointer to DSP private data */ struct snd_compr_stream { @@ -70,6 +71,7 @@ struct snd_compr_stream { enum snd_compr_direction direction; bool metadata_set; bool next_track; + bool partial_drain; void *private_data; }; @@ -173,7 +175,13 @@ static inline void snd_compr_drain_notify(struct snd_compr_stream *stream) if (snd_BUG_ON(!stream)) return; - stream->runtime->state = SNDRV_PCM_STATE_SETUP; + /* for partial_drain case we are back to running state on success */ + if (stream->partial_drain) { + stream->runtime->state = SNDRV_PCM_STATE_RUNNING; + stream->partial_drain = false; /* clear this flag as well */ + } else { + stream->runtime->state = SNDRV_PCM_STATE_SETUP; + } wake_up(&stream->runtime->sleep); } diff --git a/include/sound/rt5670.h b/include/sound/rt5670.h index f9024c7a1600..02e1d7778354 100644 --- a/include/sound/rt5670.h +++ b/include/sound/rt5670.h @@ -12,6 +12,7 @@ struct rt5670_platform_data { int jd_mode; bool in2_diff; bool dev_gpio; + bool gpio1_is_ext_spk_en; bool dmic_en; unsigned int dmic1_data_pin; diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h index ba9efdc848f9..059b6e45a028 100644 --- a/include/trace/events/rxrpc.h +++ b/include/trace/events/rxrpc.h @@ -400,7 +400,7 @@ enum rxrpc_tx_point { EM(rxrpc_cong_begin_retransmission, " Retrans") \ EM(rxrpc_cong_cleared_nacks, " Cleared") \ EM(rxrpc_cong_new_low_nack, " NewLowN") \ - EM(rxrpc_cong_no_change, "") \ 
+ EM(rxrpc_cong_no_change, " -") \ EM(rxrpc_cong_progress, " Progres") \ EM(rxrpc_cong_retransmit_again, " ReTxAgn") \ EM(rxrpc_cong_rtt_window_end, " RttWinE") \ diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h index 66282552db20..67434278b81d 100644 --- a/include/trace/events/writeback.h +++ b/include/trace/events/writeback.h @@ -499,8 +499,9 @@ DEFINE_WBC_EVENT(wbc_writepage); TRACE_EVENT(writeback_queue_io, TP_PROTO(struct bdi_writeback *wb, struct wb_writeback_work *work, + unsigned long dirtied_before, int moved), - TP_ARGS(wb, work, moved), + TP_ARGS(wb, work, dirtied_before, moved), TP_STRUCT__entry( __array(char, name, 32) __field(unsigned long, older) @@ -510,19 +511,17 @@ TRACE_EVENT(writeback_queue_io, __field(unsigned int, cgroup_ino) ), TP_fast_assign( - unsigned long *older_than_this = work->older_than_this; strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32); - __entry->older = older_than_this ? *older_than_this : 0; - __entry->age = older_than_this ? - (jiffies - *older_than_this) * 1000 / HZ : -1; + __entry->older = dirtied_before; + __entry->age = (jiffies - dirtied_before) * 1000 / HZ; __entry->moved = moved; __entry->reason = work->reason; __entry->cgroup_ino = __trace_wb_assign_cgroup(wb); ), TP_printk("bdi %s: older=%lu age=%ld enqueue=%d reason=%s cgroup_ino=%u", __entry->name, - __entry->older, /* older_than_this in jiffies */ - __entry->age, /* older_than_this in relative milliseconds */ + __entry->older, /* dirtied_before in jiffies */ + __entry->age, /* dirtied_before in relative milliseconds */ __entry->moved, __print_symbolic(__entry->reason, WB_WORK_REASON), __entry->cgroup_ino diff --git a/include/uapi/linux/input-event-codes.h b/include/uapi/linux/input-event-codes.h index 85387c76c24f..472cd5bc5567 100644 --- a/include/uapi/linux/input-event-codes.h +++ b/include/uapi/linux/input-event-codes.h @@ -808,7 +808,8 @@ #define SW_LINEIN_INSERT 0x0d /* set = inserted */ #define SW_MUTE_DEVICE 0x0e /* set = device disabled */ #define SW_PEN_INSERTED 0x0f /* set = pen inserted */ -#define SW_MAX 0x0f +#define SW_MACHINE_COVER 0x10 /* set = cover closed */ +#define SW_MAX 0x10 #define SW_CNT (SW_MAX+1) /* diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 52641d8ca9e8..1b6b8e05868d 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -189,9 +189,11 @@ struct kvm_hyperv_exit { #define KVM_EXIT_HYPERV_SYNIC 1 #define KVM_EXIT_HYPERV_HCALL 2 __u32 type; + __u32 pad1; union { struct { __u32 msr; + __u32 pad2; __u64 control; __u64 evt_page; __u64 msg_page; @@ -766,9 +768,10 @@ struct kvm_ppc_resize_hpt { #define KVM_VM_PPC_HV 1 #define KVM_VM_PPC_PR 2 -/* on MIPS, 0 forces trap & emulate, 1 forces VZ ASE */ -#define KVM_VM_MIPS_TE 0 +/* on MIPS, 0 indicates auto, 1 forces VZ ASE, 2 forces trap & emulate */ +#define KVM_VM_MIPS_AUTO 0 #define KVM_VM_MIPS_VZ 1 +#define KVM_VM_MIPS_TE 2 #define KVM_S390_SIE_PAGE_OFFSET 1 diff --git a/include/uapi/linux/magic.h b/include/uapi/linux/magic.h index 903cc2d2750b..84ae605c0643 100644 --- a/include/uapi/linux/magic.h +++ b/include/uapi/linux/magic.h @@ -93,6 +93,7 @@ #define BALLOON_KVM_MAGIC 0x13661366 #define ZSMALLOC_MAGIC 0x58295829 #define DMA_BUF_MAGIC 0x444d4142 /* "DMAB" */ +#define DEVMEM_MAGIC 0x454d444d /* "DMEM" */ #define Z3FOLD_MAGIC 0x33 #endif /* __LINUX_MAGIC_H__ */ diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h index ed8881ad18ed..0a995403172c 100644 --- 
a/include/uapi/linux/netfilter/nf_tables.h +++ b/include/uapi/linux/netfilter/nf_tables.h @@ -132,7 +132,7 @@ enum nf_tables_msg_types { * @NFTA_LIST_ELEM: list element (NLA_NESTED) */ enum nft_list_attributes { - NFTA_LIST_UNPEC, + NFTA_LIST_UNSPEC, NFTA_LIST_ELEM, __NFTA_LIST_MAX }; diff --git a/include/uapi/linux/seccomp.h b/include/uapi/linux/seccomp.h index 90734aa5aa36..b5f901af79f0 100644 --- a/include/uapi/linux/seccomp.h +++ b/include/uapi/linux/seccomp.h @@ -93,5 +93,6 @@ struct seccomp_notif_resp { #define SECCOMP_IOCTL_NOTIF_RECV SECCOMP_IOWR(0, struct seccomp_notif) #define SECCOMP_IOCTL_NOTIF_SEND SECCOMP_IOWR(1, \ struct seccomp_notif_resp) -#define SECCOMP_IOCTL_NOTIF_ID_VALID SECCOMP_IOR(2, __u64) +#define SECCOMP_IOCTL_NOTIF_ID_VALID SECCOMP_IOW(2, __u64) + #endif /* _UAPI_LINUX_SECCOMP_H */ diff --git a/include/uapi/linux/vboxguest.h b/include/uapi/linux/vboxguest.h index 9cec58a6a5ea..f79d7abe27db 100644 --- a/include/uapi/linux/vboxguest.h +++ b/include/uapi/linux/vboxguest.h @@ -103,7 +103,7 @@ VMMDEV_ASSERT_SIZE(vbg_ioctl_driver_version_info, 24 + 20); /* IOCTL to perform a VMM Device request larger then 1KB. */ -#define VBG_IOCTL_VMMDEV_REQUEST_BIG _IOC(_IOC_READ | _IOC_WRITE, 'V', 3, 0) +#define VBG_IOCTL_VMMDEV_REQUEST_BIG _IO('V', 3) /** VBG_IOCTL_HGCM_CONNECT data structure. */ @@ -198,7 +198,7 @@ struct vbg_ioctl_log { } u; }; -#define VBG_IOCTL_LOG(s) _IOC(_IOC_READ | _IOC_WRITE, 'V', 9, s) +#define VBG_IOCTL_LOG(s) _IO('V', 9) /** VBG_IOCTL_WAIT_FOR_EVENTS data structure. */ diff --git a/include/uapi/linux/wireless.h b/include/uapi/linux/wireless.h index 86eca3208b6b..a2c006a364e0 100644 --- a/include/uapi/linux/wireless.h +++ b/include/uapi/linux/wireless.h @@ -74,6 +74,8 @@ #include /* for "struct sockaddr" et al */ #include /* for IFNAMSIZ and co... */ +#include /* for offsetof */ + /***************************** VERSION *****************************/ /* * This constant is used to know the availability of the wireless @@ -1090,8 +1092,7 @@ struct iw_event { /* iw_point events are special. First, the payload (extra data) come at * the end of the event, so they are bigger than IW_EV_POINT_LEN. Second, * we omit the pointer, so start at an offset. 
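Editor's note: the wireless.h change that follows replaces hand-rolled NULL-pointer arithmetic behind IW_EV_POINT_OFF with offsetof(). A minimal sketch of the difference, using a stand-in structure rather than the real struct iw_point so it compiles on its own; both expressions yield the same byte offset, but only offsetof() is well defined:

    #include <stdio.h>
    #include <stddef.h>

    /* Stand-in for struct iw_point: pointer first, then length. */
    struct demo_point {
        void *pointer;
        unsigned short length;
        unsigned short flags;
    };

    int main(void)
    {
        /* Old style: offset derived from pointer arithmetic on a NULL base
         * (formally undefined behaviour, even though it usually works). */
        size_t old_off = (size_t)((char *)&(((struct demo_point *)0)->length) -
                                  (char *)0);
        /* New style: the standard, well-defined macro. */
        size_t new_off = offsetof(struct demo_point, length);

        printf("old=%zu new=%zu\n", old_off, new_off);
        return 0;
    }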
*/ -#define IW_EV_POINT_OFF (((char *) &(((struct iw_point *) NULL)->length)) - \ - (char *) NULL) +#define IW_EV_POINT_OFF offsetof(struct iw_point, length) #define IW_EV_POINT_LEN (IW_EV_LCP_LEN + sizeof(struct iw_point) - \ IW_EV_POINT_OFF) diff --git a/kernel/Makefile b/kernel/Makefile index daad787fb795..42557f251fea 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -128,7 +128,7 @@ $(obj)/config_data.gz: $(KCONFIG_CONFIG) FORCE $(obj)/kheaders.o: $(obj)/kheaders_data.tar.xz quiet_cmd_genikh = CHK $(obj)/kheaders_data.tar.xz - cmd_genikh = $(BASH) $(srctree)/kernel/gen_kheaders.sh $@ + cmd_genikh = $(CONFIG_SHELL) $(srctree)/kernel/gen_kheaders.sh $@ $(obj)/kheaders_data.tar.xz: FORCE $(call cmd,genikh) diff --git a/kernel/audit.c b/kernel/audit.c index fcfbb3476ccd..05ae208ad442 100644 --- a/kernel/audit.c +++ b/kernel/audit.c @@ -879,7 +879,7 @@ main_queue: return 0; } -int audit_send_list(void *_dest) +int audit_send_list_thread(void *_dest) { struct audit_netlink_list *dest = _dest; struct sk_buff *skb; @@ -923,19 +923,30 @@ out_kfree_skb: return NULL; } +static void audit_free_reply(struct audit_reply *reply) +{ + if (!reply) + return; + + if (reply->skb) + kfree_skb(reply->skb); + if (reply->net) + put_net(reply->net); + kfree(reply); +} + static int audit_send_reply_thread(void *arg) { struct audit_reply *reply = (struct audit_reply *)arg; - struct sock *sk = audit_get_sk(reply->net); audit_ctl_lock(); audit_ctl_unlock(); /* Ignore failure. It'll only happen if the sender goes away, because our timeout is set to infinite. */ - netlink_unicast(sk, reply->skb, reply->portid, 0); - put_net(reply->net); - kfree(reply); + netlink_unicast(audit_get_sk(reply->net), reply->skb, reply->portid, 0); + reply->skb = NULL; + audit_free_reply(reply); return 0; } @@ -949,35 +960,32 @@ static int audit_send_reply_thread(void *arg) * @payload: payload data * @size: payload size * - * Allocates an skb, builds the netlink message, and sends it to the port id. - * No failure notifications. + * Allocates a skb, builds the netlink message, and sends it to the port id. 
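Editor's note: the audit change being introduced here funnels every failure path (and the reply thread's success path) through one audit_free_reply() that tolerates NULL and half-initialized members. A minimal userspace sketch of that cleanup-helper pattern, with illustrative names and ordinary malloc/free standing in for the skb and namespace references:

    #include <stdlib.h>

    struct reply {
        void *skb;   /* stand-ins for the skb and net references */
        void *net;
    };

    /* One teardown helper that copes with NULL and half-built objects,
     * so every error path can simply call it and return. */
    static void free_reply(struct reply *r)
    {
        if (!r)
            return;
        free(r->skb);
        free(r->net);
        free(r);
    }

    int main(void)
    {
        struct reply *r = calloc(1, sizeof(*r));

        if (!r)
            return 1;
        r->skb = malloc(32);    /* r->net legitimately stays NULL here */
        free_reply(r);          /* safe regardless of how far setup got */
        return 0;
    }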
*/ static void audit_send_reply(struct sk_buff *request_skb, int seq, int type, int done, int multi, const void *payload, int size) { - struct net *net = sock_net(NETLINK_CB(request_skb).sk); - struct sk_buff *skb; struct task_struct *tsk; - struct audit_reply *reply = kmalloc(sizeof(struct audit_reply), - GFP_KERNEL); + struct audit_reply *reply; + reply = kzalloc(sizeof(*reply), GFP_KERNEL); if (!reply) return; - skb = audit_make_reply(seq, type, done, multi, payload, size); - if (!skb) - goto out; - - reply->net = get_net(net); + reply->skb = audit_make_reply(seq, type, done, multi, payload, size); + if (!reply->skb) + goto err; + reply->net = get_net(sock_net(NETLINK_CB(request_skb).sk)); reply->portid = NETLINK_CB(request_skb).portid; - reply->skb = skb; tsk = kthread_run(audit_send_reply_thread, reply, "audit_send_reply"); - if (!IS_ERR(tsk)) - return; - kfree_skb(skb); -out: - kfree(reply); + if (IS_ERR(tsk)) + goto err; + + return; + +err: + audit_free_reply(reply); } /* diff --git a/kernel/audit.h b/kernel/audit.h index 6fb7160412d4..ddc22878433d 100644 --- a/kernel/audit.h +++ b/kernel/audit.h @@ -229,7 +229,7 @@ struct audit_netlink_list { struct sk_buff_head q; }; -int audit_send_list(void *_dest); +int audit_send_list_thread(void *_dest); extern int selinux_audit_rule_update(void); diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c index 026e34da4ace..a10e2997aa6c 100644 --- a/kernel/auditfilter.c +++ b/kernel/auditfilter.c @@ -1161,11 +1161,8 @@ int audit_rule_change(int type, int seq, void *data, size_t datasz) */ int audit_list_rules_send(struct sk_buff *request_skb, int seq) { - u32 portid = NETLINK_CB(request_skb).portid; - struct net *net = sock_net(NETLINK_CB(request_skb).sk); struct task_struct *tsk; struct audit_netlink_list *dest; - int err = 0; /* We can't just spew out the rules here because we might fill * the available socket buffer space and deadlock waiting for @@ -1173,25 +1170,26 @@ int audit_list_rules_send(struct sk_buff *request_skb, int seq) * happen if we're actually running in the context of auditctl * trying to _send_ the stuff */ - dest = kmalloc(sizeof(struct audit_netlink_list), GFP_KERNEL); + dest = kmalloc(sizeof(*dest), GFP_KERNEL); if (!dest) return -ENOMEM; - dest->net = get_net(net); - dest->portid = portid; + dest->net = get_net(sock_net(NETLINK_CB(request_skb).sk)); + dest->portid = NETLINK_CB(request_skb).portid; skb_queue_head_init(&dest->q); mutex_lock(&audit_filter_mutex); audit_list_rules(seq, &dest->q); mutex_unlock(&audit_filter_mutex); - tsk = kthread_run(audit_send_list, dest, "audit_send_list"); + tsk = kthread_run(audit_send_list_thread, dest, "audit_send_list"); if (IS_ERR(tsk)) { skb_queue_purge(&dest->q); + put_net(dest->net); kfree(dest); - err = PTR_ERR(tsk); + return PTR_ERR(tsk); } - return err; + return 0; } int audit_comparator(u32 left, u32 op, u32 right) diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c index 869e2e1860e8..b701af27a779 100644 --- a/kernel/bpf/cgroup.c +++ b/kernel/bpf/cgroup.c @@ -966,16 +966,23 @@ static bool __cgroup_bpf_prog_array_is_empty(struct cgroup *cgrp, static int sockopt_alloc_buf(struct bpf_sockopt_kern *ctx, int max_optlen) { - if (unlikely(max_optlen > PAGE_SIZE) || max_optlen < 0) + if (unlikely(max_optlen < 0)) return -EINVAL; + if (unlikely(max_optlen > PAGE_SIZE)) { + /* We don't expose optvals that are greater than PAGE_SIZE + * to the BPF program. 
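Editor's note: the BPF sockopt hunk continuing below caps the working buffer at PAGE_SIZE and then copies only min(optlen, capacity) bytes from userspace, so an oversized optlen can no longer overrun the allocation. A compact userspace sketch of the clamp-then-bounded-copy idea (hypothetical helper names, not the kernel API):

    #include <stdio.h>
    #include <string.h>

    #define BUF_CAP 4096    /* stands in for PAGE_SIZE */

    static size_t min_sz(size_t a, size_t b) { return a < b ? a : b; }

    /* Copy at most 'cap' bytes even if the caller claims a larger length,
     * and report how much was actually taken. */
    static size_t bounded_copy(char *dst, size_t cap, const char *src, size_t len)
    {
        size_t n = min_sz(len, cap);

        memcpy(dst, src, n);
        return n;
    }

    int main(void)
    {
        static char src[8192];
        char dst[BUF_CAP];
        size_t copied = bounded_copy(dst, sizeof(dst), src, sizeof(src));

        printf("requested %zu, copied %zu\n", sizeof(src), copied);
        return 0;
    }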
+ */ + max_optlen = PAGE_SIZE; + } + ctx->optval = kzalloc(max_optlen, GFP_USER); if (!ctx->optval) return -ENOMEM; ctx->optval_end = ctx->optval + max_optlen; - return 0; + return max_optlen; } static void sockopt_free_buf(struct bpf_sockopt_kern *ctx) @@ -1009,13 +1016,13 @@ int __cgroup_bpf_run_filter_setsockopt(struct sock *sk, int *level, */ max_optlen = max_t(int, 16, *optlen); - ret = sockopt_alloc_buf(&ctx, max_optlen); - if (ret) - return ret; + max_optlen = sockopt_alloc_buf(&ctx, max_optlen); + if (max_optlen < 0) + return max_optlen; ctx.optlen = *optlen; - if (copy_from_user(ctx.optval, optval, *optlen) != 0) { + if (copy_from_user(ctx.optval, optval, min(*optlen, max_optlen)) != 0) { ret = -EFAULT; goto out; } @@ -1043,8 +1050,14 @@ int __cgroup_bpf_run_filter_setsockopt(struct sock *sk, int *level, /* export any potential modifications */ *level = ctx.level; *optname = ctx.optname; - *optlen = ctx.optlen; - *kernel_optval = ctx.optval; + + /* optlen == 0 from BPF indicates that we should + * use original userspace data. + */ + if (ctx.optlen != 0) { + *optlen = ctx.optlen; + *kernel_optval = ctx.optval; + } } out: @@ -1076,12 +1089,12 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level, __cgroup_bpf_prog_array_is_empty(cgrp, BPF_CGROUP_GETSOCKOPT)) return retval; - ret = sockopt_alloc_buf(&ctx, max_optlen); - if (ret) - return ret; - ctx.optlen = max_optlen; + max_optlen = sockopt_alloc_buf(&ctx, max_optlen); + if (max_optlen < 0) + return max_optlen; + if (!retval) { /* If kernel getsockopt finished successfully, * copy whatever was returned to the user back @@ -1095,10 +1108,8 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level, goto out; } - if (ctx.optlen > max_optlen) - ctx.optlen = max_optlen; - - if (copy_from_user(ctx.optval, optval, ctx.optlen) != 0) { + if (copy_from_user(ctx.optval, optval, + min(ctx.optlen, max_optlen)) != 0) { ret = -EFAULT; goto out; } @@ -1127,10 +1138,12 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level, goto out; } - if (copy_to_user(optval, ctx.optval, ctx.optlen) || - put_user(ctx.optlen, optlen)) { - ret = -EFAULT; - goto out; + if (ctx.optlen != 0) { + if (copy_to_user(optval, ctx.optval, ctx.optlen) || + put_user(ctx.optlen, optlen)) { + ret = -EFAULT; + goto out; + } } ret = ctx.retval; diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c index b4b6b77f309c..6684696fa457 100644 --- a/kernel/bpf/devmap.c +++ b/kernel/bpf/devmap.c @@ -88,12 +88,13 @@ struct bpf_dtab { static DEFINE_SPINLOCK(dev_map_lock); static LIST_HEAD(dev_map_list); -static struct hlist_head *dev_map_create_hash(unsigned int entries) +static struct hlist_head *dev_map_create_hash(unsigned int entries, + int numa_node) { int i; struct hlist_head *hash; - hash = kmalloc_array(entries, sizeof(*hash), GFP_KERNEL); + hash = bpf_map_area_alloc(entries * sizeof(*hash), numa_node); if (hash != NULL) for (i = 0; i < entries; i++) INIT_HLIST_HEAD(&hash[i]); @@ -151,7 +152,8 @@ static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr) INIT_LIST_HEAD(per_cpu_ptr(dtab->flush_list, cpu)); if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) { - dtab->dev_index_head = dev_map_create_hash(dtab->n_buckets); + dtab->dev_index_head = dev_map_create_hash(dtab->n_buckets, + dtab->map.numa_node); if (!dtab->dev_index_head) goto free_percpu; @@ -249,7 +251,7 @@ static void dev_map_free(struct bpf_map *map) } } - kfree(dtab->dev_index_head); + bpf_map_area_free(dtab->dev_index_head); } else { for (i = 0; i < 
dtab->map.max_entries; i++) { struct bpf_dtab_netdev *dev; diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c index 22066a62c8c9..039d64b1bfb7 100644 --- a/kernel/bpf/hashtab.c +++ b/kernel/bpf/hashtab.c @@ -675,15 +675,20 @@ static void htab_elem_free_rcu(struct rcu_head *head) preempt_enable(); } -static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l) +static void htab_put_fd_value(struct bpf_htab *htab, struct htab_elem *l) { struct bpf_map *map = &htab->map; + void *ptr; if (map->ops->map_fd_put_ptr) { - void *ptr = fd_htab_map_get_ptr(map, l); - + ptr = fd_htab_map_get_ptr(map, l); map->ops->map_fd_put_ptr(ptr); } +} + +static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l) +{ + htab_put_fd_value(htab, l); if (htab_is_prealloc(htab)) { __pcpu_freelist_push(&htab->freelist, &l->fnode); @@ -735,6 +740,7 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key, */ pl_new = this_cpu_ptr(htab->extra_elems); l_new = *pl_new; + htab_put_fd_value(htab, old_elem); *pl_new = old_elem; } else { struct pcpu_freelist_node *l; diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 946cfdd3b2cc..bf03d04a9e2f 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -1118,7 +1118,8 @@ static int map_lookup_and_delete_elem(union bpf_attr *attr) map = __bpf_map_get(f); if (IS_ERR(map)) return PTR_ERR(map); - if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) { + if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ) || + !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) { err = -EPERM; goto err_put; } @@ -2028,10 +2029,10 @@ static int bpf_prog_detach(const union bpf_attr *attr) ptype = BPF_PROG_TYPE_CGROUP_DEVICE; break; case BPF_SK_MSG_VERDICT: - return sock_map_get_from_fd(attr, NULL); + return sock_map_prog_detach(attr, BPF_PROG_TYPE_SK_MSG); case BPF_SK_SKB_STREAM_PARSER: case BPF_SK_SKB_STREAM_VERDICT: - return sock_map_get_from_fd(attr, NULL); + return sock_map_prog_detach(attr, BPF_PROG_TYPE_SK_SKB); case BPF_LIRC_MODE2: return lirc_prog_detach(attr); case BPF_FLOW_DISSECTOR: @@ -2247,7 +2248,8 @@ static const struct bpf_map *bpf_map_from_imm(const struct bpf_prog *prog, return NULL; } -static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog) +static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog, + const struct cred *f_cred) { const struct bpf_map *map; struct bpf_insn *insns; @@ -2270,7 +2272,7 @@ static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog) insns[i].code == (BPF_JMP | BPF_CALL_ARGS)) { if (insns[i].code == (BPF_JMP | BPF_CALL_ARGS)) insns[i].code = BPF_JMP | BPF_CALL; - if (!bpf_dump_raw_ok()) + if (!bpf_dump_raw_ok(f_cred)) insns[i].imm = 0; continue; } @@ -2322,7 +2324,8 @@ static int set_info_rec_size(struct bpf_prog_info *info) return 0; } -static int bpf_prog_get_info_by_fd(struct bpf_prog *prog, +static int bpf_prog_get_info_by_fd(struct file *file, + struct bpf_prog *prog, const union bpf_attr *attr, union bpf_attr __user *uattr) { @@ -2391,11 +2394,11 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog, struct bpf_insn *insns_sanitized; bool fault; - if (prog->blinded && !bpf_dump_raw_ok()) { + if (prog->blinded && !bpf_dump_raw_ok(file->f_cred)) { info.xlated_prog_insns = 0; goto done; } - insns_sanitized = bpf_insn_prepare_dump(prog); + insns_sanitized = bpf_insn_prepare_dump(prog, file->f_cred); if (!insns_sanitized) return -ENOMEM; uinsns = u64_to_user_ptr(info.xlated_prog_insns); @@ -2429,7 +2432,7 @@ static int 
bpf_prog_get_info_by_fd(struct bpf_prog *prog, } if (info.jited_prog_len && ulen) { - if (bpf_dump_raw_ok()) { + if (bpf_dump_raw_ok(file->f_cred)) { uinsns = u64_to_user_ptr(info.jited_prog_insns); ulen = min_t(u32, info.jited_prog_len, ulen); @@ -2464,7 +2467,7 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog, ulen = info.nr_jited_ksyms; info.nr_jited_ksyms = prog->aux->func_cnt ? : 1; if (ulen) { - if (bpf_dump_raw_ok()) { + if (bpf_dump_raw_ok(file->f_cred)) { unsigned long ksym_addr; u64 __user *user_ksyms; u32 i; @@ -2495,7 +2498,7 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog, ulen = info.nr_jited_func_lens; info.nr_jited_func_lens = prog->aux->func_cnt ? : 1; if (ulen) { - if (bpf_dump_raw_ok()) { + if (bpf_dump_raw_ok(file->f_cred)) { u32 __user *user_lens; u32 func_len, i; @@ -2552,7 +2555,7 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog, else info.nr_jited_line_info = 0; if (info.nr_jited_line_info && ulen) { - if (bpf_dump_raw_ok()) { + if (bpf_dump_raw_ok(file->f_cred)) { __u64 __user *user_linfo; u32 i; @@ -2598,7 +2601,8 @@ done: return 0; } -static int bpf_map_get_info_by_fd(struct bpf_map *map, +static int bpf_map_get_info_by_fd(struct file *file, + struct bpf_map *map, const union bpf_attr *attr, union bpf_attr __user *uattr) { @@ -2640,7 +2644,8 @@ static int bpf_map_get_info_by_fd(struct bpf_map *map, return 0; } -static int bpf_btf_get_info_by_fd(struct btf *btf, +static int bpf_btf_get_info_by_fd(struct file *file, + struct btf *btf, const union bpf_attr *attr, union bpf_attr __user *uattr) { @@ -2672,13 +2677,13 @@ static int bpf_obj_get_info_by_fd(const union bpf_attr *attr, return -EBADFD; if (f.file->f_op == &bpf_prog_fops) - err = bpf_prog_get_info_by_fd(f.file->private_data, attr, + err = bpf_prog_get_info_by_fd(f.file, f.file->private_data, attr, uattr); else if (f.file->f_op == &bpf_map_fops) - err = bpf_map_get_info_by_fd(f.file->private_data, attr, + err = bpf_map_get_info_by_fd(f.file, f.file->private_data, attr, uattr); else if (f.file->f_op == &btf_fops) - err = bpf_btf_get_info_by_fd(f.file->private_data, attr, uattr); + err = bpf_btf_get_info_by_fd(f.file, f.file->private_data, attr, uattr); else err = -EINVAL; diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c index 7c9e97553a00..35faf082a709 100644 --- a/kernel/cgroup/cgroup.c +++ b/kernel/cgroup/cgroup.c @@ -6379,18 +6379,8 @@ void cgroup_sk_alloc_disable(void) void cgroup_sk_alloc(struct sock_cgroup_data *skcd) { - if (cgroup_sk_alloc_disabled) - return; - - /* Socket clone path */ - if (skcd->val) { - /* - * We might be cloning a socket which is left in an empty - * cgroup and the cgroup might have already been rmdir'd. - * Don't use cgroup_get_live(). - */ - cgroup_get(sock_cgroup_ptr(skcd)); - cgroup_bpf_get(sock_cgroup_ptr(skcd)); + if (cgroup_sk_alloc_disabled) { + skcd->no_refcnt = 1; return; } @@ -6415,10 +6405,27 @@ void cgroup_sk_alloc(struct sock_cgroup_data *skcd) rcu_read_unlock(); } +void cgroup_sk_clone(struct sock_cgroup_data *skcd) +{ + if (skcd->val) { + if (skcd->no_refcnt) + return; + /* + * We might be cloning a socket which is left in an empty + * cgroup and the cgroup might have already been rmdir'd. + * Don't use cgroup_get_live(). 
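Editor's note: the cgroup socket-clone rework being introduced here pins the cgroup only when skcd->val is set and refcounting has not been disabled (no_refcnt), and the free path just below skips the put under the same condition, keeping gets and puts balanced. A loose userspace analogue with C11 atomics (illustrative types, not the kernel API):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct obj {
        atomic_int refcnt;
    };

    struct handle {
        struct obj *obj;
        bool no_refcnt;        /* set when refcounting is disabled */
    };

    static void handle_clone(struct handle *h)
    {
        if (!h->obj || h->no_refcnt)
            return;            /* nothing to pin */
        atomic_fetch_add(&h->obj->refcnt, 1);
    }

    static void handle_free(struct handle *h)
    {
        if (!h->obj || h->no_refcnt)
            return;            /* must mirror the clone path exactly */
        atomic_fetch_sub(&h->obj->refcnt, 1);
    }

    int main(void)
    {
        struct obj o;
        atomic_init(&o.refcnt, 1);

        struct handle parent = { .obj = &o }, child = parent;

        handle_clone(&child);
        printf("refcnt after clone: %d\n", atomic_load(&o.refcnt)); /* 2 */
        handle_free(&child);
        handle_free(&parent);
        printf("refcnt at exit: %d\n", atomic_load(&o.refcnt));     /* 0 */
        return 0;
    }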
+ */ + cgroup_get(sock_cgroup_ptr(skcd)); + cgroup_bpf_get(sock_cgroup_ptr(skcd)); + } +} + void cgroup_sk_free(struct sock_cgroup_data *skcd) { struct cgroup *cgrp = sock_cgroup_ptr(skcd); + if (skcd->no_refcnt) + return; cgroup_bpf_put(cgrp); cgroup_put(cgrp); } diff --git a/kernel/cpu.c b/kernel/cpu.c index d7890c1285bf..7527825ac7da 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -3,6 +3,7 @@ * * This code is licenced under the GPL. */ +#include #include #include #include @@ -564,6 +565,21 @@ static int bringup_cpu(unsigned int cpu) return bringup_wait_for_ap(cpu); } +static int finish_cpu(unsigned int cpu) +{ + struct task_struct *idle = idle_thread_get(cpu); + struct mm_struct *mm = idle->active_mm; + + /* + * idle_task_exit() will have switched to &init_mm, now + * clean up any remaining active_mm state. + */ + if (mm != &init_mm) + idle->active_mm = &init_mm; + mmdrop(mm); + return 0; +} + /* * Hotplug state machine related functions */ @@ -1434,7 +1450,7 @@ static struct cpuhp_step cpuhp_hp_states[] = { [CPUHP_BRINGUP_CPU] = { .name = "cpu:bringup", .startup.single = bringup_cpu, - .teardown.single = NULL, + .teardown.single = finish_cpu, .cant_stop = true, }, /* Final state before CPU kills itself */ diff --git a/kernel/cpu_pm.c b/kernel/cpu_pm.c index cbca6879ab7d..44a259338e33 100644 --- a/kernel/cpu_pm.c +++ b/kernel/cpu_pm.c @@ -80,7 +80,7 @@ EXPORT_SYMBOL_GPL(cpu_pm_unregister_notifier); */ int cpu_pm_enter(void) { - int nr_calls; + int nr_calls = 0; int ret = 0; ret = cpu_pm_notify(CPU_PM_ENTER, -1, &nr_calls); @@ -131,7 +131,7 @@ EXPORT_SYMBOL_GPL(cpu_pm_exit); */ int cpu_cluster_pm_enter(void) { - int nr_calls; + int nr_calls = 0; int ret = 0; ret = cpu_pm_notify(CPU_CLUSTER_PM_ENTER, -1, &nr_calls); diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c index f76d6f77dd5e..2222f3225e53 100644 --- a/kernel/debug/debug_core.c +++ b/kernel/debug/debug_core.c @@ -501,6 +501,7 @@ static int kgdb_reenter_check(struct kgdb_state *ks) if (exception_level > 1) { dump_stack(); + kgdb_io_module_registered = false; panic("Recursive entry to debugger"); } @@ -545,6 +546,7 @@ static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs, arch_kgdb_ops.disable_hw_break(regs); acquirelock: + rcu_read_lock(); /* * Interrupts will be restored by the 'trap return' code, except when * single stepping. @@ -601,6 +603,7 @@ return_normal: atomic_dec(&slaves_in_kgdb); dbg_touch_watchdogs(); local_irq_restore(flags); + rcu_read_unlock(); return 0; } cpu_relax(); @@ -619,6 +622,7 @@ return_normal: raw_spin_unlock(&dbg_master_lock); dbg_touch_watchdogs(); local_irq_restore(flags); + rcu_read_unlock(); goto acquirelock; } @@ -634,6 +638,8 @@ return_normal: if (kgdb_skipexception(ks->ex_vector, ks->linux_regs)) goto kgdb_restore; + atomic_inc(&ignore_console_lock_warning); + /* Call the I/O driver's pre_exception routine */ if (dbg_io_ops->pre_exception) dbg_io_ops->pre_exception(); @@ -706,6 +712,8 @@ cpu_master_loop: if (dbg_io_ops->post_exception) dbg_io_ops->post_exception(); + atomic_dec(&ignore_console_lock_warning); + if (!kgdb_single_step) { raw_spin_unlock(&dbg_slave_lock); /* Wait till all the CPUs have quit from the debugger. 
*/ @@ -738,6 +746,7 @@ kgdb_restore: raw_spin_unlock(&dbg_master_lock); dbg_touch_watchdogs(); local_irq_restore(flags); + rcu_read_unlock(); return kgdb_info[cpu].ret_state; } diff --git a/kernel/events/core.c b/kernel/events/core.c index aaaf50b25cc9..db1f5aa755f2 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -2171,6 +2171,7 @@ __perf_remove_from_context(struct perf_event *event, if (!ctx->nr_events && ctx->is_active) { ctx->is_active = 0; + ctx->rotate_necessary = 0; if (ctx->task) { WARN_ON_ONCE(cpuctx->task_ctx != ctx); cpuctx->task_ctx = NULL; @@ -3047,12 +3048,6 @@ static void ctx_sched_out(struct perf_event_context *ctx, if (!ctx->nr_active || !(is_active & EVENT_ALL)) return; - /* - * If we had been multiplexing, no rotations are necessary, now no events - * are active. - */ - ctx->rotate_necessary = 0; - perf_pmu_disable(ctx->pmu); if (is_active & EVENT_PINNED) { list_for_each_entry_safe(event, tmp, &ctx->pinned_active, active_list) @@ -3062,6 +3057,13 @@ static void ctx_sched_out(struct perf_event_context *ctx, if (is_active & EVENT_FLEXIBLE) { list_for_each_entry_safe(event, tmp, &ctx->flexible_active, active_list) group_sched_out(event, cpuctx, ctx); + + /* + * Since we cleared EVENT_FLEXIBLE, also clear + * rotate_necessary, is will be reset by + * ctx_flexible_sched_in() when needed. + */ + ctx->rotate_necessary = 0; } perf_pmu_enable(ctx->pmu); } @@ -3800,6 +3802,12 @@ ctx_event_to_rotate(struct perf_event_context *ctx) typeof(*event), group_node); } + /* + * Unconditionally clear rotate_necessary; if ctx_flexible_sched_in() + * finds there are unschedulable events, it will set it again. + */ + ctx->rotate_necessary = 0; + return event; } diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index 291680ba8504..a793bd23fe56 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -211,7 +211,7 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr, try_to_free_swap(old_page); page_vma_mapped_walk_done(&pvmw); - if (vma->vm_flags & VM_LOCKED) + if ((vma->vm_flags & VM_LOCKED) && !PageCompound(old_page)) munlock_vma_page(old_page); put_page(old_page); @@ -2205,7 +2205,7 @@ static void handle_swbp(struct pt_regs *regs) if (!uprobe) { if (is_swbp > 0) { /* No matching uprobe; signal SIGTRAP. */ - send_sig(SIGTRAP, current, 0); + force_sig(SIGTRAP); } else { /* * Either we raced with uprobe_unregister() or we can't diff --git a/kernel/exit.c b/kernel/exit.c index 22dfaac9e48c..fa46977b9c07 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -713,8 +713,12 @@ void __noreturn do_exit(long code) struct task_struct *tsk = current; int group_dead; - profile_task_exit(tsk); - kcov_task_exit(tsk); + /* + * We can get here from a kernel oops, sometimes with preemption off. + * Start by checking for critical errors. + * Then fix up important state like USER_DS and preemption. + * Then do everything else. 
+ */ WARN_ON(blk_needs_flush_plug(tsk)); @@ -732,6 +736,16 @@ void __noreturn do_exit(long code) */ set_fs(USER_DS); + if (unlikely(in_atomic())) { + pr_info("note: %s[%d] exited with preempt_count %d\n", + current->comm, task_pid_nr(current), + preempt_count()); + preempt_count_set(PREEMPT_ENABLED); + } + + profile_task_exit(tsk); + kcov_task_exit(tsk); + ptrace_event(PTRACE_EVENT_EXIT, code); validate_creds_for_do_exit(tsk); @@ -749,13 +763,6 @@ void __noreturn do_exit(long code) exit_signals(tsk); /* sets PF_EXITING */ - if (unlikely(in_atomic())) { - pr_info("note: %s[%d] exited with preempt_count %d\n", - current->comm, task_pid_nr(current), - preempt_count()); - preempt_count_set(PREEMPT_ENABLED); - } - /* sync mm's RSS info before statistics gathering */ if (tsk->mm) sync_mm_rss(tsk->mm); diff --git a/kernel/gcov/gcc_4_7.c b/kernel/gcov/gcc_4_7.c index ec37563674d6..60c7be5ff5c8 100644 --- a/kernel/gcov/gcc_4_7.c +++ b/kernel/gcov/gcc_4_7.c @@ -19,7 +19,9 @@ #include #include "gcov.h" -#if (__GNUC__ >= 7) +#if (__GNUC__ >= 10) +#define GCOV_COUNTERS 8 +#elif (__GNUC__ >= 7) #define GCOV_COUNTERS 9 #elif (__GNUC__ > 5) || (__GNUC__ == 5 && __GNUC_MINOR__ >= 1) #define GCOV_COUNTERS 10 diff --git a/kernel/gen_kheaders.sh b/kernel/gen_kheaders.sh index 5a0fc0b0403a..c1510f0ab3ea 100755 --- a/kernel/gen_kheaders.sh +++ b/kernel/gen_kheaders.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/bin/sh # SPDX-License-Identifier: GPL-2.0 # This script generates an archive consisting of kernel headers @@ -21,30 +21,38 @@ arch/$SRCARCH/include/ # Uncomment it for debugging. # if [ ! -f /tmp/iter ]; then iter=1; echo 1 > /tmp/iter; # else iter=$(($(cat /tmp/iter) + 1)); echo $iter > /tmp/iter; fi -# find $src_file_list -name "*.h" | xargs ls -l > /tmp/src-ls-$iter -# find $obj_file_list -name "*.h" | xargs ls -l > /tmp/obj-ls-$iter +# find $all_dirs -name "*.h" | xargs ls -l > /tmp/ls-$iter + +all_dirs= +if [ "$building_out_of_srctree" ]; then + for d in $dir_list; do + all_dirs="$all_dirs $srctree/$d" + done +fi +all_dirs="$all_dirs $dir_list" # include/generated/compile.h is ignored because it is touched even when none -# of the source files changed. This causes pointless regeneration, so let us -# ignore them for md5 calculation. -pushd $srctree > /dev/null -src_files_md5="$(find $dir_list -name "*.h" | - grep -v "include/generated/compile.h" | - grep -v "include/generated/autoconf.h" | - xargs ls -l | md5sum | cut -d ' ' -f1)" -popd > /dev/null -obj_files_md5="$(find $dir_list -name "*.h" | - grep -v "include/generated/compile.h" | - grep -v "include/generated/autoconf.h" | +# of the source files changed. +# +# When Kconfig regenerates include/generated/autoconf.h, its timestamp is +# updated, but the contents might be still the same. When any CONFIG option is +# changed, Kconfig touches the corresponding timestamp file include/config/*.h. +# Hence, the md5sum detects the configuration change anyway. We do not need to +# check include/generated/autoconf.h explicitly. +# +# Ignore them for md5 calculation to avoid pointless regeneration. +headers_md5="$(find $all_dirs -name "*.h" | + grep -v "include/generated/compile.h" | + grep -v "include/generated/autoconf.h" | xargs ls -l | md5sum | cut -d ' ' -f1)" + # Any changes to this script will also cause a rebuild of the archive. 
this_file_md5="$(ls -l $sfile | md5sum | cut -d ' ' -f1)" if [ -f $tarfile ]; then tarfile_md5="$(md5sum $tarfile | cut -d ' ' -f1)"; fi if [ -f kernel/kheaders.md5 ] && - [ "$(cat kernel/kheaders.md5|head -1)" == "$src_files_md5" ] && - [ "$(cat kernel/kheaders.md5|head -2|tail -1)" == "$obj_files_md5" ] && - [ "$(cat kernel/kheaders.md5|head -3|tail -1)" == "$this_file_md5" ] && - [ "$(cat kernel/kheaders.md5|tail -1)" == "$tarfile_md5" ]; then + [ "$(head -n 1 kernel/kheaders.md5)" = "$headers_md5" ] && + [ "$(head -n 2 kernel/kheaders.md5 | tail -n 1)" = "$this_file_md5" ] && + [ "$(tail -n 1 kernel/kheaders.md5)" = "$tarfile_md5" ]; then exit fi @@ -55,14 +63,17 @@ fi rm -rf $cpio_dir mkdir $cpio_dir -pushd $srctree > /dev/null -for f in $dir_list; - do find "$f" -name "*.h"; -done | cpio --quiet -pd $cpio_dir -popd > /dev/null +if [ "$building_out_of_srctree" ]; then + ( + cd $srctree + for f in $dir_list + do find "$f" -name "*.h"; + done | cpio --quiet -pd $cpio_dir + ) +fi -# The second CPIO can complain if files already exist which can -# happen with out of tree builds. Just silence CPIO for now. +# The second CPIO can complain if files already exist which can happen with out +# of tree builds having stale headers in srctree. Just silence CPIO for now. for f in $dir_list; do find "$f" -name "*.h"; done | cpio --quiet -pd $cpio_dir >/dev/null 2>&1 @@ -77,10 +88,9 @@ find $cpio_dir -type f -print0 | find $cpio_dir -printf "./%P\n" | LC_ALL=C sort | \ tar "${KBUILD_BUILD_TIMESTAMP:+--mtime=$KBUILD_BUILD_TIMESTAMP}" \ --owner=0 --group=0 --numeric-owner --no-recursion \ - -Jcf $tarfile -C $cpio_dir/ -T - > /dev/null + -I $XZ -cf $tarfile -C $cpio_dir/ -T - > /dev/null -echo "$src_files_md5" > kernel/kheaders.md5 -echo "$obj_files_md5" >> kernel/kheaders.md5 +echo $headers_md5 > kernel/kheaders.md5 echo "$this_file_md5" >> kernel/kheaders.md5 echo "$(md5sum $tarfile | cut -d ' ' -f1)" >> kernel/kheaders.md5 diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index b304c17d53a3..3b1d0a4725a4 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -194,9 +194,9 @@ void irq_set_thread_affinity(struct irq_desc *desc) set_bit(IRQTF_AFFINITY, &action->thread_flags); } +#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK static void irq_validate_effective_affinity(struct irq_data *data) { -#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK const struct cpumask *m = irq_data_get_effective_affinity_mask(data); struct irq_chip *chip = irq_data_get_irq_chip(data); @@ -204,9 +204,19 @@ static void irq_validate_effective_affinity(struct irq_data *data) return; pr_warn_once("irq_chip %s did not update eff. 
affinity mask of irq %u\n", chip->name, data->irq); -#endif } +static inline void irq_init_effective_affinity(struct irq_data *data, + const struct cpumask *mask) +{ + cpumask_copy(irq_data_get_effective_affinity_mask(data), mask); +} +#else +static inline void irq_validate_effective_affinity(struct irq_data *data) { } +static inline void irq_init_effective_affinity(struct irq_data *data, + const struct cpumask *mask) { } +#endif + int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force) { @@ -265,6 +275,30 @@ static int irq_try_set_affinity(struct irq_data *data, return ret; } +static bool irq_set_affinity_deactivated(struct irq_data *data, + const struct cpumask *mask, bool force) +{ + struct irq_desc *desc = irq_data_to_desc(data); + + /* + * Handle irq chips which can handle affinity only in activated + * state correctly + * + * If the interrupt is not yet activated, just store the affinity + * mask and do not call the chip driver at all. On activation the + * driver has to make sure anyway that the interrupt is in a + * useable state so startup works. + */ + if (!IS_ENABLED(CONFIG_IRQ_DOMAIN_HIERARCHY) || + irqd_is_activated(data) || !irqd_affinity_on_activate(data)) + return false; + + cpumask_copy(desc->irq_common_data.affinity, mask); + irq_init_effective_affinity(data, mask); + irqd_set(data, IRQD_AFFINITY_SET); + return true; +} + int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask, bool force) { @@ -275,6 +309,9 @@ int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask, if (!chip || !chip->irq_set_affinity) return -EINVAL; + if (irq_set_affinity_deactivated(data, mask, force)) + return 0; + if (irq_can_move_pcntxt(data) && !irqd_is_setaffinity_pending(data)) { ret = irq_try_set_affinity(data, mask, force); } else { diff --git a/kernel/irq/matrix.c b/kernel/irq/matrix.c index 30cc217b8631..651a4ad6d711 100644 --- a/kernel/irq/matrix.c +++ b/kernel/irq/matrix.c @@ -380,6 +380,13 @@ int irq_matrix_alloc(struct irq_matrix *m, const struct cpumask *msk, unsigned int cpu, bit; struct cpumap *cm; + /* + * Not required in theory, but matrix_find_best_cpu() uses + * for_each_cpu() which ignores the cpumask on UP . + */ + if (cpumask_empty(msk)) + return -EINVAL; + cpu = matrix_find_best_cpu(m, msk); if (cpu == UINT_MAX) return -ENOSPC; diff --git a/kernel/irq/pm.c b/kernel/irq/pm.c index 8f557fa1f4fe..c6c7e187ae74 100644 --- a/kernel/irq/pm.c +++ b/kernel/irq/pm.c @@ -185,14 +185,18 @@ void rearm_wake_irq(unsigned int irq) unsigned long flags; struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL); - if (!desc || !(desc->istate & IRQS_SUSPENDED) || - !irqd_is_wakeup_set(&desc->irq_data)) + if (!desc) return; + if (!(desc->istate & IRQS_SUSPENDED) || + !irqd_is_wakeup_set(&desc->irq_data)) + goto unlock; + desc->istate &= ~IRQS_SUSPENDED; irqd_set(&desc->irq_data, IRQD_WAKEUP_ARMED); __enable_irq(desc); +unlock: irq_put_desc_busunlock(desc, flags); } diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c index 136ce049c4ad..61f9d781f70a 100644 --- a/kernel/kallsyms.c +++ b/kernel/kallsyms.c @@ -645,19 +645,20 @@ static inline int kallsyms_for_perf(void) * Otherwise, require CAP_SYSLOG (assuming kptr_restrict isn't set to * block even that). 
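Editor's note: the rearm_wake_irq() fix above turns an early return that used to skip irq_put_desc_busunlock() into a goto to a single unlock label, so every path that acquired the descriptor lock also releases it. A tiny pthread sketch of that one-exit-owns-the-unlock structure (illustrative, not the kernel locking API):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static bool armed;

    /* Every path that took the lock leaves through the same unlock. */
    static int rearm(bool suspended)
    {
        int ret = -1;

        pthread_mutex_lock(&lock);
        if (!suspended)
            goto unlock;    /* early exit, but still drops the lock */

        armed = true;
        ret = 0;
    unlock:
        pthread_mutex_unlock(&lock);
        return ret;
    }

    int main(void)
    {
        printf("%d %d\n", rearm(false), rearm(true));    /* -1 0 */
        return 0;
    }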
*/ -int kallsyms_show_value(void) +bool kallsyms_show_value(const struct cred *cred) { switch (kptr_restrict) { case 0: if (kallsyms_for_perf()) - return 1; + return true; /* fallthrough */ case 1: - if (has_capability_noaudit(current, CAP_SYSLOG)) - return 1; + if (security_capable(cred, &init_user_ns, CAP_SYSLOG, + CAP_OPT_NOAUDIT) == 0) + return true; /* fallthrough */ default: - return 0; + return false; } } @@ -674,7 +675,11 @@ static int kallsyms_open(struct inode *inode, struct file *file) return -ENOMEM; reset_iter(iter, 0); - iter->show_value = kallsyms_show_value(); + /* + * Instead of checking this on every s_show() call, cache + * the result here at open time. + */ + iter->show_value = kallsyms_show_value(file->f_cred); return 0; } diff --git a/kernel/kprobes.c b/kernel/kprobes.c index 2625c241ac00..bbff4bccb885 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -326,7 +326,8 @@ struct kprobe *get_kprobe(void *addr) struct kprobe *p; head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)]; - hlist_for_each_entry_rcu(p, head, hlist) { + hlist_for_each_entry_rcu(p, head, hlist, + lockdep_is_held(&kprobe_mutex)) { if (p->addr == addr) return p; } @@ -586,11 +587,12 @@ static void kprobe_optimizer(struct work_struct *work) mutex_unlock(&module_mutex); mutex_unlock(&text_mutex); cpus_read_unlock(); - mutex_unlock(&kprobe_mutex); /* Step 5: Kick optimizer again if needed */ if (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list)) kick_kprobe_optimizer(); + + mutex_unlock(&kprobe_mutex); } /* Wait for completing optimization and unoptimization */ @@ -1236,6 +1238,26 @@ __releases(hlist_lock) } NOKPROBE_SYMBOL(kretprobe_table_unlock); +struct kprobe kprobe_busy = { + .addr = (void *) get_kprobe, +}; + +void kprobe_busy_begin(void) +{ + struct kprobe_ctlblk *kcb; + + preempt_disable(); + __this_cpu_write(current_kprobe, &kprobe_busy); + kcb = get_kprobe_ctlblk(); + kcb->kprobe_status = KPROBE_HIT_ACTIVE; +} + +void kprobe_busy_end(void) +{ + __this_cpu_write(current_kprobe, NULL); + preempt_enable(); +} + /* * This function is called from finish_task_switch when task tk becomes dead, * so that we can recycle any function-return probe instances associated @@ -1253,6 +1275,8 @@ void kprobe_flush_task(struct task_struct *tk) /* Early boot. kretprobe_table_locks not yet initialized. */ return; + kprobe_busy_begin(); + INIT_HLIST_HEAD(&empty_rp); hash = hash_ptr(tk, KPROBE_HASH_BITS); head = &kretprobe_inst_table[hash]; @@ -1266,6 +1290,8 @@ void kprobe_flush_task(struct task_struct *tk) hlist_del(&ri->hlist); kfree(ri); } + + kprobe_busy_end(); } NOKPROBE_SYMBOL(kprobe_flush_task); @@ -2078,6 +2104,13 @@ static void kill_kprobe(struct kprobe *p) * the original probed function (which will be freed soon) any more. */ arch_remove_kprobe(p); + + /* + * The module is going away. We should disarm the kprobe which + * is using ftrace. + */ + if (kprobe_ftrace(p)) + disarm_kprobe_ftrace(p); } /* Disable one kprobe */ @@ -2336,7 +2369,7 @@ static void report_probe(struct seq_file *pi, struct kprobe *p, else kprobe_type = "k"; - if (!kallsyms_show_value()) + if (!kallsyms_show_value(pi->file->f_cred)) addr = NULL; if (sym) @@ -2437,7 +2470,7 @@ static int kprobe_blacklist_seq_show(struct seq_file *m, void *v) * If /proc/kallsyms is not showing kernel address, we won't * show them here either. 
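Editor's note: the kallsyms and kprobes changes above gate address exposure on the credentials captured when the file was opened (file->f_cred) rather than on whoever happens to trigger the read. A simplified userspace sketch of printing either the real pointer or a NULL placeholder based on a stored credential (stand-in struct, not the kernel cred API):

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-in for the opener's credentials captured at open() time. */
    struct cred {
        bool privileged;
    };

    static bool show_value(const struct cred *cred)
    {
        return cred && cred->privileged;
    }

    static void print_addr(const struct cred *opener, void *addr)
    {
        /* Unprivileged readers see a NULL placeholder, not the address. */
        printf("%p\n", show_value(opener) ? addr : NULL);
    }

    int main(void)
    {
        int x;
        struct cred root = { .privileged = true };
        struct cred user = { .privileged = false };

        print_addr(&root, &x);    /* real address */
        print_addr(&user, &x);    /* (nil) */
        return 0;
    }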
*/ - if (!kallsyms_show_value()) + if (!kallsyms_show_value(m->file->f_cred)) seq_printf(m, "0x%px-0x%px\t%ps\n", NULL, NULL, (void *)ent->start_addr); else diff --git a/kernel/kthread.c b/kernel/kthread.c index b262f47046ca..bfbfa481be3a 100644 --- a/kernel/kthread.c +++ b/kernel/kthread.c @@ -199,8 +199,15 @@ static void __kthread_parkme(struct kthread *self) if (!test_bit(KTHREAD_SHOULD_PARK, &self->flags)) break; + /* + * Thread is going to call schedule(), do not preempt it, + * or the caller of kthread_park() may spend more time in + * wait_task_inactive(). + */ + preempt_disable(); complete(&self->parked); - schedule(); + schedule_preempt_disabled(); + preempt_enable(); } __set_current_state(TASK_RUNNING); } @@ -245,8 +252,14 @@ static int kthread(void *_create) /* OK, tell user we're spawned, wait for stop or wakeup */ __set_current_state(TASK_UNINTERRUPTIBLE); create->result = current; + /* + * Thread is going to call schedule(), do not preempt it, + * or the creator may spend more time in wait_task_inactive(). + */ + preempt_disable(); complete(done); - schedule(); + schedule_preempt_disabled(); + preempt_enable(); ret = -EINTR; if (!test_bit(KTHREAD_SHOULD_STOP, &self->flags)) { diff --git a/kernel/locking/lockdep_proc.c b/kernel/locking/lockdep_proc.c index 9bb6d2497b04..581f81818138 100644 --- a/kernel/locking/lockdep_proc.c +++ b/kernel/locking/lockdep_proc.c @@ -400,7 +400,7 @@ static void seq_lock_time(struct seq_file *m, struct lock_time *lt) seq_time(m, lt->min); seq_time(m, lt->max); seq_time(m, lt->total); - seq_time(m, lt->nr ? div_s64(lt->total, lt->nr) : 0); + seq_time(m, lt->nr ? div64_u64(lt->total, lt->nr) : 0); } static void seq_stats(struct seq_file *m, struct lock_stat_data *data) diff --git a/kernel/module.c b/kernel/module.c index a2a47f4a33a7..819c5d3b4c29 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -1507,8 +1507,7 @@ static inline bool sect_empty(const Elf_Shdr *sect) } struct module_sect_attr { - struct module_attribute mattr; - char *name; + struct bin_attribute battr; unsigned long address; }; @@ -1518,13 +1517,34 @@ struct module_sect_attrs { struct module_sect_attr attrs[0]; }; -static ssize_t module_sect_show(struct module_attribute *mattr, - struct module_kobject *mk, char *buf) +#define MODULE_SECT_READ_SIZE (3 /* "0x", "\n" */ + (BITS_PER_LONG / 4)) +static ssize_t module_sect_read(struct file *file, struct kobject *kobj, + struct bin_attribute *battr, + char *buf, loff_t pos, size_t count) { struct module_sect_attr *sattr = - container_of(mattr, struct module_sect_attr, mattr); - return sprintf(buf, "0x%px\n", kptr_restrict < 2 ? - (void *)sattr->address : NULL); + container_of(battr, struct module_sect_attr, battr); + char bounce[MODULE_SECT_READ_SIZE + 1]; + size_t wrote; + + if (pos != 0) + return -EINVAL; + + /* + * Since we're a binary read handler, we must account for the + * trailing NUL byte that sprintf will write: if "buf" is + * too small to hold the NUL, or the NUL is exactly the last + * byte, the read will look like it got truncated by one byte. + * Since there is no way to ask sprintf nicely to not write + * the NUL, we have to use a bounce buffer. + */ + wrote = scnprintf(bounce, sizeof(bounce), "0x%px\n", + kallsyms_show_value(file->f_cred) + ? 
(void *)sattr->address : NULL); + count = min(count, wrote); + memcpy(buf, bounce, count); + + return count; } static void free_sect_attrs(struct module_sect_attrs *sect_attrs) @@ -1532,7 +1552,7 @@ static void free_sect_attrs(struct module_sect_attrs *sect_attrs) unsigned int section; for (section = 0; section < sect_attrs->nsections; section++) - kfree(sect_attrs->attrs[section].name); + kfree(sect_attrs->attrs[section].battr.attr.name); kfree(sect_attrs); } @@ -1541,42 +1561,41 @@ static void add_sect_attrs(struct module *mod, const struct load_info *info) unsigned int nloaded = 0, i, size[2]; struct module_sect_attrs *sect_attrs; struct module_sect_attr *sattr; - struct attribute **gattr; + struct bin_attribute **gattr; /* Count loaded sections and allocate structures */ for (i = 0; i < info->hdr->e_shnum; i++) if (!sect_empty(&info->sechdrs[i])) nloaded++; size[0] = ALIGN(struct_size(sect_attrs, attrs, nloaded), - sizeof(sect_attrs->grp.attrs[0])); - size[1] = (nloaded + 1) * sizeof(sect_attrs->grp.attrs[0]); + sizeof(sect_attrs->grp.bin_attrs[0])); + size[1] = (nloaded + 1) * sizeof(sect_attrs->grp.bin_attrs[0]); sect_attrs = kzalloc(size[0] + size[1], GFP_KERNEL); if (sect_attrs == NULL) return; /* Setup section attributes. */ sect_attrs->grp.name = "sections"; - sect_attrs->grp.attrs = (void *)sect_attrs + size[0]; + sect_attrs->grp.bin_attrs = (void *)sect_attrs + size[0]; sect_attrs->nsections = 0; sattr = §_attrs->attrs[0]; - gattr = §_attrs->grp.attrs[0]; + gattr = §_attrs->grp.bin_attrs[0]; for (i = 0; i < info->hdr->e_shnum; i++) { Elf_Shdr *sec = &info->sechdrs[i]; if (sect_empty(sec)) continue; + sysfs_bin_attr_init(&sattr->battr); sattr->address = sec->sh_addr; - sattr->name = kstrdup(info->secstrings + sec->sh_name, - GFP_KERNEL); - if (sattr->name == NULL) + sattr->battr.attr.name = + kstrdup(info->secstrings + sec->sh_name, GFP_KERNEL); + if (sattr->battr.attr.name == NULL) goto out; sect_attrs->nsections++; - sysfs_attr_init(&sattr->mattr.attr); - sattr->mattr.show = module_sect_show; - sattr->mattr.store = NULL; - sattr->mattr.attr.name = sattr->name; - sattr->mattr.attr.mode = S_IRUSR; - *(gattr++) = &(sattr++)->mattr.attr; + sattr->battr.read = module_sect_read; + sattr->battr.size = MODULE_SECT_READ_SIZE; + sattr->battr.attr.mode = 0400; + *(gattr++) = &(sattr++)->battr; } *gattr = NULL; @@ -1666,7 +1685,7 @@ static void add_notes_attrs(struct module *mod, const struct load_info *info) continue; if (info->sechdrs[i].sh_type == SHT_NOTE) { sysfs_bin_attr_init(nattr); - nattr->attr.name = mod->sect_attrs->attrs[loaded].name; + nattr->attr.name = mod->sect_attrs->attrs[loaded].battr.attr.name; nattr->attr.mode = S_IRUGO; nattr->size = info->sechdrs[i].sh_size; nattr->private = (void *) info->sechdrs[i].sh_addr; @@ -4391,7 +4410,7 @@ static int modules_open(struct inode *inode, struct file *file) if (!err) { struct seq_file *m = file->private_data; - m->private = kallsyms_show_value() ? NULL : (void *)8ul; + m->private = kallsyms_show_value(file->f_cred) ? 
NULL : (void *)8ul; } return err; diff --git a/kernel/relay.c b/kernel/relay.c index 4b760ec16342..d3940becf2fc 100644 --- a/kernel/relay.c +++ b/kernel/relay.c @@ -197,6 +197,7 @@ free_buf: static void relay_destroy_channel(struct kref *kref) { struct rchan *chan = container_of(kref, struct rchan, kref); + free_percpu(chan->buf); kfree(chan); } diff --git a/kernel/resource.c b/kernel/resource.c index 76036a41143b..841737bbda9e 100644 --- a/kernel/resource.c +++ b/kernel/resource.c @@ -1126,6 +1126,7 @@ struct resource * __request_region(struct resource *parent, { DECLARE_WAITQUEUE(wait, current); struct resource *res = alloc_resource(GFP_KERNEL); + struct resource *orig_parent = parent; if (!res) return NULL; @@ -1176,6 +1177,10 @@ struct resource * __request_region(struct resource *parent, break; } write_unlock(&resource_lock); + + if (res && orig_parent == &iomem_resource) + revoke_devmem(res); + return res; } EXPORT_SYMBOL(__request_region); diff --git a/kernel/sched/core.c b/kernel/sched/core.c index e99d326fa569..352239c411a4 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -794,6 +794,26 @@ unsigned int sysctl_sched_uclamp_util_max = SCHED_CAPACITY_SCALE; /* All clamps are required to be less or equal than these values */ static struct uclamp_se uclamp_default[UCLAMP_CNT]; +/* + * This static key is used to reduce the uclamp overhead in the fast path. It + * primarily disables the call to uclamp_rq_{inc, dec}() in + * enqueue/dequeue_task(). + * + * This allows users to continue to enable uclamp in their kernel config with + * minimum uclamp overhead in the fast path. + * + * As soon as userspace modifies any of the uclamp knobs, the static key is + * enabled, since we have an actual users that make use of uclamp + * functionality. + * + * The knobs that would enable this static key are: + * + * * A task modifying its uclamp value with sched_setattr(). + * * An admin modifying the sysctl_sched_uclamp_{min, max} via procfs. + * * An admin modifying the cgroup cpu.uclamp.{min, max} + */ +DEFINE_STATIC_KEY_FALSE(sched_uclamp_used); + /* Integer rounded range for each bucket */ #define UCLAMP_BUCKET_DELTA DIV_ROUND_CLOSEST(SCHED_CAPACITY_SCALE, UCLAMP_BUCKETS) @@ -990,10 +1010,38 @@ static inline void uclamp_rq_dec_id(struct rq *rq, struct task_struct *p, lockdep_assert_held(&rq->lock); + /* + * If sched_uclamp_used was enabled after task @p was enqueued, + * we could end up with unbalanced call to uclamp_rq_dec_id(). + * + * In this case the uc_se->active flag should be false since no uclamp + * accounting was performed at enqueue time and we can just return + * here. + * + * Need to be careful of the following enqeueue/dequeue ordering + * problem too + * + * enqueue(taskA) + * // sched_uclamp_used gets enabled + * enqueue(taskB) + * dequeue(taskA) + * // Must not decrement bukcet->tasks here + * dequeue(taskB) + * + * where we could end up with stale data in uc_se and + * bucket[uc_se->bucket_id]. + * + * The following check here eliminates the possibility of such race. + */ + if (unlikely(!uc_se->active)) + return; + bucket = &uc_rq->bucket[uc_se->bucket_id]; + SCHED_WARN_ON(!bucket->tasks); if (likely(bucket->tasks)) bucket->tasks--; + uc_se->active = false; /* @@ -1021,6 +1069,15 @@ static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p) { enum uclamp_id clamp_id; + /* + * Avoid any overhead until uclamp is actually used by the userspace. + * + * The condition is constructed such that a NOP is generated when + * sched_uclamp_used is disabled. 
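Editor's note: the uclamp comment here describes a fast path that bails out immediately while the feature is unused; the kernel implements this with a static key so the disabled case really does become a patched-out branch. A rough userspace analogue can only hint the branch with __builtin_expect, but it shows the same early-out shape (hypothetical names, not the scheduler code):

    #include <stdbool.h>
    #include <stdio.h>

    static bool uclamp_used;    /* flipped the first time the feature is used */

    static void enqueue_fast_path(int *clamp_work)
    {
        /* Common case: a predicted-not-taken early out. The kernel's static
         * key goes further and patches the test away entirely. */
        if (!__builtin_expect(uclamp_used, 0))
            return;

        (*clamp_work)++;        /* stands in for the clamp accounting */
    }

    int main(void)
    {
        int work = 0;

        enqueue_fast_path(&work);    /* feature unused: no accounting */
        uclamp_used = true;          /* e.g. first sched_setattr() with clamps */
        enqueue_fast_path(&work);
        printf("clamp work done: %d\n", work);    /* 1 */
        return 0;
    }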
+ */ + if (!static_branch_unlikely(&sched_uclamp_used)) + return; + if (unlikely(!p->sched_class->uclamp_enabled)) return; @@ -1036,6 +1093,15 @@ static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p) { enum uclamp_id clamp_id; + /* + * Avoid any overhead until uclamp is actually used by the userspace. + * + * The condition is constructed such that a NOP is generated when + * sched_uclamp_used is disabled. + */ + if (!static_branch_unlikely(&sched_uclamp_used)) + return; + if (unlikely(!p->sched_class->uclamp_enabled)) return; @@ -1145,8 +1211,10 @@ int sysctl_sched_uclamp_handler(struct ctl_table *table, int write, update_root_tg = true; } - if (update_root_tg) + if (update_root_tg) { + static_branch_enable(&sched_uclamp_used); uclamp_update_root_tg(); + } /* * We update all RUNNABLE tasks only when task groups are in use. @@ -1181,6 +1249,15 @@ static int uclamp_validate(struct task_struct *p, if (upper_bound > SCHED_CAPACITY_SCALE) return -EINVAL; + /* + * We have valid uclamp attributes; make sure uclamp is enabled. + * + * We need to do that here, because enabling static branches is a + * blocking operation which obviously cannot be done while holding + * scheduler locks. + */ + static_branch_enable(&sched_uclamp_used); + return 0; } @@ -1238,6 +1315,20 @@ static void uclamp_fork(struct task_struct *p) } } +static void __init init_uclamp_rq(struct rq *rq) +{ + enum uclamp_id clamp_id; + struct uclamp_rq *uc_rq = rq->uclamp; + + for_each_clamp_id(clamp_id) { + uc_rq[clamp_id] = (struct uclamp_rq) { + .value = uclamp_none(clamp_id) + }; + } + + rq->uclamp_flags = 0; +} + static void __init init_uclamp(void) { struct uclamp_se uc_max = {}; @@ -1246,11 +1337,8 @@ static void __init init_uclamp(void) mutex_init(&uclamp_mutex); - for_each_possible_cpu(cpu) { - memset(&cpu_rq(cpu)->uclamp, 0, - sizeof(struct uclamp_rq)*UCLAMP_CNT); - cpu_rq(cpu)->uclamp_flags = 0; - } + for_each_possible_cpu(cpu) + init_uclamp_rq(cpu_rq(cpu)); for_each_clamp_id(clamp_id) { uclamp_se_set(&init_task.uclamp_req[clamp_id], @@ -1649,7 +1737,7 @@ static int __set_cpus_allowed_ptr(struct task_struct *p, goto out; } - if (cpumask_equal(p->cpus_ptr, new_mask)) + if (cpumask_equal(&p->cpus_mask, new_mask)) goto out; dest_cpu = cpumask_any_and(cpu_valid_mask, new_mask); @@ -2889,6 +2977,7 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p) * Silence PROVE_RCU. */ raw_spin_lock_irqsave(&p->pi_lock, flags); + rseq_migrate(p); /* * We're setting the CPU for the first time, we don't migrate, * so use __set_task_cpu(). @@ -2953,6 +3042,7 @@ void wake_up_new_task(struct task_struct *p) * as we're not fully set-up yet. 
*/ p->recent_used_cpu = task_cpu(p); + rseq_migrate(p); __set_task_cpu(p, select_task_rq(p, task_cpu(p), SD_BALANCE_FORK, 0)); #endif rq = __task_rq_lock(p, &rf); @@ -4447,7 +4537,8 @@ void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task) */ if (dl_prio(prio)) { if (!dl_prio(p->normal_prio) || - (pi_task && dl_entity_preempt(&pi_task->dl, &p->dl))) { + (pi_task && dl_prio(pi_task->prio) && + dl_entity_preempt(&pi_task->dl, &p->dl))) { p->dl.dl_boosted = 1; queue_flag |= ENQUEUE_REPLENISH; } else @@ -6177,13 +6268,14 @@ void idle_task_exit(void) struct mm_struct *mm = current->active_mm; BUG_ON(cpu_online(smp_processor_id())); + BUG_ON(current != this_rq()->idle); if (mm != &init_mm) { switch_mm(mm, &init_mm, current); - current->active_mm = &init_mm; finish_arch_post_lock_switch(); } - mmdrop(mm); + + /* finish_cpu(), as ran on the BP, will clean up the active_mm state */ } /* @@ -7279,6 +7371,8 @@ static ssize_t cpu_uclamp_write(struct kernfs_open_file *of, char *buf, if (req.ret) return req.ret; + static_branch_enable(&sched_uclamp_used); + mutex_lock(&uclamp_mutex); rcu_read_lock(); @@ -7373,6 +7467,8 @@ static DEFINE_MUTEX(cfs_constraints_mutex); const u64 max_cfs_quota_period = 1 * NSEC_PER_SEC; /* 1s */ static const u64 min_cfs_quota_period = 1 * NSEC_PER_MSEC; /* 1ms */ +/* More than 203 days if BW_SHIFT equals 20. */ +static const u64 max_cfs_runtime = MAX_BW * NSEC_PER_USEC; static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime); @@ -7400,6 +7496,12 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota) if (period > max_cfs_quota_period) return -EINVAL; + /* + * Bound quota to defend quota against overflow during bandwidth shift. + */ + if (quota != RUNTIME_INF && quota > max_cfs_runtime) + return -EINVAL; + /* * Prevent race between setting of cfs_rq->runtime_enabled and * unthrottle_offline_cfs_rqs(). 
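
(As a quick sanity check of the "More than 203 days" figure above, and of the "More than 4 hours" RT figure later in this series, the bounds can be recomputed standalone. The BW_SHIFT/MAX_BW constants mirror the definitions this series adds to kernel/sched/sched.h; the rest is an illustrative userspace program, not kernel code.)

#include <stdio.h>
#include <stdint.h>

#define BW_SHIFT	20
#define MAX_BW_BITS	(64 - BW_SHIFT)
#define MAX_BW		((1ULL << MAX_BW_BITS) - 1)
#define NSEC_PER_USEC	1000ULL

int main(void)
{
	/* CFS bound: max_cfs_runtime = MAX_BW * NSEC_PER_USEC, in nanoseconds */
	uint64_t max_cfs_runtime = MAX_BW * NSEC_PER_USEC;

	printf("max_cfs_runtime = %llu ns ~= %.1f days\n",
	       (unsigned long long)max_cfs_runtime,
	       max_cfs_runtime / 1e9 / 86400.0);	/* prints ~203.6 days */

	/* RT bound: max_rt_runtime = MAX_BW, also in nanoseconds */
	printf("max_rt_runtime  = %llu ns ~= %.1f hours\n",
	       (unsigned long long)MAX_BW,
	       MAX_BW / 1e9 / 3600.0);			/* prints ~4.9 hours */
	return 0;
}
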
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c index b6f56e7c8dd1..4cb80e6042c4 100644 --- a/kernel/sched/cpufreq_schedutil.c +++ b/kernel/sched/cpufreq_schedutil.c @@ -210,7 +210,7 @@ unsigned long schedutil_cpu_util(int cpu, unsigned long util_cfs, unsigned long dl_util, util, irq; struct rq *rq = cpu_rq(cpu); - if (!IS_BUILTIN(CONFIG_UCLAMP_TASK) && + if (!uclamp_is_used() && type == FREQUENCY_UTIL && rt_rq_is_runnable(&rq->rt)) { return max; } diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index 08bdee0480b3..4cb00538a207 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c @@ -2693,6 +2693,7 @@ void __dl_clear_params(struct task_struct *p) dl_se->dl_bw = 0; dl_se->dl_density = 0; + dl_se->dl_boosted = 0; dl_se->dl_throttled = 0; dl_se->dl_yielded = 0; dl_se->dl_non_contending = 0; diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c index f7e4579e746c..c4b702fe1d73 100644 --- a/kernel/sched/debug.c +++ b/kernel/sched/debug.c @@ -258,7 +258,7 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd) set_table_entry(&table[2], "busy_factor", &sd->busy_factor, sizeof(int), 0644, proc_dointvec_minmax); set_table_entry(&table[3], "imbalance_pct", &sd->imbalance_pct, sizeof(int), 0644, proc_dointvec_minmax); set_table_entry(&table[4], "cache_nice_tries", &sd->cache_nice_tries, sizeof(int), 0644, proc_dointvec_minmax); - set_table_entry(&table[5], "flags", &sd->flags, sizeof(int), 0644, proc_dointvec_minmax); + set_table_entry(&table[5], "flags", &sd->flags, sizeof(int), 0444, proc_dointvec_minmax); set_table_entry(&table[6], "max_newidle_lb_cost", &sd->max_newidle_lb_cost, sizeof(long), 0644, proc_doulongvec_minmax); set_table_entry(&table[7], "name", sd->name, CORENAME_MAX_SIZE, 0444, proc_dostring); /* &table[8] is terminator */ diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 8a0e6bdba50d..20bf1f66733a 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -3824,7 +3824,11 @@ static inline void update_misfit_status(struct task_struct *p, struct rq *rq) return; } - rq->misfit_task_load = task_h_load(p); + /* + * Make sure that misfit_task_load will not be null even if + * task_h_load() returns 0. + */ + rq->misfit_task_load = max_t(unsigned long, task_h_load(p), 1); } #else /* CONFIG_SMP */ @@ -4942,6 +4946,8 @@ static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer) if (!overrun) break; + idle = do_sched_cfs_period_timer(cfs_b, overrun, flags); + if (++count > 3) { u64 new, old = ktime_to_ns(cfs_b->period); @@ -4971,8 +4977,6 @@ static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer) /* reset count so we don't come right back in here */ count = 0; } - - idle = do_sched_cfs_period_timer(cfs_b, overrun, flags); } if (idle) cfs_b->period_active = 0; @@ -7407,7 +7411,15 @@ static int detach_tasks(struct lb_env *env) if (!can_migrate_task(p, env)) goto next; - load = task_h_load(p); + /* + * Depending on the number of CPUs and tasks and the + * cgroup hierarchy, task_h_load() can return a null + * value. Make sure that env->imbalance decreases; + * otherwise detach_tasks() will stop only after + * detaching up to loop_max tasks. + */ + load = max_t(unsigned long, task_h_load(p), 1); + if (sched_feat(LB_MIN) && load < 16 && !env->sd->nr_balance_failed) goto next; @@ -9373,7 +9385,12 @@ static void kick_ilb(unsigned int flags) { int ilb_cpu; - nohz.next_balance++; + /* + * Increase nohz.next_balance only if a full ilb is triggered but + * not if we only update stats.
+ */ + if (flags & NOHZ_BALANCE_KICK) + nohz.next_balance = jiffies+1; ilb_cpu = find_new_ilb(); @@ -9691,6 +9708,14 @@ static bool _nohz_idle_balance(struct rq *this_rq, unsigned int flags, } } + /* + * next_balance will be updated only when there is a need. + * When the CPU is attached to null domain for ex, it will not be + * updated. + */ + if (likely(update_next_balance)) + nohz.next_balance = next_balance; + /* Newly idle CPU doesn't need an update */ if (idle != CPU_NEWLY_IDLE) { update_blocked_averages(this_cpu); @@ -9711,14 +9736,6 @@ abort: if (has_blocked_load) WRITE_ONCE(nohz.has_blocked, 1); - /* - * next_balance will be updated only when there is a need. - * When the CPU is attached to null domain for ex, it will not be - * updated. - */ - if (likely(update_next_balance)) - nohz.next_balance = next_balance; - return ret; } diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index 7bf917e4d63a..5b04bba4500d 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c @@ -9,6 +9,8 @@ int sched_rr_timeslice = RR_TIMESLICE; int sysctl_sched_rr_timeslice = (MSEC_PER_SEC / HZ) * RR_TIMESLICE; +/* More than 4 hours if BW_SHIFT equals 20. */ +static const u64 max_rt_runtime = MAX_BW; static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun); @@ -2513,6 +2515,12 @@ static int tg_set_rt_bandwidth(struct task_group *tg, if (rt_period == 0) return -EINVAL; + /* + * Bound quota to defend quota against overflow during bandwidth shift. + */ + if (rt_runtime != RUNTIME_INF && rt_runtime > max_rt_runtime) + return -EINVAL; + mutex_lock(&rt_constraints_mutex); read_lock(&tasklist_lock); err = __rt_schedulable(tg, rt_period, rt_runtime); @@ -2634,7 +2642,9 @@ static int sched_rt_global_validate(void) return -EINVAL; if ((sysctl_sched_rt_runtime != RUNTIME_INF) && - (sysctl_sched_rt_runtime > sysctl_sched_rt_period)) + ((sysctl_sched_rt_runtime > sysctl_sched_rt_period) || + ((u64)sysctl_sched_rt_runtime * + NSEC_PER_USEC > max_rt_runtime))) return -EINVAL; return 0; diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index c7e7481968bf..9f2a9e34a78d 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -841,6 +841,8 @@ struct uclamp_rq { unsigned int value; struct uclamp_bucket bucket[UCLAMP_BUCKETS]; }; + +DECLARE_STATIC_KEY_FALSE(sched_uclamp_used); #endif /* CONFIG_UCLAMP_TASK */ /* @@ -1889,6 +1891,8 @@ extern void init_dl_rq_bw_ratio(struct dl_rq *dl_rq); #define BW_SHIFT 20 #define BW_UNIT (1 << BW_SHIFT) #define RATIO_SHIFT 8 +#define MAX_BW_BITS (64 - BW_SHIFT) +#define MAX_BW ((1ULL << MAX_BW_BITS) - 1) unsigned long to_ratio(u64 period, u64 runtime); extern void init_entity_runnable_average(struct sched_entity *se); @@ -2317,12 +2321,35 @@ static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) {} #ifdef CONFIG_UCLAMP_TASK unsigned int uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id); +/** + * uclamp_util_with - clamp @util with @rq and @p effective uclamp values. + * @rq: The rq to clamp against. Must not be NULL. + * @util: The util value to clamp. + * @p: The task to clamp against. Can be NULL if you want to clamp + * against @rq only. + * + * Clamps the passed @util to the max(@rq, @p) effective uclamp values. + * + * If sched_uclamp_used static key is disabled, then just return the util + * without any clamping since uclamp aggregation at the rq level in the fast + * path is disabled, rendering this operation a NOP. + * + * Use uclamp_eff_value() if you don't care about uclamp values at rq level. 
It + * will return the correct effective uclamp value of the task even if the + * static key is disabled. + */ static __always_inline unsigned int uclamp_util_with(struct rq *rq, unsigned int util, struct task_struct *p) { - unsigned int min_util = READ_ONCE(rq->uclamp[UCLAMP_MIN].value); - unsigned int max_util = READ_ONCE(rq->uclamp[UCLAMP_MAX].value); + unsigned int min_util; + unsigned int max_util; + + if (!static_branch_likely(&sched_uclamp_used)) + return util; + + min_util = READ_ONCE(rq->uclamp[UCLAMP_MIN].value); + max_util = READ_ONCE(rq->uclamp[UCLAMP_MAX].value); if (p) { min_util = max(min_util, uclamp_eff_value(p, UCLAMP_MIN)); @@ -2344,6 +2371,19 @@ static inline unsigned int uclamp_util(struct rq *rq, unsigned int util) { return uclamp_util_with(rq, util, NULL); } + +/* + * When uclamp is compiled in, the aggregation at rq level is 'turned off' + * by default in the fast path and only gets turned on once userspace performs + * an operation that requires it. + * + * Returns true if userspace opted-in to use uclamp and aggregation at rq level + * hence is active. + */ +static inline bool uclamp_is_used(void) +{ + return static_branch_likely(&sched_uclamp_used); +} #else /* CONFIG_UCLAMP_TASK */ static inline unsigned int uclamp_util_with(struct rq *rq, unsigned int util, struct task_struct *p) @@ -2354,6 +2394,11 @@ static inline unsigned int uclamp_util(struct rq *rq, unsigned int util) { return util; } + +static inline bool uclamp_is_used(void) +{ + return false; +} #endif /* CONFIG_UCLAMP_TASK */ #ifdef arch_scale_freq_capacity diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c index 1fa1e13a5944..ffaa97a8d405 100644 --- a/kernel/sched/topology.c +++ b/kernel/sched/topology.c @@ -1333,7 +1333,7 @@ sd_init(struct sched_domain_topology_level *tl, sd_flags = (*tl->sd_flags)(); if (WARN_ONCE(sd_flags & ~TOPOLOGY_SD_FLAGS, "wrong sd_flags in topology description\n")) - sd_flags &= ~TOPOLOGY_SD_FLAGS; + sd_flags &= TOPOLOGY_SD_FLAGS; /* Apply detected topology flags */ sd_flags |= dflags; diff --git a/kernel/seccomp.c b/kernel/seccomp.c index 2c697ce7be21..e0fd97235653 100644 --- a/kernel/seccomp.c +++ b/kernel/seccomp.c @@ -42,6 +42,14 @@ #include #include +/* + * When SECCOMP_IOCTL_NOTIF_ID_VALID was first introduced, it had the + * wrong direction flag in the ioctl number. This is the broken one, + * which the kernel needs to keep supporting until all userspaces stop + * using the wrong command number. + */ +#define SECCOMP_IOCTL_NOTIF_ID_VALID_WRONG_DIR SECCOMP_IOR(2, __u64) + enum notify_state { SECCOMP_NOTIFY_INIT, SECCOMP_NOTIFY_SENT, @@ -1168,6 +1176,7 @@ static long seccomp_notify_ioctl(struct file *file, unsigned int cmd, return seccomp_notify_recv(filter, buf); case SECCOMP_IOCTL_NOTIF_SEND: return seccomp_notify_send(filter, buf); + case SECCOMP_IOCTL_NOTIF_ID_VALID_WRONG_DIR: case SECCOMP_IOCTL_NOTIF_ID_VALID: return seccomp_notify_id_valid(filter, buf); default: diff --git a/kernel/time/timer.c b/kernel/time/timer.c index 4820823515e9..a3ae244b1bcd 100644 --- a/kernel/time/timer.c +++ b/kernel/time/timer.c @@ -43,6 +43,7 @@ #include #include #include +#include #include #include @@ -522,8 +523,8 @@ static int calc_wheel_index(unsigned long expires, unsigned long clk) * Force expire obscene large timeouts to expire at the * capacity limit of the wheel. 
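
(The compatibility define in the seccomp hunk above works because _IOR() and _IOW() encode the same magic, number and size, and differ only in the two direction bits. A small userspace sketch, using local macro names rather than the kernel's UAPI ones, makes that visible; it assumes the corrected ioctl uses the write direction, as the comment implies.)

/* cc -o notif_id_dir notif_id_dir.c && ./notif_id_dir -- illustrative only */
#include <stdio.h>
#include <linux/types.h>
#include <linux/ioctl.h>

#define EXAMPLE_IOC_MAGIC	'!'	/* seccomp's ioctl magic character */

#define EXAMPLE_ID_VALID_WRONG_DIR	_IOR(EXAMPLE_IOC_MAGIC, 2, __u64)
#define EXAMPLE_ID_VALID		_IOW(EXAMPLE_IOC_MAGIC, 2, __u64)

int main(void)
{
	unsigned long wrong = EXAMPLE_ID_VALID_WRONG_DIR;
	unsigned long fixed = EXAMPLE_ID_VALID;

	printf("wrong-direction cmd: %#lx\n", wrong);
	printf("fixed-direction cmd: %#lx\n", fixed);
	/* Only the _IOC_DIR bits differ, which is why both can be accepted. */
	printf("xor of the two:      %#lx\n", wrong ^ fixed);
	return 0;
}
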
*/ - if (expires >= WHEEL_TIMEOUT_CUTOFF) - expires = WHEEL_TIMEOUT_MAX; + if (delta >= WHEEL_TIMEOUT_CUTOFF) + expires = clk + WHEEL_TIMEOUT_MAX; idx = calc_index(expires, LVL_DEPTH - 1); } @@ -585,7 +586,15 @@ trigger_dyntick_cpu(struct timer_base *base, struct timer_list *timer) * Set the next expiry time and kick the CPU so it can reevaluate the * wheel: */ - base->next_expiry = timer->expires; + if (time_before(timer->expires, base->clk)) { + /* + * Prevent forward_timer_base() from moving the base->clk + * backward + */ + base->next_expiry = base->clk; + } else { + base->next_expiry = timer->expires; + } wake_up_nohz_cpu(base->cpu); } @@ -897,10 +906,13 @@ static inline void forward_timer_base(struct timer_base *base) * If the next expiry value is > jiffies, then we fast forward to * jiffies otherwise we forward to the next expiry value. */ - if (time_after(base->next_expiry, jnow)) + if (time_after(base->next_expiry, jnow)) { base->clk = jnow; - else + } else { + if (WARN_ON_ONCE(time_before(base->next_expiry, base->clk))) + return; base->clk = base->next_expiry; + } #endif } @@ -1731,6 +1743,13 @@ void update_process_times(int user_tick) scheduler_tick(); if (IS_ENABLED(CONFIG_POSIX_TIMERS)) run_posix_cpu_timers(); + + /* The current CPU might make use of net randoms without receiving IRQs + * to renew them often enough. Let's update the net_rand_state from a + * non-constant value that's not affine to the number of calls to make + * sure it's updated when there's some activity (we don't care in idle). + */ + this_cpu_add(net_rand_state.s1, rol32(jiffies, 24) + user_tick); } /** diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c index e7e483cdbea6..884333b9fc76 100644 --- a/kernel/trace/blktrace.c +++ b/kernel/trace/blktrace.c @@ -3,6 +3,9 @@ * Copyright (C) 2006 Jens Axboe * */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include #include #include @@ -495,6 +498,16 @@ static int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev, */ strreplace(buts->name, '/', '_'); + /* + * bdev can be NULL, as with scsi-generic, this is as helpful as + * we can be. + */ + if (q->blk_trace) { + pr_warn("Concurrent blktraces are not allowed on %s\n", + buts->name); + return -EBUSY; + } + bt = kzalloc(sizeof(*bt), GFP_KERNEL); if (!bt) return -ENOMEM; @@ -508,12 +521,32 @@ static int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev, if (!bt->msg_data) goto err; - ret = -ENOENT; - - dir = debugfs_lookup(buts->name, blk_debugfs_root); - if (!dir) +#ifdef CONFIG_BLK_DEBUG_FS + /* + * When tracing whole make_request drivers (multiqueue) block devices, + * reuse the existing debugfs directory created by the block layer on + * init. For request-based block devices, all partition block devices, + * and scsi-generic block devices we create a temporary new debugfs + * directory that will be removed once the trace ends. + */ + if (queue_is_mq(q) && bdev && bdev == bdev->bd_contains) + dir = q->debugfs_dir; + else +#endif bt->dir = dir = debugfs_create_dir(buts->name, blk_debugfs_root); + /* + * As blktrace relies on debugfs for its interface, the debugfs directory + * is required, contrary to the usual mantra of not checking for debugfs + * files or directories.
+ */ + if (IS_ERR_OR_NULL(dir)) { + pr_warn("debugfs_dir not present for %s so skipping\n", + buts->name); + ret = -ENOENT; + goto err; + } + bt->dev = dev; atomic_set(&bt->dropped, 0); INIT_LIST_HEAD(&bt->running_list); @@ -552,8 +585,6 @@ static int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev, ret = 0; err: - if (dir && !bt->dir) - dput(dir); if (ret) blk_trace_free(bt); return ret; @@ -999,8 +1030,10 @@ static void blk_add_trace_split(void *ignore, __blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size, bio_op(bio), bio->bi_opf, - BLK_TA_SPLIT, bio->bi_status, sizeof(rpdu), - &rpdu, blk_trace_bio_get_cgid(q, bio)); + BLK_TA_SPLIT, + blk_status_to_errno(bio->bi_status), + sizeof(rpdu), &rpdu, + blk_trace_bio_get_cgid(q, bio)); } rcu_read_unlock(); } @@ -1037,7 +1070,8 @@ static void blk_add_trace_bio_remap(void *ignore, r.sector_from = cpu_to_be64(from); __blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size, - bio_op(bio), bio->bi_opf, BLK_TA_REMAP, bio->bi_status, + bio_op(bio), bio->bi_opf, BLK_TA_REMAP, + blk_status_to_errno(bio->bi_status), sizeof(r), &r, blk_trace_bio_get_cgid(q, bio)); rcu_read_unlock(); } @@ -1259,21 +1293,10 @@ static inline __u16 t_error(const struct trace_entry *ent) static __u64 get_pdu_int(const struct trace_entry *ent, bool has_cg) { - const __u64 *val = pdu_start(ent, has_cg); + const __be64 *val = pdu_start(ent, has_cg); return be64_to_cpu(*val); } -static void get_pdu_remap(const struct trace_entry *ent, - struct blk_io_trace_remap *r, bool has_cg) -{ - const struct blk_io_trace_remap *__r = pdu_start(ent, has_cg); - __u64 sector_from = __r->sector_from; - - r->device_from = be32_to_cpu(__r->device_from); - r->device_to = be32_to_cpu(__r->device_to); - r->sector_from = be64_to_cpu(sector_from); -} - typedef void (blk_log_action_t) (struct trace_iterator *iter, const char *act, bool has_cg); @@ -1399,13 +1422,13 @@ static void blk_log_with_error(struct trace_seq *s, static void blk_log_remap(struct trace_seq *s, const struct trace_entry *ent, bool has_cg) { - struct blk_io_trace_remap r = { .device_from = 0, }; + const struct blk_io_trace_remap *__r = pdu_start(ent, has_cg); - get_pdu_remap(ent, &r, has_cg); trace_seq_printf(s, "%llu + %u <- (%d,%d) %llu\n", t_sector(ent), t_sec(ent), - MAJOR(r.device_from), MINOR(r.device_from), - (unsigned long long)r.sector_from); + MAJOR(be32_to_cpu(__r->device_from)), + MINOR(be32_to_cpu(__r->device_from)), + be64_to_cpu(__r->sector_from)); } static void blk_log_plug(struct trace_seq *s, const struct trace_entry *ent, bool has_cg) diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 15160d707da4..705852c1724a 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -5699,8 +5699,11 @@ static int referenced_filters(struct dyn_ftrace *rec) int cnt = 0; for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) { - if (ops_references_rec(ops, rec)) - cnt++; + if (ops_references_rec(ops, rec)) { + cnt++; + if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) + rec->flags |= FTRACE_FL_REGS; + } } return cnt; @@ -5877,8 +5880,8 @@ void ftrace_module_enable(struct module *mod) if (ftrace_start_up) cnt += referenced_filters(rec); - /* This clears FTRACE_FL_DISABLED */ - rec->flags = cnt; + rec->flags &= ~FTRACE_FL_DISABLED; + rec->flags += cnt; if (ftrace_start_up && cnt) { int failed = __ftrace_replace_code(rec, 1); @@ -6459,12 +6462,12 @@ void ftrace_pid_follow_fork(struct trace_array *tr, bool enable) if (enable) { 
register_trace_sched_process_fork(ftrace_pid_follow_sched_process_fork, tr); - register_trace_sched_process_exit(ftrace_pid_follow_sched_process_exit, + register_trace_sched_process_free(ftrace_pid_follow_sched_process_exit, tr); } else { unregister_trace_sched_process_fork(ftrace_pid_follow_sched_process_fork, tr); - unregister_trace_sched_process_exit(ftrace_pid_follow_sched_process_exit, + unregister_trace_sched_process_free(ftrace_pid_follow_sched_process_exit, tr); } } diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 4bf050fcfe3b..9a2581fe7ed5 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -2358,7 +2358,7 @@ rb_update_event(struct ring_buffer_per_cpu *cpu_buffer, if (unlikely(info->add_timestamp)) { bool abs = ring_buffer_time_stamp_abs(cpu_buffer->buffer); - event = rb_add_time_stamp(event, info->delta, abs); + event = rb_add_time_stamp(event, abs ? info->delta : delta, abs); length -= RB_LEN_TIME_EXTEND; delta = 0; } diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 721947b9962d..f9c2bdbbd893 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -5686,7 +5686,7 @@ static int tracing_set_tracer(struct trace_array *tr, const char *buf) } /* If trace pipe files are being read, we can't change the tracer */ - if (tr->current_trace->ref) { + if (tr->trace_ref) { ret = -EBUSY; goto out; } @@ -5902,7 +5902,7 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp) nonseekable_open(inode, filp); - tr->current_trace->ref++; + tr->trace_ref++; out: mutex_unlock(&trace_types_lock); return ret; @@ -5921,7 +5921,7 @@ static int tracing_release_pipe(struct inode *inode, struct file *file) mutex_lock(&trace_types_lock); - tr->current_trace->ref--; + tr->trace_ref--; if (iter->trace->pipe_close) iter->trace->pipe_close(iter); @@ -7230,7 +7230,7 @@ static int tracing_buffers_open(struct inode *inode, struct file *filp) filp->private_data = info; - tr->current_trace->ref++; + tr->trace_ref++; mutex_unlock(&trace_types_lock); @@ -7331,7 +7331,7 @@ static int tracing_buffers_release(struct inode *inode, struct file *file) mutex_lock(&trace_types_lock); - iter->tr->current_trace->ref--; + iter->tr->trace_ref--; __trace_array_put(iter->tr); @@ -8470,7 +8470,7 @@ static int __remove_instance(struct trace_array *tr) { int i; - if (tr->ref || (tr->current_trace && tr->current_trace->ref)) + if (tr->ref || (tr->current_trace && tr->trace_ref)) return -EBUSY; list_del(&tr->list); diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index a3c29d5fcc61..4055158c1dd2 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -309,6 +309,7 @@ struct trace_array { struct trace_event_file *trace_marker_file; cpumask_var_t tracing_cpumask; /* only trace on set CPUs */ int ref; + int trace_ref; #ifdef CONFIG_FUNCTION_TRACER struct ftrace_ops *ops; struct trace_pid_list __rcu *function_pids; @@ -498,7 +499,6 @@ struct tracer { struct tracer *next; struct tracer_flags *flags; int enabled; - int ref; bool print_max; bool allow_instances; #ifdef CONFIG_TRACER_MAX_TRACE diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index 995061bb2dec..ed9eb97b64b4 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c @@ -527,12 +527,12 @@ void trace_event_follow_fork(struct trace_array *tr, bool enable) if (enable) { register_trace_prio_sched_process_fork(event_filter_pid_sched_process_fork, tr, INT_MIN); - register_trace_prio_sched_process_exit(event_filter_pid_sched_process_exit, + 
register_trace_prio_sched_process_free(event_filter_pid_sched_process_exit, tr, INT_MAX); } else { unregister_trace_sched_process_fork(event_filter_pid_sched_process_fork, tr); - unregister_trace_sched_process_exit(event_filter_pid_sched_process_exit, + unregister_trace_sched_process_free(event_filter_pid_sched_process_exit, tr); } } diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c index de840de87a18..e913d41a4194 100644 --- a/kernel/trace/trace_events_trigger.c +++ b/kernel/trace/trace_events_trigger.c @@ -216,11 +216,17 @@ static int event_trigger_regex_open(struct inode *inode, struct file *file) static int trigger_process_regex(struct trace_event_file *file, char *buff) { - char *command, *next = buff; + char *command, *next; struct event_command *p; int ret = -EINVAL; + next = buff = skip_spaces(buff); command = strsep(&next, ": \t"); + if (next) { + next = skip_spaces(next); + if (!*next) + next = NULL; + } command = (command[0] != '!') ? command : command + 1; mutex_lock(&trigger_cmd_mutex); @@ -630,8 +636,14 @@ event_trigger_callback(struct event_command *cmd_ops, int ret; /* separate the trigger from the filter (t:n [if filter]) */ - if (param && isdigit(param[0])) + if (param && isdigit(param[0])) { trigger = strsep(&param, " \t"); + if (param) { + param = skip_spaces(param); + if (!*param) + param = NULL; + } + } trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger); @@ -1368,6 +1380,11 @@ int event_enable_trigger_func(struct event_command *cmd_ops, trigger = strsep(&param, " \t"); if (!trigger) return -EINVAL; + if (param) { + param = skip_spaces(param); + if (!*param) + param = NULL; + } system = strsep(&trigger, ":"); if (!trigger) diff --git a/kernel/trace/trace_hwlat.c b/kernel/trace/trace_hwlat.c index 862f4b0139fc..35512ed26d9f 100644 --- a/kernel/trace/trace_hwlat.c +++ b/kernel/trace/trace_hwlat.c @@ -270,6 +270,7 @@ static bool disable_migrate; static void move_to_next_cpu(void) { struct cpumask *current_mask = &save_cpumask; + struct trace_array *tr = hwlat_trace; int next_cpu; if (disable_migrate) @@ -283,7 +284,7 @@ static void move_to_next_cpu(void) goto disable; get_online_cpus(); - cpumask_and(current_mask, cpu_online_mask, tracing_buffer_mask); + cpumask_and(current_mask, cpu_online_mask, tr->tracing_cpumask); next_cpu = cpumask_next(smp_processor_id(), current_mask); put_online_cpus(); @@ -360,7 +361,7 @@ static int start_kthread(struct trace_array *tr) /* Just pick the first CPU on first iteration */ current_mask = &save_cpumask; get_online_cpus(); - cpumask_and(current_mask, cpu_online_mask, tracing_buffer_mask); + cpumask_and(current_mask, cpu_online_mask, tr->tracing_cpumask); put_online_cpus(); next_cpu = cpumask_first(current_mask); diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index fba4b48451f6..26de9c654956 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -1464,7 +1464,7 @@ int bpf_get_kprobe_info(const struct perf_event *event, u32 *fd_type, if (perf_type_tracepoint) tk = find_trace_kprobe(pevent, group); else - tk = event->tp_event->data; + tk = trace_kprobe_primary_from_call(event->tp_event); if (!tk) return -EINVAL; diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c index ab8b6436d53f..f98d6d94cbbf 100644 --- a/kernel/trace/trace_probe.c +++ b/kernel/trace/trace_probe.c @@ -639,8 +639,8 @@ static int traceprobe_parse_probe_arg_body(char *arg, ssize_t *size, ret = -EINVAL; goto fail; } - if ((code->op == FETCH_OP_IMM || code->op == FETCH_OP_COMM) ||
- parg->count) { + if ((code->op == FETCH_OP_IMM || code->op == FETCH_OP_COMM || + code->op == FETCH_OP_DATA) || parg->count) { /* * IMM, DATA and COMM is pointing actual address, those * must be kept, and if parg->count != 0, this is an diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c index 2619bc5ed520..5294843de6ef 100644 --- a/kernel/trace/trace_uprobe.c +++ b/kernel/trace/trace_uprobe.c @@ -1405,7 +1405,7 @@ int bpf_get_uprobe_info(const struct perf_event *event, u32 *fd_type, if (perf_type_tracepoint) tu = find_probe_event(pevent, group); else - tu = event->tp_event->data; + tu = trace_uprobe_primary_from_call(event->tp_event); if (!tu) return -EINVAL; diff --git a/lib/crc-t10dif.c b/lib/crc-t10dif.c index 8cc01a603416..c9acf1c12cfc 100644 --- a/lib/crc-t10dif.c +++ b/lib/crc-t10dif.c @@ -19,39 +19,46 @@ static struct crypto_shash __rcu *crct10dif_tfm; static struct static_key crct10dif_fallback __read_mostly; static DEFINE_MUTEX(crc_t10dif_mutex); +static struct work_struct crct10dif_rehash_work; -static int crc_t10dif_rehash(struct notifier_block *self, unsigned long val, void *data) +static int crc_t10dif_notify(struct notifier_block *self, unsigned long val, void *data) { struct crypto_alg *alg = data; - struct crypto_shash *new, *old; if (val != CRYPTO_MSG_ALG_LOADED || static_key_false(&crct10dif_fallback) || strncmp(alg->cra_name, CRC_T10DIF_STRING, strlen(CRC_T10DIF_STRING))) return 0; + schedule_work(&crct10dif_rehash_work); + return 0; +} + +static void crc_t10dif_rehash(struct work_struct *work) +{ + struct crypto_shash *new, *old; + mutex_lock(&crc_t10dif_mutex); old = rcu_dereference_protected(crct10dif_tfm, lockdep_is_held(&crc_t10dif_mutex)); if (!old) { mutex_unlock(&crc_t10dif_mutex); - return 0; + return; } new = crypto_alloc_shash("crct10dif", 0, 0); if (IS_ERR(new)) { mutex_unlock(&crc_t10dif_mutex); - return 0; + return; } rcu_assign_pointer(crct10dif_tfm, new); mutex_unlock(&crc_t10dif_mutex); synchronize_rcu(); crypto_free_shash(old); - return 0; } static struct notifier_block crc_t10dif_nb = { - .notifier_call = crc_t10dif_rehash, + .notifier_call = crc_t10dif_notify, }; __u16 crc_t10dif_update(__u16 crc, const unsigned char *buffer, size_t len) @@ -86,19 +93,26 @@ EXPORT_SYMBOL(crc_t10dif); static int __init crc_t10dif_mod_init(void) { + struct crypto_shash *tfm; + + INIT_WORK(&crct10dif_rehash_work, crc_t10dif_rehash); crypto_register_notifier(&crc_t10dif_nb); - crct10dif_tfm = crypto_alloc_shash("crct10dif", 0, 0); - if (IS_ERR(crct10dif_tfm)) { + mutex_lock(&crc_t10dif_mutex); + tfm = crypto_alloc_shash("crct10dif", 0, 0); + if (IS_ERR(tfm)) { static_key_slow_inc(&crct10dif_fallback); - crct10dif_tfm = NULL; + tfm = NULL; } + RCU_INIT_POINTER(crct10dif_tfm, tfm); + mutex_unlock(&crc_t10dif_mutex); return 0; } static void __exit crc_t10dif_mod_fini(void) { crypto_unregister_notifier(&crc_t10dif_nb); - crypto_free_shash(crct10dif_tfm); + cancel_work_sync(&crct10dif_rehash_work); + crypto_free_shash(rcu_dereference_protected(crct10dif_tfm, 1)); } module_init(crc_t10dif_mod_init); @@ -106,11 +120,27 @@ module_exit(crc_t10dif_mod_fini); static int crc_t10dif_transform_show(char *buffer, const struct kernel_param *kp) { + struct crypto_shash *tfm; + const char *name; + int len; + if (static_key_false(&crct10dif_fallback)) return sprintf(buffer, "fallback\n"); - return sprintf(buffer, "%s\n", - crypto_tfm_alg_driver_name(crypto_shash_tfm(crct10dif_tfm))); + rcu_read_lock(); + tfm = rcu_dereference(crct10dif_tfm); + if (!tfm) { + len = 
sprintf(buffer, "init\n"); + goto unlock; + } + + name = crypto_tfm_alg_driver_name(crypto_shash_tfm(tfm)); + len = sprintf(buffer, "%s\n", name); + +unlock: + rcu_read_unlock(); + + return len; } module_param_call(transform, NULL, crc_t10dif_transform_show, NULL, 0644); diff --git a/lib/devres.c b/lib/devres.c index 17624d35e82d..77c80ca9e485 100644 --- a/lib/devres.c +++ b/lib/devres.c @@ -155,6 +155,7 @@ void __iomem *devm_ioremap_resource(struct device *dev, { resource_size_t size; void __iomem *dest_ptr; + char *pretty_name; BUG_ON(!dev); @@ -165,7 +166,15 @@ void __iomem *devm_ioremap_resource(struct device *dev, size = resource_size(res); - if (!devm_request_mem_region(dev, res->start, size, dev_name(dev))) { + if (res->name) + pretty_name = devm_kasprintf(dev, GFP_KERNEL, "%s %s", + dev_name(dev), res->name); + else + pretty_name = devm_kstrdup(dev, dev_name(dev), GFP_KERNEL); + if (!pretty_name) + return IOMEM_ERR_PTR(-ENOMEM); + + if (!devm_request_mem_region(dev, res->start, size, pretty_name)) { dev_err(dev, "can't request region for resource %pR\n", res); return IOMEM_ERR_PTR(-EBUSY); } diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c index c60409138e13..ccf05719b1ad 100644 --- a/lib/dynamic_debug.c +++ b/lib/dynamic_debug.c @@ -87,22 +87,22 @@ static struct { unsigned flag:8; char opt_char; } opt_array[] = { { _DPRINTK_FLAGS_NONE, '_' }, }; +struct flagsbuf { char buf[ARRAY_SIZE(opt_array)+1]; }; + /* format a string into buf[] which describes the _ddebug's flags */ -static char *ddebug_describe_flags(struct _ddebug *dp, char *buf, - size_t maxlen) +static char *ddebug_describe_flags(unsigned int flags, struct flagsbuf *fb) { - char *p = buf; + char *p = fb->buf; int i; - BUG_ON(maxlen < 6); for (i = 0; i < ARRAY_SIZE(opt_array); ++i) - if (dp->flags & opt_array[i].flag) + if (flags & opt_array[i].flag) *p++ = opt_array[i].opt_char; - if (p == buf) + if (p == fb->buf) *p++ = '_'; *p = '\0'; - return buf; + return fb->buf; } #define vpr_info(fmt, ...) \ @@ -144,7 +144,7 @@ static int ddebug_change(const struct ddebug_query *query, struct ddebug_table *dt; unsigned int newflags; unsigned int nfound = 0; - char flagbuf[10]; + struct flagsbuf fbuf; /* search for matching ddebugs */ mutex_lock(&ddebug_lock); @@ -201,8 +201,7 @@ static int ddebug_change(const struct ddebug_query *query, vpr_info("changed %s:%d [%s]%s =%s\n", trim_prefix(dp->filename), dp->lineno, dt->mod_name, dp->function, - ddebug_describe_flags(dp, flagbuf, - sizeof(flagbuf))); + ddebug_describe_flags(dp->flags, &fbuf)); } } mutex_unlock(&ddebug_lock); @@ -816,7 +815,7 @@ static int ddebug_proc_show(struct seq_file *m, void *p) { struct ddebug_iter *iter = m->private; struct _ddebug *dp = p; - char flagsbuf[10]; + struct flagsbuf flags; vpr_info("called m=%p p=%p\n", m, p); @@ -829,7 +828,7 @@ static int ddebug_proc_show(struct seq_file *m, void *p) seq_printf(m, "%s:%u [%s]%s =%s \"", trim_prefix(dp->filename), dp->lineno, iter->table->mod_name, dp->function, - ddebug_describe_flags(dp, flagsbuf, sizeof(flagsbuf))); + ddebug_describe_flags(dp->flags, &flags)); seq_escape(m, dp->format, "\t\r\n\""); seq_puts(m, "\"\n"); diff --git a/lib/kobject.c b/lib/kobject.c index 83198cb37d8d..0c6d17503a11 100644 --- a/lib/kobject.c +++ b/lib/kobject.c @@ -599,14 +599,7 @@ out: } EXPORT_SYMBOL_GPL(kobject_move); -/** - * kobject_del() - Unlink kobject from hierarchy. - * @kobj: object. - * - * This is the function that should be called to delete an object - * successfully added via kobject_add(). 
- */ -void kobject_del(struct kobject *kobj) +static void __kobject_del(struct kobject *kobj) { struct kernfs_node *sd; const struct kobj_type *ktype; @@ -625,9 +618,27 @@ void kobject_del(struct kobject *kobj) kobj->state_in_sysfs = 0; kobj_kset_leave(kobj); - kobject_put(kobj->parent); kobj->parent = NULL; } + +/** + * kobject_del() - Unlink kobject from hierarchy. + * @kobj: object. + * + * This is the function that should be called to delete an object + * successfully added via kobject_add(). + */ +void kobject_del(struct kobject *kobj) +{ + struct kobject *parent; + + if (!kobj) + return; + + parent = kobj->parent; + __kobject_del(kobj); + kobject_put(parent); +} EXPORT_SYMBOL(kobject_del); /** @@ -663,6 +674,7 @@ EXPORT_SYMBOL(kobject_get_unless_zero); */ static void kobject_cleanup(struct kobject *kobj) { + struct kobject *parent = kobj->parent; struct kobj_type *t = get_ktype(kobj); const char *name = kobj->name; @@ -684,7 +696,10 @@ static void kobject_cleanup(struct kobject *kobj) if (kobj->state_in_sysfs) { pr_debug("kobject: '%s' (%p): auto cleanup kobject_del\n", kobject_name(kobj), kobj); - kobject_del(kobj); + __kobject_del(kobj); + } else { + /* avoid dropping the parent reference unnecessarily */ + parent = NULL; } if (t && t->release) { @@ -698,6 +713,8 @@ static void kobject_cleanup(struct kobject *kobj) pr_debug("kobject: '%s': free name\n", name); kfree_const(name); } + + kobject_put(parent); } #ifdef CONFIG_DEBUG_KOBJECT_RELEASE diff --git a/lib/mpi/longlong.h b/lib/mpi/longlong.h index 891e1c3549c4..afbd99987cf8 100644 --- a/lib/mpi/longlong.h +++ b/lib/mpi/longlong.h @@ -653,7 +653,7 @@ do { \ ************** MIPS/64 ************** ***************************************/ #if (defined(__mips) && __mips >= 3) && W_TYPE_SIZE == 64 -#if defined(__mips_isa_rev) && __mips_isa_rev >= 6 +#if defined(__mips_isa_rev) && __mips_isa_rev >= 6 && defined(CONFIG_CC_IS_GCC) /* * GCC ends up emitting a __multi3 intrinsic call for MIPS64r6 with the plain C * code below, so we special case MIPS64r6 until the compiler can do better. diff --git a/lib/random32.c b/lib/random32.c index 763b920a6206..3d749abb9e80 100644 --- a/lib/random32.c +++ b/lib/random32.c @@ -48,7 +48,7 @@ static inline void prandom_state_selftest(void) } #endif -static DEFINE_PER_CPU(struct rnd_state, net_rand_state) __latent_entropy; +DEFINE_PER_CPU(struct rnd_state, net_rand_state); /** * prandom_u32_state - seeded pseudo-random number generator. diff --git a/lib/test_kasan.c b/lib/test_kasan.c index bd3d9ef7d39e..83344c9c38f4 100644 --- a/lib/test_kasan.c +++ b/lib/test_kasan.c @@ -22,6 +22,14 @@ #include +/* + * We assign some test results to these globals to make sure the tests + * are not eliminated as dead code. + */ + +int kasan_int_result; +void *kasan_ptr_result; + /* * Note: test functions are marked noinline so that their names appear in * reports. @@ -603,7 +611,7 @@ static noinline void __init kasan_memchr(void) if (!ptr) return; - memchr(ptr, '1', size + 1); + kasan_ptr_result = memchr(ptr, '1', size + 1); kfree(ptr); } @@ -619,7 +627,7 @@ static noinline void __init kasan_memcmp(void) return; memset(arr, 0, sizeof(arr)); - memcmp(ptr, arr, size+1); + kasan_int_result = memcmp(ptr, arr, size + 1); kfree(ptr); } @@ -642,22 +650,22 @@ static noinline void __init kasan_strings(void) * will likely point to zeroed byte. 
*/ ptr += 16; - strchr(ptr, '1'); + kasan_ptr_result = strchr(ptr, '1'); pr_info("use-after-free in strrchr\n"); - strrchr(ptr, '1'); + kasan_ptr_result = strrchr(ptr, '1'); pr_info("use-after-free in strcmp\n"); - strcmp(ptr, "2"); + kasan_int_result = strcmp(ptr, "2"); pr_info("use-after-free in strncmp\n"); - strncmp(ptr, "2", 1); + kasan_int_result = strncmp(ptr, "2", 1); pr_info("use-after-free in strlen\n"); - strlen(ptr); + kasan_int_result = strlen(ptr); pr_info("use-after-free in strnlen\n"); - strnlen(ptr, 1); + kasan_int_result = strnlen(ptr, 1); } static noinline void __init kasan_bitops(void) @@ -724,11 +732,12 @@ static noinline void __init kasan_bitops(void) __test_and_change_bit(BITS_PER_LONG + BITS_PER_BYTE, bits); pr_info("out-of-bounds in test_bit\n"); - (void)test_bit(BITS_PER_LONG + BITS_PER_BYTE, bits); + kasan_int_result = test_bit(BITS_PER_LONG + BITS_PER_BYTE, bits); #if defined(clear_bit_unlock_is_negative_byte) pr_info("out-of-bounds in clear_bit_unlock_is_negative_byte\n"); - clear_bit_unlock_is_negative_byte(BITS_PER_LONG + BITS_PER_BYTE, bits); + kasan_int_result = clear_bit_unlock_is_negative_byte(BITS_PER_LONG + + BITS_PER_BYTE, bits); #endif kfree(bits); } diff --git a/lib/test_kmod.c b/lib/test_kmod.c index 9cf77628fc91..87a0cc750ea2 100644 --- a/lib/test_kmod.c +++ b/lib/test_kmod.c @@ -745,7 +745,7 @@ static int trigger_config_run_type(struct kmod_test_device *test_dev, break; case TEST_KMOD_FS_TYPE: kfree_const(config->test_fs); - config->test_driver = NULL; + config->test_fs = NULL; copied = config_copy_test_fs(config, test_str, strlen(test_str)); break; diff --git a/lib/test_objagg.c b/lib/test_objagg.c index 72c1abfa154d..da137939a410 100644 --- a/lib/test_objagg.c +++ b/lib/test_objagg.c @@ -979,10 +979,10 @@ err_check_expect_stats2: err_world2_obj_get: for (i--; i >= 0; i--) world_obj_put(&world2, objagg, hints_case->key_ids[i]); - objagg_hints_put(hints); - objagg_destroy(objagg2); i = hints_case->key_ids_count; + objagg_destroy(objagg2); err_check_expect_hints_stats: + objagg_hints_put(hints); err_hints_get: err_check_expect_stats: err_world_obj_get: diff --git a/lib/zlib_inflate/inffast.c b/lib/zlib_inflate/inffast.c index 2c13ecc5bb2c..ed1f3df27260 100644 --- a/lib/zlib_inflate/inffast.c +++ b/lib/zlib_inflate/inffast.c @@ -10,17 +10,6 @@ #ifndef ASMINF -/* Allow machine dependent optimization for post-increment or pre-increment. 
- Based on testing to date, - Pre-increment preferred for: - - PowerPC G3 (Adler) - - MIPS R5000 (Randers-Pehrson) - Post-increment preferred for: - - none - No measurable difference: - - Pentium III (Anderson) - - M68060 (Nikl) - */ union uu { unsigned short us; unsigned char b[2]; @@ -38,16 +27,6 @@ get_unaligned16(const unsigned short *p) return mm.us; } -#ifdef POSTINC -# define OFF 0 -# define PUP(a) *(a)++ -# define UP_UNALIGNED(a) get_unaligned16((a)++) -#else -# define OFF 1 -# define PUP(a) *++(a) -# define UP_UNALIGNED(a) get_unaligned16(++(a)) -#endif - /* Decode literal, length, and distance codes and write out the resulting literal and match bytes until either not enough input or output is @@ -115,9 +94,9 @@ void inflate_fast(z_streamp strm, unsigned start) /* copy state to local variables */ state = (struct inflate_state *)strm->state; - in = strm->next_in - OFF; + in = strm->next_in; last = in + (strm->avail_in - 5); - out = strm->next_out - OFF; + out = strm->next_out; beg = out - (start - strm->avail_out); end = out + (strm->avail_out - 257); #ifdef INFLATE_STRICT @@ -138,9 +117,9 @@ void inflate_fast(z_streamp strm, unsigned start) input data or output space */ do { if (bits < 15) { - hold += (unsigned long)(PUP(in)) << bits; + hold += (unsigned long)(*in++) << bits; bits += 8; - hold += (unsigned long)(PUP(in)) << bits; + hold += (unsigned long)(*in++) << bits; bits += 8; } this = lcode[hold & lmask]; @@ -150,14 +129,14 @@ void inflate_fast(z_streamp strm, unsigned start) bits -= op; op = (unsigned)(this.op); if (op == 0) { /* literal */ - PUP(out) = (unsigned char)(this.val); + *out++ = (unsigned char)(this.val); } else if (op & 16) { /* length base */ len = (unsigned)(this.val); op &= 15; /* number of extra bits */ if (op) { if (bits < op) { - hold += (unsigned long)(PUP(in)) << bits; + hold += (unsigned long)(*in++) << bits; bits += 8; } len += (unsigned)hold & ((1U << op) - 1); @@ -165,9 +144,9 @@ void inflate_fast(z_streamp strm, unsigned start) bits -= op; } if (bits < 15) { - hold += (unsigned long)(PUP(in)) << bits; + hold += (unsigned long)(*in++) << bits; bits += 8; - hold += (unsigned long)(PUP(in)) << bits; + hold += (unsigned long)(*in++) << bits; bits += 8; } this = dcode[hold & dmask]; @@ -180,10 +159,10 @@ void inflate_fast(z_streamp strm, unsigned start) dist = (unsigned)(this.val); op &= 15; /* number of extra bits */ if (bits < op) { - hold += (unsigned long)(PUP(in)) << bits; + hold += (unsigned long)(*in++) << bits; bits += 8; if (bits < op) { - hold += (unsigned long)(PUP(in)) << bits; + hold += (unsigned long)(*in++) << bits; bits += 8; } } @@ -205,13 +184,13 @@ void inflate_fast(z_streamp strm, unsigned start) state->mode = BAD; break; } - from = window - OFF; + from = window; if (write == 0) { /* very common case */ from += wsize - op; if (op < len) { /* some from window */ len -= op; do { - PUP(out) = PUP(from); + *out++ = *from++; } while (--op); from = out - dist; /* rest from output */ } @@ -222,14 +201,14 @@ void inflate_fast(z_streamp strm, unsigned start) if (op < len) { /* some from end of window */ len -= op; do { - PUP(out) = PUP(from); + *out++ = *from++; } while (--op); - from = window - OFF; + from = window; if (write < len) { /* some from start of window */ op = write; len -= op; do { - PUP(out) = PUP(from); + *out++ = *from++; } while (--op); from = out - dist; /* rest from output */ } @@ -240,21 +219,21 @@ void inflate_fast(z_streamp strm, unsigned start) if (op < len) { /* some from window */ len -= op; do { - PUP(out) = 
PUP(from); + *out++ = *from++; } while (--op); from = out - dist; /* rest from output */ } } while (len > 2) { - PUP(out) = PUP(from); - PUP(out) = PUP(from); - PUP(out) = PUP(from); + *out++ = *from++; + *out++ = *from++; + *out++ = *from++; len -= 3; } if (len) { - PUP(out) = PUP(from); + *out++ = *from++; if (len > 1) - PUP(out) = PUP(from); + *out++ = *from++; } } else { @@ -264,29 +243,29 @@ void inflate_fast(z_streamp strm, unsigned start) from = out - dist; /* copy direct from output */ /* minimum length is three */ /* Align out addr */ - if (!((long)(out - 1 + OFF) & 1)) { - PUP(out) = PUP(from); + if (!((long)(out - 1) & 1)) { + *out++ = *from++; len--; } - sout = (unsigned short *)(out - OFF); + sout = (unsigned short *)(out); if (dist > 2) { unsigned short *sfrom; - sfrom = (unsigned short *)(from - OFF); + sfrom = (unsigned short *)(from); loops = len >> 1; do #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS - PUP(sout) = PUP(sfrom); + *sout++ = *sfrom++; #else - PUP(sout) = UP_UNALIGNED(sfrom); + *sout++ = get_unaligned16(sfrom++); #endif while (--loops); - out = (unsigned char *)sout + OFF; - from = (unsigned char *)sfrom + OFF; + out = (unsigned char *)sout; + from = (unsigned char *)sfrom; } else { /* dist == 1 or dist == 2 */ unsigned short pat16; - pat16 = *(sout-1+OFF); + pat16 = *(sout-1); if (dist == 1) { union uu mm; /* copy one char pattern to both bytes */ @@ -296,12 +275,12 @@ void inflate_fast(z_streamp strm, unsigned start) } loops = len >> 1; do - PUP(sout) = pat16; + *sout++ = pat16; while (--loops); - out = (unsigned char *)sout + OFF; + out = (unsigned char *)sout; } if (len & 1) - PUP(out) = PUP(from); + *out++ = *from++; } } else if ((op & 64) == 0) { /* 2nd level distance code */ @@ -336,8 +315,8 @@ void inflate_fast(z_streamp strm, unsigned start) hold &= (1U << bits) - 1; /* update state and return */ - strm->next_in = in + OFF; - strm->next_out = out + OFF; + strm->next_in = in; + strm->next_out = out; strm->avail_in = (unsigned)(in < last ? 5 + (last - in) : 5 - (in - last)); strm->avail_out = (unsigned)(out < end ? 
257 + (end - out) : 257 - (out - end)); diff --git a/mm/cma.c b/mm/cma.c index 7fe0b8356775..7de520c0a1db 100644 --- a/mm/cma.c +++ b/mm/cma.c @@ -93,19 +93,15 @@ static void cma_clear_bitmap(struct cma *cma, unsigned long pfn, mutex_unlock(&cma->lock); } -static int __init cma_activate_area(struct cma *cma) +static void __init cma_activate_area(struct cma *cma) { - int bitmap_size = BITS_TO_LONGS(cma_bitmap_maxno(cma)) * sizeof(long); unsigned long base_pfn = cma->base_pfn, pfn = base_pfn; unsigned i = cma->count >> pageblock_order; struct zone *zone; - cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL); - - if (!cma->bitmap) { - cma->count = 0; - return -ENOMEM; - } + cma->bitmap = bitmap_zalloc(cma_bitmap_maxno(cma), GFP_KERNEL); + if (!cma->bitmap) + goto out_error; WARN_ON_ONCE(!pfn_valid(pfn)); zone = page_zone(pfn_to_page(pfn)); @@ -135,25 +131,22 @@ static int __init cma_activate_area(struct cma *cma) spin_lock_init(&cma->mem_head_lock); #endif - return 0; + return; not_in_zone: - pr_err("CMA area %s could not be activated\n", cma->name); - kfree(cma->bitmap); + bitmap_free(cma->bitmap); +out_error: cma->count = 0; - return -EINVAL; + pr_err("CMA area %s could not be activated\n", cma->name); + return; } static int __init cma_init_reserved_areas(void) { int i; - for (i = 0; i < cma_area_count; i++) { - int ret = cma_activate_area(&cma_areas[i]); - - if (ret) - return ret; - } + for (i = 0; i < cma_area_count; i++) + cma_activate_area(&cma_areas[i]); return 0; } diff --git a/mm/compaction.c b/mm/compaction.c index 672d3c78c6ab..92470625f0b1 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -2310,16 +2310,26 @@ static enum compact_result compact_zone_order(struct zone *zone, int order, .page = NULL, }; - if (capture) - current->capture_control = &capc; + /* + * Make sure the structs are really initialized before we expose the + * capture control, in case we are interrupted and the interrupt handler + * frees a page. + */ + barrier(); + WRITE_ONCE(current->capture_control, &capc); ret = compact_zone(&cc, &capc); VM_BUG_ON(!list_empty(&cc.freepages)); VM_BUG_ON(!list_empty(&cc.migratepages)); - *capture = capc.page; - current->capture_control = NULL; + /* + * Make sure we hide capture control first before we read the captured + * page pointer, otherwise an interrupt could free and capture a page + * and we would leak it. + */ + WRITE_ONCE(current->capture_control, NULL); + *capture = READ_ONCE(capc.page); return ret; } @@ -2333,6 +2343,7 @@ int sysctl_extfrag_threshold = 500; * @alloc_flags: The allocation flags of the current allocation * @ac: The context of current allocation * @prio: Determines how hard direct compaction should try to succeed + * @capture: Pointer to free page created by compaction will be stored here * * This is the main entry point for direct page compaction. 
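
(The cma.c hunk above replaces an open-coded BITS_TO_LONGS()/kzalloc() allocation with the bitmap helpers. A minimal kernel-style sketch of that API follows; it is illustrative only and not part of the patch.)

#include <linux/bitmap.h>
#include <linux/slab.h>

/* The open-coded form that bitmap_zalloc()/bitmap_free() replace: */
static unsigned long *example_bitmap_open_coded(unsigned int nbits)
{
	return kzalloc(BITS_TO_LONGS(nbits) * sizeof(long), GFP_KERNEL);
}

/* The helper form: same allocation, but the intent is explicit. */
static unsigned long *example_bitmap_helpers(unsigned int nbits)
{
	unsigned long *map = bitmap_zalloc(nbits, GFP_KERNEL);

	if (!map)
		return NULL;
	/* ... use set_bit()/test_bit()/bitmap_*() on map ... */
	return map;
}

static void example_bitmap_release(unsigned long *map)
{
	bitmap_free(map);	/* backed by kfree(), so NULL-safe */
}
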
*/ diff --git a/mm/filemap.c b/mm/filemap.c index 1f5731768222..18c1f5830074 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -2438,7 +2438,7 @@ static struct file *do_async_mmap_readahead(struct vm_fault *vmf, pgoff_t offset = vmf->pgoff; /* If we don't want any read-ahead, don't bother */ - if (vmf->vma->vm_flags & VM_RAND_READ) + if (vmf->vma->vm_flags & VM_RAND_READ || !ra->ra_pages) return fpin; if (ra->mmap_miss > 0) ra->mmap_miss--; diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 7ec5710afc99..da9040a6838f 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -2301,6 +2301,8 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, { spinlock_t *ptl; struct mmu_notifier_range range; + bool was_locked = false; + pmd_t _pmd; mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm, address & HPAGE_PMD_MASK, @@ -2313,11 +2315,32 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, * pmd against. Otherwise we can end up replacing wrong page. */ VM_BUG_ON(freeze && !page); - if (page && page != pmd_page(*pmd)) - goto out; + if (page) { + VM_WARN_ON_ONCE(!PageLocked(page)); + was_locked = true; + if (page != pmd_page(*pmd)) + goto out; + } +repeat: if (pmd_trans_huge(*pmd)) { - page = pmd_page(*pmd); + if (!page) { + page = pmd_page(*pmd); + if (unlikely(!trylock_page(page))) { + get_page(page); + _pmd = *pmd; + spin_unlock(ptl); + lock_page(page); + spin_lock(ptl); + if (unlikely(!pmd_same(*pmd, _pmd))) { + unlock_page(page); + put_page(page); + page = NULL; + goto repeat; + } + put_page(page); + } + } if (PageMlocked(page)) clear_page_mlock(page); } else if (!(pmd_devmap(*pmd) || is_pmd_migration_entry(*pmd))) @@ -2325,6 +2348,8 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, __split_huge_pmd_locked(vma, pmd, range.start, freeze); out: spin_unlock(ptl); + if (!was_locked && page) + unlock_page(page); /* * No need to double call mmu_notifier->invalidate_range() callback. * They are 3 cases to consider inside __split_huge_pmd_locked(): diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 2af1831596f2..62ec514dae65 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -3082,6 +3082,22 @@ static unsigned int cpuset_mems_nr(unsigned int *array) } #ifdef CONFIG_SYSCTL +static int proc_hugetlb_doulongvec_minmax(struct ctl_table *table, int write, + void *buffer, size_t *length, + loff_t *ppos, unsigned long *out) +{ + struct ctl_table dup_table; + + /* + * In order to avoid races with __do_proc_doulongvec_minmax(), we + * can duplicate the @table and alter the duplicate of it. 
+ */ + dup_table = *table; + dup_table.data = out; + + return proc_doulongvec_minmax(&dup_table, write, buffer, length, ppos); +} + static int hugetlb_sysctl_handler_common(bool obey_mempolicy, struct ctl_table *table, int write, void __user *buffer, size_t *length, loff_t *ppos) @@ -3093,9 +3109,8 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy, if (!hugepages_supported()) return -EOPNOTSUPP; - table->data = &tmp; - table->maxlen = sizeof(unsigned long); - ret = proc_doulongvec_minmax(table, write, buffer, length, ppos); + ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos, + &tmp); if (ret) goto out; @@ -3139,9 +3154,8 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write, if (write && hstate_is_gigantic(h)) return -EINVAL; - table->data = &tmp; - table->maxlen = sizeof(unsigned long); - ret = proc_doulongvec_minmax(table, write, buffer, length, ppos); + ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos, + &tmp); if (ret) goto out; @@ -4846,25 +4860,21 @@ static bool vma_shareable(struct vm_area_struct *vma, unsigned long addr) void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma, unsigned long *start, unsigned long *end) { - unsigned long check_addr = *start; + unsigned long a_start, a_end; if (!(vma->vm_flags & VM_MAYSHARE)) return; - for (check_addr = *start; check_addr < *end; check_addr += PUD_SIZE) { - unsigned long a_start = check_addr & PUD_MASK; - unsigned long a_end = a_start + PUD_SIZE; + /* Extend the range to be PUD aligned for a worst case scenario */ + a_start = ALIGN_DOWN(*start, PUD_SIZE); + a_end = ALIGN(*end, PUD_SIZE); - /* - * If sharing is possible, adjust start/end if necessary. - */ - if (range_in_vma(vma, a_start, a_end)) { - if (a_start < *start) - *start = a_start; - if (a_end > *end) - *end = a_end; - } - } + /* + * Intersect the range with the vma range, since pmd sharing won't be + * across vma after all + */ + *start = max(vma->vm_start, a_start); + *end = min(vma->vm_end, a_end); } /* diff --git a/mm/khugepaged.c b/mm/khugepaged.c index f765475be359..9ec618d5ea55 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -401,7 +401,7 @@ static void insert_to_mm_slots_hash(struct mm_struct *mm, static inline int khugepaged_test_exit(struct mm_struct *mm) { - return atomic_read(&mm->mm_users) == 0; + return atomic_read(&mm->mm_users) == 0 || !mmget_still_valid(mm); } static bool hugepage_vma_check(struct vm_area_struct *vma, @@ -438,7 +438,7 @@ int __khugepaged_enter(struct mm_struct *mm) return -ENOMEM; /* __khugepaged_exit() must not run from under us */ - VM_BUG_ON_MM(khugepaged_test_exit(mm), mm); + VM_BUG_ON_MM(atomic_read(&mm->mm_users) == 0, mm); if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) { free_mm_slot(mm_slot); return 0; @@ -876,6 +876,9 @@ static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address, return SCAN_ADDRESS_RANGE; if (!hugepage_vma_check(vma, vma->vm_flags)) return SCAN_VMA_CHECK; + /* Anon VMA expected */ + if (!vma->anon_vma || vma->vm_ops) + return SCAN_VMA_CHECK; return 0; } @@ -1016,9 +1019,6 @@ static void collapse_huge_page(struct mm_struct *mm, * handled by the anon_vma lock + PG_lock. 
*/ down_write(&mm->mmap_sem); - result = SCAN_ANY_PROCESS; - if (!mmget_still_valid(mm)) - goto out; result = hugepage_vma_revalidate(mm, address, &vma); if (result) goto out; @@ -1291,7 +1291,7 @@ void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr) { unsigned long haddr = addr & HPAGE_PMD_MASK; struct vm_area_struct *vma = find_vma(mm, haddr); - struct page *hpage = NULL; + struct page *hpage; pte_t *start_pte, *pte; pmd_t *pmd, _pmd; spinlock_t *ptl; @@ -1311,9 +1311,17 @@ void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr) if (!hugepage_vma_check(vma, vma->vm_flags | VM_HUGEPAGE)) return; + hpage = find_lock_page(vma->vm_file->f_mapping, + linear_page_index(vma, haddr)); + if (!hpage) + return; + + if (!PageHead(hpage)) + goto drop_hpage; + pmd = mm_find_pmd(mm, haddr); if (!pmd) - return; + goto drop_hpage; start_pte = pte_offset_map_lock(mm, pmd, haddr, &ptl); @@ -1332,30 +1340,11 @@ void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr) page = vm_normal_page(vma, addr, *pte); - if (!page || !PageCompound(page)) - goto abort; - - if (!hpage) { - hpage = compound_head(page); - /* - * The mapping of the THP should not change. - * - * Note that uprobe, debugger, or MAP_PRIVATE may - * change the page table, but the new page will - * not pass PageCompound() check. - */ - if (WARN_ON(hpage->mapping != vma->vm_file->f_mapping)) - goto abort; - } - /* - * Confirm the page maps to the correct subpage. - * - * Note that uprobe, debugger, or MAP_PRIVATE may change - * the page table, but the new page will not pass - * PageCompound() check. + * Note that uprobe, debugger, or MAP_PRIVATE may change the + * page table, but the new page will not be a subpage of hpage. */ - if (WARN_ON(hpage + i != page)) + if (hpage + i != page) goto abort; count++; } @@ -1374,21 +1363,26 @@ void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr) pte_unmap_unlock(start_pte, ptl); /* step 3: set proper refcount and mm_counters. */ - if (hpage) { + if (count) { page_ref_sub(hpage, count); add_mm_counter(vma->vm_mm, mm_counter_file(hpage), -count); } /* step 4: collapse pmd */ ptl = pmd_lock(vma->vm_mm, pmd); - _pmd = pmdp_collapse_flush(vma, addr, pmd); + _pmd = pmdp_collapse_flush(vma, haddr, pmd); spin_unlock(ptl); mm_dec_nr_ptes(mm); pte_free(mm, pmd_pgtable(_pmd)); + +drop_hpage: + unlock_page(hpage); + put_page(hpage); return; abort: pte_unmap_unlock(start_pte, ptl); + goto drop_hpage; } static int khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot) @@ -1417,6 +1411,7 @@ out: static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff) { struct vm_area_struct *vma; + struct mm_struct *mm; unsigned long addr; pmd_t *pmd, _pmd; @@ -1445,7 +1440,8 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff) continue; if (vma->vm_end < addr + HPAGE_PMD_SIZE) continue; - pmd = mm_find_pmd(vma->vm_mm, addr); + mm = vma->vm_mm; + pmd = mm_find_pmd(mm, addr); if (!pmd) continue; /* @@ -1455,17 +1451,19 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff) * mmap_sem while holding page lock. Fault path does it in * reverse order. Trylock is a way to avoid deadlock. 
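
(The comment above describes the classic ABBA situation: this path wants the page lock first and then an exclusive mmap_sem, while the fault path takes them in the opposite order. Using a trylock for the second lock turns the potential deadlock into a "retry later". A generic kernel-style sketch, illustrative only:)

#include <linux/mutex.h>

static DEFINE_MUTEX(example_lock_a);	/* taken first here, second elsewhere */
static DEFINE_MUTEX(example_lock_b);	/* taken second here, first elsewhere */

static bool example_do_work_or_defer(void)
{
	mutex_lock(&example_lock_a);
	if (!mutex_trylock(&example_lock_b)) {
		/* The opposite lock order is active; back off and retry later. */
		mutex_unlock(&example_lock_a);
		return false;
	}

	/* ... both locks held, safe to do the work ... */

	mutex_unlock(&example_lock_b);
	mutex_unlock(&example_lock_a);
	return true;
}
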
*/ - if (down_write_trylock(&vma->vm_mm->mmap_sem)) { - spinlock_t *ptl = pmd_lock(vma->vm_mm, pmd); - /* assume page table is clear */ - _pmd = pmdp_collapse_flush(vma, addr, pmd); - spin_unlock(ptl); - up_write(&vma->vm_mm->mmap_sem); - mm_dec_nr_ptes(vma->vm_mm); - pte_free(vma->vm_mm, pmd_pgtable(_pmd)); + if (down_write_trylock(&mm->mmap_sem)) { + if (!khugepaged_test_exit(mm)) { + spinlock_t *ptl = pmd_lock(mm, pmd); + /* assume page table is clear */ + _pmd = pmdp_collapse_flush(vma, addr, pmd); + spin_unlock(ptl); + mm_dec_nr_ptes(mm); + pte_free(mm, pmd_pgtable(_pmd)); + } + up_write(&mm->mmap_sem); } else { /* Try again later */ - khugepaged_add_pte_mapped_thp(vma->vm_mm, addr); + khugepaged_add_pte_mapped_thp(mm, addr); } } i_mmap_unlock_write(mapping); @@ -1594,7 +1592,7 @@ static void collapse_file(struct mm_struct *mm, xas_unlock_irq(&xas); page_cache_sync_readahead(mapping, &file->f_ra, file, index, - PAGE_SIZE); + end - index); /* drain pagevecs to help isolate_lru_page() */ lru_add_drain(); page = find_lock_page(mapping, index); diff --git a/mm/madvise.c b/mm/madvise.c index d8cfc3a0c153..26f7954865ed 100644 --- a/mm/madvise.c +++ b/mm/madvise.c @@ -288,9 +288,9 @@ static long madvise_willneed(struct vm_area_struct *vma, */ *prev = NULL; /* tell sys_madvise we drop mmap_sem */ get_file(file); - up_read(¤t->mm->mmap_sem); offset = (loff_t)(start - vma->vm_start) + ((loff_t)vma->vm_pgoff << PAGE_SHIFT); + up_read(¤t->mm->mmap_sem); vfs_fadvise(file, offset, end - start, POSIX_FADV_WILLNEED); fput(file); down_read(¤t->mm->mmap_sem); diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 0d6f3ea86738..402c8bc65e08 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -2895,8 +2895,10 @@ static void memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg, return; cw = kmalloc(sizeof(*cw), GFP_NOWAIT | __GFP_NOWARN); - if (!cw) + if (!cw) { + css_put(&memcg->css); return; + } cw->memcg = memcg; cw->cachep = cachep; @@ -5768,7 +5770,6 @@ static void __mem_cgroup_clear_mc(void) if (!mem_cgroup_is_root(mc.to)) page_counter_uncharge(&mc.to->memory, mc.moved_swap); - mem_cgroup_id_get_many(mc.to, mc.moved_swap); css_put_many(&mc.to->css, mc.moved_swap); mc.moved_swap = 0; @@ -5959,7 +5960,8 @@ put: /* get_mctgt_type() gets the page */ ent = target.ent; if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) { mc.precharge--; - /* we fixup refcnts and charges later. */ + mem_cgroup_id_get_many(mc.to, 1); + /* we fixup other refcnts and charges later. */ mc.moved_swap++; } break; diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index c054945a9a74..3eb0b311b4a1 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -1566,6 +1566,20 @@ static int __ref __offline_pages(unsigned long start_pfn, /* check again */ ret = walk_system_ram_range(start_pfn, end_pfn - start_pfn, NULL, check_pages_isolated_cb); + /* + * per-cpu pages are drained in start_isolate_page_range, but if + * there are still pages that are not free, make sure that we + * drain again, because when we isolated range we might + * have raced with another thread that was adding pages to pcp + * list. + * + * Forward progress should be still guaranteed because + * pages on the pcp list can only belong to MOVABLE_ZONE + * because has_unmovable_pages explicitly checks for + * PageBuddy on freed pages on other zones. + */ + if (ret) + drain_all_pages(zone); } while (ret); /* Ok, all of our target is isolated. 
@@ -1751,7 +1765,7 @@ static int __ref try_remove_memory(int nid, u64 start, u64 size) */ rc = walk_memory_blocks(start, size, NULL, check_memblock_offlined_cb); if (rc) - goto done; + return rc; /* remove memmap entry */ firmware_map_remove(start, start + size, "System RAM"); @@ -1771,9 +1785,8 @@ static int __ref try_remove_memory(int nid, u64 start, u64 size) try_offline_node(nid); -done: mem_hotplug_done(); - return rc; + return 0; } /** diff --git a/mm/mmap.c b/mm/mmap.c index 514cc19c5916..a3584a90c55c 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -2622,7 +2622,7 @@ static void unmap_region(struct mm_struct *mm, * Create a list of vma's touched by the unmap, removing them from the mm's * vma list as we go.. */ -static void +static bool detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma, struct vm_area_struct *prev, unsigned long end) { @@ -2647,6 +2647,17 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma, /* Kill the cache */ vmacache_invalidate(mm); + + /* + * Do not downgrade mmap_lock if we are next to VM_GROWSDOWN or + * VM_GROWSUP VMA. Such VMAs can change their size under + * down_read(mmap_lock) and collide with the VMA we are about to unmap. + */ + if (vma && (vma->vm_flags & VM_GROWSDOWN)) + return false; + if (prev && (prev->vm_flags & VM_GROWSUP)) + return false; + return true; } /* @@ -2827,7 +2838,8 @@ int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len, } /* Detach vmas from rbtree */ - detach_vmas_to_be_unmapped(mm, vma, prev, end); + if (!detach_vmas_to_be_unmapped(mm, vma, prev, end)) + downgrade = false; if (downgrade) downgrade_write(&mm->mmap_sem); @@ -3160,6 +3172,7 @@ void exit_mmap(struct mm_struct *mm) if (vma->vm_flags & VM_ACCOUNT) nr_accounted += vma_pages(vma); vma = remove_vma(vma); + cond_resched(); } vm_unacct_memory(nr_accounted); } diff --git a/mm/mmu_context.c b/mm/mmu_context.c index 3e612ae748e9..a1da47e02747 100644 --- a/mm/mmu_context.c +++ b/mm/mmu_context.c @@ -25,13 +25,16 @@ void use_mm(struct mm_struct *mm) struct task_struct *tsk = current; task_lock(tsk); + /* Hold off tlb flush IPIs while switching mm's */ + local_irq_disable(); active_mm = tsk->active_mm; if (active_mm != mm) { mmgrab(mm); tsk->active_mm = mm; } tsk->mm = mm; - switch_mm(active_mm, mm, tsk); + switch_mm_irqs_off(active_mm, mm, tsk); + local_irq_enable(); task_unlock(tsk); #ifdef finish_arch_post_lock_switch finish_arch_post_lock_switch(); @@ -56,9 +59,11 @@ void unuse_mm(struct mm_struct *mm) task_lock(tsk); sync_mm_rss(mm); + local_irq_disable(); tsk->mm = NULL; /* active_mm is still 'mm' */ enter_lazy_tlb(mm, tsk); + local_irq_enable(); task_unlock(tsk); } EXPORT_SYMBOL_GPL(unuse_mm); diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 98d5c940facd..67a9943aa595 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -1256,6 +1256,11 @@ static void free_pcppages_bulk(struct zone *zone, int count, struct page *page, *tmp; LIST_HEAD(head); + /* + * Ensure proper count is passed which otherwise would stuck in the + * below while (list_empty(list)) loop. 
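/*
 * Standalone illustrative sketch of the clamp added above in
 * free_pcppages_bulk(): if the caller requests more entries than the list
 * holds, an unconditional "while (count)" drain loop never terminates once
 * the list is empty.  Clamping the request to the actual element count
 * restores the loop's termination guarantee.  Hypothetical names, plain
 * userspace C.
 */
#include <stddef.h>
#include <stdio.h>

struct node { struct node *next; };

static size_t drain(struct node **head, size_t list_len, size_t count)
{
        size_t drained = 0;

        /* Mirrors "count = min(pcp->count, count)". */
        if (count > list_len)
                count = list_len;

        while (count) {
                struct node *n = *head;

                if (!n)                 /* cannot happen after the clamp */
                        break;
                *head = n->next;
                count--;
                drained++;
        }
        return drained;
}

int main(void)
{
        struct node c = { NULL }, b = { &c }, a = { &b };
        struct node *head = &a;

        /* Request 10 but only 3 are queued: returns 3 instead of spinning. */
        printf("%zu\n", drain(&head, 3, 10));
        return 0;
}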
+ */ + count = min(pcp->count, count); while (count) { struct list_head *list; @@ -1640,7 +1645,6 @@ static void __init deferred_free_pages(unsigned long pfn, } else if (!(pfn & nr_pgmask)) { deferred_free_range(pfn - nr_free, nr_free); nr_free = 1; - touch_nmi_watchdog(); } else { nr_free++; } @@ -1670,7 +1674,6 @@ static unsigned long __init deferred_init_pages(struct zone *zone, continue; } else if (!page || !(pfn & nr_pgmask)) { page = pfn_to_page(pfn); - touch_nmi_watchdog(); } else { page++; } @@ -1793,6 +1796,13 @@ static int __init deferred_init_memmap(void *data) BUG_ON(pgdat->first_deferred_pfn > pgdat_end_pfn(pgdat)); pgdat->first_deferred_pfn = ULONG_MAX; + /* + * Once we unlock here, the zone cannot be grown anymore, thus if an + * interrupt thread must allocate this early in boot, zone must be + * pre-grown prior to start of deferred page initialization. + */ + pgdat_resize_unlock(pgdat, &flags); + /* Only the highest zone is deferred so find it */ for (zid = 0; zid < MAX_NR_ZONES; zid++) { zone = pgdat->node_zones + zid; @@ -1810,11 +1820,11 @@ static int __init deferred_init_memmap(void *data) * that we can avoid introducing any issues with the buddy * allocator. */ - while (spfn < epfn) + while (spfn < epfn) { nr_pages += deferred_init_maxorder(&i, zone, &spfn, &epfn); + cond_resched(); + } zone_empty: - pgdat_resize_unlock(pgdat, &flags); - /* Sanity check that the next zone really is unpopulated */ WARN_ON(++zid < MAX_NR_ZONES && populated_zone(++zone)); @@ -1856,17 +1866,6 @@ deferred_grow_zone(struct zone *zone, unsigned int order) pgdat_resize_lock(pgdat, &flags); - /* - * If deferred pages have been initialized while we were waiting for - * the lock, return true, as the zone was grown. The caller will retry - * this zone. We won't return to this function since the caller also - * has this static branch. - */ - if (!static_branch_unlikely(&deferred_pages)) { - pgdat_resize_unlock(pgdat, &flags); - return true; - } - /* * If someone grew this zone while we were waiting for spinlock, return * true, as there might be enough pages already. @@ -1895,6 +1894,7 @@ deferred_grow_zone(struct zone *zone, unsigned int order) first_deferred_pfn = spfn; nr_pages += deferred_init_maxorder(&i, zone, &spfn, &epfn); + touch_nmi_watchdog(); /* We should only stop along section boundaries */ if ((first_deferred_pfn ^ spfn) < PAGES_PER_SECTION) @@ -7872,7 +7872,7 @@ int __meminit init_per_zone_wmark_min(void) return 0; } -core_initcall(init_per_zone_wmark_min) +postcore_initcall(init_per_zone_wmark_min) /* * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so diff --git a/mm/page_counter.c b/mm/page_counter.c index de31470655f6..147ff99187b8 100644 --- a/mm/page_counter.c +++ b/mm/page_counter.c @@ -77,7 +77,7 @@ void page_counter_charge(struct page_counter *counter, unsigned long nr_pages) long new; new = atomic_long_add_return(nr_pages, &c->usage); - propagate_protected_usage(counter, new); + propagate_protected_usage(c, new); /* * This is indeed racy, but we can live with some * inaccuracy in the watermark. @@ -121,7 +121,7 @@ bool page_counter_try_charge(struct page_counter *counter, new = atomic_long_add_return(nr_pages, &c->usage); if (new > c->max) { atomic_long_sub(nr_pages, &c->usage); - propagate_protected_usage(counter, new); + propagate_protected_usage(c, new); /* * This is racy, but we can live with some * inaccuracy in the failcnt. 
@@ -130,7 +130,7 @@ bool page_counter_try_charge(struct page_counter *counter, *fail = c; goto failed; } - propagate_protected_usage(counter, new); + propagate_protected_usage(c, new); /* * Just like with failcnt, we can live with some * inaccuracy in the watermark. diff --git a/mm/page_isolation.c b/mm/page_isolation.c index 89c19c0feadb..da0f6e1ae01e 100644 --- a/mm/page_isolation.c +++ b/mm/page_isolation.c @@ -187,6 +187,14 @@ __first_valid_page(unsigned long pfn, unsigned long nr_pages) * pageblocks we may have modified and return -EBUSY to caller. This * prevents two threads from simultaneously working on overlapping ranges. * + * Please note that there is no strong synchronization with the page allocator + * either. Pages might be freed while their page blocks are marked ISOLATED. + * In some cases pages might still end up on pcp lists and that would allow + * for their allocation even when they are in fact isolated already. Depending + * on how strong of a guarantee the caller needs drain_all_pages might be needed + * (e.g. __offline_pages will need to call it after check for isolated range for + * a next retry). + * * Return: the number of isolated pageblocks on success and -EBUSY if any part * of range cannot be isolated. */ diff --git a/mm/percpu.c b/mm/percpu.c index 7e06a1e58720..806bc16f88eb 100644 --- a/mm/percpu.c +++ b/mm/percpu.c @@ -1328,7 +1328,7 @@ static struct pcpu_chunk * __init pcpu_alloc_first_chunk(unsigned long tmp_addr, /* allocate chunk */ alloc_size = sizeof(struct pcpu_chunk) + - BITS_TO_LONGS(region_size >> PAGE_SHIFT); + BITS_TO_LONGS(region_size >> PAGE_SHIFT) * sizeof(unsigned long); chunk = memblock_alloc(alloc_size, SMP_CACHE_BYTES); if (!chunk) panic("%s: Failed to allocate %zu bytes\n", __func__, diff --git a/mm/shuffle.c b/mm/shuffle.c index b3fe97fd6654..56958ffa5a3a 100644 --- a/mm/shuffle.c +++ b/mm/shuffle.c @@ -58,25 +58,25 @@ module_param_call(shuffle, shuffle_store, shuffle_show, &shuffle_param, 0400); * For two pages to be swapped in the shuffle, they must be free (on a * 'free_area' lru), have the same order, and have the same migratetype. */ -static struct page * __meminit shuffle_valid_page(unsigned long pfn, int order) +static struct page * __meminit shuffle_valid_page(struct zone *zone, + unsigned long pfn, int order) { - struct page *page; + struct page *page = pfn_to_online_page(pfn); /* * Given we're dealing with randomly selected pfns in a zone we * need to ask questions like... */ - /* ...is the pfn even in the memmap? */ - if (!pfn_valid_within(pfn)) + /* ... is the page managed by the buddy? */ + if (!page) return NULL; - /* ...is the pfn in a present section or a hole? */ - if (!pfn_present(pfn)) + /* ... is the page assigned to the same zone? */ + if (page_zone(page) != zone) return NULL; /* ...is the page free and currently on a free_area list? */ - page = pfn_to_page(pfn); if (!PageBuddy(page)) return NULL; @@ -123,7 +123,7 @@ void __meminit __shuffle_zone(struct zone *z) * page_j randomly selected in the span @zone_start_pfn to * @spanned_pages. 
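/*
 * Standalone illustrative sketch of the check introduced above as
 * freelist_corrupted(): before following the "next free object" pointer of
 * a singly linked freelist, verify that it still points inside the backing
 * memory; if it does not, stop using the chain from that point on.  This is
 * a hypothetical userspace toy, not the SLUB implementation.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define OBJ_SIZE   32
#define NR_OBJECTS 8

static unsigned char slab[OBJ_SIZE * NR_OBJECTS];

/* The freelist pointer is stored at the start of each free object. */
static void *get_next(const void *obj)
{
        void *next;

        memcpy(&next, obj, sizeof(next));
        return next;
}

static bool pointer_valid(const void *p)
{
        uintptr_t base = (uintptr_t)slab;
        uintptr_t addr = (uintptr_t)p;

        if (!p)
                return true;    /* NULL legitimately ends the list */
        return addr >= base && addr < base + sizeof(slab) &&
               (addr - base) % OBJ_SIZE == 0;
}

int main(void)
{
        void *bogus = (void *)0xdeadbeefUL;
        void *freelist = slab;
        size_t i;

        /* Build a freelist in which object i points at object i + 1. */
        for (i = 0; i < NR_OBJECTS; i++) {
                void *next = (i + 1 < NR_OBJECTS) ?
                             (void *)(slab + (i + 1) * OBJ_SIZE) : NULL;

                memcpy(slab + i * OBJ_SIZE, &next, sizeof(next));
        }

        /* Simulate corruption of the pointer stored in object 3. */
        memcpy(slab + 3 * OBJ_SIZE, &bogus, sizeof(bogus));

        while (freelist) {
                void *next = get_next(freelist);

                if (!pointer_valid(next)) {
                        /* Isolate the rest of the chain, as the patch does. */
                        fprintf(stderr, "freechain corrupt, truncating\n");
                        break;
                }
                freelist = next;
        }
        return 0;
}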
*/ - page_i = shuffle_valid_page(i, order); + page_i = shuffle_valid_page(z, i, order); if (!page_i) continue; @@ -137,7 +137,7 @@ void __meminit __shuffle_zone(struct zone *z) j = z->zone_start_pfn + ALIGN_DOWN(get_random_long() % z->spanned_pages, order_pages); - page_j = shuffle_valid_page(j, order); + page_j = shuffle_valid_page(z, j, order); if (page_j && page_j != page_i) break; } diff --git a/mm/slab_common.c b/mm/slab_common.c index ade6c257d4b4..e36dd36c7076 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -326,6 +326,14 @@ int slab_unmergeable(struct kmem_cache *s) if (s->refcount < 0) return 1; +#ifdef CONFIG_MEMCG_KMEM + /* + * Skip the dying kmem_cache. + */ + if (s->memcg_params.dying) + return 1; +#endif + return 0; } @@ -886,12 +894,15 @@ static int shutdown_memcg_caches(struct kmem_cache *s) return 0; } -static void flush_memcg_workqueue(struct kmem_cache *s) +static void memcg_set_kmem_cache_dying(struct kmem_cache *s) { spin_lock_irq(&memcg_kmem_wq_lock); s->memcg_params.dying = true; spin_unlock_irq(&memcg_kmem_wq_lock); +} +static void flush_memcg_workqueue(struct kmem_cache *s) +{ /* * SLAB and SLUB deactivate the kmem_caches through call_rcu. Make * sure all registered rcu callbacks have been invoked. @@ -923,10 +934,6 @@ static inline int shutdown_memcg_caches(struct kmem_cache *s) { return 0; } - -static inline void flush_memcg_workqueue(struct kmem_cache *s) -{ -} #endif /* CONFIG_MEMCG_KMEM */ void slab_kmem_cache_release(struct kmem_cache *s) @@ -944,8 +951,6 @@ void kmem_cache_destroy(struct kmem_cache *s) if (unlikely(!s)) return; - flush_memcg_workqueue(s); - get_online_cpus(); get_online_mems(); @@ -955,6 +960,22 @@ void kmem_cache_destroy(struct kmem_cache *s) if (s->refcount) goto out_unlock; +#ifdef CONFIG_MEMCG_KMEM + memcg_set_kmem_cache_dying(s); + + mutex_unlock(&slab_mutex); + + put_online_mems(); + put_online_cpus(); + + flush_memcg_workqueue(s); + + get_online_cpus(); + get_online_mems(); + + mutex_lock(&slab_mutex); +#endif + err = shutdown_memcg_caches(s); if (!err) err = shutdown_cache(s); @@ -1740,7 +1761,7 @@ void kzfree(const void *p) if (unlikely(ZERO_OR_NULL_PTR(mem))) return; ks = ksize(mem); - memset(mem, 0, ks); + memzero_explicit(mem, ks); kfree(mem); } EXPORT_SYMBOL(kzfree); diff --git a/mm/slub.c b/mm/slub.c index fca33abd6c42..822ba0724529 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -644,6 +644,20 @@ static void slab_fix(struct kmem_cache *s, char *fmt, ...) va_end(args); } +static bool freelist_corrupted(struct kmem_cache *s, struct page *page, + void **freelist, void *nextfree) +{ + if ((s->flags & SLAB_CONSISTENCY_CHECKS) && + !check_valid_pointer(s, page, nextfree) && freelist) { + object_err(s, page, *freelist, "Freechain corrupt"); + *freelist = NULL; + slab_fix(s, "Isolate corrupted freechain"); + return true; + } + + return false; +} + static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p) { unsigned int off; /* Offset of last byte */ @@ -1379,6 +1393,11 @@ static inline void inc_slabs_node(struct kmem_cache *s, int node, static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects) {} +static bool freelist_corrupted(struct kmem_cache *s, struct page *page, + void **freelist, void *nextfree) +{ + return false; +} #endif /* CONFIG_SLUB_DEBUG */ /* @@ -2062,6 +2081,14 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page, void *prior; unsigned long counters; + /* + * If 'nextfree' is invalid, it is possible that the object at + * 'freelist' is already corrupted. 
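/*
 * Standalone illustrative sketch of the race fix above in p9_fd_cancelled():
 * the cancel path re-checks the request state *after* taking the client
 * lock, because the read worker may already have delivered the reply.
 * Hypothetical userspace analogue.
 */
#include <pthread.h>
#include <stdio.h>

enum req_status { REQ_SENT, REQ_RCVD, REQ_FLUSHED };

struct req {
        enum req_status status;
        int on_list;
};

static pthread_mutex_t client_lock = PTHREAD_MUTEX_INITIALIZER;

/* Returns 1 if the request was actually flushed, 0 if the reply won. */
static int cancel_request(struct req *req)
{
        pthread_mutex_lock(&client_lock);
        if (req->status == REQ_RCVD) {
                /* The reply raced with us and was already received. */
                pthread_mutex_unlock(&client_lock);
                return 0;
        }
        /* Still pending: take it off the list and mark it flushed. */
        req->on_list = 0;
        req->status = REQ_FLUSHED;
        pthread_mutex_unlock(&client_lock);
        return 1;
}

int main(void)
{
        struct req r = { REQ_RCVD, 1 };

        printf("flushed: %d\n", cancel_request(&r));
        return 0;
}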
So isolate all objects + * starting at 'freelist'. + */ + if (freelist_corrupted(s, page, &freelist, nextfree)) + break; + do { prior = page->freelist; counters = page->counters; @@ -5621,7 +5648,8 @@ static void memcg_propagate_slab_attrs(struct kmem_cache *s) */ if (buffer) buf = buffer; - else if (root_cache->max_attr_size < ARRAY_SIZE(mbuf)) + else if (root_cache->max_attr_size < ARRAY_SIZE(mbuf) && + !IS_ENABLED(CONFIG_SLUB_STATS)) buf = mbuf; else { buffer = (char *) get_zeroed_page(GFP_KERNEL); diff --git a/mm/swap_state.c b/mm/swap_state.c index 8e7ce9a9bc5e..4ce014dc4571 100644 --- a/mm/swap_state.c +++ b/mm/swap_state.c @@ -23,6 +23,7 @@ #include #include +#include "internal.h" /* * swapper_space is a fiction, retained to simplify the path through @@ -418,7 +419,8 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, /* May fail (-ENOMEM) if XArray node allocation failed. */ __SetPageLocked(new_page); __SetPageSwapBacked(new_page); - err = add_to_swap_cache(new_page, entry, gfp_mask & GFP_KERNEL); + err = add_to_swap_cache(new_page, entry, + gfp_mask & GFP_RECLAIM_MASK); if (likely(!err)) { /* Initiate read into locked page */ SetPageWorkingset(new_page); diff --git a/mm/vmalloc.c b/mm/vmalloc.c index ad4d00bd7914..5797e1eeaa7e 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -85,6 +85,8 @@ static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end) if (pmd_none_or_clear_bad(pmd)) continue; vunmap_pte_range(pmd, addr, next); + + cond_resched(); } while (pmd++, addr = next, addr != end); } diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c index 13cd683a658a..12ecacf0c55f 100644 --- a/net/9p/trans_fd.c +++ b/net/9p/trans_fd.c @@ -362,6 +362,10 @@ static void p9_read_work(struct work_struct *work) if (m->rreq->status == REQ_STATUS_SENT) { list_del(&m->rreq->req_list); p9_client_cb(m->client, m->rreq, REQ_STATUS_RCVD); + } else if (m->rreq->status == REQ_STATUS_FLSHD) { + /* Ignore replies associated with a cancelled request. */ + p9_debug(P9_DEBUG_TRANS, + "Ignore replies associated with a cancelled request\n"); } else { spin_unlock(&m->client->lock); p9_debug(P9_DEBUG_ERROR, @@ -703,11 +707,20 @@ static int p9_fd_cancelled(struct p9_client *client, struct p9_req_t *req) { p9_debug(P9_DEBUG_TRANS, "client %p req %p\n", client, req); + spin_lock(&client->lock); + /* Ignore cancelled request if message has been received + * before lock. + */ + if (req->status == REQ_STATUS_RCVD) { + spin_unlock(&client->lock); + return 0; + } + /* we haven't received a response for oldreq, * remove it from the list. 
*/ - spin_lock(&client->lock); list_del(&req->req_list); + req->status = REQ_STATUS_FLSHD; spin_unlock(&client->lock); p9_req_put(req); @@ -803,20 +816,28 @@ static int p9_fd_open(struct p9_client *client, int rfd, int wfd) return -ENOMEM; ts->rd = fget(rfd); + if (!ts->rd) + goto out_free_ts; + if (!(ts->rd->f_mode & FMODE_READ)) + goto out_put_rd; ts->wr = fget(wfd); - if (!ts->rd || !ts->wr) { - if (ts->rd) - fput(ts->rd); - if (ts->wr) - fput(ts->wr); - kfree(ts); - return -EIO; - } + if (!ts->wr) + goto out_put_rd; + if (!(ts->wr->f_mode & FMODE_WRITE)) + goto out_put_wr; client->trans = ts; client->status = Connected; return 0; + +out_put_wr: + fput(ts->wr); +out_put_rd: + fput(ts->rd); +out_free_ts: + kfree(ts); + return -EIO; } static int p9_socket_open(struct p9_client *client, struct socket *csocket) diff --git a/net/appletalk/atalk_proc.c b/net/appletalk/atalk_proc.c index 550c6ca007cc..9c1241292d1d 100644 --- a/net/appletalk/atalk_proc.c +++ b/net/appletalk/atalk_proc.c @@ -229,6 +229,8 @@ int __init atalk_proc_init(void) sizeof(struct aarp_iter_state), NULL)) goto out; + return 0; + out: remove_proc_subtree("atalk", init_net.proc_net); return -ENOMEM; diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c index e5a3dc28116d..2fdb1b573e8c 100644 --- a/net/ax25/af_ax25.c +++ b/net/ax25/af_ax25.c @@ -1187,7 +1187,10 @@ static int __must_check ax25_connect(struct socket *sock, if (addr_len > sizeof(struct sockaddr_ax25) && fsa->fsa_ax25.sax25_ndigis != 0) { /* Valid number of digipeaters ? */ - if (fsa->fsa_ax25.sax25_ndigis < 1 || fsa->fsa_ax25.sax25_ndigis > AX25_MAX_DIGIS) { + if (fsa->fsa_ax25.sax25_ndigis < 1 || + fsa->fsa_ax25.sax25_ndigis > AX25_MAX_DIGIS || + addr_len < sizeof(struct sockaddr_ax25) + + sizeof(ax25_address) * fsa->fsa_ax25.sax25_ndigis) { err = -EINVAL; goto out_release; } @@ -1507,7 +1510,10 @@ static int ax25_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) struct full_sockaddr_ax25 *fsa = (struct full_sockaddr_ax25 *)usax; /* Valid number of digipeaters ? */ - if (usax->sax25_ndigis < 1 || usax->sax25_ndigis > AX25_MAX_DIGIS) { + if (usax->sax25_ndigis < 1 || + usax->sax25_ndigis > AX25_MAX_DIGIS || + addr_len < sizeof(struct sockaddr_ax25) + + sizeof(ax25_address) * usax->sax25_ndigis) { err = -EINVAL; goto out; } diff --git a/net/batman-adv/bat_v_elp.c b/net/batman-adv/bat_v_elp.c index 2614a9caee00..a39af0eefad3 100644 --- a/net/batman-adv/bat_v_elp.c +++ b/net/batman-adv/bat_v_elp.c @@ -120,20 +120,7 @@ static u32 batadv_v_elp_get_throughput(struct batadv_hardif_neigh_node *neigh) rtnl_lock(); ret = __ethtool_get_link_ksettings(hard_iface->net_dev, &link_settings); rtnl_unlock(); - - /* Virtual interface drivers such as tun / tap interfaces, VLAN, etc - * tend to initialize the interface throughput with some value for the - * sake of having a throughput number to export via ethtool. This - * exported throughput leaves batman-adv to conclude the interface - * throughput is genuine (reflecting reality), thus no measurements - * are necessary. - * - * Based on the observation that those interface types also tend to set - * the link auto-negotiation to 'off', batman-adv shall check this - * setting to differentiate between genuine link throughput information - * and placeholders installed by virtual interfaces. 
- */ - if (ret == 0 && link_settings.base.autoneg == AUTONEG_ENABLE) { + if (ret == 0) { /* link characteristics might change over time */ if (link_settings.base.duplex == DUPLEX_FULL) hard_iface->bat_v.flags |= BATADV_FULL_DUPLEX; diff --git a/net/batman-adv/bat_v_ogm.c b/net/batman-adv/bat_v_ogm.c index a9e7540c5691..3165f6ff8ee7 100644 --- a/net/batman-adv/bat_v_ogm.c +++ b/net/batman-adv/bat_v_ogm.c @@ -878,6 +878,12 @@ static void batadv_v_ogm_process(const struct sk_buff *skb, int ogm_offset, ntohl(ogm_packet->seqno), ogm_throughput, ogm_packet->ttl, ogm_packet->version, ntohs(ogm_packet->tvlv_len)); + if (batadv_is_my_mac(bat_priv, ogm_packet->orig)) { + batadv_dbg(BATADV_DBG_BATMAN, bat_priv, + "Drop packet: originator packet from ourself\n"); + return; + } + /* If the throughput metric is 0, immediately drop the packet. No need * to create orig_node / neigh_node for an unusable route. */ @@ -1005,11 +1011,6 @@ int batadv_v_ogm_packet_recv(struct sk_buff *skb, if (batadv_is_my_mac(bat_priv, ethhdr->h_source)) goto free_skb; - ogm_packet = (struct batadv_ogm2_packet *)skb->data; - - if (batadv_is_my_mac(bat_priv, ogm_packet->orig)) - goto free_skb; - batadv_inc_counter(bat_priv, BATADV_CNT_MGMT_RX); batadv_add_counter(bat_priv, BATADV_CNT_MGMT_RX_BYTES, skb->len + ETH_HLEN); diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c index 663a53b6d36e..5f6309ade1ea 100644 --- a/net/batman-adv/bridge_loop_avoidance.c +++ b/net/batman-adv/bridge_loop_avoidance.c @@ -437,7 +437,10 @@ static void batadv_bla_send_claim(struct batadv_priv *bat_priv, u8 *mac, batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES, skb->len + ETH_HLEN); - netif_rx(skb); + if (in_interrupt()) + netif_rx(skb); + else + netif_rx_ni(skb); out: if (primary_if) batadv_hardif_put(primary_if); diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c index 47df4c678988..89c9097007c3 100644 --- a/net/batman-adv/gateway_client.c +++ b/net/batman-adv/gateway_client.c @@ -703,8 +703,10 @@ batadv_gw_dhcp_recipient_get(struct sk_buff *skb, unsigned int *header_len, chaddr_offset = *header_len + BATADV_DHCP_CHADDR_OFFSET; /* store the client address if the message is going to a client */ - if (ret == BATADV_DHCP_TO_CLIENT && - pskb_may_pull(skb, chaddr_offset + ETH_ALEN)) { + if (ret == BATADV_DHCP_TO_CLIENT) { + if (!pskb_may_pull(skb, chaddr_offset + ETH_ALEN)) + return BATADV_DHCP_NO; + /* check if the DHCP packet carries an Ethernet DHCP */ p = skb->data + *header_len + BATADV_DHCP_HTYPE_OFFSET; if (*p != BATADV_DHCP_HTYPE_ETHERNET) diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c index 4febc82a7c76..52fb6d6d6d58 100644 --- a/net/bluetooth/6lowpan.c +++ b/net/bluetooth/6lowpan.c @@ -50,6 +50,7 @@ static bool enable_6lowpan; /* We are listening incoming connections via this channel */ static struct l2cap_chan *listen_chan; +static DEFINE_MUTEX(set_lock); struct lowpan_peer { struct list_head list; @@ -1070,12 +1071,14 @@ static void do_enable_set(struct work_struct *work) enable_6lowpan = set_enable->flag; + mutex_lock(&set_lock); if (listen_chan) { l2cap_chan_close(listen_chan, 0); l2cap_chan_put(listen_chan); } listen_chan = bt_6lowpan_listen(); + mutex_unlock(&set_lock); kfree(set_enable); } @@ -1127,11 +1130,13 @@ static ssize_t lowpan_control_write(struct file *fp, if (ret == -EINVAL) return ret; + mutex_lock(&set_lock); if (listen_chan) { l2cap_chan_close(listen_chan, 0); l2cap_chan_put(listen_chan); listen_chan = NULL; } + mutex_unlock(&set_lock); if 
(conn) { struct lowpan_peer *peer; diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index c1d3a303d97f..7bf6860fed78 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -1274,6 +1274,9 @@ static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr, { struct discovery_state *d = &hdev->discovery; + if (len > HCI_MAX_AD_LENGTH) + return; + bacpy(&d->last_adv_addr, bdaddr); d->last_adv_addr_type = bdaddr_type; d->last_adv_rssi = rssi; @@ -2441,7 +2444,7 @@ static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb) BT_DBG("%s num_rsp %d", hdev->name, num_rsp); - if (!num_rsp) + if (!num_rsp || skb->len < num_rsp * sizeof(*info) + 1) return; if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) @@ -4064,6 +4067,9 @@ static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct inquiry_info_with_rssi_and_pscan_mode *info; info = (void *) (skb->data + 1); + if (skb->len < num_rsp * sizeof(*info) + 1) + goto unlock; + for (; num_rsp; num_rsp--, info++) { u32 flags; @@ -4085,6 +4091,9 @@ static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, } else { struct inquiry_info_with_rssi *info = (void *) (skb->data + 1); + if (skb->len < num_rsp * sizeof(*info) + 1) + goto unlock; + for (; num_rsp; num_rsp--, info++) { u32 flags; @@ -4105,6 +4114,7 @@ static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, } } +unlock: hci_dev_unlock(hdev); } @@ -4216,6 +4226,7 @@ static void hci_sync_conn_complete_evt(struct hci_dev *hdev, case 0x11: /* Unsupported Feature or Parameter Value */ case 0x1c: /* SCO interval rejected */ case 0x1a: /* Unsupported Remote Feature */ + case 0x1e: /* Invalid LMP Parameters */ case 0x1f: /* Unspecified error */ case 0x20: /* Unsupported LMP Parameter value */ if (conn->out) { @@ -4266,7 +4277,7 @@ static void hci_extended_inquiry_result_evt(struct hci_dev *hdev, BT_DBG("%s num_rsp %d", hdev->name, num_rsp); - if (!num_rsp) + if (!num_rsp || skb->len < num_rsp * sizeof(*info) + 1) return; if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) @@ -5230,7 +5241,8 @@ static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev, static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr, u8 bdaddr_type, bdaddr_t *direct_addr, - u8 direct_addr_type, s8 rssi, u8 *data, u8 len) + u8 direct_addr_type, s8 rssi, u8 *data, u8 len, + bool ext_adv) { struct discovery_state *d = &hdev->discovery; struct smp_irk *irk; @@ -5252,6 +5264,11 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr, return; } + if (!ext_adv && len > HCI_MAX_AD_LENGTH) { + bt_dev_err_ratelimited(hdev, "legacy adv larger than 31 bytes"); + return; + } + /* Find the end of the data in case the report contains padded zero * bytes at the end causing an invalid length value. * @@ -5311,7 +5328,7 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr, */ conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type, direct_addr); - if (conn && type == LE_ADV_IND) { + if (!ext_adv && conn && type == LE_ADV_IND && len <= HCI_MAX_AD_LENGTH) { /* Store report for later inclusion by * mgmt_device_connected */ @@ -5365,7 +5382,7 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr, * event or send an immediate device found event if the data * should not be stored for later. */ - if (!has_pending_adv_report(hdev)) { + if (!ext_adv && !has_pending_adv_report(hdev)) { /* If the report will trigger a SCAN_REQ store it for * later merging. 
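/*
 * Standalone illustrative sketch of the validation pattern added above in
 * the HCI event handlers: the record count claimed in the first byte of an
 * event must be checked against the real buffer length before the records
 * are walked, otherwise a buggy or malicious controller can push the parser
 * past the end of the buffer.  Hypothetical record layout, userspace only.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct record {                 /* stand-in for struct inquiry_info */
        uint8_t addr[6];
        int8_t  rssi;
};

static int parse_event(const uint8_t *buf, size_t len)
{
        const struct record *rec;
        size_t num_rsp, i;

        if (len < 1)
                return -1;
        num_rsp = buf[0];

        /* Mirrors "skb->len < num_rsp * sizeof(*info) + 1". */
        if (!num_rsp || len < num_rsp * sizeof(*rec) + 1)
                return -1;

        rec = (const struct record *)(buf + 1);
        for (i = 0; i < num_rsp; i++)
                printf("record %zu: rssi %d\n", i, rec[i].rssi);
        return 0;
}

int main(void)
{
        uint8_t bogus[2] = { 0xff, 0x00 };  /* claims 255 records in 2 bytes */

        return parse_event(bogus, sizeof(bogus)) == -1 ? 0 : 1;
}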
*/ @@ -5400,7 +5417,8 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr, /* If the new report will trigger a SCAN_REQ store it for * later merging. */ - if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) { + if (!ext_adv && (type == LE_ADV_IND || + type == LE_ADV_SCAN_IND)) { store_pending_adv_report(hdev, bdaddr, bdaddr_type, rssi, flags, data, len); return; @@ -5440,7 +5458,7 @@ static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb) rssi = ev->data[ev->length]; process_adv_report(hdev, ev->evt_type, &ev->bdaddr, ev->bdaddr_type, NULL, 0, rssi, - ev->data, ev->length); + ev->data, ev->length, false); } else { bt_dev_err(hdev, "Dropping invalid advertising data"); } @@ -5514,7 +5532,8 @@ static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb) if (legacy_evt_type != LE_ADV_INVALID) { process_adv_report(hdev, legacy_evt_type, &ev->bdaddr, ev->bdaddr_type, NULL, 0, ev->rssi, - ev->data, ev->length); + ev->data, ev->length, + !(evt_type & LE_EXT_ADV_LEGACY_PDU)); } ptr += sizeof(*ev) + ev->length; @@ -5712,7 +5731,8 @@ static void hci_le_direct_adv_report_evt(struct hci_dev *hdev, process_adv_report(hdev, ev->evt_type, &ev->bdaddr, ev->bdaddr_type, &ev->direct_addr, - ev->direct_addr_type, ev->rssi, NULL, 0); + ev->direct_addr_type, ev->rssi, NULL, 0, + false); ptr += sizeof(*ev); } diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c index ad12fe3fca8c..ee2902b51d45 100644 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c @@ -1007,7 +1007,7 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br, nsrcs_offset = len + offsetof(struct mld2_grec, grec_nsrcs); if (skb_transport_offset(skb) + ipv6_transport_len(skb) < - nsrcs_offset + sizeof(_nsrcs)) + nsrcs_offset + sizeof(__nsrcs)) return -EINVAL; _nsrcs = skb_header_pointer(skb, nsrcs_offset, diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index ce2ab14ee605..cecb4223440e 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h @@ -208,8 +208,8 @@ struct net_bridge_port_group { struct rcu_head rcu; struct timer_list timer; struct br_ip addr; + unsigned char eth_addr[ETH_ALEN] __aligned(2); unsigned char flags; - unsigned char eth_addr[ETH_ALEN]; }; struct net_bridge_mdb_entry { diff --git a/net/bridge/netfilter/nf_conntrack_bridge.c b/net/bridge/netfilter/nf_conntrack_bridge.c index 809673222382..8d033a75a766 100644 --- a/net/bridge/netfilter/nf_conntrack_bridge.c +++ b/net/bridge/netfilter/nf_conntrack_bridge.c @@ -168,6 +168,7 @@ static unsigned int nf_ct_br_defrag4(struct sk_buff *skb, static unsigned int nf_ct_br_defrag6(struct sk_buff *skb, const struct nf_hook_state *state) { +#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6) u16 zone_id = NF_CT_DEFAULT_ZONE_ID; enum ip_conntrack_info ctinfo; struct br_input_skb_cb cb; @@ -180,14 +181,17 @@ static unsigned int nf_ct_br_defrag6(struct sk_buff *skb, br_skb_cb_save(skb, &cb, sizeof(struct inet6_skb_parm)); - err = nf_ipv6_br_defrag(state->net, skb, - IP_DEFRAG_CONNTRACK_BRIDGE_IN + zone_id); + err = nf_ct_frag6_gather(state->net, skb, + IP_DEFRAG_CONNTRACK_BRIDGE_IN + zone_id); /* queued */ if (err == -EINPROGRESS) return NF_STOLEN; br_skb_cb_restore(skb, &cb, IP6CB(skb)->frag_max_size); return err == 0 ? 
NF_ACCEPT : NF_DROP; +#else + return NF_ACCEPT; +#endif } static int nf_ct_br_ip_check(const struct sk_buff *skb) diff --git a/net/can/j1939/socket.c b/net/can/j1939/socket.c index f7587428febd..bf9fd6ee88fe 100644 --- a/net/can/j1939/socket.c +++ b/net/can/j1939/socket.c @@ -398,6 +398,7 @@ static int j1939_sk_init(struct sock *sk) spin_lock_init(&jsk->sk_session_queue_lock); INIT_LIST_HEAD(&jsk->sk_session_queue); sk->sk_destruct = j1939_sk_sock_destruct; + sk->sk_protocol = CAN_J1939; return 0; } @@ -466,6 +467,14 @@ static int j1939_sk_bind(struct socket *sock, struct sockaddr *uaddr, int len) goto out_release_sock; } + if (!ndev->ml_priv) { + netdev_warn_once(ndev, + "No CAN mid layer private allocated, please fix your driver and use alloc_candev()!\n"); + dev_put(ndev); + ret = -ENODEV; + goto out_release_sock; + } + priv = j1939_netdev_start(ndev); dev_put(ndev); if (IS_ERR(priv)) { @@ -553,6 +562,11 @@ static int j1939_sk_connect(struct socket *sock, struct sockaddr *uaddr, static void j1939_sk_sock2sockaddr_can(struct sockaddr_can *addr, const struct j1939_sock *jsk, int peer) { + /* There are two holes (2 bytes and 3 bytes) to clear to avoid + * leaking kernel information to user space. + */ + memset(addr, 0, J1939_MIN_NAMELEN); + addr->can_family = AF_CAN; addr->can_ifindex = jsk->ifindex; addr->can_addr.j1939.pgn = jsk->addr.pgn; diff --git a/net/can/j1939/transport.c b/net/can/j1939/transport.c index 9f99af5b0b11..a8dd956b5e8e 100644 --- a/net/can/j1939/transport.c +++ b/net/can/j1939/transport.c @@ -352,17 +352,16 @@ void j1939_session_skb_queue(struct j1939_session *session, skb_queue_tail(&session->skb_queue, skb); } -static struct sk_buff *j1939_session_skb_find(struct j1939_session *session) +static struct +sk_buff *j1939_session_skb_find_by_offset(struct j1939_session *session, + unsigned int offset_start) { struct j1939_priv *priv = session->priv; + struct j1939_sk_buff_cb *do_skcb; struct sk_buff *skb = NULL; struct sk_buff *do_skb; - struct j1939_sk_buff_cb *do_skcb; - unsigned int offset_start; unsigned long flags; - offset_start = session->pkt.dpo * 7; - spin_lock_irqsave(&session->skb_queue.lock, flags); skb_queue_walk(&session->skb_queue, do_skb) { do_skcb = j1939_skb_to_cb(do_skb); @@ -382,6 +381,14 @@ static struct sk_buff *j1939_session_skb_find(struct j1939_session *session) return skb; } +static struct sk_buff *j1939_session_skb_find(struct j1939_session *session) +{ + unsigned int offset_start; + + offset_start = session->pkt.dpo * 7; + return j1939_session_skb_find_by_offset(session, offset_start); +} + /* see if we are receiver * returns 0 for broadcasts, although we will receive them */ @@ -716,10 +723,12 @@ static int j1939_session_tx_rts(struct j1939_session *session) return ret; session->last_txcmd = dat[0]; - if (dat[0] == J1939_TP_CMD_BAM) + if (dat[0] == J1939_TP_CMD_BAM) { j1939_tp_schedule_txtimer(session, 50); - - j1939_tp_set_rxtimeout(session, 1250); + j1939_tp_set_rxtimeout(session, 250); + } else { + j1939_tp_set_rxtimeout(session, 1250); + } netdev_dbg(session->priv->ndev, "%s: 0x%p\n", __func__, session); @@ -766,7 +775,7 @@ static int j1939_session_tx_dat(struct j1939_session *session) int ret = 0; u8 dat[8]; - se_skb = j1939_session_skb_find(session); + se_skb = j1939_session_skb_find_by_offset(session, session->pkt.tx * 7); if (!se_skb) return -ENOBUFS; @@ -787,6 +796,18 @@ static int j1939_session_tx_dat(struct j1939_session *session) if (len > 7) len = 7; + if (offset + len > se_skb->len) { + netdev_err_once(priv->ndev, + "%s: 0x%p: 
requested data outside of queued buffer: offset %i, len %i, pkt.tx: %i\n", + __func__, session, skcb->offset, se_skb->len , session->pkt.tx); + return -EOVERFLOW; + } + + if (!len) { + ret = -ENOBUFS; + break; + } + memcpy(&dat[1], &tpdat[offset], len); ret = j1939_tp_tx_dat(session, dat, len + 1); if (ret < 0) { @@ -1055,9 +1076,9 @@ static void __j1939_session_cancel(struct j1939_session *session, lockdep_assert_held(&session->priv->active_session_list_lock); session->err = j1939_xtp_abort_to_errno(priv, err); + session->state = J1939_SESSION_WAITING_ABORT; /* do not send aborts on incoming broadcasts */ if (!j1939_cb_is_broadcast(&session->skcb)) { - session->state = J1939_SESSION_WAITING_ABORT; j1939_xtp_tx_abort(priv, &session->skcb, !session->transmission, err, session->skcb.addr.pgn); @@ -1120,6 +1141,9 @@ static enum hrtimer_restart j1939_tp_txtimer(struct hrtimer *hrtimer) * cleanup including propagation of the error to user space. */ break; + case -EOVERFLOW: + j1939_session_cancel(session, J1939_XTP_ABORT_ECTS_TOO_BIG); + break; case 0: session->tx_retry = 0; break; @@ -1651,8 +1675,12 @@ static void j1939_xtp_rx_rts(struct j1939_priv *priv, struct sk_buff *skb, return; } session = j1939_xtp_rx_rts_session_new(priv, skb); - if (!session) + if (!session) { + if (cmd == J1939_TP_CMD_BAM && j1939_sk_recv_match(priv, skcb)) + netdev_info(priv->ndev, "%s: failed to create TP BAM session\n", + __func__); return; + } } else { if (j1939_xtp_rx_rts_session_active(session, skb)) { j1939_session_put(session); @@ -1661,11 +1689,15 @@ static void j1939_xtp_rx_rts(struct j1939_priv *priv, struct sk_buff *skb, } session->last_cmd = cmd; - j1939_tp_set_rxtimeout(session, 1250); - - if (cmd != J1939_TP_CMD_BAM && !session->transmission) { - j1939_session_txtimer_cancel(session); - j1939_tp_schedule_txtimer(session, 0); + if (cmd == J1939_TP_CMD_BAM) { + if (!session->transmission) + j1939_tp_set_rxtimeout(session, 750); + } else { + if (!session->transmission) { + j1939_session_txtimer_cancel(session); + j1939_tp_schedule_txtimer(session, 0); + } + j1939_tp_set_rxtimeout(session, 1250); } j1939_session_put(session); @@ -1716,6 +1748,7 @@ static void j1939_xtp_rx_dat_one(struct j1939_session *session, int offset; int nbytes; bool final = false; + bool remain = false; bool do_cts_eoma = false; int packet; @@ -1750,7 +1783,8 @@ static void j1939_xtp_rx_dat_one(struct j1939_session *session, __func__, session); goto out_session_cancel; } - se_skb = j1939_session_skb_find(session); + + se_skb = j1939_session_skb_find_by_offset(session, packet * 7); if (!se_skb) { netdev_warn(priv->ndev, "%s: 0x%p: no skb found\n", __func__, session); @@ -1769,7 +1803,20 @@ static void j1939_xtp_rx_dat_one(struct j1939_session *session, } tpdat = se_skb->data; - memcpy(&tpdat[offset], &dat[1], nbytes); + if (!session->transmission) { + memcpy(&tpdat[offset], &dat[1], nbytes); + } else { + int err; + + err = memcmp(&tpdat[offset], &dat[1], nbytes); + if (err) + netdev_err_once(priv->ndev, + "%s: 0x%p: Data of RX-looped back packet (%*ph) doesn't match TX data (%*ph)!\n", + __func__, session, + nbytes, &dat[1], + nbytes, &tpdat[offset]); + } + if (packet == session->pkt.rx) session->pkt.rx++; @@ -1777,6 +1824,8 @@ static void j1939_xtp_rx_dat_one(struct j1939_session *session, j1939_cb_is_broadcast(&session->skcb)) { if (session->pkt.rx >= session->pkt.total) final = true; + else + remain = true; } else { /* never final, an EOMA must follow */ if (session->pkt.rx >= session->pkt.last) @@ -1784,7 +1833,11 @@ static void 
j1939_xtp_rx_dat_one(struct j1939_session *session, } if (final) { + j1939_session_timers_cancel(session); j1939_session_completed(session); + } else if (remain) { + if (!session->transmission) + j1939_tp_set_rxtimeout(session, 750); } else if (do_cts_eoma) { j1939_tp_set_rxtimeout(session, 1250); if (!session->transmission) @@ -1829,6 +1882,13 @@ static void j1939_xtp_rx_dat(struct j1939_priv *priv, struct sk_buff *skb) else j1939_xtp_rx_dat_one(session, skb); } + + if (j1939_cb_is_broadcast(skcb)) { + session = j1939_session_get_by_addr(priv, &skcb->addr, false, + false); + if (session) + j1939_xtp_rx_dat_one(session, skb); + } } /* j1939 main intf */ @@ -1920,7 +1980,7 @@ static void j1939_tp_cmd_recv(struct j1939_priv *priv, struct sk_buff *skb) if (j1939_tp_im_transmitter(skcb)) j1939_xtp_rx_rts(priv, skb, true); - if (j1939_tp_im_receiver(skcb)) + if (j1939_tp_im_receiver(skcb) || j1939_cb_is_broadcast(skcb)) j1939_xtp_rx_rts(priv, skb, false); break; @@ -1984,7 +2044,7 @@ int j1939_tp_recv(struct j1939_priv *priv, struct sk_buff *skb) { struct j1939_sk_buff_cb *skcb = j1939_skb_to_cb(skb); - if (!j1939_tp_im_involved_anydir(skcb)) + if (!j1939_tp_im_involved_anydir(skcb) && !j1939_cb_is_broadcast(skcb)) return 0; switch (skcb->addr.pgn) { @@ -2017,6 +2077,10 @@ void j1939_simple_recv(struct j1939_priv *priv, struct sk_buff *skb) if (!skb->sk) return; + if (skb->sk->sk_family != AF_CAN || + skb->sk->sk_protocol != CAN_J1939) + return; + j1939_session_list_lock(priv); session = j1939_session_get_simple(priv, skb); j1939_session_list_unlock(priv); diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index e513b8876d13..a8481da37f1a 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -445,6 +445,7 @@ static void target_copy(struct ceph_osd_request_target *dest, dest->size = src->size; dest->min_size = src->min_size; dest->sort_bitwise = src->sort_bitwise; + dest->recovery_deletes = src->recovery_deletes; dest->flags = src->flags; dest->paused = src->paused; diff --git a/net/compat.c b/net/compat.c index 0f7ded26059e..c848bcb517f3 100644 --- a/net/compat.c +++ b/net/compat.c @@ -291,6 +291,7 @@ void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm) break; } /* Bump the usage count and install the file. */ + __receive_sock(fp[i]); fd_install(new_fd, get_file(fp[i])); } diff --git a/net/core/dev.c b/net/core/dev.c index 8552874e5aac..cdc1c3a144e1 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -79,6 +79,7 @@ #include #include #include +#include #include #include #include @@ -194,7 +195,7 @@ static DEFINE_SPINLOCK(napi_hash_lock); static unsigned int napi_gen_id = NR_CPUS; static DEFINE_READ_MOSTLY_HASHTABLE(napi_hash, 8); -static seqcount_t devnet_rename_seq; +static DECLARE_RWSEM(devnet_rename_sem); static inline void dev_base_seq_inc(struct net *net) { @@ -816,33 +817,28 @@ EXPORT_SYMBOL(dev_get_by_napi_id); * @net: network namespace * @name: a pointer to the buffer where the name will be stored. * @ifindex: the ifindex of the interface to get the name from. - * - * The use of raw_seqcount_begin() and cond_resched() before - * retrying is required as we want to give the writers a chance - * to complete when CONFIG_PREEMPT is not set. 
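/*
 * Standalone illustrative sketch of the locking change above:
 * netdev_get_name() drops the seqcount retry loop and simply takes
 * devnet_rename_sem for reading, while dev_change_name() takes it for
 * writing.  Userspace analogue with a pthread rwlock; names are
 * hypothetical.
 */
#include <pthread.h>
#include <stdio.h>
#include <string.h>

#define IFNAMSIZ 16

static pthread_rwlock_t rename_sem = PTHREAD_RWLOCK_INITIALIZER;
static char dev_name[IFNAMSIZ] = "eth0";

/* Reader: returns a consistent snapshot of the name, no retry loop needed. */
static void get_name(char *out)
{
        pthread_rwlock_rdlock(&rename_sem);
        strcpy(out, dev_name);
        pthread_rwlock_unlock(&rename_sem);
}

/* Writer: renames under the write lock, excluding all readers. */
static void change_name(const char *newname)
{
        pthread_rwlock_wrlock(&rename_sem);
        strncpy(dev_name, newname, IFNAMSIZ - 1);
        dev_name[IFNAMSIZ - 1] = '\0';
        pthread_rwlock_unlock(&rename_sem);
}

int main(void)
{
        char buf[IFNAMSIZ];

        change_name("wan0");
        get_name(buf);
        printf("%s\n", buf);
        return 0;
}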
*/ int netdev_get_name(struct net *net, char *name, int ifindex) { struct net_device *dev; - unsigned int seq; + int ret; -retry: - seq = raw_seqcount_begin(&devnet_rename_seq); + down_read(&devnet_rename_sem); rcu_read_lock(); + dev = dev_get_by_index_rcu(net, ifindex); if (!dev) { - rcu_read_unlock(); - return -ENODEV; + ret = -ENODEV; + goto out; } strcpy(name, dev->name); - rcu_read_unlock(); - if (read_seqcount_retry(&devnet_rename_seq, seq)) { - cond_resched(); - goto retry; - } - return 0; + ret = 0; +out: + rcu_read_unlock(); + up_read(&devnet_rename_sem); + return ret; } /** @@ -1115,10 +1111,10 @@ int dev_change_name(struct net_device *dev, const char *newname) likely(!(dev->priv_flags & IFF_LIVE_RENAME_OK))) return -EBUSY; - write_seqcount_begin(&devnet_rename_seq); + down_write(&devnet_rename_sem); if (strncmp(newname, dev->name, IFNAMSIZ) == 0) { - write_seqcount_end(&devnet_rename_seq); + up_write(&devnet_rename_sem); return 0; } @@ -1126,7 +1122,7 @@ int dev_change_name(struct net_device *dev, const char *newname) err = dev_get_valid_name(net, dev, newname); if (err < 0) { - write_seqcount_end(&devnet_rename_seq); + up_write(&devnet_rename_sem); return err; } @@ -1141,11 +1137,11 @@ rollback: if (ret) { memcpy(dev->name, oldname, IFNAMSIZ); dev->name_assign_type = old_assign_type; - write_seqcount_end(&devnet_rename_seq); + up_write(&devnet_rename_sem); return ret; } - write_seqcount_end(&devnet_rename_seq); + up_write(&devnet_rename_sem); netdev_adjacent_rename_links(dev, oldname); @@ -1166,7 +1162,7 @@ rollback: /* err >= 0 after dev_alloc_name() or stores the first errno */ if (err >= 0) { err = ret; - write_seqcount_begin(&devnet_rename_seq); + down_write(&devnet_rename_sem); memcpy(dev->name, oldname, IFNAMSIZ); memcpy(oldname, newname, IFNAMSIZ); dev->name_assign_type = old_assign_type; @@ -3836,10 +3832,12 @@ int dev_direct_xmit(struct sk_buff *skb, u16 queue_id) local_bh_disable(); + dev_xmit_recursion_inc(); HARD_TX_LOCK(dev, txq, smp_processor_id()); if (!netif_xmit_frozen_or_drv_stopped(txq)) ret = netdev_start_xmit(skb, dev, txq, false); HARD_TX_UNLOCK(dev, txq); + dev_xmit_recursion_dec(); local_bh_enable(); @@ -5231,7 +5229,7 @@ static void flush_backlog(struct work_struct *work) skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) { if (skb->dev->reg_state == NETREG_UNREGISTERING) { __skb_unlink(skb, &sd->input_pkt_queue); - kfree_skb(skb); + dev_kfree_skb_irq(skb); input_queue_head_incr(sd); } } @@ -5604,12 +5602,13 @@ static void napi_skb_free_stolen_head(struct sk_buff *skb) kmem_cache_free(skbuff_head_cache, skb); } -static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb) +static gro_result_t napi_skb_finish(struct napi_struct *napi, + struct sk_buff *skb, + gro_result_t ret) { switch (ret) { case GRO_NORMAL: - if (netif_receive_skb_internal(skb)) - ret = GRO_DROP; + gro_normal_one(napi, skb); break; case GRO_DROP: @@ -5641,7 +5640,7 @@ gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb) skb_gro_reset_offset(skb); - ret = napi_skb_finish(dev_gro_receive(napi, skb), skb); + ret = napi_skb_finish(napi, skb, dev_gro_receive(napi, skb)); trace_napi_gro_receive_exit(ret); return ret; @@ -6232,12 +6231,13 @@ void netif_napi_add(struct net_device *dev, struct napi_struct *napi, netdev_err_once(dev, "%s() called with weight %d\n", __func__, weight); napi->weight = weight; - list_add(&napi->dev_list, &dev->napi_list); napi->dev = dev; #ifdef CONFIG_NETPOLL napi->poll_owner = -1; #endif set_bit(NAPI_STATE_SCHED, 
&napi->state); + set_bit(NAPI_STATE_NPSVC, &napi->state); + list_add_rcu(&napi->dev_list, &dev->napi_list); napi_hash_add(napi); } EXPORT_SYMBOL(netif_napi_add); @@ -9118,6 +9118,13 @@ int register_netdevice(struct net_device *dev) rcu_barrier(); dev->reg_state = NETREG_UNREGISTERED; + /* We should put the kobject that hold in + * netdev_unregister_kobject(), otherwise + * the net device cannot be freed when + * driver calls free_netdev(), because the + * kobject is being hold. + */ + kobject_put(&dev->dev.kobj); } /* * Prevent userspace races by waiting until the network diff --git a/net/core/filter.c b/net/core/filter.c index f1f2304822e3..5c490d473df1 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -1766,25 +1766,27 @@ BPF_CALL_5(bpf_skb_load_bytes_relative, const struct sk_buff *, skb, u32, offset, void *, to, u32, len, u32, start_header) { u8 *end = skb_tail_pointer(skb); - u8 *net = skb_network_header(skb); - u8 *mac = skb_mac_header(skb); - u8 *ptr; + u8 *start, *ptr; - if (unlikely(offset > 0xffff || len > (end - mac))) + if (unlikely(offset > 0xffff)) goto err_clear; switch (start_header) { case BPF_HDR_START_MAC: - ptr = mac + offset; + if (unlikely(!skb_mac_header_was_set(skb))) + goto err_clear; + start = skb_mac_header(skb); break; case BPF_HDR_START_NET: - ptr = net + offset; + start = skb_network_header(skb); break; default: goto err_clear; } - if (likely(ptr >= mac && ptr + len <= end)) { + ptr = start + offset; + + if (likely(ptr + len <= end)) { memcpy(to, ptr, len); return 0; } @@ -5728,12 +5730,16 @@ BPF_CALL_1(bpf_skb_ecn_set_ce, struct sk_buff *, skb) { unsigned int iphdr_len; - if (skb->protocol == cpu_to_be16(ETH_P_IP)) + switch (skb_protocol(skb, true)) { + case cpu_to_be16(ETH_P_IP): iphdr_len = sizeof(struct iphdr); - else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) + break; + case cpu_to_be16(ETH_P_IPV6): iphdr_len = sizeof(struct ipv6hdr); - else + break; + default: return 0; + } if (skb_headlen(skb) < iphdr_len) return 0; @@ -8004,6 +8010,43 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type, offsetof(OBJ, OBJ_FIELD)); \ } while (0) +#define SOCK_OPS_GET_SK() \ + do { \ + int fullsock_reg = si->dst_reg, reg = BPF_REG_9, jmp = 1; \ + if (si->dst_reg == reg || si->src_reg == reg) \ + reg--; \ + if (si->dst_reg == reg || si->src_reg == reg) \ + reg--; \ + if (si->dst_reg == si->src_reg) { \ + *insn++ = BPF_STX_MEM(BPF_DW, si->src_reg, reg, \ + offsetof(struct bpf_sock_ops_kern, \ + temp)); \ + fullsock_reg = reg; \ + jmp += 2; \ + } \ + *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \ + struct bpf_sock_ops_kern, \ + is_fullsock), \ + fullsock_reg, si->src_reg, \ + offsetof(struct bpf_sock_ops_kern, \ + is_fullsock)); \ + *insn++ = BPF_JMP_IMM(BPF_JEQ, fullsock_reg, 0, jmp); \ + if (si->dst_reg == si->src_reg) \ + *insn++ = BPF_LDX_MEM(BPF_DW, reg, si->src_reg, \ + offsetof(struct bpf_sock_ops_kern, \ + temp)); \ + *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \ + struct bpf_sock_ops_kern, sk),\ + si->dst_reg, si->src_reg, \ + offsetof(struct bpf_sock_ops_kern, sk));\ + if (si->dst_reg == si->src_reg) { \ + *insn++ = BPF_JMP_A(1); \ + *insn++ = BPF_LDX_MEM(BPF_DW, reg, si->src_reg, \ + offsetof(struct bpf_sock_ops_kern, \ + temp)); \ + } \ + } while (0) + #define SOCK_OPS_GET_TCP_SOCK_FIELD(FIELD) \ SOCK_OPS_GET_FIELD(FIELD, FIELD, struct tcp_sock) @@ -8288,17 +8331,7 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type, SOCK_OPS_GET_TCP_SOCK_FIELD(bytes_acked); break; case offsetof(struct bpf_sock_ops, sk): - *insn++ = 
BPF_LDX_MEM(BPF_FIELD_SIZEOF( - struct bpf_sock_ops_kern, - is_fullsock), - si->dst_reg, si->src_reg, - offsetof(struct bpf_sock_ops_kern, - is_fullsock)); - *insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 1); - *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( - struct bpf_sock_ops_kern, sk), - si->dst_reg, si->src_reg, - offsetof(struct bpf_sock_ops_kern, sk)); + SOCK_OPS_GET_SK(); break; } return insn - insn_buf; diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c index 4c826b8bf9b1..2ebf9b252779 100644 --- a/net/core/net-sysfs.c +++ b/net/core/net-sysfs.c @@ -1036,7 +1036,7 @@ static ssize_t tx_timeout_show(struct netdev_queue *queue, char *buf) trans_timeout = queue->trans_timeout; spin_unlock_irq(&queue->_xmit_lock); - return sprintf(buf, "%lu", trans_timeout); + return sprintf(buf, fmt_ulong, trans_timeout); } static unsigned int get_netdev_queue_index(struct netdev_queue *queue) diff --git a/net/core/netpoll.c b/net/core/netpoll.c index 849380a622ef..cb67d36f3adb 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c @@ -161,7 +161,7 @@ static void poll_napi(struct net_device *dev) struct napi_struct *napi; int cpu = smp_processor_id(); - list_for_each_entry(napi, &dev->napi_list, dev_list) { + list_for_each_entry_rcu(napi, &dev->napi_list, dev_list) { if (cmpxchg(&napi->poll_owner, -1, cpu) == -1) { poll_one_napi(napi); smp_store_release(&napi->poll_owner, -1); diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 944acb1a9f29..b0c06a063776 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -3231,7 +3231,8 @@ replay: */ if (err < 0) { /* If device is not registered at all, free it now */ - if (dev->reg_state == NETREG_UNINITIALIZED) + if (dev->reg_state == NETREG_UNINITIALIZED || + dev->reg_state == NETREG_UNREGISTERED) free_netdev(dev); goto out; } diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 3823128f3256..ad439732cca3 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -5343,8 +5343,8 @@ struct sk_buff *skb_vlan_untag(struct sk_buff *skb) skb = skb_share_check(skb, GFP_ATOMIC); if (unlikely(!skb)) goto err_free; - - if (unlikely(!pskb_may_pull(skb, VLAN_HLEN))) + /* We may access the two bytes after vlan_hdr in vlan_set_encap_proto(). */ + if (unlikely(!pskb_may_pull(skb, VLAN_HLEN + sizeof(unsigned short)))) goto err_free; vhdr = (struct vlan_hdr *)skb->data; @@ -5908,9 +5908,13 @@ static int pskb_carve_inside_nonlinear(struct sk_buff *skb, const u32 off, if (skb_has_frag_list(skb)) skb_clone_fraglist(skb); - if (k == 0) { - /* split line is in frag list */ - pskb_carve_frag_list(skb, shinfo, off - pos, gfp_mask); + /* split line is in frag list */ + if (k == 0 && pskb_carve_frag_list(skb, shinfo, off - pos, gfp_mask)) { + /* skb_frag_unref() is not needed here as shinfo->nr_frags = 0. 
*/ + if (skb_has_frag_list(skb)) + kfree_skb_list(skb_shinfo(skb)->frag_list); + kfree(data); + return -ENOMEM; } skb_release_data(skb); diff --git a/net/core/skmsg.c b/net/core/skmsg.c index ded2d5227678..118cf1ace43a 100644 --- a/net/core/skmsg.c +++ b/net/core/skmsg.c @@ -7,6 +7,7 @@ #include #include +#include static bool sk_msg_try_coalesce_ok(struct sk_msg *msg, int elem_first_coalesce) { @@ -686,13 +687,74 @@ static struct sk_psock *sk_psock_from_strp(struct strparser *strp) return container_of(parser, struct sk_psock, parser); } -static void sk_psock_verdict_apply(struct sk_psock *psock, - struct sk_buff *skb, int verdict) +static void sk_psock_skb_redirect(struct sk_buff *skb) { struct sk_psock *psock_other; struct sock *sk_other; bool ingress; + sk_other = tcp_skb_bpf_redirect_fetch(skb); + if (unlikely(!sk_other)) { + kfree_skb(skb); + return; + } + psock_other = sk_psock(sk_other); + if (!psock_other || sock_flag(sk_other, SOCK_DEAD) || + !sk_psock_test_state(psock_other, SK_PSOCK_TX_ENABLED)) { + kfree_skb(skb); + return; + } + + ingress = tcp_skb_bpf_ingress(skb); + if ((!ingress && sock_writeable(sk_other)) || + (ingress && + atomic_read(&sk_other->sk_rmem_alloc) <= + sk_other->sk_rcvbuf)) { + if (!ingress) + skb_set_owner_w(skb, sk_other); + skb_queue_tail(&psock_other->ingress_skb, skb); + schedule_work(&psock_other->work); + } else { + kfree_skb(skb); + } +} + +static void sk_psock_tls_verdict_apply(struct sk_buff *skb, int verdict) +{ + switch (verdict) { + case __SK_REDIRECT: + sk_psock_skb_redirect(skb); + break; + case __SK_PASS: + case __SK_DROP: + default: + break; + } +} + +int sk_psock_tls_strp_read(struct sk_psock *psock, struct sk_buff *skb) +{ + struct bpf_prog *prog; + int ret = __SK_PASS; + + rcu_read_lock(); + prog = READ_ONCE(psock->progs.skb_verdict); + if (likely(prog)) { + tcp_skb_bpf_redirect_clear(skb); + ret = sk_psock_bpf_run(psock, prog, skb); + ret = sk_psock_map_verd(ret, tcp_skb_bpf_redirect_fetch(skb)); + } + sk_psock_tls_verdict_apply(skb, ret); + rcu_read_unlock(); + return ret; +} +EXPORT_SYMBOL_GPL(sk_psock_tls_strp_read); + +static void sk_psock_verdict_apply(struct sk_psock *psock, + struct sk_buff *skb, int verdict) +{ + struct sock *sk_other; + switch (verdict) { case __SK_PASS: sk_other = psock->sk; @@ -711,25 +773,8 @@ static void sk_psock_verdict_apply(struct sk_psock *psock, } goto out_free; case __SK_REDIRECT: - sk_other = tcp_skb_bpf_redirect_fetch(skb); - if (unlikely(!sk_other)) - goto out_free; - psock_other = sk_psock(sk_other); - if (!psock_other || sock_flag(sk_other, SOCK_DEAD) || - !sk_psock_test_state(psock_other, SK_PSOCK_TX_ENABLED)) - goto out_free; - ingress = tcp_skb_bpf_ingress(skb); - if ((!ingress && sock_writeable(sk_other)) || - (ingress && - atomic_read(&sk_other->sk_rmem_alloc) <= - sk_other->sk_rcvbuf)) { - if (!ingress) - skb_set_owner_w(skb, sk_other); - skb_queue_tail(&psock_other->ingress_skb, skb); - schedule_work(&psock_other->work); - break; - } - /* fall-through */ + sk_psock_skb_redirect(skb); + break; case __SK_DROP: /* fall-through */ default: @@ -740,11 +785,18 @@ out_free: static void sk_psock_strp_read(struct strparser *strp, struct sk_buff *skb) { - struct sk_psock *psock = sk_psock_from_strp(strp); + struct sk_psock *psock; struct bpf_prog *prog; int ret = __SK_DROP; + struct sock *sk; rcu_read_lock(); + sk = strp->sk; + psock = sk_psock(sk); + if (unlikely(!psock)) { + kfree_skb(skb); + goto out; + } prog = READ_ONCE(psock->progs.skb_verdict); if (likely(prog)) { skb_orphan(skb); @@ -752,8 
+804,9 @@ static void sk_psock_strp_read(struct strparser *strp, struct sk_buff *skb) ret = sk_psock_bpf_run(psock, prog, skb); ret = sk_psock_map_verd(ret, tcp_skb_bpf_redirect_fetch(skb)); } - rcu_read_unlock(); sk_psock_verdict_apply(psock, skb, ret); +out: + rcu_read_unlock(); } static int sk_psock_strp_read_done(struct strparser *strp, int err) @@ -783,9 +836,13 @@ static void sk_psock_strp_data_ready(struct sock *sk) rcu_read_lock(); psock = sk_psock(sk); if (likely(psock)) { - write_lock_bh(&sk->sk_callback_lock); - strp_data_ready(&psock->parser.strp); - write_unlock_bh(&sk->sk_callback_lock); + if (tls_sw_has_ctx_rx(sk)) { + psock->parser.saved_data_ready(sk); + } else { + write_lock_bh(&sk->sk_callback_lock); + strp_data_ready(&psock->parser.strp); + write_unlock_bh(&sk->sk_callback_lock); + } } rcu_read_unlock(); } diff --git a/net/core/sock.c b/net/core/sock.c index 0adf7a9e5a90..919f1a1739e9 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -709,7 +709,7 @@ bool sk_mc_loop(struct sock *sk) return inet6_sk(sk)->mc_loop; #endif } - WARN_ON(1); + WARN_ON_ONCE(1); return true; } EXPORT_SYMBOL(sk_mc_loop); @@ -1679,6 +1679,7 @@ struct sock *sk_alloc(struct net *net, int family, gfp_t priority, cgroup_sk_alloc(&sk->sk_cgrp_data); sock_update_classid(&sk->sk_cgrp_data); sock_update_netprioidx(&sk->sk_cgrp_data); + sk_tx_queue_clear(sk); } return sk; @@ -1836,7 +1837,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority) /* sk->sk_memcg will be populated at accept() time */ newsk->sk_memcg = NULL; - cgroup_sk_alloc(&newsk->sk_cgrp_data); + cgroup_sk_clone(&newsk->sk_cgrp_data); rcu_read_lock(); filter = rcu_dereference(sk->sk_filter); @@ -1895,6 +1896,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority) */ sk_refcnt_debug_inc(newsk); sk_set_socket(newsk, NULL); + sk_tx_queue_clear(newsk); RCU_INIT_POINTER(newsk->sk_wq, NULL); if (newsk->sk_prot->sockets_allocated) @@ -2734,6 +2736,27 @@ int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct * } EXPORT_SYMBOL(sock_no_mmap); +/* + * When a file is received (via SCM_RIGHTS, etc), we must bump the + * various sock-based usage counts. + */ +void __receive_sock(struct file *file) +{ + struct socket *sock; + int error; + + /* + * The resulting value of "error" is ignored here since we only + * need to take action when the file is a socket and testing + * "sock" for NULL is sufficient. 
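/*
 * Standalone illustrative sketch of the mechanism __receive_sock() above
 * reacts to: passing an open file descriptor over an AF_UNIX socket with an
 * SCM_RIGHTS control message.  Hypothetical helper names, most error
 * handling omitted for brevity; not kernel code.
 */
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <unistd.h>

static int send_fd(int sock, int fd)
{
        char iobuf[1] = { 'x' };
        struct iovec io = { .iov_base = iobuf, .iov_len = 1 };
        union {
                char buf[CMSG_SPACE(sizeof(int))];
                struct cmsghdr align;
        } u;
        struct msghdr msg = { 0 };
        struct cmsghdr *cmsg;

        msg.msg_iov = &io;
        msg.msg_iovlen = 1;
        msg.msg_control = u.buf;
        msg.msg_controllen = sizeof(u.buf);

        cmsg = CMSG_FIRSTHDR(&msg);
        cmsg->cmsg_level = SOL_SOCKET;
        cmsg->cmsg_type = SCM_RIGHTS;
        cmsg->cmsg_len = CMSG_LEN(sizeof(int));
        memcpy(CMSG_DATA(cmsg), &fd, sizeof(int));

        return sendmsg(sock, &msg, 0) < 0 ? -1 : 0;
}

static int recv_fd(int sock)
{
        char iobuf[1];
        struct iovec io = { .iov_base = iobuf, .iov_len = 1 };
        union {
                char buf[CMSG_SPACE(sizeof(int))];
                struct cmsghdr align;
        } u;
        struct msghdr msg = { 0 };
        struct cmsghdr *cmsg;
        int fd = -1;

        msg.msg_iov = &io;
        msg.msg_iovlen = 1;
        msg.msg_control = u.buf;
        msg.msg_controllen = sizeof(u.buf);

        if (recvmsg(sock, &msg, 0) < 0)
                return -1;
        cmsg = CMSG_FIRSTHDR(&msg);
        if (cmsg && cmsg->cmsg_level == SOL_SOCKET &&
            cmsg->cmsg_type == SCM_RIGHTS)
                memcpy(&fd, CMSG_DATA(cmsg), sizeof(int));
        return fd;
}

int main(void)
{
        int sv[2], fd;

        if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv) < 0)
                return 1;
        send_fd(sv[0], STDOUT_FILENO);  /* pass our stdout across the socket */
        fd = recv_fd(sv[1]);
        if (fd >= 0) {
                write(fd, "fd received\n", 12);
                close(fd);
        }
        close(sv[0]);
        close(sv[1]);
        return 0;
}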
+ */ + sock = sock_from_file(file, &error); + if (sock) { + sock_update_netprioidx(&sock->sk->sk_cgrp_data); + sock_update_classid(&sock->sk->sk_cgrp_data); + } +} + ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags) { ssize_t res; @@ -3335,6 +3358,16 @@ static void sock_inuse_add(struct net *net, int val) } #endif +static void tw_prot_cleanup(struct timewait_sock_ops *twsk_prot) +{ + if (!twsk_prot) + return; + kfree(twsk_prot->twsk_slab_name); + twsk_prot->twsk_slab_name = NULL; + kmem_cache_destroy(twsk_prot->twsk_slab); + twsk_prot->twsk_slab = NULL; +} + static void req_prot_cleanup(struct request_sock_ops *rsk_prot) { if (!rsk_prot) @@ -3405,7 +3438,7 @@ int proto_register(struct proto *prot, int alloc_slab) prot->slab_flags, NULL); if (prot->twsk_prot->twsk_slab == NULL) - goto out_free_timewait_sock_slab_name; + goto out_free_timewait_sock_slab; } } @@ -3413,15 +3446,15 @@ int proto_register(struct proto *prot, int alloc_slab) ret = assign_proto_idx(prot); if (ret) { mutex_unlock(&proto_list_mutex); - goto out_free_timewait_sock_slab_name; + goto out_free_timewait_sock_slab; } list_add(&prot->node, &proto_list); mutex_unlock(&proto_list_mutex); return ret; -out_free_timewait_sock_slab_name: +out_free_timewait_sock_slab: if (alloc_slab && prot->twsk_prot) - kfree(prot->twsk_prot->twsk_slab_name); + tw_prot_cleanup(prot->twsk_prot); out_free_request_sock_slab: if (alloc_slab) { req_prot_cleanup(prot->rsk_prot); @@ -3445,12 +3478,7 @@ void proto_unregister(struct proto *prot) prot->slab = NULL; req_prot_cleanup(prot->rsk_prot); - - if (prot->twsk_prot != NULL && prot->twsk_prot->twsk_slab != NULL) { - kmem_cache_destroy(prot->twsk_prot->twsk_slab); - kfree(prot->twsk_prot->twsk_slab_name); - prot->twsk_prot->twsk_slab = NULL; - } + tw_prot_cleanup(prot->twsk_prot); } EXPORT_SYMBOL(proto_unregister); diff --git a/net/core/sock_map.c b/net/core/sock_map.c index 8291568b707f..df52061f99f7 100644 --- a/net/core/sock_map.c +++ b/net/core/sock_map.c @@ -71,7 +71,42 @@ int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog) map = __bpf_map_get(f); if (IS_ERR(map)) return PTR_ERR(map); - ret = sock_map_prog_update(map, prog, attr->attach_type); + ret = sock_map_prog_update(map, prog, NULL, attr->attach_type); + fdput(f); + return ret; +} + +int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype) +{ + u32 ufd = attr->target_fd; + struct bpf_prog *prog; + struct bpf_map *map; + struct fd f; + int ret; + + if (attr->attach_flags) + return -EINVAL; + + f = fdget(ufd); + map = __bpf_map_get(f); + if (IS_ERR(map)) + return PTR_ERR(map); + + prog = bpf_prog_get(attr->attach_bpf_fd); + if (IS_ERR(prog)) { + ret = PTR_ERR(prog); + goto put_map; + } + + if (prog->type != ptype) { + ret = -EINVAL; + goto put_prog; + } + + ret = sock_map_prog_update(map, NULL, prog, attr->attach_type); +put_prog: + bpf_prog_put(prog); +put_map: fdput(f); return ret; } @@ -837,11 +872,15 @@ static struct bpf_map *sock_hash_alloc(union bpf_attr *attr) err = -EINVAL; goto free_htab; } + err = bpf_map_charge_init(&htab->map.memory, cost); + if (err) + goto free_htab; htab->buckets = bpf_map_area_alloc(htab->buckets_num * sizeof(struct bpf_htab_bucket), htab->map.numa_node); if (!htab->buckets) { + bpf_map_charge_finish(&htab->map.memory); err = -ENOMEM; goto free_htab; } @@ -861,6 +900,7 @@ static void sock_hash_free(struct bpf_map *map) { struct bpf_htab *htab = container_of(map, struct bpf_htab, map); struct bpf_htab_bucket 
*bucket; + struct hlist_head unlink_list; struct bpf_htab_elem *elem; struct hlist_node *node; int i; @@ -872,13 +912,32 @@ static void sock_hash_free(struct bpf_map *map) synchronize_rcu(); for (i = 0; i < htab->buckets_num; i++) { bucket = sock_hash_select_bucket(htab, i); - hlist_for_each_entry_safe(elem, node, &bucket->head, node) { - hlist_del_rcu(&elem->node); + + /* We are racing with sock_hash_delete_from_link to + * enter the spin-lock critical section. Every socket on + * the list is still linked to sockhash. Since link + * exists, psock exists and holds a ref to socket. That + * lets us to grab a socket ref too. + */ + raw_spin_lock_bh(&bucket->lock); + hlist_for_each_entry(elem, &bucket->head, node) + sock_hold(elem->sk); + hlist_move_list(&bucket->head, &unlink_list); + raw_spin_unlock_bh(&bucket->lock); + + /* Process removed entries out of atomic context to + * block for socket lock before deleting the psock's + * link to sockhash. + */ + hlist_for_each_entry_safe(elem, node, &unlink_list, node) { + hlist_del(&elem->node); lock_sock(elem->sk); rcu_read_lock(); sock_map_unref(elem->sk, elem); rcu_read_unlock(); release_sock(elem->sk); + sock_put(elem->sk); + sock_hash_free_elem(htab, elem); } } @@ -991,27 +1050,32 @@ static struct sk_psock_progs *sock_map_progs(struct bpf_map *map) } int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog, - u32 which) + struct bpf_prog *old, u32 which) { struct sk_psock_progs *progs = sock_map_progs(map); + struct bpf_prog **pprog; if (!progs) return -EOPNOTSUPP; switch (which) { case BPF_SK_MSG_VERDICT: - psock_set_prog(&progs->msg_parser, prog); + pprog = &progs->msg_parser; break; case BPF_SK_SKB_STREAM_PARSER: - psock_set_prog(&progs->skb_parser, prog); + pprog = &progs->skb_parser; break; case BPF_SK_SKB_STREAM_VERDICT: - psock_set_prog(&progs->skb_verdict, prog); + pprog = &progs->skb_verdict; break; default: return -EOPNOTSUPP; } + if (old) + return psock_replace_prog(pprog, prog, old); + + psock_set_prog(pprog, prog); return 0; } diff --git a/net/core/sock_reuseport.c b/net/core/sock_reuseport.c index f3ceec93f392..40829111fe00 100644 --- a/net/core/sock_reuseport.c +++ b/net/core/sock_reuseport.c @@ -112,6 +112,7 @@ static struct sock_reuseport *reuseport_grow(struct sock_reuseport *reuse) more_reuse->prog = reuse->prog; more_reuse->reuseport_id = reuse->reuseport_id; more_reuse->bind_inany = reuse->bind_inany; + more_reuse->has_conns = reuse->has_conns; memcpy(more_reuse->socks, reuse->socks, reuse->num_socks * sizeof(struct sock *)); diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c index 9f9e00ba3ad7..669cbe1609d9 100644 --- a/net/core/sysctl_net_core.c +++ b/net/core/sysctl_net_core.c @@ -277,7 +277,7 @@ static int proc_dointvec_minmax_bpf_enable(struct ctl_table *table, int write, ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos); if (write && !ret) { if (jit_enable < 2 || - (jit_enable == 2 && bpf_dump_raw_ok())) { + (jit_enable == 2 && bpf_dump_raw_ok(current_cred()))) { *(int *)table->data = jit_enable; if (jit_enable == 2) pr_warn("bpf_jit_enable = 2 was set! 
NEVER use this in production, only for JIT debugging!\n"); diff --git a/net/dsa/tag_edsa.c b/net/dsa/tag_edsa.c index e8eaa804ccb9..d6200ff98200 100644 --- a/net/dsa/tag_edsa.c +++ b/net/dsa/tag_edsa.c @@ -13,6 +13,16 @@ #define DSA_HLEN 4 #define EDSA_HLEN 8 +#define FRAME_TYPE_TO_CPU 0x00 +#define FRAME_TYPE_FORWARD 0x03 + +#define TO_CPU_CODE_MGMT_TRAP 0x00 +#define TO_CPU_CODE_FRAME2REG 0x01 +#define TO_CPU_CODE_IGMP_MLD_TRAP 0x02 +#define TO_CPU_CODE_POLICY_TRAP 0x03 +#define TO_CPU_CODE_ARP_MIRROR 0x04 +#define TO_CPU_CODE_POLICY_MIRROR 0x05 + static struct sk_buff *edsa_xmit(struct sk_buff *skb, struct net_device *dev) { struct dsa_port *dp = dsa_slave_to_port(dev); @@ -77,6 +87,8 @@ static struct sk_buff *edsa_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt) { u8 *edsa_header; + int frame_type; + int code; int source_device; int source_port; @@ -91,8 +103,29 @@ static struct sk_buff *edsa_rcv(struct sk_buff *skb, struct net_device *dev, /* * Check that frame type is either TO_CPU or FORWARD. */ - if ((edsa_header[0] & 0xc0) != 0x00 && (edsa_header[0] & 0xc0) != 0xc0) + frame_type = edsa_header[0] >> 6; + + switch (frame_type) { + case FRAME_TYPE_TO_CPU: + code = (edsa_header[1] & 0x6) | ((edsa_header[2] >> 4) & 1); + + /* + * Mark the frame to never egress on any port of the same switch + * unless it's a trapped IGMP/MLD packet, in which case the + * bridge might want to forward it. + */ + if (code != TO_CPU_CODE_IGMP_MLD_TRAP) + skb->offload_fwd_mark = 1; + + break; + + case FRAME_TYPE_FORWARD: + skb->offload_fwd_mark = 1; + break; + + default: return NULL; + } /* * Determine source device and port. @@ -156,8 +189,6 @@ static struct sk_buff *edsa_rcv(struct sk_buff *skb, struct net_device *dev, 2 * ETH_ALEN); } - skb->offload_fwd_mark = 1; - return skb; } diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c index 01588eef0cee..b1b3220917ca 100644 --- a/net/ipv4/fib_semantics.c +++ b/net/ipv4/fib_semantics.c @@ -1100,7 +1100,7 @@ static int fib_check_nh_v4_gw(struct net *net, struct fib_nh *nh, u32 table, if (fl4.flowi4_scope < RT_SCOPE_LINK) fl4.flowi4_scope = RT_SCOPE_LINK; - if (table) + if (table && table != RT_TABLE_MAIN) tbl = fib_get_table(net, table); if (tbl) diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index 1b851fd82613..51673d00bbea 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c @@ -1751,7 +1751,7 @@ struct fib_table *fib_trie_unmerge(struct fib_table *oldtb) while ((l = leaf_walk_rcu(&tp, key)) != NULL) { struct key_vector *local_l = NULL, *local_tp; - hlist_for_each_entry_rcu(fa, &l->leaf, fa_list) { + hlist_for_each_entry(fa, &l->leaf, fa_list) { struct fib_alias *new_fa; if (local_tb->tb_id != fa->tb_id) @@ -2010,7 +2010,8 @@ void fib_info_notify_update(struct net *net, struct nl_info *info) struct hlist_head *head = &net->ipv4.fib_table_hash[h]; struct fib_table *tb; - hlist_for_each_entry_rcu(tb, head, tb_hlist) + hlist_for_each_entry_rcu(tb, head, tb_hlist, + lockdep_rtnl_is_held()) __fib_info_notify_update(net, tb, info); } } diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c index 4de7e962d3da..c840141876bc 100644 --- a/net/ipv4/gre_offload.c +++ b/net/ipv4/gre_offload.c @@ -15,12 +15,12 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb, netdev_features_t features) { int tnl_hlen = skb_inner_mac_header(skb) - skb_transport_header(skb); + bool need_csum, need_recompute_csum, gso_partial; struct sk_buff *segs = ERR_PTR(-EINVAL); u16 mac_offset = skb->mac_header; __be16 protocol = 
skb->protocol; u16 mac_len = skb->mac_len; int gre_offset, outer_hlen; - bool need_csum, gso_partial; if (!skb->encapsulation) goto out; @@ -41,6 +41,7 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb, skb->protocol = skb->inner_protocol; need_csum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_GRE_CSUM); + need_recompute_csum = skb->csum_not_inet; skb->encap_hdr_csum = need_csum; features &= skb->dev->hw_enc_features; @@ -98,7 +99,15 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb, } *(pcsum + 1) = 0; - *pcsum = gso_make_checksum(skb, 0); + if (need_recompute_csum && !skb_is_gso(skb)) { + __wsum csum; + + csum = skb_checksum(skb, gre_offset, + skb->len - gre_offset, 0); + *pcsum = csum_fold(csum); + } else { + *pcsum = gso_make_checksum(skb, 0); + } } while ((skb = skb->next)); out: return segs; diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c index ac95ba78b903..73f46cb5e51d 100644 --- a/net/ipv4/icmp.c +++ b/net/ipv4/icmp.c @@ -427,7 +427,7 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb) ipcm_init(&ipc); inet->tos = ip_hdr(skb)->tos; - sk->sk_mark = mark; + ipc.sockc.mark = mark; daddr = ipc.addr = ip_hdr(skb)->saddr; saddr = fib_compute_spec_dst(skb); @@ -709,10 +709,10 @@ void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info, icmp_param.skb = skb_in; icmp_param.offset = skb_network_offset(skb_in); inet_sk(sk)->tos = tos; - sk->sk_mark = mark; ipcm_init(&ipc); ipc.addr = iph->saddr; ipc.opt = &icmp_param.replyopts.opt; + ipc.sockc.mark = mark; rt = icmp_route_lookup(net, &fl4, skb_in, iph, saddr, tos, mark, type, code, &icmp_param); diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index 5e486895d67c..9745c52f49ca 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c @@ -284,55 +284,12 @@ static inline int sk_reuseport_match(struct inet_bind_bucket *tb, ipv6_only_sock(sk), true, false); } -/* Obtain a reference to a local port for the given sock, - * if snum is zero it means select any available local port. 
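Illustration: the gre_gso_segment() hunk above falls back to computing the GRE checksum itself, summing the payload with skb_checksum() and folding it with csum_fold(). The sketch below is a plain RFC 1071 style fold in userspace C, a simplified stand-in for those helpers rather than the kernel's optimized implementation.

#include <stddef.h>
#include <stdint.h>

/* Ones'-complement sum of a buffer, folded to 16 bits.
 * Simplified sketch; returns the checksum in host byte order.
 */
static uint16_t csum_fold_sketch(const uint8_t *data, size_t len)
{
	uint32_t sum = 0;

	while (len > 1) {
		sum += (uint32_t)data[0] << 8 | data[1];
		data += 2;
		len -= 2;
	}
	if (len)			/* trailing odd byte */
		sum += (uint32_t)data[0] << 8;

	while (sum >> 16)		/* fold carries back into the low 16 bits */
		sum = (sum & 0xffff) + (sum >> 16);

	return (uint16_t)~sum;
}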
- * We try to allocate an odd port (and leave even ports for connect()) - */ -int inet_csk_get_port(struct sock *sk, unsigned short snum) +void inet_csk_update_fastreuse(struct inet_bind_bucket *tb, + struct sock *sk) { - bool reuse = sk->sk_reuse && sk->sk_state != TCP_LISTEN; - struct inet_hashinfo *hinfo = sk->sk_prot->h.hashinfo; - int ret = 1, port = snum; - struct inet_bind_hashbucket *head; - struct net *net = sock_net(sk); - struct inet_bind_bucket *tb = NULL; kuid_t uid = sock_i_uid(sk); - int l3mdev; + bool reuse = sk->sk_reuse && sk->sk_state != TCP_LISTEN; - l3mdev = inet_sk_bound_l3mdev(sk); - - if (!port) { - head = inet_csk_find_open_port(sk, &tb, &port); - if (!head) - return ret; - if (!tb) - goto tb_not_found; - goto success; - } - head = &hinfo->bhash[inet_bhashfn(net, port, - hinfo->bhash_size)]; - spin_lock_bh(&head->lock); - inet_bind_bucket_for_each(tb, &head->chain) - if (net_eq(ib_net(tb), net) && tb->l3mdev == l3mdev && - tb->port == port) - goto tb_found; -tb_not_found: - tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep, - net, head, port, l3mdev); - if (!tb) - goto fail_unlock; -tb_found: - if (!hlist_empty(&tb->owners)) { - if (sk->sk_reuse == SK_FORCE_REUSE) - goto success; - - if ((tb->fastreuse > 0 && reuse) || - sk_reuseport_match(tb, sk)) - goto success; - if (inet_csk_bind_conflict(sk, tb, true, true)) - goto fail_unlock; - } -success: if (hlist_empty(&tb->owners)) { tb->fastreuse = reuse; if (sk->sk_reuseport) { @@ -376,6 +333,58 @@ success: tb->fastreuseport = 0; } } +} + +/* Obtain a reference to a local port for the given sock, + * if snum is zero it means select any available local port. + * We try to allocate an odd port (and leave even ports for connect()) + */ +int inet_csk_get_port(struct sock *sk, unsigned short snum) +{ + bool reuse = sk->sk_reuse && sk->sk_state != TCP_LISTEN; + struct inet_hashinfo *hinfo = sk->sk_prot->h.hashinfo; + int ret = 1, port = snum; + struct inet_bind_hashbucket *head; + struct net *net = sock_net(sk); + struct inet_bind_bucket *tb = NULL; + int l3mdev; + + l3mdev = inet_sk_bound_l3mdev(sk); + + if (!port) { + head = inet_csk_find_open_port(sk, &tb, &port); + if (!head) + return ret; + if (!tb) + goto tb_not_found; + goto success; + } + head = &hinfo->bhash[inet_bhashfn(net, port, + hinfo->bhash_size)]; + spin_lock_bh(&head->lock); + inet_bind_bucket_for_each(tb, &head->chain) + if (net_eq(ib_net(tb), net) && tb->l3mdev == l3mdev && + tb->port == port) + goto tb_found; +tb_not_found: + tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep, + net, head, port, l3mdev); + if (!tb) + goto fail_unlock; +tb_found: + if (!hlist_empty(&tb->owners)) { + if (sk->sk_reuse == SK_FORCE_REUSE) + goto success; + + if ((tb->fastreuse > 0 && reuse) || + sk_reuseport_match(tb, sk)) + goto success; + if (inet_csk_bind_conflict(sk, tb, true, true)) + goto fail_unlock; + } +success: + inet_csk_update_fastreuse(tb, sk); + if (!inet_csk(sk)->icsk_bind_hash) inet_bind_hash(sk, tb, port); WARN_ON(inet_csk(sk)->icsk_bind_hash != tb); diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c index 2bbaaf0c7176..006a34b18537 100644 --- a/net/ipv4/inet_hashtables.c +++ b/net/ipv4/inet_hashtables.c @@ -163,6 +163,7 @@ int __inet_inherit_port(const struct sock *sk, struct sock *child) return -ENOMEM; } } + inet_csk_update_fastreuse(tb, child); } inet_bind_hash(child, tb, port); spin_unlock(&head->lock); diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index b268ee1c1b44..b36c4a3159e5 100644 --- a/net/ipv4/ip_output.c +++ 
b/net/ipv4/ip_output.c @@ -1704,7 +1704,7 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb, sk->sk_protocol = ip_hdr(skb)->protocol; sk->sk_bound_dev_if = arg->bound_dev_if; sk->sk_sndbuf = sysctl_wmem_default; - sk->sk_mark = fl4.flowi4_mark; + ipc.sockc.mark = fl4.flowi4_mark; err = ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base, len, 0, &ipc, &rt, MSG_DONTWAIT); if (unlikely(err)) { diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c index cd4b84310d92..a0b4dc54f8a6 100644 --- a/net/ipv4/ip_tunnel.c +++ b/net/ipv4/ip_tunnel.c @@ -85,9 +85,10 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn, __be32 remote, __be32 local, __be32 key) { - unsigned int hash; struct ip_tunnel *t, *cand = NULL; struct hlist_head *head; + struct net_device *ndev; + unsigned int hash; hash = ip_tunnel_hash(key, remote); head = &itn->tunnels[hash]; @@ -162,8 +163,9 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn, if (t && t->dev->flags & IFF_UP) return t; - if (itn->fb_tunnel_dev && itn->fb_tunnel_dev->flags & IFF_UP) - return netdev_priv(itn->fb_tunnel_dev); + ndev = READ_ONCE(itn->fb_tunnel_dev); + if (ndev && ndev->flags & IFF_UP) + return netdev_priv(ndev); return NULL; } @@ -1245,9 +1247,9 @@ void ip_tunnel_uninit(struct net_device *dev) struct ip_tunnel_net *itn; itn = net_generic(net, tunnel->ip_tnl_net_id); - /* fb_tunnel_dev will be unregisted in net-exit call. */ - if (itn->fb_tunnel_dev != dev) - ip_tunnel_del(itn, netdev_priv(dev)); + ip_tunnel_del(itn, netdev_priv(dev)); + if (itn->fb_tunnel_dev == dev) + WRITE_ONCE(itn->fb_tunnel_dev, NULL); dst_cache_reset(&tunnel->dst_cache); } diff --git a/net/ipv4/nexthop.c b/net/ipv4/nexthop.c index a01f500d6a6b..afa2c5049845 100644 --- a/net/ipv4/nexthop.c +++ b/net/ipv4/nexthop.c @@ -403,7 +403,7 @@ static int nh_check_attr_group(struct net *net, struct nlattr *tb[], struct nexthop_grp *nhg; unsigned int i, j; - if (len & (sizeof(struct nexthop_grp) - 1)) { + if (!len || len & (sizeof(struct nexthop_grp) - 1)) { NL_SET_ERR_MSG(extack, "Invalid length for nexthop group attribute"); return -EINVAL; @@ -1105,6 +1105,9 @@ static struct nexthop *nexthop_create_group(struct net *net, struct nexthop *nh; int i; + if (WARN_ON(!num_nh)) + return ERR_PTR(-EINVAL); + nh = nexthop_alloc(); if (!nh) return ERR_PTR(-ENOMEM); diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c index 535427292194..df6fbefe44d4 100644 --- a/net/ipv4/ping.c +++ b/net/ipv4/ping.c @@ -786,6 +786,9 @@ static int ping_v4_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) inet_sk_flowi_flags(sk), faddr, saddr, 0, 0, sk->sk_uid); + fl4.fl4_icmp_type = user_icmph.type; + fl4.fl4_icmp_code = user_icmph.code; + security_sk_classify_flow(sk, flowi4_to_flowi(&fl4)); rt = ip_route_output_flow(net, &fl4, sk); if (IS_ERR(rt)) { diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c index 0902cb32bbad..c83a5d05aeaa 100644 --- a/net/ipv4/sysctl_net_ipv4.c +++ b/net/ipv4/sysctl_net_ipv4.c @@ -307,24 +307,16 @@ static int proc_tcp_fastopen_key(struct ctl_table *table, int write, struct ctl_table tbl = { .maxlen = ((TCP_FASTOPEN_KEY_LENGTH * 2 * TCP_FASTOPEN_KEY_MAX) + (TCP_FASTOPEN_KEY_MAX * 5)) }; - struct tcp_fastopen_context *ctx; - u32 user_key[TCP_FASTOPEN_KEY_MAX * 4]; - __le32 key[TCP_FASTOPEN_KEY_MAX * 4]; + u32 user_key[TCP_FASTOPEN_KEY_BUF_LENGTH / sizeof(u32)]; + __le32 key[TCP_FASTOPEN_KEY_BUF_LENGTH / sizeof(__le32)]; char *backup_data; - int ret, i = 0, off = 0, n_keys = 0; + int ret, i = 0, off = 0, n_keys; 
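Illustration: the nexthop hunk above now rejects a group attribute that is empty as well as one whose length is not a multiple of the entry size; the mask test only substitutes for a modulo because the entry size is a power of two. A small hedged example of the same validation, with a generic helper name and sizes (not the kernel structures):

#include <stdbool.h>
#include <stddef.h>

/* Returns true when len holds at least one whole entry and nothing extra.
 * entry_size must be a power of two for the mask test to be equivalent to
 * "len % entry_size == 0".
 */
static bool group_len_ok(size_t len, size_t entry_size)
{
	if (len == 0)				/* empty group: nothing to validate */
		return false;
	return (len & (entry_size - 1)) == 0;
}

/* e.g. group_len_ok(24, 8) -> true, group_len_ok(20, 8) -> false, group_len_ok(0, 8) -> false */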
tbl.data = kmalloc(tbl.maxlen, GFP_KERNEL); if (!tbl.data) return -ENOMEM; - rcu_read_lock(); - ctx = rcu_dereference(net->ipv4.tcp_fastopen_ctx); - if (ctx) { - n_keys = tcp_fastopen_context_len(ctx); - memcpy(&key[0], &ctx->key[0], TCP_FASTOPEN_KEY_LENGTH * n_keys); - } - rcu_read_unlock(); - + n_keys = tcp_fastopen_get_cipher(net, NULL, (u64 *)key); if (!n_keys) { memset(&key[0], 0, TCP_FASTOPEN_KEY_LENGTH); n_keys = 1; diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index fe3cdeddd097..01ddfb4156e4 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -2631,6 +2631,9 @@ int tcp_disconnect(struct sock *sk, int flags) tp->window_clamp = 0; tp->delivered = 0; tp->delivered_ce = 0; + if (icsk->icsk_ca_ops->release) + icsk->icsk_ca_ops->release(sk); + memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv)); tcp_set_ca_state(sk, TCP_CA_Open); tp->is_sack_reneg = 0; tcp_clear_retrans(tp); @@ -3085,10 +3088,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level, #ifdef CONFIG_TCP_MD5SIG case TCP_MD5SIG: case TCP_MD5SIG_EXT: - if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) - err = tp->af_specific->md5_parse(sk, optname, optval, optlen); - else - err = -EINVAL; + err = tp->af_specific->md5_parse(sk, optname, optval, optlen); break; #endif case TCP_USER_TIMEOUT: @@ -3527,22 +3527,14 @@ static int do_tcp_getsockopt(struct sock *sk, int level, return 0; case TCP_FASTOPEN_KEY: { - __u8 key[TCP_FASTOPEN_KEY_BUF_LENGTH]; - struct tcp_fastopen_context *ctx; - unsigned int key_len = 0; + u64 key[TCP_FASTOPEN_KEY_BUF_LENGTH / sizeof(u64)]; + unsigned int key_len; if (get_user(len, optlen)) return -EFAULT; - rcu_read_lock(); - ctx = rcu_dereference(icsk->icsk_accept_queue.fastopenq.ctx); - if (ctx) { - key_len = tcp_fastopen_context_len(ctx) * - TCP_FASTOPEN_KEY_LENGTH; - memcpy(&key[0], &ctx->key[0], key_len); - } - rcu_read_unlock(); - + key_len = tcp_fastopen_get_cipher(net, icsk, key) * + TCP_FASTOPEN_KEY_LENGTH; len = min_t(unsigned int, len, key_len); if (put_user(len, optlen)) return -EFAULT; @@ -3844,10 +3836,13 @@ EXPORT_SYMBOL(tcp_md5_hash_skb_data); int tcp_md5_hash_key(struct tcp_md5sig_pool *hp, const struct tcp_md5sig_key *key) { + u8 keylen = READ_ONCE(key->keylen); /* paired with WRITE_ONCE() in tcp_md5_do_add */ struct scatterlist sg; - sg_init_one(&sg, key->key, key->keylen); - ahash_request_set_crypt(hp->md5_req, &sg, NULL, key->keylen); + sg_init_one(&sg, key->key, keylen); + ahash_request_set_crypt(hp->md5_req, &sg, NULL, keylen); + + /* tcp_md5_do_add() might change key->key under us */ return crypto_ahash_update(hp->md5_req); } EXPORT_SYMBOL(tcp_md5_hash_key); diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c index 69b025408390..ad9f38202731 100644 --- a/net/ipv4/tcp_bpf.c +++ b/net/ipv4/tcp_bpf.c @@ -96,6 +96,9 @@ int __tcp_bpf_recvmsg(struct sock *sk, struct sk_psock *psock, } while (i != msg_rx->sg.end); if (unlikely(peek)) { + if (msg_rx == list_last_entry(&psock->ingress_msg, + struct sk_msg, list)) + break; msg_rx = list_next_entry(msg_rx, list); continue; } diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c index c445a81d144e..d7a1f2ef6c52 100644 --- a/net/ipv4/tcp_cong.c +++ b/net/ipv4/tcp_cong.c @@ -197,7 +197,7 @@ static void tcp_reinit_congestion_control(struct sock *sk, icsk->icsk_ca_setsockopt = 1; memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv)); - if (sk->sk_state != TCP_CLOSE) + if (!((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) tcp_init_congestion_control(sk); } diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c index 
1b3d032a4df2..ee6c38a73325 100644 --- a/net/ipv4/tcp_cubic.c +++ b/net/ipv4/tcp_cubic.c @@ -404,6 +404,8 @@ static void hystart_update(struct sock *sk, u32 delay) if (hystart_detect & HYSTART_DELAY) { /* obtain the minimum delay of more than sampling packets */ + if (ca->curr_rtt > delay) + ca->curr_rtt = delay; if (ca->sample_cnt < HYSTART_MIN_SAMPLES) { if (ca->curr_rtt == 0 || ca->curr_rtt > delay) ca->curr_rtt = delay; diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c index a915ade0c818..a9971e41f31b 100644 --- a/net/ipv4/tcp_fastopen.c +++ b/net/ipv4/tcp_fastopen.c @@ -108,6 +108,29 @@ out: return err; } +int tcp_fastopen_get_cipher(struct net *net, struct inet_connection_sock *icsk, + u64 *key) +{ + struct tcp_fastopen_context *ctx; + int n_keys = 0, i; + + rcu_read_lock(); + if (icsk) + ctx = rcu_dereference(icsk->icsk_accept_queue.fastopenq.ctx); + else + ctx = rcu_dereference(net->ipv4.tcp_fastopen_ctx); + if (ctx) { + n_keys = tcp_fastopen_context_len(ctx); + for (i = 0; i < n_keys; i++) { + put_unaligned_le64(ctx->key[i].key[0], key + (i * 2)); + put_unaligned_le64(ctx->key[i].key[1], key + (i * 2) + 1); + } + } + rcu_read_unlock(); + + return n_keys; +} + static bool __tcp_fastopen_cookie_gen_cipher(struct request_sock *req, struct sk_buff *syn, const siphash_key_t *key, diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 677facbeed26..ab5358281000 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -260,7 +260,8 @@ static void tcp_ecn_accept_cwr(struct sock *sk, const struct sk_buff *skb) * cwnd may be very low (even just 1 packet), so we should ACK * immediately. */ - inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_NOW; + if (TCP_SKB_CB(skb)->seq != TCP_SKB_CB(skb)->end_seq) + inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_NOW; } } @@ -2943,6 +2944,8 @@ static bool tcp_ack_update_rtt(struct sock *sk, const int flag, u32 delta = tcp_time_stamp(tp) - tp->rx_opt.rcv_tsecr; if (likely(delta < INT_MAX / (USEC_PER_SEC / TCP_TS_HZ))) { + if (!delta) + delta = 1; seq_rtt_us = delta * (USEC_PER_SEC / TCP_TS_HZ); ca_rtt_us = seq_rtt_us; } @@ -3504,10 +3507,8 @@ static void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq) } } -/* This routine deals with acks during a TLP episode. - * We mark the end of a TLP episode on receiving TLP dupack or when - * ack is after tlp_high_seq. - * Ref: loss detection algorithm in draft-dukkipati-tcpm-tcp-loss-probe. +/* This routine deals with acks during a TLP episode and ends an episode by + * resetting tlp_high_seq. Ref: TLP algorithm in draft-ietf-tcpm-rack */ static void tcp_process_tlp_ack(struct sock *sk, u32 ack, int flag) { @@ -3516,7 +3517,10 @@ static void tcp_process_tlp_ack(struct sock *sk, u32 ack, int flag) if (before(ack, tp->tlp_high_seq)) return; - if (flag & FLAG_DSACKING_ACK) { + if (!tp->tlp_retrans) { + /* TLP of new data has been acknowledged */ + tp->tlp_high_seq = 0; + } else if (flag & FLAG_DSACKING_ACK) { /* This DSACK means original and TLP probe arrived; no loss */ tp->tlp_high_seq = 0; } else if (after(ack, tp->tlp_high_seq)) { @@ -3682,6 +3686,15 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) tcp_in_ack_event(sk, ack_ev_flags); } + /* This is a deviation from RFC3168 since it states that: + * "When the TCP data sender is ready to set the CWR bit after reducing + * the congestion window, it SHOULD set the CWR bit only on the first + * new data packet that it transmits." 
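Illustration: the CUBIC HyStart hunk above makes curr_rtt track the minimum delay seen over the first few ACKs of a round before it is compared against the threshold. The sketch below reduces that bookkeeping to a standalone helper; the structure, field names and sample count are ours, chosen only to mirror the idea.

#include <stdint.h>

#define MIN_SAMPLES 8	/* illustrative stand-in for the kernel's sample count */

struct rtt_probe {
	uint32_t curr_rtt;	/* minimum delay of the current round, 0 = unset */
	uint8_t  sample_cnt;
};

/* Feed one per-ACK delay sample; returns the running minimum once enough
 * samples have been collected, or 0 while still sampling.
 */
static uint32_t hystart_sample(struct rtt_probe *p, uint32_t delay)
{
	if (p->curr_rtt == 0 || p->curr_rtt > delay)
		p->curr_rtt = delay;

	if (++p->sample_cnt < MIN_SAMPLES)
		return 0;
	return p->curr_rtt;
}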
+ * We accept CWR on pure ACKs to be more robust + * with widely-deployed TCP implementations that do this. + */ + tcp_ecn_accept_cwr(sk, skb); + /* We passed data and got it acked, remove any soft error * log. Something worked... */ @@ -4554,6 +4567,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb) if (unlikely(tcp_try_rmem_schedule(sk, skb, skb->truesize))) { NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFODROP); + sk->sk_data_ready(sk); tcp_drop(sk, skb); return; } @@ -4587,7 +4601,11 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb) if (tcp_ooo_try_coalesce(sk, tp->ooo_last_skb, skb, &fragstolen)) { coalesce_done: - tcp_grow_window(sk, skb); + /* For non sack flows, do not grow window to force DUPACK + * and trigger fast retransmit. + */ + if (tcp_is_sack(tp)) + tcp_grow_window(sk, skb); kfree_skb_partial(skb, fragstolen); skb = NULL; goto add_sack; @@ -4671,7 +4689,11 @@ add_sack: tcp_sack_new_ofo_skb(sk, seq, end_seq); end: if (skb) { - tcp_grow_window(sk, skb); + /* For non sack flows, do not grow window to force DUPACK + * and trigger fast retransmit. + */ + if (tcp_is_sack(tp)) + tcp_grow_window(sk, skb); skb_condense(skb); skb_set_owner_r(skb, sk); } @@ -4771,8 +4793,6 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb) skb_dst_drop(skb); __skb_pull(skb, tcp_hdr(skb)->doff * 4); - tcp_ecn_accept_cwr(sk, skb); - tp->rx_opt.dsack = 0; /* Queue data for delivery to the user. @@ -4791,6 +4811,7 @@ queue_and_out: sk_forced_mem_schedule(sk, skb->truesize); else if (tcp_try_rmem_schedule(sk, skb, skb->truesize)) { NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRCVQDROP); + sk->sk_data_ready(sk); goto drop; } diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index eda64871f983..35f963690a70 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -1077,9 +1077,18 @@ int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr, key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen); if (key) { - /* Pre-existing entry - just update that one. */ + /* Pre-existing entry - just update that one. + * Note that the key might be used concurrently. + */ memcpy(key->key, newkey, newkeylen); - key->keylen = newkeylen; + + /* Pairs with READ_ONCE() in tcp_md5_hash_key(). + * Also note that a reader could catch new key->keylen value + * but old key->key[], this is the reason we use __GFP_ZERO + * at sock_kmalloc() time below these lines. + */ + WRITE_ONCE(key->keylen, newkeylen); + return 0; } @@ -1095,7 +1104,7 @@ int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr, rcu_assign_pointer(tp->md5sig_info, md5sig); } - key = sock_kmalloc(sk, sizeof(*key), gfp); + key = sock_kmalloc(sk, sizeof(*key), gfp | __GFP_ZERO); if (!key) return -ENOMEM; if (!tcp_alloc_md5sig_pool()) { diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index c8d03c1b4c6b..4407193bd702 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -662,7 +662,8 @@ static unsigned int tcp_synack_options(const struct sock *sk, unsigned int mss, struct sk_buff *skb, struct tcp_out_options *opts, const struct tcp_md5sig_key *md5, - struct tcp_fastopen_cookie *foc) + struct tcp_fastopen_cookie *foc, + enum tcp_synack_type synack_type) { struct inet_request_sock *ireq = inet_rsk(req); unsigned int remaining = MAX_TCP_OPTION_SPACE; @@ -677,7 +678,8 @@ static unsigned int tcp_synack_options(const struct sock *sk, * rather than TS in order to fit in better with old, * buggy kernels, but that was deemed to be unnecessary. 
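Illustration: the MD5-key hunks above pair a WRITE_ONCE() on key->keylen with a READ_ONCE() in tcp_md5_hash_key() so the length is read exactly once and never torn, and they zero-allocate the key because ordering against the memcpy() is still not guaranteed. The closest portable analogue is a relaxed C11 atomic, shown below as a userspace sketch with our own type names:

#include <stdatomic.h>
#include <stdint.h>
#include <string.h>

struct md5_key_sketch {
	uint8_t key[80];		/* zeroed at allocation, as the patch does with __GFP_ZERO */
	_Atomic uint8_t keylen;		/* may be updated while a reader is hashing */
};

/* Writer: copy the key bytes, then publish the new length in one store. */
static void key_update(struct md5_key_sketch *k, const uint8_t *nk, uint8_t nlen)
{
	memcpy(k->key, nk, nlen);
	atomic_store_explicit(&k->keylen, nlen, memory_order_relaxed);
}

/* Reader: snapshot the length once and use that value for the whole hash,
 * mirroring READ_ONCE(); with relaxed ordering the bytes may still be the
 * old key, which is tolerable only because the buffer is pre-zeroed.
 */
static uint8_t key_len_snapshot(struct md5_key_sketch *k)
{
	return atomic_load_explicit(&k->keylen, memory_order_relaxed);
}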
*/ - ireq->tstamp_ok &= !ireq->sack_ok; + if (synack_type != TCP_SYNACK_COOKIE) + ireq->tstamp_ok &= !ireq->sack_ok; } #endif @@ -2562,6 +2564,11 @@ void tcp_send_loss_probe(struct sock *sk) int pcount; int mss = tcp_current_mss(sk); + /* At most one outstanding TLP */ + if (tp->tlp_high_seq) + goto rearm_timer; + + tp->tlp_retrans = 0; skb = tcp_send_head(sk); if (skb && tcp_snd_wnd_test(tp, skb, mss)) { pcount = tp->packets_out; @@ -2579,10 +2586,6 @@ void tcp_send_loss_probe(struct sock *sk) return; } - /* At most one outstanding TLP retransmission. */ - if (tp->tlp_high_seq) - goto rearm_timer; - if (skb_still_in_host_queue(sk, skb)) goto rearm_timer; @@ -2604,10 +2607,12 @@ void tcp_send_loss_probe(struct sock *sk) if (__tcp_retransmit_skb(sk, skb, 1)) goto rearm_timer; + tp->tlp_retrans = 1; + +probe_sent: /* Record snd_nxt for loss detection. */ tp->tlp_high_seq = tp->snd_nxt; -probe_sent: NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPLOSSPROBES); /* Reset s.t. tcp_rearm_rto will restart timer from now */ inet_csk(sk)->icsk_pending = 0; @@ -3326,7 +3331,7 @@ struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst, #endif skb_set_hash(skb, tcp_rsk(req)->txhash, PKT_HASH_TYPE_L4); tcp_header_size = tcp_synack_options(sk, req, mss, skb, &opts, md5, - foc) + sizeof(*th); + foc, synack_type) + sizeof(*th); skb_push(skb, tcp_header_size); skb_reset_transport_header(skb); diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index f3b7cb725c1b..5d016bbdf16e 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -413,7 +413,7 @@ static struct sock *udp4_lib_lookup2(struct net *net, struct udp_hslot *hslot2, struct sk_buff *skb) { - struct sock *sk, *result; + struct sock *sk, *result, *reuseport_result; int score, badness; u32 hash = 0; @@ -423,17 +423,20 @@ static struct sock *udp4_lib_lookup2(struct net *net, score = compute_score(sk, net, saddr, sport, daddr, hnum, dif, sdif); if (score > badness) { + reuseport_result = NULL; + if (sk->sk_reuseport && sk->sk_state != TCP_ESTABLISHED) { hash = udp_ehashfn(net, daddr, hnum, saddr, sport); - result = reuseport_select_sock(sk, hash, skb, - sizeof(struct udphdr)); - if (result && !reuseport_has_conns(sk, false)) - return result; + reuseport_result = reuseport_select_sock(sk, hash, skb, + sizeof(struct udphdr)); + if (reuseport_result && !reuseport_has_conns(sk, false)) + return reuseport_result; } + + result = reuseport_result ? 
: sk; badness = score; - result = sk; } } return result; @@ -2045,7 +2048,7 @@ static int udp_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb) /* * UDP-Lite specific tests, ignored on UDP sockets */ - if ((is_udplite & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) { + if ((up->pcflag & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) { /* * MIB statistics other than incrementing the error count are diff --git a/net/ipv6/anycast.c b/net/ipv6/anycast.c index fed91ab7ec46..cf3a88a10ddd 100644 --- a/net/ipv6/anycast.c +++ b/net/ipv6/anycast.c @@ -183,7 +183,7 @@ int ipv6_sock_ac_drop(struct sock *sk, int ifindex, const struct in6_addr *addr) return 0; } -void ipv6_sock_ac_close(struct sock *sk) +void __ipv6_sock_ac_close(struct sock *sk) { struct ipv6_pinfo *np = inet6_sk(sk); struct net_device *dev = NULL; @@ -191,10 +191,7 @@ void ipv6_sock_ac_close(struct sock *sk) struct net *net = sock_net(sk); int prev_index; - if (!np->ipv6_ac_list) - return; - - rtnl_lock(); + ASSERT_RTNL(); pac = np->ipv6_ac_list; np->ipv6_ac_list = NULL; @@ -211,6 +208,16 @@ void ipv6_sock_ac_close(struct sock *sk) sock_kfree_s(sk, pac, sizeof(*pac)); pac = next; } +} + +void ipv6_sock_ac_close(struct sock *sk) +{ + struct ipv6_pinfo *np = inet6_sk(sk); + + if (!np->ipv6_ac_list) + return; + rtnl_lock(); + __ipv6_sock_ac_close(sk); rtnl_unlock(); } diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c index 62c997201970..7d3a3894f785 100644 --- a/net/ipv6/icmp.c +++ b/net/ipv6/icmp.c @@ -535,7 +535,6 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info, if (!sk) goto out_bh_enable; - sk->sk_mark = mark; np = inet6_sk(sk); if (!icmpv6_xrlim_allow(sk, type, &fl6)) @@ -552,6 +551,7 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info, fl6.flowi6_oif = np->ucast_oif; ipcm6_init_sk(&ipc6, np); + ipc6.sockc.mark = mark; fl6.flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6.flowlabel); dst = icmpv6_route_lookup(net, skb, sk, &fl6); @@ -720,7 +720,6 @@ static void icmpv6_echo_reply(struct sk_buff *skb) sk = icmpv6_xmit_lock(net); if (!sk) goto out_bh_enable; - sk->sk_mark = mark; np = inet6_sk(sk); if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr)) @@ -748,6 +747,7 @@ static void icmpv6_echo_reply(struct sk_buff *skb) ipcm6_init_sk(&ipc6, np); ipc6.hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst); ipc6.tclass = ipv6_get_dsfield(ipv6_hdr(skb)); + ipc6.sockc.mark = mark; if (ip6_append_data(sk, icmpv6_getfrag, &msg, skb->len + sizeof(struct icmp6hdr), diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index 9ec05a1df5e1..44876509d215 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c @@ -127,6 +127,7 @@ static struct ip6_tnl *ip6gre_tunnel_lookup(struct net_device *dev, gre_proto == htons(ETH_P_ERSPAN2)) ? 
ARPHRD_ETHER : ARPHRD_IP6GRE; int score, cand_score = 4; + struct net_device *ndev; for_each_ip_tunnel_rcu(t, ign->tunnels_r_l[h0 ^ h1]) { if (!ipv6_addr_equal(local, &t->parms.laddr) || @@ -238,9 +239,9 @@ static struct ip6_tnl *ip6gre_tunnel_lookup(struct net_device *dev, if (t && t->dev->flags & IFF_UP) return t; - dev = ign->fb_tunnel_dev; - if (dev && dev->flags & IFF_UP) - return netdev_priv(dev); + ndev = READ_ONCE(ign->fb_tunnel_dev); + if (ndev && ndev->flags & IFF_UP) + return netdev_priv(ndev); return NULL; } @@ -413,6 +414,8 @@ static void ip6gre_tunnel_uninit(struct net_device *dev) ip6gre_tunnel_unlink_md(ign, t); ip6gre_tunnel_unlink(ign, t); + if (ign->fb_tunnel_dev == dev) + WRITE_ONCE(ign->fb_tunnel_dev, NULL); dst_cache_reset(&t->dst_cache); dev_put(dev); } @@ -1557,17 +1560,18 @@ static void ip6gre_destroy_tunnels(struct net *net, struct list_head *head) static int __net_init ip6gre_init_net(struct net *net) { struct ip6gre_net *ign = net_generic(net, ip6gre_net_id); + struct net_device *ndev; int err; if (!net_has_fallback_tunnels(net)) return 0; - ign->fb_tunnel_dev = alloc_netdev(sizeof(struct ip6_tnl), "ip6gre0", - NET_NAME_UNKNOWN, - ip6gre_tunnel_setup); - if (!ign->fb_tunnel_dev) { + ndev = alloc_netdev(sizeof(struct ip6_tnl), "ip6gre0", + NET_NAME_UNKNOWN, ip6gre_tunnel_setup); + if (!ndev) { err = -ENOMEM; goto err_alloc_dev; } + ign->fb_tunnel_dev = ndev; dev_net_set(ign->fb_tunnel_dev, net); /* FB netdevice is special: we have one, and only one per netns. * Allowing to move it to another netns is clearly unsafe. @@ -1587,7 +1591,7 @@ static int __net_init ip6gre_init_net(struct net *net) return 0; err_reg_dev: - free_netdev(ign->fb_tunnel_dev); + free_netdev(ndev); err_alloc_dev: return err; } diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index b5dd20c4599b..8dcf7bacc99a 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c @@ -860,7 +860,15 @@ int ip6_tnl_rcv(struct ip6_tnl *t, struct sk_buff *skb, struct metadata_dst *tun_dst, bool log_ecn_err) { - return __ip6_tnl_rcv(t, skb, tpi, tun_dst, ip6ip6_dscp_ecn_decapsulate, + int (*dscp_ecn_decapsulate)(const struct ip6_tnl *t, + const struct ipv6hdr *ipv6h, + struct sk_buff *skb); + + dscp_ecn_decapsulate = ip6ip6_dscp_ecn_decapsulate; + if (tpi->proto == htons(ETH_P_IP)) + dscp_ecn_decapsulate = ip4ip6_dscp_ecn_decapsulate; + + return __ip6_tnl_rcv(t, skb, tpi, tun_dst, dscp_ecn_decapsulate, log_ecn_err); } EXPORT_SYMBOL(ip6_tnl_rcv); diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index 5af97b4f5df3..5352c7e68c42 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c @@ -205,6 +205,7 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, fl6_free_socklist(sk); __ipv6_sock_mc_close(sk); + __ipv6_sock_ac_close(sk); /* * Sock is moving from IPv6 to IPv4 (sk_prot), so diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c index eaa4c2cc2fbb..c875c9b6edbe 100644 --- a/net/ipv6/mcast.c +++ b/net/ipv6/mcast.c @@ -2618,6 +2618,7 @@ void ipv6_mc_destroy_dev(struct inet6_dev *idev) idev->mc_list = i->next; write_unlock_bh(&idev->lock); + ip6_mc_clear_src(i); ma_put(i); write_lock_bh(&idev->lock); } diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c index 409e79b84a83..6d0e942d082d 100644 --- a/net/ipv6/netfilter.c +++ b/net/ipv6/netfilter.c @@ -245,9 +245,6 @@ static const struct nf_ipv6_ops ipv6ops = { .route_input = ip6_route_input, .fragment = ip6_fragment, .reroute = nf_ip6_reroute, -#if IS_MODULE(CONFIG_IPV6) && 
IS_ENABLED(CONFIG_NF_DEFRAG_IPV6) - .br_defrag = nf_ct_frag6_gather, -#endif #if IS_MODULE(CONFIG_IPV6) .br_fragment = br_ip6_fragment, #endif diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 3b4af0a8bca6..46df6345bb99 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -431,9 +431,12 @@ void fib6_select_path(const struct net *net, struct fib6_result *res, struct fib6_info *sibling, *next_sibling; struct fib6_info *match = res->f6i; - if ((!match->fib6_nsiblings && !match->nh) || have_oif_match) + if (!match->nh && (!match->fib6_nsiblings || have_oif_match)) goto out; + if (match->nh && have_oif_match && res->nh) + return; + /* We might have already computed the hash for ICMPv6 errors. In such * case it will always be non-zero. Otherwise now is the time to do it. */ @@ -3406,7 +3409,7 @@ static bool fib6_is_reject(u32 flags, struct net_device *dev, int addr_type) if ((flags & RTF_REJECT) || (dev && (dev->flags & IFF_LOOPBACK) && !(addr_type & IPV6_ADDR_LOOPBACK) && - !(flags & RTF_LOCAL))) + !(flags & (RTF_ANYCAST | RTF_LOCAL)))) return true; return false; @@ -3683,14 +3686,14 @@ static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg, rt->fib6_src.plen = cfg->fc_src_len; #endif if (nh) { - if (!nexthop_get(nh)) { - NL_SET_ERR_MSG(extack, "Nexthop has been deleted"); - goto out; - } if (rt->fib6_src.plen) { NL_SET_ERR_MSG(extack, "Nexthops can not be used with source routing"); goto out; } + if (!nexthop_get(nh)) { + NL_SET_ERR_MSG(extack, "Nexthop has been deleted"); + goto out; + } rt->nh = nh; fib6_nh = nexthop_fib6_nh(rt->nh); } else { diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c index ec8fcfc60a27..73842054bfe6 100644 --- a/net/ipv6/sysctl_net_ipv6.c +++ b/net/ipv6/sysctl_net_ipv6.c @@ -21,6 +21,7 @@ #include #endif +static int two = 2; static int flowlabel_reflect_max = 0x7; static int auto_flowlabels_min; static int auto_flowlabels_max = IP6_AUTO_FLOW_LABEL_MAX; @@ -151,7 +152,7 @@ static struct ctl_table ipv6_table_template[] = { .mode = 0644, .proc_handler = proc_rt6_multipath_hash_policy, .extra1 = SYSCTL_ZERO, - .extra2 = SYSCTL_ONE, + .extra2 = &two, }, { .procname = "seg6_flowlabel", diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 9fec580c968e..6762430280f5 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -148,7 +148,7 @@ static struct sock *udp6_lib_lookup2(struct net *net, int dif, int sdif, struct udp_hslot *hslot2, struct sk_buff *skb) { - struct sock *sk, *result; + struct sock *sk, *result, *reuseport_result; int score, badness; u32 hash = 0; @@ -158,17 +158,20 @@ static struct sock *udp6_lib_lookup2(struct net *net, score = compute_score(sk, net, saddr, sport, daddr, hnum, dif, sdif); if (score > badness) { + reuseport_result = NULL; + if (sk->sk_reuseport && sk->sk_state != TCP_ESTABLISHED) { hash = udp6_ehashfn(net, daddr, hnum, saddr, sport); - result = reuseport_select_sock(sk, hash, skb, - sizeof(struct udphdr)); - if (result && !reuseport_has_conns(sk, false)) - return result; + reuseport_result = reuseport_select_sock(sk, hash, skb, + sizeof(struct udphdr)); + if (reuseport_result && !reuseport_has_conns(sk, false)) + return reuseport_result; } - result = sk; + + result = reuseport_result ? : sk; badness = score; } } @@ -643,7 +646,7 @@ static int udpv6_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb) /* * UDP-Lite specific tests, ignored on UDP sockets (see net/ipv4/udp.c). 
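Illustration: the UDP lookup hunks above pick the reuseport socket when one was selected and otherwise fall back with result = reuseport_result ? : sk;. The empty middle operand is the GNU C "a ?: b" extension, equivalent to a ? a : b with a evaluated only once; a minimal standalone example (GCC/Clang only, not ISO C):

#include <stdio.h>

int main(void)
{
	const char *reuseport_result = NULL;	/* e.g. no reuseport socket selected */
	const char *sk = "listening socket";

	/* With the GNU extension, a ?: b means a ? a : b, so a NULL
	 * reuseport result falls through to the plain socket.
	 */
	const char *result = reuseport_result ? : sk;

	printf("%s\n", result);			/* prints "listening socket" */
	return 0;
}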
*/ - if ((is_udplite & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) { + if ((up->pcflag & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) { if (up->pcrlen == 0) { /* full coverage was set */ net_dbg_ratelimited("UDPLITE6: partial coverage %d while full coverage %d requested\n", diff --git a/net/key/af_key.c b/net/key/af_key.c index b67ed3a8486c..979c579afc63 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c @@ -2400,7 +2400,7 @@ static int pfkey_spddelete(struct sock *sk, struct sk_buff *skb, const struct sa return err; } - xp = xfrm_policy_bysel_ctx(net, DUMMY_MARK, 0, XFRM_POLICY_TYPE_MAIN, + xp = xfrm_policy_bysel_ctx(net, &dummy_mark, 0, XFRM_POLICY_TYPE_MAIN, pol->sadb_x_policy_dir - 1, &sel, pol_ctx, 1, &err); security_xfrm_policy_free(pol_ctx); @@ -2651,7 +2651,7 @@ static int pfkey_spdget(struct sock *sk, struct sk_buff *skb, const struct sadb_ return -EINVAL; delete = (hdr->sadb_msg_type == SADB_X_SPDDELETE2); - xp = xfrm_policy_byid(net, DUMMY_MARK, 0, XFRM_POLICY_TYPE_MAIN, + xp = xfrm_policy_byid(net, &dummy_mark, 0, XFRM_POLICY_TYPE_MAIN, dir, pol->sadb_x_policy_id, delete, &err); if (xp == NULL) return -ENOENT; diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index 6dc3bfa12b1e..95805a6331be 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c @@ -1030,6 +1030,7 @@ static void l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb, /* Queue the packet to IP for output */ skb->ignore_df = 1; + skb_dst_drop(skb); #if IS_ENABLED(CONFIG_IPV6) if (l2tp_sk_is_v6(tunnel->sock)) error = inet6_csk_xmit(tunnel->sock, skb, NULL); @@ -1101,10 +1102,6 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len goto out_unlock; } - /* Get routing info from the tunnel socket */ - skb_dst_drop(skb); - skb_dst_set(skb, sk_dst_check(sk, 0)); - inet = inet_sk(sk); fl = &inet->cork.fl; switch (tunnel->encap) { diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c index c74f44dfaa22..5abb7f9b7ee5 100644 --- a/net/llc/af_llc.c +++ b/net/llc/af_llc.c @@ -273,6 +273,10 @@ static int llc_ui_autobind(struct socket *sock, struct sockaddr_llc *addr) if (!sock_flag(sk, SOCK_ZAPPED)) goto out; + if (!addr->sllc_arphrd) + addr->sllc_arphrd = ARPHRD_ETHER; + if (addr->sllc_arphrd != ARPHRD_ETHER) + goto out; rc = -ENODEV; if (sk->sk_bound_dev_if) { llc->dev = dev_get_by_index(&init_net, sk->sk_bound_dev_if); @@ -328,7 +332,9 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen) if (unlikely(!sock_flag(sk, SOCK_ZAPPED) || addrlen != sizeof(*addr))) goto out; rc = -EAFNOSUPPORT; - if (unlikely(addr->sllc_family != AF_LLC)) + if (!addr->sllc_arphrd) + addr->sllc_arphrd = ARPHRD_ETHER; + if (unlikely(addr->sllc_family != AF_LLC || addr->sllc_arphrd != ARPHRD_ETHER)) goto out; dprintk("%s: binding %02X\n", __func__, addr->sllc_sap); rc = -ENODEV; @@ -336,8 +342,6 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen) if (sk->sk_bound_dev_if) { llc->dev = dev_get_by_index_rcu(&init_net, sk->sk_bound_dev_if); if (llc->dev) { - if (!addr->sllc_arphrd) - addr->sllc_arphrd = llc->dev->type; if (is_zero_ether_addr(addr->sllc_mac)) memcpy(addr->sllc_mac, llc->dev->dev_addr, IFHWADDRLEN); diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 0daaf7e37a21..a9dda5c228f6 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -2140,6 +2140,7 @@ static int ieee80211_leave_mesh(struct wiphy *wiphy, struct net_device *dev) ieee80211_stop_mesh(sdata); mutex_lock(&sdata->local->mtx); 
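Illustration: the af_llc hunks above treat a zero sllc_arphrd as ARPHRD_ETHER and then refuse anything that is not Ethernet before binding. The same normalise-then-validate shape, reduced to a hedged standalone helper with an illustrative constant:

#include <stdbool.h>
#include <stdint.h>

#define ARPHRD_ETHER_SKETCH 1	/* stand-in for the uapi ARPHRD_ETHER value */

/* Accept the address only if the caller left the hardware type unset
 * (defaulted to Ethernet) or explicitly asked for Ethernet.
 */
static bool llc_arphrd_ok(uint16_t *arphrd)
{
	if (*arphrd == 0)
		*arphrd = ARPHRD_ETHER_SKETCH;	/* legacy callers that never set it */
	return *arphrd == ARPHRD_ETHER_SKETCH;
}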
ieee80211_vif_release_channel(sdata); + kfree(sdata->u.mesh.ie); mutex_unlock(&sdata->local->mtx); return 0; diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c index 117519bf33d6..aca608ae313f 100644 --- a/net/mac80211/mesh_pathtbl.c +++ b/net/mac80211/mesh_pathtbl.c @@ -521,6 +521,7 @@ static void mesh_path_free_rcu(struct mesh_table *tbl, del_timer_sync(&mpath->timer); atomic_dec(&sdata->u.mesh.mpaths); atomic_dec(&tbl->entries); + mesh_path_flush_pending(mpath); kfree_rcu(mpath, rcu); } diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 69429c8df7b3..e5fb9002d314 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -2305,6 +2305,7 @@ static int ieee80211_802_1x_port_control(struct ieee80211_rx_data *rx) static int ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc) { + struct ieee80211_hdr *hdr = (void *)rx->skb->data; struct sk_buff *skb = rx->skb; struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); @@ -2315,6 +2316,31 @@ static int ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc) if (status->flag & RX_FLAG_DECRYPTED) return 0; + /* check mesh EAPOL frames first */ + if (unlikely(rx->sta && ieee80211_vif_is_mesh(&rx->sdata->vif) && + ieee80211_is_data(fc))) { + struct ieee80211s_hdr *mesh_hdr; + u16 hdr_len = ieee80211_hdrlen(fc); + u16 ethertype_offset; + __be16 ethertype; + + if (!ether_addr_equal(hdr->addr1, rx->sdata->vif.addr)) + goto drop_check; + + /* make sure fixed part of mesh header is there, also checks skb len */ + if (!pskb_may_pull(rx->skb, hdr_len + 6)) + goto drop_check; + + mesh_hdr = (struct ieee80211s_hdr *)(skb->data + hdr_len); + ethertype_offset = hdr_len + ieee80211_get_mesh_hdrlen(mesh_hdr) + + sizeof(rfc1042_header); + + if (skb_copy_bits(rx->skb, ethertype_offset, &ethertype, 2) == 0 && + ethertype == rx->sdata->control_port_protocol) + return 0; + } + +drop_check: /* Drop unencrypted frames if key is set.
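Illustration: the mac80211 hunk above computes where the ethertype sits behind the 802.11, mesh and RFC 1042 headers, copies two bytes out with skb_copy_bits() and compares them against the control-port protocol. A userspace sketch of reading a big-endian 16-bit field at a computed offset with the same bounds check, using our own helper name:

#include <arpa/inet.h>	/* ntohs */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Copy out frame[offset..offset+1] as a host-order ethertype.
 * Returns false when the frame is too short, mirroring the
 * skb_copy_bits() failure path in the patch.
 */
static bool read_ethertype(const uint8_t *frame, size_t len,
			   size_t offset, uint16_t *ethertype)
{
	uint16_t be;

	if (offset + sizeof(be) > len)
		return false;
	memcpy(&be, frame + offset, sizeof(be));	/* no alignment assumption */
	*ethertype = ntohs(be);
	return true;
}

/* e.g. read_ethertype(buf, buflen, hdr_len + mesh_len + 6, &etype)
 *      && etype == 0x888e  -> EAPOL
 */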
*/ if (unlikely(!ieee80211_has_protected(fc) && !ieee80211_is_any_nullfunc(fc) && diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index b1669f024470..f5d96107af6d 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c @@ -1033,7 +1033,7 @@ static void __sta_info_destroy_part2(struct sta_info *sta) might_sleep(); lockdep_assert_held(&local->sta_mtx); - while (sta->sta_state == IEEE80211_STA_AUTHORIZED) { + if (sta->sta_state == IEEE80211_STA_AUTHORIZED) { ret = sta_info_move_state(sta, IEEE80211_STA_ASSOC); WARN_ON_ONCE(ret); } diff --git a/net/netfilter/ipset/ip_set_bitmap_ip.c b/net/netfilter/ipset/ip_set_bitmap_ip.c index d934384f31ad..6e3cf4d19ce8 100644 --- a/net/netfilter/ipset/ip_set_bitmap_ip.c +++ b/net/netfilter/ipset/ip_set_bitmap_ip.c @@ -314,7 +314,7 @@ bitmap_ip_create(struct net *net, struct ip_set *set, struct nlattr *tb[], set->variant = &bitmap_ip; if (!init_map_ip(set, map, first_ip, last_ip, elements, hosts, netmask)) { - kfree(map); + ip_set_free(map); return -ENOMEM; } if (tb[IPSET_ATTR_TIMEOUT]) { diff --git a/net/netfilter/ipset/ip_set_bitmap_ipmac.c b/net/netfilter/ipset/ip_set_bitmap_ipmac.c index e8532783b43a..ae7cdc0d0f29 100644 --- a/net/netfilter/ipset/ip_set_bitmap_ipmac.c +++ b/net/netfilter/ipset/ip_set_bitmap_ipmac.c @@ -363,7 +363,7 @@ bitmap_ipmac_create(struct net *net, struct ip_set *set, struct nlattr *tb[], map->memsize = BITS_TO_LONGS(elements) * sizeof(unsigned long); set->variant = &bitmap_ipmac; if (!init_map_ipmac(set, map, first_ip, last_ip, elements)) { - kfree(map); + ip_set_free(map); return -ENOMEM; } if (tb[IPSET_ATTR_TIMEOUT]) { diff --git a/net/netfilter/ipset/ip_set_bitmap_port.c b/net/netfilter/ipset/ip_set_bitmap_port.c index e3ac914fff1a..d4a14750f5c4 100644 --- a/net/netfilter/ipset/ip_set_bitmap_port.c +++ b/net/netfilter/ipset/ip_set_bitmap_port.c @@ -247,7 +247,7 @@ bitmap_port_create(struct net *net, struct ip_set *set, struct nlattr *tb[], map->memsize = BITS_TO_LONGS(elements) * sizeof(unsigned long); set->variant = &bitmap_port; if (!init_map_port(set, map, first_port, last_port)) { - kfree(map); + ip_set_free(map); return -ENOMEM; } if (tb[IPSET_ATTR_TIMEOUT]) { diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c index 75da200aa5d8..133a3f1b6f56 100644 --- a/net/netfilter/ipset/ip_set_core.c +++ b/net/netfilter/ipset/ip_set_core.c @@ -382,6 +382,8 @@ ip_set_elem_len(struct ip_set *set, struct nlattr *tb[], size_t len, for (id = 0; id < IPSET_EXT_ID_MAX; id++) { if (!add_extension(id, cadt_flags, tb)) continue; + if (align < ip_set_extensions[id].align) + align = ip_set_extensions[id].align; len = ALIGN(len, ip_set_extensions[id].align); set->offset[id] = len; set->extensions |= ip_set_extensions[id].type; diff --git a/net/netfilter/ipset/ip_set_hash_gen.h b/net/netfilter/ipset/ip_set_hash_gen.h index 2389c9f89e48..a7a982a3e676 100644 --- a/net/netfilter/ipset/ip_set_hash_gen.h +++ b/net/netfilter/ipset/ip_set_hash_gen.h @@ -682,7 +682,7 @@ retry: } t->hregion = ip_set_alloc(ahash_sizeof_regions(htable_bits)); if (!t->hregion) { - kfree(t); + ip_set_free(t); ret = -ENOMEM; goto out; } @@ -1533,7 +1533,7 @@ IPSET_TOKEN(HTYPE, _create)(struct net *net, struct ip_set *set, } t->hregion = ip_set_alloc(ahash_sizeof_regions(hbits)); if (!t->hregion) { - kfree(t); + ip_set_free(t); kfree(h); return -ENOMEM; } diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c index 8b80ab794a92..64a05906cc0e 100644 --- a/net/netfilter/ipvs/ip_vs_core.c +++ 
b/net/netfilter/ipvs/ip_vs_core.c @@ -2061,14 +2061,14 @@ ip_vs_in(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, int conn_reuse_mode = sysctl_conn_reuse_mode(ipvs); if (conn_reuse_mode && !iph.fragoffs && is_new_conn(skb, &iph) && cp) { - bool uses_ct = false, resched = false; + bool old_ct = false, resched = false; if (unlikely(sysctl_expire_nodest_conn(ipvs)) && cp->dest && unlikely(!atomic_read(&cp->dest->weight))) { resched = true; - uses_ct = ip_vs_conn_uses_conntrack(cp, skb); + old_ct = ip_vs_conn_uses_old_conntrack(cp, skb); } else if (is_new_conn_expected(cp, conn_reuse_mode)) { - uses_ct = ip_vs_conn_uses_conntrack(cp, skb); + old_ct = ip_vs_conn_uses_old_conntrack(cp, skb); if (!atomic_read(&cp->n_control)) { resched = true; } else { @@ -2076,15 +2076,17 @@ ip_vs_in(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, int * that uses conntrack while it is still * referenced by controlled connection(s). */ - resched = !uses_ct; + resched = !old_ct; } } if (resched) { + if (!old_ct) + cp->flags &= ~IP_VS_CONN_F_NFCT; if (!atomic_read(&cp->n_control)) ip_vs_conn_expire_now(cp); __ip_vs_conn_put(cp); - if (uses_ct) + if (old_ct) return NF_DROP; cp = NULL; } diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c index 8dc892a9dc91..0c1bc654245c 100644 --- a/net/netfilter/ipvs/ip_vs_sync.c +++ b/net/netfilter/ipvs/ip_vs_sync.c @@ -1717,6 +1717,8 @@ static int sync_thread_backup(void *data) { struct ip_vs_sync_thread_data *tinfo = data; struct netns_ipvs *ipvs = tinfo->ipvs; + struct sock *sk = tinfo->sock->sk; + struct udp_sock *up = udp_sk(sk); int len; pr_info("sync thread started: state = BACKUP, mcast_ifn = %s, " @@ -1724,12 +1726,14 @@ static int sync_thread_backup(void *data) ipvs->bcfg.mcast_ifn, ipvs->bcfg.syncid, tinfo->id); while (!kthread_should_stop()) { - wait_event_interruptible(*sk_sleep(tinfo->sock->sk), - !skb_queue_empty(&tinfo->sock->sk->sk_receive_queue) - || kthread_should_stop()); + wait_event_interruptible(*sk_sleep(sk), + !skb_queue_empty_lockless(&sk->sk_receive_queue) || + !skb_queue_empty_lockless(&up->reader_queue) || + kthread_should_stop()); /* do we have data now? 
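Illustration: the sync_thread_backup() hunk above widens the wait so the backup thread wakes when either the socket receive queue or UDP's internal reader_queue has packets, or when the thread is asked to stop. A rough pthread analogue of sleeping on such a compound predicate (structure and names are ours, not IPVS code):

#include <pthread.h>
#include <stdbool.h>

struct backup_waiter {
	pthread_mutex_t lock;
	pthread_cond_t  wake;
	int  rx_queue_len;	/* stand-in for sk_receive_queue */
	int  reader_queue_len;	/* stand-in for udp_sk(sk)->reader_queue */
	bool should_stop;
};

/* Sleep until there is something to read on either queue or a stop request,
 * re-checking the whole condition after every wakeup so no event is lost.
 */
static void wait_for_work(struct backup_waiter *w)
{
	pthread_mutex_lock(&w->lock);
	while (w->rx_queue_len == 0 && w->reader_queue_len == 0 &&
	       !w->should_stop)
		pthread_cond_wait(&w->wake, &w->lock);
	pthread_mutex_unlock(&w->lock);
}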
*/ - while (!skb_queue_empty(&(tinfo->sock->sk->sk_receive_queue))) { + while (!skb_queue_empty_lockless(&sk->sk_receive_queue) || + !skb_queue_empty_lockless(&up->reader_queue)) { len = ip_vs_receive(tinfo->sock, tinfo->buf, ipvs->bcfg.sync_maxlen); if (len <= 0) { diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 48db4aec02de..200cdad3ff3a 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -2012,6 +2012,8 @@ static int nf_conntrack_update(struct net *net, struct sk_buff *skb) err = __nf_conntrack_update(net, skb, ct, ctinfo); if (err < 0) return err; + + ct = nf_ct_get(skb, &ctinfo); } return nf_confirm_cthelper(skb, ct, ctinfo); diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c index 4f897b14b606..810cca24b399 100644 --- a/net/netfilter/nf_conntrack_proto_sctp.c +++ b/net/netfilter/nf_conntrack_proto_sctp.c @@ -62,6 +62,8 @@ static const unsigned int sctp_timeouts[SCTP_CONNTRACK_MAX] = { [SCTP_CONNTRACK_HEARTBEAT_ACKED] = 210 SECS, }; +#define SCTP_FLAG_HEARTBEAT_VTAG_FAILED 1 + #define sNO SCTP_CONNTRACK_NONE #define sCL SCTP_CONNTRACK_CLOSED #define sCW SCTP_CONNTRACK_COOKIE_WAIT @@ -369,6 +371,7 @@ int nf_conntrack_sctp_packet(struct nf_conn *ct, u_int32_t offset, count; unsigned int *timeouts; unsigned long map[256 / sizeof(unsigned long)] = { 0 }; + bool ignore = false; if (sctp_error(skb, dataoff, state)) return -NF_ACCEPT; @@ -427,15 +430,39 @@ int nf_conntrack_sctp_packet(struct nf_conn *ct, /* Sec 8.5.1 (D) */ if (sh->vtag != ct->proto.sctp.vtag[dir]) goto out_unlock; - } else if (sch->type == SCTP_CID_HEARTBEAT || - sch->type == SCTP_CID_HEARTBEAT_ACK) { + } else if (sch->type == SCTP_CID_HEARTBEAT) { + if (ct->proto.sctp.vtag[dir] == 0) { + pr_debug("Setting %d vtag %x for dir %d\n", sch->type, sh->vtag, dir); + ct->proto.sctp.vtag[dir] = sh->vtag; + } else if (sh->vtag != ct->proto.sctp.vtag[dir]) { + if (test_bit(SCTP_CID_DATA, map) || ignore) + goto out_unlock; + + ct->proto.sctp.flags |= SCTP_FLAG_HEARTBEAT_VTAG_FAILED; + ct->proto.sctp.last_dir = dir; + ignore = true; + continue; + } else if (ct->proto.sctp.flags & SCTP_FLAG_HEARTBEAT_VTAG_FAILED) { + ct->proto.sctp.flags &= ~SCTP_FLAG_HEARTBEAT_VTAG_FAILED; + } + } else if (sch->type == SCTP_CID_HEARTBEAT_ACK) { if (ct->proto.sctp.vtag[dir] == 0) { pr_debug("Setting vtag %x for dir %d\n", sh->vtag, dir); ct->proto.sctp.vtag[dir] = sh->vtag; } else if (sh->vtag != ct->proto.sctp.vtag[dir]) { - pr_debug("Verification tag check failed\n"); - goto out_unlock; + if (test_bit(SCTP_CID_DATA, map) || ignore) + goto out_unlock; + + if ((ct->proto.sctp.flags & SCTP_FLAG_HEARTBEAT_VTAG_FAILED) == 0 || + ct->proto.sctp.last_dir == dir) + goto out_unlock; + + ct->proto.sctp.flags &= ~SCTP_FLAG_HEARTBEAT_VTAG_FAILED; + ct->proto.sctp.vtag[dir] = sh->vtag; + ct->proto.sctp.vtag[!dir] = 0; + } else if (ct->proto.sctp.flags & SCTP_FLAG_HEARTBEAT_VTAG_FAILED) { + ct->proto.sctp.flags &= ~SCTP_FLAG_HEARTBEAT_VTAG_FAILED; } } @@ -470,6 +497,10 @@ int nf_conntrack_sctp_packet(struct nf_conn *ct, } spin_unlock_bh(&ct->lock); + /* allow but do not refresh timeout */ + if (ignore) + return NF_ACCEPT; + timeouts = nf_ct_timeout_lookup(ct); if (!timeouts) timeouts = nf_sctp_pernet(nf_ct_net(ct))->timeouts; diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index f7129232c825..2023650c2724 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -744,11 +744,11 @@ static int 
nf_tables_gettable(struct net *net, struct sock *nlsk, nlh->nlmsg_seq, NFT_MSG_NEWTABLE, 0, family, table); if (err < 0) - goto err; + goto err_fill_table_info; - return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid); + return nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid); -err: +err_fill_table_info: kfree_skb(skb2); return err; } @@ -1443,11 +1443,11 @@ static int nf_tables_getchain(struct net *net, struct sock *nlsk, nlh->nlmsg_seq, NFT_MSG_NEWCHAIN, 0, family, table, chain); if (err < 0) - goto err; + goto err_fill_chain_info; - return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid); + return nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid); -err: +err_fill_chain_info: kfree_skb(skb2); return err; } @@ -2622,11 +2622,11 @@ static int nf_tables_getrule(struct net *net, struct sock *nlsk, nlh->nlmsg_seq, NFT_MSG_NEWRULE, 0, family, table, chain, rule, NULL); if (err < 0) - goto err; + goto err_fill_rule_info; - return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid); + return nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid); -err: +err_fill_rule_info: kfree_skb(skb2); return err; } @@ -3353,7 +3353,8 @@ static int nf_tables_fill_set(struct sk_buff *skb, const struct nft_ctx *ctx, goto nla_put_failure; } - if (nla_put(skb, NFTA_SET_USERDATA, set->udlen, set->udata)) + if (set->udata && + nla_put(skb, NFTA_SET_USERDATA, set->udlen, set->udata)) goto nla_put_failure; desc = nla_nest_start_noflag(skb, NFTA_SET_DESC); @@ -3525,11 +3526,11 @@ static int nf_tables_getset(struct net *net, struct sock *nlsk, err = nf_tables_fill_set(skb2, &ctx, set, NFT_MSG_NEWSET, 0); if (err < 0) - goto err; + goto err_fill_set_info; - return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid); + return nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid); -err: +err_fill_set_info: kfree_skb(skb2); return err; } @@ -4304,24 +4305,18 @@ static int nft_get_set_elem(struct nft_ctx *ctx, struct nft_set *set, err = -ENOMEM; skb = nlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC); if (skb == NULL) - goto err1; + return err; err = nf_tables_fill_setelem_info(skb, ctx, ctx->seq, ctx->portid, NFT_MSG_NEWSETELEM, 0, set, &elem); if (err < 0) - goto err2; + goto err_fill_setelem; - err = nfnetlink_unicast(skb, ctx->net, ctx->portid, MSG_DONTWAIT); - /* This avoids a loop in nfnetlink. */ - if (err < 0) - goto err1; + return nfnetlink_unicast(skb, ctx->net, ctx->portid); - return 0; -err2: +err_fill_setelem: kfree_skb(skb); -err1: - /* this avoids a loop in nfnetlink. */ - return err == -EAGAIN ? 
-ENOBUFS : err; + return err; } /* called with rcu_read_lock held */ @@ -5498,10 +5493,11 @@ static int nf_tables_getobj(struct net *net, struct sock *nlsk, nlh->nlmsg_seq, NFT_MSG_NEWOBJ, 0, family, table, obj, reset); if (err < 0) - goto err; + goto err_fill_obj_info; - return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid); -err: + return nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid); + +err_fill_obj_info: kfree_skb(skb2); return err; } @@ -6173,10 +6169,11 @@ static int nf_tables_getflowtable(struct net *net, struct sock *nlsk, NFT_MSG_NEWFLOWTABLE, 0, family, flowtable); if (err < 0) - goto err; + goto err_fill_flowtable_info; - return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid); -err: + return nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid); + +err_fill_flowtable_info: kfree_skb(skb2); return err; } @@ -6337,10 +6334,11 @@ static int nf_tables_getgen(struct net *net, struct sock *nlsk, err = nf_tables_fill_gen_info(skb2, net, NETLINK_CB(skb).portid, nlh->nlmsg_seq); if (err < 0) - goto err; + goto err_fill_gen_info; - return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid); -err: + return nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid); + +err_fill_gen_info: kfree_skb(skb2); return err; } diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c index 99127e2d95a8..6d03b0909621 100644 --- a/net/netfilter/nfnetlink.c +++ b/net/netfilter/nfnetlink.c @@ -148,10 +148,15 @@ int nfnetlink_set_err(struct net *net, u32 portid, u32 group, int error) } EXPORT_SYMBOL_GPL(nfnetlink_set_err); -int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u32 portid, - int flags) +int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u32 portid) { - return netlink_unicast(net->nfnl, skb, portid, flags); + int err; + + err = nlmsg_unicast(net->nfnl, skb, portid); + if (err == -EAGAIN) + err = -ENOBUFS; + + return err; } EXPORT_SYMBOL_GPL(nfnetlink_unicast); diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c index 0ba020ca38e6..7ca2ca4bba05 100644 --- a/net/netfilter/nfnetlink_log.c +++ b/net/netfilter/nfnetlink_log.c @@ -356,8 +356,7 @@ __nfulnl_send(struct nfulnl_instance *inst) goto out; } } - nfnetlink_unicast(inst->skb, inst->net, inst->peer_portid, - MSG_DONTWAIT); + nfnetlink_unicast(inst->skb, inst->net, inst->peer_portid); out: inst->qlen = 0; inst->skb = NULL; diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c index feabdfb22920..6f0a2bad8ad5 100644 --- a/net/netfilter/nfnetlink_queue.c +++ b/net/netfilter/nfnetlink_queue.c @@ -681,7 +681,7 @@ __nfqnl_enqueue_packet(struct net *net, struct nfqnl_instance *queue, *packet_id_ptr = htonl(entry->id); /* nfnetlink_unicast will either free the nskb or add it to a socket */ - err = nfnetlink_unicast(nskb, net, queue->peer_portid, MSG_DONTWAIT); + err = nfnetlink_unicast(nskb, net, queue->peer_portid); if (err < 0) { if (queue->flags & NFQA_CFG_F_FAIL_OPEN) { failopen = 1; diff --git a/net/netfilter/nft_exthdr.c b/net/netfilter/nft_exthdr.c index a5e8469859e3..427d77b111b1 100644 --- a/net/netfilter/nft_exthdr.c +++ b/net/netfilter/nft_exthdr.c @@ -44,7 +44,7 @@ static void nft_exthdr_ipv6_eval(const struct nft_expr *expr, err = ipv6_find_hdr(pkt->skb, &offset, priv->type, NULL, NULL); if (priv->flags & NFT_EXTHDR_F_PRESENT) { - *dest = (err >= 0); + nft_reg_store8(dest, err >= 0); return; } else if (err < 0) { goto err; @@ -141,7 +141,7 @@ static void nft_exthdr_ipv4_eval(const struct nft_expr *expr, err = ipv4_find_option(nft_net(pkt), skb, &offset, 
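With the nfnetlink.c hunk above, nfnetlink_unicast() becomes a thin wrapper that drops the MSG_DONTWAIT argument and converts the transient -EAGAIN into -ENOBUFS in one place, so callers such as nft_get_set_elem() no longer carry that translation themselves. A sketch of the same normalisation pattern in plain C; fake_send() and send_unicast() are stand-ins, not kernel functions.

    #include <errno.h>
    #include <stdio.h>

    /* Stand-in for nlmsg_unicast(): pretend the receiver's buffer is full. */
    static int fake_send(int portid)
    {
        (void)portid;
        return -EAGAIN;
    }

    /* Normalise -EAGAIN into -ENOBUFS so every caller sees one well-defined
     * "receiver cannot keep up" error instead of a retry hint. */
    static int send_unicast(int portid)
    {
        int err = fake_send(portid);

        if (err == -EAGAIN)
            err = -ENOBUFS;
        return err;
    }

    int main(void)
    {
        printf("err=%d (ENOBUFS=%d)\n", send_unicast(42), -ENOBUFS);
        return 0;
    }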
priv->type); if (priv->flags & NFT_EXTHDR_F_PRESENT) { - *dest = (err >= 0); + nft_reg_store8(dest, err >= 0); return; } else if (err < 0) { goto err; diff --git a/net/netfilter/nft_nat.c b/net/netfilter/nft_nat.c index c3c93e95b46e..243e8107f456 100644 --- a/net/netfilter/nft_nat.c +++ b/net/netfilter/nft_nat.c @@ -129,7 +129,7 @@ static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr, priv->type = NF_NAT_MANIP_DST; break; default: - return -EINVAL; + return -EOPNOTSUPP; } if (tb[NFTA_NAT_FAMILY] == NULL) @@ -196,7 +196,7 @@ static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr, if (tb[NFTA_NAT_FLAGS]) { priv->flags = ntohl(nla_get_be32(tb[NFTA_NAT_FLAGS])); if (priv->flags & ~NF_NAT_RANGE_MASK) - return -EINVAL; + return -EOPNOTSUPP; } return nf_ct_netns_get(ctx->net, family); diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c index 0e3bfbc26e79..62dc728bf93c 100644 --- a/net/netfilter/nft_payload.c +++ b/net/netfilter/nft_payload.c @@ -79,7 +79,9 @@ void nft_payload_eval(const struct nft_expr *expr, u32 *dest = ®s->data[priv->dreg]; int offset; - dest[priv->len / NFT_REG32_SIZE] = 0; + if (priv->len % NFT_REG32_SIZE) + dest[priv->len / NFT_REG32_SIZE] = 0; + switch (priv->base) { case NFT_PAYLOAD_LL_HEADER: if (!skb_mac_header_was_set(skb)) diff --git a/net/netlabel/netlabel_domainhash.c b/net/netlabel/netlabel_domainhash.c index f5d34da0646e..12aa803b2f68 100644 --- a/net/netlabel/netlabel_domainhash.c +++ b/net/netlabel/netlabel_domainhash.c @@ -85,6 +85,7 @@ static void netlbl_domhsh_free_entry(struct rcu_head *entry) kfree(netlbl_domhsh_addr6_entry(iter6)); } #endif /* IPv6 */ + kfree(ptr->def.addrsel); } kfree(ptr->domain); kfree(ptr); @@ -536,6 +537,8 @@ int netlbl_domhsh_add(struct netlbl_dom_map *entry, goto add_return; } #endif /* IPv6 */ + /* cleanup the new entry since we've moved everything over */ + netlbl_domhsh_free_entry(&entry->rcu); } else ret_val = -EINVAL; @@ -579,6 +582,12 @@ int netlbl_domhsh_remove_entry(struct netlbl_dom_map *entry, { int ret_val = 0; struct audit_buffer *audit_buf; + struct netlbl_af4list *iter4; + struct netlbl_domaddr4_map *map4; +#if IS_ENABLED(CONFIG_IPV6) + struct netlbl_af6list *iter6; + struct netlbl_domaddr6_map *map6; +#endif /* IPv6 */ if (entry == NULL) return -ENOENT; @@ -596,6 +605,9 @@ int netlbl_domhsh_remove_entry(struct netlbl_dom_map *entry, ret_val = -ENOENT; spin_unlock(&netlbl_domhsh_lock); + if (ret_val) + return ret_val; + audit_buf = netlbl_audit_start_common(AUDIT_MAC_MAP_DEL, audit_info); if (audit_buf != NULL) { audit_log_format(audit_buf, @@ -605,40 +617,29 @@ int netlbl_domhsh_remove_entry(struct netlbl_dom_map *entry, audit_log_end(audit_buf); } - if (ret_val == 0) { - struct netlbl_af4list *iter4; - struct netlbl_domaddr4_map *map4; -#if IS_ENABLED(CONFIG_IPV6) - struct netlbl_af6list *iter6; - struct netlbl_domaddr6_map *map6; -#endif /* IPv6 */ - - switch (entry->def.type) { - case NETLBL_NLTYPE_ADDRSELECT: - netlbl_af4list_foreach_rcu(iter4, - &entry->def.addrsel->list4) { - map4 = netlbl_domhsh_addr4_entry(iter4); - cipso_v4_doi_putdef(map4->def.cipso); - } -#if IS_ENABLED(CONFIG_IPV6) - netlbl_af6list_foreach_rcu(iter6, - &entry->def.addrsel->list6) { - map6 = netlbl_domhsh_addr6_entry(iter6); - calipso_doi_putdef(map6->def.calipso); - } -#endif /* IPv6 */ - break; - case NETLBL_NLTYPE_CIPSOV4: - cipso_v4_doi_putdef(entry->def.cipso); - break; -#if IS_ENABLED(CONFIG_IPV6) - case NETLBL_NLTYPE_CALIPSO: - calipso_doi_putdef(entry->def.calipso); 
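The nft_payload hunk above only zeroes the trailing destination register when the copied length is not a multiple of NFT_REG32_SIZE: a full-width copy overwrites the register anyway, and unconditionally clearing dest[len / 4] would scribble one word past the copied data for aligned lengths. A small sketch of that padding rule, assuming a flat array of 32-bit registers; load_payload() and REG32_SIZE are illustrative names.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define REG32_SIZE 4

    /* Copy 'len' payload bytes into 32-bit registers, zero-padding only the
     * partially used trailing register. */
    static void load_payload(uint32_t *dest, const uint8_t *src, unsigned int len)
    {
        if (len % REG32_SIZE)
            dest[len / REG32_SIZE] = 0;   /* clear only the partial word */
        memcpy(dest, src, len);
    }

    int main(void)
    {
        uint32_t regs[4] = { ~0u, ~0u, ~0u, ~0u };
        uint8_t payload[6] = { 1, 2, 3, 4, 5, 6 };

        load_payload(regs, payload, sizeof(payload));
        printf("%08x %08x %08x %08x\n", regs[0], regs[1], regs[2], regs[3]);
        return 0;
    }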
- break; -#endif /* IPv6 */ + switch (entry->def.type) { + case NETLBL_NLTYPE_ADDRSELECT: + netlbl_af4list_foreach_rcu(iter4, &entry->def.addrsel->list4) { + map4 = netlbl_domhsh_addr4_entry(iter4); + cipso_v4_doi_putdef(map4->def.cipso); } - call_rcu(&entry->rcu, netlbl_domhsh_free_entry); +#if IS_ENABLED(CONFIG_IPV6) + netlbl_af6list_foreach_rcu(iter6, &entry->def.addrsel->list6) { + map6 = netlbl_domhsh_addr6_entry(iter6); + calipso_doi_putdef(map6->def.calipso); + } +#endif /* IPv6 */ + break; + case NETLBL_NLTYPE_CIPSOV4: + cipso_v4_doi_putdef(entry->def.cipso); + break; +#if IS_ENABLED(CONFIG_IPV6) + case NETLBL_NLTYPE_CALIPSO: + calipso_doi_putdef(entry->def.calipso); + break; +#endif /* IPv6 */ } + call_rcu(&entry->rcu, netlbl_domhsh_free_entry); return ret_val; } diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c index efccd1ac9a66..102b8d6b5612 100644 --- a/net/netlink/genetlink.c +++ b/net/netlink/genetlink.c @@ -989,60 +989,11 @@ static struct genl_family genl_ctrl __ro_after_init = { .netnsok = true, }; -static int genl_bind(struct net *net, int group) -{ - struct genl_family *f; - int err = -ENOENT; - unsigned int id; - - down_read(&cb_lock); - - idr_for_each_entry(&genl_fam_idr, f, id) { - if (group >= f->mcgrp_offset && - group < f->mcgrp_offset + f->n_mcgrps) { - int fam_grp = group - f->mcgrp_offset; - - if (!f->netnsok && net != &init_net) - err = -ENOENT; - else if (f->mcast_bind) - err = f->mcast_bind(net, fam_grp); - else - err = 0; - break; - } - } - up_read(&cb_lock); - - return err; -} - -static void genl_unbind(struct net *net, int group) -{ - struct genl_family *f; - unsigned int id; - - down_read(&cb_lock); - - idr_for_each_entry(&genl_fam_idr, f, id) { - if (group >= f->mcgrp_offset && - group < f->mcgrp_offset + f->n_mcgrps) { - int fam_grp = group - f->mcgrp_offset; - - if (f->mcast_unbind) - f->mcast_unbind(net, fam_grp); - break; - } - } - up_read(&cb_lock); -} - static int __net_init genl_pernet_init(struct net *net) { struct netlink_kernel_cfg cfg = { .input = genl_rcv, .flags = NL_CFG_F_NONROOT_RECV, - .bind = genl_bind, - .unbind = genl_unbind, }; /* we'll bump the group number right afterwards */ diff --git a/net/nfc/rawsock.c b/net/nfc/rawsock.c index ba5ffd3badd3..b5c867fe3232 100644 --- a/net/nfc/rawsock.c +++ b/net/nfc/rawsock.c @@ -332,10 +332,13 @@ static int rawsock_create(struct net *net, struct socket *sock, if ((sock->type != SOCK_SEQPACKET) && (sock->type != SOCK_RAW)) return -ESOCKTNOSUPPORT; - if (sock->type == SOCK_RAW) + if (sock->type == SOCK_RAW) { + if (!capable(CAP_NET_RAW)) + return -EPERM; sock->ops = &rawsock_raw_ops; - else + } else { sock->ops = &rawsock_ops; + } sk = sk_alloc(net, PF_NFC, GFP_ATOMIC, nfc_proto->proto, kern); if (!sk) diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c index 99352f09deaa..3d96dab10449 100644 --- a/net/openvswitch/actions.c +++ b/net/openvswitch/actions.c @@ -1146,9 +1146,10 @@ static int execute_check_pkt_len(struct datapath *dp, struct sk_buff *skb, struct sw_flow_key *key, const struct nlattr *attr, bool last) { + struct ovs_skb_cb *ovs_cb = OVS_CB(skb); const struct nlattr *actions, *cpl_arg; + int len, max_len, rem = nla_len(attr); const struct check_pkt_len_arg *arg; - int rem = nla_len(attr); bool clone_flow_key; /* The first netlink attribute in 'attr' is always @@ -1157,7 +1158,11 @@ static int execute_check_pkt_len(struct datapath *dp, struct sk_buff *skb, cpl_arg = nla_data(attr); arg = nla_data(cpl_arg); - if (skb->len <= arg->pkt_len) { + len = ovs_cb->mru 
? ovs_cb->mru + skb->mac_len : skb->len; + max_len = arg->pkt_len; + + if ((skb_is_gso(skb) && skb_gso_validate_mac_len(skb, max_len)) || + len <= max_len) { /* Second netlink attribute in 'attr' is always * 'OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_LESS_EQUAL'. */ diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c index 8b70298857e3..c86e404cd65b 100644 --- a/net/openvswitch/conntrack.c +++ b/net/openvswitch/conntrack.c @@ -276,10 +276,6 @@ void ovs_ct_fill_key(const struct sk_buff *skb, struct sw_flow_key *key) ovs_ct_update_key(skb, NULL, key, false, false); } -#define IN6_ADDR_INITIALIZER(ADDR) \ - { (ADDR).s6_addr32[0], (ADDR).s6_addr32[1], \ - (ADDR).s6_addr32[2], (ADDR).s6_addr32[3] } - int ovs_ct_put_key(const struct sw_flow_key *swkey, const struct sw_flow_key *output, struct sk_buff *skb) { @@ -301,24 +297,30 @@ int ovs_ct_put_key(const struct sw_flow_key *swkey, if (swkey->ct_orig_proto) { if (swkey->eth.type == htons(ETH_P_IP)) { - struct ovs_key_ct_tuple_ipv4 orig = { - output->ipv4.ct_orig.src, - output->ipv4.ct_orig.dst, - output->ct.orig_tp.src, - output->ct.orig_tp.dst, - output->ct_orig_proto, - }; + struct ovs_key_ct_tuple_ipv4 orig; + + memset(&orig, 0, sizeof(orig)); + orig.ipv4_src = output->ipv4.ct_orig.src; + orig.ipv4_dst = output->ipv4.ct_orig.dst; + orig.src_port = output->ct.orig_tp.src; + orig.dst_port = output->ct.orig_tp.dst; + orig.ipv4_proto = output->ct_orig_proto; + if (nla_put(skb, OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4, sizeof(orig), &orig)) return -EMSGSIZE; } else if (swkey->eth.type == htons(ETH_P_IPV6)) { - struct ovs_key_ct_tuple_ipv6 orig = { - IN6_ADDR_INITIALIZER(output->ipv6.ct_orig.src), - IN6_ADDR_INITIALIZER(output->ipv6.ct_orig.dst), - output->ct.orig_tp.src, - output->ct.orig_tp.dst, - output->ct_orig_proto, - }; + struct ovs_key_ct_tuple_ipv6 orig; + + memset(&orig, 0, sizeof(orig)); + memcpy(orig.ipv6_src, output->ipv6.ct_orig.src.s6_addr32, + sizeof(orig.ipv6_src)); + memcpy(orig.ipv6_dst, output->ipv6.ct_orig.dst.s6_addr32, + sizeof(orig.ipv6_dst)); + orig.src_port = output->ct.orig_tp.src; + orig.dst_port = output->ct.orig_tp.dst; + orig.ipv6_proto = output->ct_orig_proto; + if (nla_put(skb, OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6, sizeof(orig), &orig)) return -EMSGSIZE; diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 1d63ab3a878a..fbc2d4dfddf0 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -941,6 +941,7 @@ static int prb_queue_frozen(struct tpacket_kbdq_core *pkc) } static void prb_clear_blk_fill_status(struct packet_ring_buffer *rb) + __releases(&pkc->blk_fill_in_prog_lock) { struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb); atomic_dec(&pkc->blk_fill_in_prog); @@ -988,6 +989,7 @@ static void prb_fill_curr_block(char *curr, struct tpacket_kbdq_core *pkc, struct tpacket_block_desc *pbd, unsigned int len) + __acquires(&pkc->blk_fill_in_prog_lock) { struct tpacket3_hdr *ppd; @@ -2167,7 +2169,8 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, int skb_len = skb->len; unsigned int snaplen, res; unsigned long status = TP_STATUS_USER; - unsigned short macoff, netoff, hdrlen; + unsigned short macoff, hdrlen; + unsigned int netoff; struct sk_buff *copy_skb = NULL; struct timespec ts; __u32 ts_status; @@ -2236,6 +2239,10 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, } macoff = netoff - maclen; } + if (netoff > USHRT_MAX) { + atomic_inc(&po->tp_drops); + goto drop_n_restore; + } if (po->tp_version <= TPACKET_V2) { if (macoff + snaplen > 
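The openvswitch conntrack hunk above replaces designated initializers for the ct tuple attributes with memset() plus explicit field assignments: initializers leave structure padding undefined, and those padding bytes would otherwise be copied to userspace through nla_put(). A sketch of the pattern; struct ct_tuple_v4 and fill_tuple() are illustrative, with a layout only similar in spirit to ovs_key_ct_tuple_ipv4.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct ct_tuple_v4 {
        uint32_t ipv4_src;
        uint32_t ipv4_dst;
        uint16_t src_port;
        uint16_t dst_port;
        uint8_t  ipv4_proto;
        /* implicit trailing padding on most ABIs */
    };

    static void fill_tuple(struct ct_tuple_v4 *t)
    {
        /* Clearing the whole struct first guarantees no stack garbage ends
         * up in the padding bytes that get copied out verbatim. */
        memset(t, 0, sizeof(*t));
        t->ipv4_src = 0x0a000001;
        t->ipv4_dst = 0x0a000002;
        t->src_port = 1234;
        t->dst_port = 80;
        t->ipv4_proto = 6; /* TCP */
    }

    int main(void)
    {
        struct ct_tuple_v4 t;

        fill_tuple(&t);
        printf("sizeof=%zu proto=%u\n", sizeof(t), t.ipv4_proto);
        return 0;
    }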
po->rx_ring.frame_size) { if (po->copy_thresh && @@ -2285,8 +2292,11 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, if (do_vnet && virtio_net_hdr_from_skb(skb, h.raw + macoff - sizeof(struct virtio_net_hdr), - vio_le(), true, 0)) + vio_le(), true, 0)) { + if (po->tp_version == TPACKET_V3) + prb_clear_blk_fill_status(&po->rx_ring); goto drop_n_account; + } if (po->tp_version <= TPACKET_V2) { packet_increment_rx_head(po, &po->rx_ring); @@ -2392,7 +2402,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, __clear_bit(slot_id, po->rx_ring.rx_owner_map); spin_unlock(&sk->sk_receive_queue.lock); sk->sk_data_ready(sk); - } else { + } else if (po->tp_version == TPACKET_V3) { prb_clear_blk_fill_status(&po->rx_ring); } diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c index 2a4d50e04441..a699e318b9a0 100644 --- a/net/qrtr/qrtr.c +++ b/net/qrtr/qrtr.c @@ -259,7 +259,7 @@ int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len) unsigned int ver; size_t hdrlen; - if (len & 3) + if (len == 0 || len & 3) return -EINVAL; skb = netdev_alloc_skb(NULL, len); @@ -273,6 +273,8 @@ int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len) switch (ver) { case QRTR_PROTO_VER_1: + if (len < sizeof(*v1)) + goto err; v1 = data; hdrlen = sizeof(*v1); @@ -286,6 +288,8 @@ int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len) size = le32_to_cpu(v1->size); break; case QRTR_PROTO_VER_2: + if (len < sizeof(*v2)) + goto err; v2 = data; hdrlen = sizeof(*v2) + v2->optlen; @@ -543,23 +547,25 @@ static void qrtr_port_remove(struct qrtr_sock *ipc) */ static int qrtr_port_assign(struct qrtr_sock *ipc, int *port) { + u32 min_port; int rc; mutex_lock(&qrtr_port_lock); if (!*port) { - rc = idr_alloc(&qrtr_ports, ipc, - QRTR_MIN_EPH_SOCKET, QRTR_MAX_EPH_SOCKET + 1, - GFP_ATOMIC); - if (rc >= 0) - *port = rc; + min_port = QRTR_MIN_EPH_SOCKET; + rc = idr_alloc_u32(&qrtr_ports, ipc, &min_port, QRTR_MAX_EPH_SOCKET, GFP_ATOMIC); + if (!rc) + *port = min_port; } else if (*port < QRTR_MIN_EPH_SOCKET && !capable(CAP_NET_ADMIN)) { rc = -EACCES; } else if (*port == QRTR_PORT_CTRL) { - rc = idr_alloc(&qrtr_ports, ipc, 0, 1, GFP_ATOMIC); + min_port = 0; + rc = idr_alloc_u32(&qrtr_ports, ipc, &min_port, 0, GFP_ATOMIC); } else { - rc = idr_alloc(&qrtr_ports, ipc, *port, *port + 1, GFP_ATOMIC); - if (rc >= 0) - *port = rc; + min_port = *port; + rc = idr_alloc_u32(&qrtr_ports, ipc, &min_port, *port, GFP_ATOMIC); + if (!rc) + *port = min_port; } mutex_unlock(&qrtr_port_lock); @@ -1000,6 +1006,7 @@ static int qrtr_release(struct socket *sock) sk->sk_state_change(sk); sock_set_flag(sk, SOCK_DEAD); + sock_orphan(sk); sock->sk = NULL; if (!sock_flag(sk, SOCK_ZAPPED)) diff --git a/net/rds/recv.c b/net/rds/recv.c index c8404971d5ab..aba4afe4dfed 100644 --- a/net/rds/recv.c +++ b/net/rds/recv.c @@ -450,12 +450,13 @@ static int rds_still_queued(struct rds_sock *rs, struct rds_incoming *inc, int rds_notify_queue_get(struct rds_sock *rs, struct msghdr *msghdr) { struct rds_notifier *notifier; - struct rds_rdma_notify cmsg = { 0 }; /* fill holes with zero */ + struct rds_rdma_notify cmsg; unsigned int count = 0, max_messages = ~0U; unsigned long flags; LIST_HEAD(copy); int err = 0; + memset(&cmsg, 0, sizeof(cmsg)); /* fill holes with zero */ /* put_cmsg copies to user space and thus may sleep. 
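The qrtr hunk above rejects zero-length and short buffers before dereferencing the version-specific header, the usual "check the length before touching the struct" guard. A sketch of that validation; the hdr_v1/hdr_v2 layouts, field names and sizes here are simplified stand-ins, not the real qrtr wire format.

    #include <errno.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct hdr_v1 { uint32_t version, src, dst, type, flags, size; };
    struct hdr_v2 { uint8_t version, type, flags, optlen; uint32_t size; };

    static int parse(const void *data, size_t len)
    {
        uint32_t ver;

        if (len == 0 || len & 3)              /* non-empty and word aligned */
            return -EINVAL;

        ver = *(const uint8_t *)data;         /* first byte selects the layout */

        switch (ver) {
        case 1:
            if (len < sizeof(struct hdr_v1))  /* too short for a v1 header */
                return -EINVAL;
            break;
        case 2:
            if (len < sizeof(struct hdr_v2))  /* too short for a v2 header */
                return -EINVAL;
            break;
        default:
            return -EPROTONOSUPPORT;
        }
        return 0;
    }

    int main(void)
    {
        uint8_t buf[4] = { 1, 0, 0, 0 };

        printf("%d\n", parse(buf, sizeof(buf)));  /* v1 but only 4 bytes */
        return 0;
    }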
We can't do this * with rs_lock held, so first grab as many notifications as we can stuff diff --git a/net/rxrpc/call_accept.c b/net/rxrpc/call_accept.c index b7611cc159e5..032ed76c0166 100644 --- a/net/rxrpc/call_accept.c +++ b/net/rxrpc/call_accept.c @@ -22,6 +22,11 @@ #include #include "ar-internal.h" +static void rxrpc_dummy_notify(struct sock *sk, struct rxrpc_call *call, + unsigned long user_call_ID) +{ +} + /* * Preallocate a single service call, connection and peer and, if possible, * give them a user ID and attach the user's side of the ID to them. @@ -228,6 +233,8 @@ void rxrpc_discard_prealloc(struct rxrpc_sock *rx) if (rx->discard_new_call) { _debug("discard %lx", call->user_call_ID); rx->discard_new_call(call, call->user_call_ID); + if (call->notify_rx) + call->notify_rx = rxrpc_dummy_notify; rxrpc_put_call(call, rxrpc_call_put_kernel); } rxrpc_call_completed(call); diff --git a/net/rxrpc/call_event.c b/net/rxrpc/call_event.c index 2a65ac41055f..9ff85ee8337c 100644 --- a/net/rxrpc/call_event.c +++ b/net/rxrpc/call_event.c @@ -248,7 +248,18 @@ static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j) if (anno_type != RXRPC_TX_ANNO_RETRANS) continue; + /* We need to reset the retransmission state, but we need to do + * so before we drop the lock as a new ACK/NAK may come in and + * confuse things + */ + annotation &= ~RXRPC_TX_ANNO_MASK; + annotation |= RXRPC_TX_ANNO_UNACK | RXRPC_TX_ANNO_RESENT; + call->rxtx_annotations[ix] = annotation; + skb = call->rxtx_buffer[ix]; + if (!skb) + continue; + rxrpc_get_skb(skb, rxrpc_skb_got); spin_unlock_bh(&call->lock); @@ -262,24 +273,6 @@ static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j) rxrpc_free_skb(skb, rxrpc_skb_freed); spin_lock_bh(&call->lock); - - /* We need to clear the retransmit state, but there are two - * things we need to be aware of: A new ACK/NAK might have been - * received and the packet might have been hard-ACK'd (in which - * case it will no longer be in the buffer). 
- */ - if (after(seq, call->tx_hard_ack)) { - annotation = call->rxtx_annotations[ix]; - anno_type = annotation & RXRPC_TX_ANNO_MASK; - if (anno_type == RXRPC_TX_ANNO_RETRANS || - anno_type == RXRPC_TX_ANNO_NAK) { - annotation &= ~RXRPC_TX_ANNO_MASK; - annotation |= RXRPC_TX_ANNO_UNACK; - } - annotation |= RXRPC_TX_ANNO_RESENT; - call->rxtx_annotations[ix] = annotation; - } - if (after(call->tx_hard_ack, seq)) seq = call->tx_hard_ack; } diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c index f07970207b54..38a46167523f 100644 --- a/net/rxrpc/call_object.c +++ b/net/rxrpc/call_object.c @@ -288,7 +288,7 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx, */ ret = rxrpc_connect_call(rx, call, cp, srx, gfp); if (ret < 0) - goto error; + goto error_attached_to_socket; trace_rxrpc_call(call->debug_id, rxrpc_call_connected, atomic_read(&call->usage), here, NULL); @@ -308,18 +308,29 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx, error_dup_user_ID: write_unlock(&rx->call_lock); release_sock(&rx->sk); - ret = -EEXIST; - -error: __rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR, - RX_CALL_DEAD, ret); + RX_CALL_DEAD, -EEXIST); trace_rxrpc_call(call->debug_id, rxrpc_call_error, - atomic_read(&call->usage), here, ERR_PTR(ret)); + atomic_read(&call->usage), here, ERR_PTR(-EEXIST)); rxrpc_release_call(rx, call); mutex_unlock(&call->user_mutex); rxrpc_put_call(call, rxrpc_call_put); - _leave(" = %d", ret); - return ERR_PTR(ret); + _leave(" = -EEXIST"); + return ERR_PTR(-EEXIST); + + /* We got an error, but the call is attached to the socket and is in + * need of release. However, we might now race with recvmsg() when + * completing the call queues it. Return 0 from sys_sendmsg() and + * leave the error to recvmsg() to deal with. 
+ */ +error_attached_to_socket: + trace_rxrpc_call(call->debug_id, rxrpc_call_error, + atomic_read(&call->usage), here, ERR_PTR(ret)); + set_bit(RXRPC_CALL_DISCONNECTED, &call->flags); + __rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR, + RX_CALL_DEAD, ret); + _leave(" = c=%08x [err]", call->debug_id); + return call; } /* diff --git a/net/rxrpc/conn_object.c b/net/rxrpc/conn_object.c index 19e141eeed17..8cbe0bf20ed5 100644 --- a/net/rxrpc/conn_object.c +++ b/net/rxrpc/conn_object.c @@ -212,9 +212,11 @@ void rxrpc_disconnect_call(struct rxrpc_call *call) call->peer->cong_cwnd = call->cong_cwnd; - spin_lock_bh(&conn->params.peer->lock); - hlist_del_rcu(&call->error_link); - spin_unlock_bh(&conn->params.peer->lock); + if (!hlist_unhashed(&call->error_link)) { + spin_lock_bh(&call->peer->lock); + hlist_del_rcu(&call->error_link); + spin_unlock_bh(&call->peer->lock); + } if (rxrpc_is_client_call(call)) return rxrpc_disconnect_client_call(call); diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c index 3be4177baf70..6cace43b217e 100644 --- a/net/rxrpc/input.c +++ b/net/rxrpc/input.c @@ -723,13 +723,12 @@ static void rxrpc_input_ackinfo(struct rxrpc_call *call, struct sk_buff *skb, ntohl(ackinfo->rxMTU), ntohl(ackinfo->maxMTU), rwind, ntohl(ackinfo->jumbo_max)); + if (rwind > RXRPC_RXTX_BUFF_SIZE - 1) + rwind = RXRPC_RXTX_BUFF_SIZE - 1; if (call->tx_winsize != rwind) { - if (rwind > RXRPC_RXTX_BUFF_SIZE - 1) - rwind = RXRPC_RXTX_BUFF_SIZE - 1; if (rwind > call->tx_winsize) wake = true; - trace_rxrpc_rx_rwind_change(call, sp->hdr.serial, - ntohl(ackinfo->rwind), wake); + trace_rxrpc_rx_rwind_change(call, sp->hdr.serial, rwind, wake); call->tx_winsize = rwind; } @@ -845,7 +844,7 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb) struct rxrpc_ackinfo info; u8 acks[RXRPC_MAXACKS]; } buf; - rxrpc_serial_t acked_serial; + rxrpc_serial_t ack_serial, acked_serial; rxrpc_seq_t first_soft_ack, hard_ack, prev_pkt; int nr_acks, offset, ioffset; @@ -858,6 +857,7 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb) } offset += sizeof(buf.ack); + ack_serial = sp->hdr.serial; acked_serial = ntohl(buf.ack.serial); first_soft_ack = ntohl(buf.ack.firstPacket); prev_pkt = ntohl(buf.ack.previousPacket); @@ -866,31 +866,31 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb) summary.ack_reason = (buf.ack.reason < RXRPC_ACK__INVALID ? buf.ack.reason : RXRPC_ACK__INVALID); - trace_rxrpc_rx_ack(call, sp->hdr.serial, acked_serial, + trace_rxrpc_rx_ack(call, ack_serial, acked_serial, first_soft_ack, prev_pkt, summary.ack_reason, nr_acks); if (buf.ack.reason == RXRPC_ACK_PING_RESPONSE) rxrpc_input_ping_response(call, skb->tstamp, acked_serial, - sp->hdr.serial); + ack_serial); if (buf.ack.reason == RXRPC_ACK_REQUESTED) rxrpc_input_requested_ack(call, skb->tstamp, acked_serial, - sp->hdr.serial); + ack_serial); if (buf.ack.reason == RXRPC_ACK_PING) { - _proto("Rx ACK %%%u PING Request", sp->hdr.serial); + _proto("Rx ACK %%%u PING Request", ack_serial); rxrpc_propose_ACK(call, RXRPC_ACK_PING_RESPONSE, - sp->hdr.serial, true, true, + ack_serial, true, true, rxrpc_propose_ack_respond_to_ping); } else if (sp->hdr.flags & RXRPC_REQUEST_ACK) { rxrpc_propose_ACK(call, RXRPC_ACK_REQUESTED, - sp->hdr.serial, true, true, + ack_serial, true, true, rxrpc_propose_ack_respond_to_ack); } /* Discard any out-of-order or duplicate ACKs (outside lock). 
*/ if (!rxrpc_is_ack_valid(call, first_soft_ack, prev_pkt)) { - trace_rxrpc_rx_discard_ack(call->debug_id, sp->hdr.serial, + trace_rxrpc_rx_discard_ack(call->debug_id, ack_serial, first_soft_ack, call->ackr_first_seq, prev_pkt, call->ackr_prev_seq); return; @@ -906,7 +906,7 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb) /* Discard any out-of-order or duplicate ACKs (inside lock). */ if (!rxrpc_is_ack_valid(call, first_soft_ack, prev_pkt)) { - trace_rxrpc_rx_discard_ack(call->debug_id, sp->hdr.serial, + trace_rxrpc_rx_discard_ack(call->debug_id, ack_serial, first_soft_ack, call->ackr_first_seq, prev_pkt, call->ackr_prev_seq); goto out; @@ -966,7 +966,7 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb) RXRPC_TX_ANNO_LAST && summary.nr_acks == call->tx_top - hard_ack && rxrpc_is_client_call(call)) - rxrpc_propose_ACK(call, RXRPC_ACK_PING, sp->hdr.serial, + rxrpc_propose_ACK(call, RXRPC_ACK_PING, ack_serial, false, true, rxrpc_propose_ack_ping_for_lost_reply); diff --git a/net/rxrpc/peer_object.c b/net/rxrpc/peer_object.c index efce27802a74..e011594adcd1 100644 --- a/net/rxrpc/peer_object.c +++ b/net/rxrpc/peer_object.c @@ -500,11 +500,21 @@ EXPORT_SYMBOL(rxrpc_kernel_get_peer); * rxrpc_kernel_get_srtt - Get a call's peer smoothed RTT * @sock: The socket on which the call is in progress. * @call: The call to query + * @_srtt: Where to store the SRTT value. * - * Get the call's peer smoothed RTT. + * Get the call's peer smoothed RTT in uS. */ -u32 rxrpc_kernel_get_srtt(struct socket *sock, struct rxrpc_call *call) +bool rxrpc_kernel_get_srtt(struct socket *sock, struct rxrpc_call *call, + u32 *_srtt) { - return call->peer->srtt_us >> 3; + struct rxrpc_peer *peer = call->peer; + + if (peer->rtt_count == 0) { + *_srtt = 1000000; /* 1S */ + return false; + } + + *_srtt = call->peer->srtt_us >> 3; + return true; } EXPORT_SYMBOL(rxrpc_kernel_get_srtt); diff --git a/net/rxrpc/proc.c b/net/rxrpc/proc.c index 8b179e3c802a..543afd9bd664 100644 --- a/net/rxrpc/proc.c +++ b/net/rxrpc/proc.c @@ -68,7 +68,7 @@ static int rxrpc_call_seq_show(struct seq_file *seq, void *v) "Proto Local " " Remote " " SvID ConnID CallID End Use State Abort " - " UserID TxSeq TW RxSeq RW RxSerial RxTimo\n"); + " DebugId TxSeq TW RxSeq RW RxSerial RxTimo\n"); return 0; } @@ -100,7 +100,7 @@ static int rxrpc_call_seq_show(struct seq_file *seq, void *v) rx_hard_ack = READ_ONCE(call->rx_hard_ack); seq_printf(seq, "UDP %-47.47s %-47.47s %4x %08x %08x %s %3u" - " %-8.8s %08x %lx %08x %02x %08x %02x %08x %06lx\n", + " %-8.8s %08x %08x %08x %02x %08x %02x %08x %06lx\n", lbuff, rbuff, call->service_id, @@ -110,7 +110,7 @@ static int rxrpc_call_seq_show(struct seq_file *seq, void *v) atomic_read(&call->usage), rxrpc_call_states[call->state], call->abort_code, - call->user_call_ID, + call->debug_id, tx_hard_ack, READ_ONCE(call->tx_top) - tx_hard_ack, rx_hard_ack, READ_ONCE(call->rx_top) - rx_hard_ack, call->rx_serial, diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c index 8578c39ec839..4f48e3bdd4b4 100644 --- a/net/rxrpc/recvmsg.c +++ b/net/rxrpc/recvmsg.c @@ -464,7 +464,7 @@ try_again: list_empty(&rx->recvmsg_q) && rx->sk.sk_state != RXRPC_SERVER_LISTENING) { release_sock(&rx->sk); - return -ENODATA; + return -EAGAIN; } if (list_empty(&rx->recvmsg_q)) { @@ -541,7 +541,7 @@ try_again: goto error_unlock_call; } - if (msg->msg_name) { + if (msg->msg_name && call->peer) { struct sockaddr_rxrpc *srx = msg->msg_name; size_t len = sizeof(call->peer->srx); diff --git 
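The rxrpc_kernel_get_srtt() change above turns the helper into "bool + output parameter": when no RTT samples have been gathered it hands back a conservative 1-second default and returns false, so callers can tell a real measurement from the fallback. A sketch of that API shape; struct peer and peer_get_srtt() are illustrative names.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct peer {
        unsigned int rtt_count;   /* number of RTT samples gathered */
        uint32_t     srtt_us_x8;  /* smoothed RTT stored << 3, RFC 6298 style */
    };

    /* Return true when a measured SRTT is available; otherwise report the
     * 1 second default and false so the caller can distinguish the two. */
    static bool peer_get_srtt(const struct peer *p, uint32_t *srtt_us)
    {
        if (p->rtt_count == 0) {
            *srtt_us = 1000000;
            return false;
        }
        *srtt_us = p->srtt_us_x8 >> 3;
        return true;
    }

    int main(void)
    {
        struct peer fresh = { 0, 0 }, seasoned = { 5, 8 * 37000 };
        uint32_t srtt;
        bool ok;

        ok = peer_get_srtt(&fresh, &srtt);
        printf("fresh: ok=%d srtt=%u\n", ok, srtt);
        ok = peer_get_srtt(&seasoned, &srtt);
        printf("seasoned: ok=%d srtt=%u\n", ok, srtt);
        return 0;
    }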
a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c index 5e9c43d4a314..1a340eb0abf7 100644 --- a/net/rxrpc/sendmsg.c +++ b/net/rxrpc/sendmsg.c @@ -306,7 +306,7 @@ static int rxrpc_send_data(struct rxrpc_sock *rx, /* this should be in poll */ sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk); - if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) + if (sk->sk_shutdown & SEND_SHUTDOWN) return -EPIPE; more = msg->msg_flags & MSG_MORE; @@ -683,6 +683,9 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len) if (IS_ERR(call)) return PTR_ERR(call); /* ... and we have the call lock. */ + ret = 0; + if (READ_ONCE(call->state) == RXRPC_CALL_COMPLETE) + goto out_put_unlock; } else { switch (READ_ONCE(call->state)) { case RXRPC_CALL_UNINITIALISED: diff --git a/net/sched/act_connmark.c b/net/sched/act_connmark.c index 2b43cacf82af..1a8f2f85ea1a 100644 --- a/net/sched/act_connmark.c +++ b/net/sched/act_connmark.c @@ -43,17 +43,20 @@ static int tcf_connmark_act(struct sk_buff *skb, const struct tc_action *a, tcf_lastuse_update(&ca->tcf_tm); bstats_update(&ca->tcf_bstats, skb); - if (skb->protocol == htons(ETH_P_IP)) { + switch (skb_protocol(skb, true)) { + case htons(ETH_P_IP): if (skb->len < sizeof(struct iphdr)) goto out; proto = NFPROTO_IPV4; - } else if (skb->protocol == htons(ETH_P_IPV6)) { + break; + case htons(ETH_P_IPV6): if (skb->len < sizeof(struct ipv6hdr)) goto out; proto = NFPROTO_IPV6; - } else { + break; + default: goto out; } diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c index d3cfad88dc3a..428b1ae00123 100644 --- a/net/sched/act_csum.c +++ b/net/sched/act_csum.c @@ -587,7 +587,7 @@ static int tcf_csum_act(struct sk_buff *skb, const struct tc_action *a, goto drop; update_flags = params->update_flags; - protocol = tc_skb_protocol(skb); + protocol = skb_protocol(skb, false); again: switch (protocol) { case cpu_to_be16(ETH_P_IP): diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c index 0586546c20d7..e32c4732ddf8 100644 --- a/net/sched/act_ct.c +++ b/net/sched/act_ct.c @@ -100,7 +100,7 @@ static u8 tcf_ct_skb_nf_family(struct sk_buff *skb) { u8 family = NFPROTO_UNSPEC; - switch (skb->protocol) { + switch (skb_protocol(skb, true)) { case htons(ETH_P_IP): family = NFPROTO_IPV4; break; @@ -186,7 +186,7 @@ static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb, memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm)); err = nf_ct_frag6_gather(net, skb, user); if (err && err != -EINPROGRESS) - goto out_free; + return err; #else err = -EOPNOTSUPP; goto out_free; @@ -222,6 +222,7 @@ static int ct_nat_execute(struct sk_buff *skb, struct nf_conn *ct, const struct nf_nat_range2 *range, enum nf_nat_manip_type maniptype) { + __be16 proto = skb_protocol(skb, true); int hooknum, err = NF_ACCEPT; /* See HOOK2MANIP(). 
*/ @@ -233,14 +234,13 @@ static int ct_nat_execute(struct sk_buff *skb, struct nf_conn *ct, switch (ctinfo) { case IP_CT_RELATED: case IP_CT_RELATED_REPLY: - if (skb->protocol == htons(ETH_P_IP) && + if (proto == htons(ETH_P_IP) && ip_hdr(skb)->protocol == IPPROTO_ICMP) { if (!nf_nat_icmp_reply_translation(skb, ct, ctinfo, hooknum)) err = NF_DROP; goto out; - } else if (IS_ENABLED(CONFIG_IPV6) && - skb->protocol == htons(ETH_P_IPV6)) { + } else if (IS_ENABLED(CONFIG_IPV6) && proto == htons(ETH_P_IPV6)) { __be16 frag_off; u8 nexthdr = ipv6_hdr(skb)->nexthdr; int hdrlen = ipv6_skip_exthdr(skb, @@ -993,4 +993,3 @@ MODULE_AUTHOR("Yossi Kuperman "); MODULE_AUTHOR("Marcelo Ricardo Leitner "); MODULE_DESCRIPTION("Connection tracking action"); MODULE_LICENSE("GPL v2"); - diff --git a/net/sched/act_ctinfo.c b/net/sched/act_ctinfo.c index f45995a6237a..a91fcee810ef 100644 --- a/net/sched/act_ctinfo.c +++ b/net/sched/act_ctinfo.c @@ -96,19 +96,22 @@ static int tcf_ctinfo_act(struct sk_buff *skb, const struct tc_action *a, action = READ_ONCE(ca->tcf_action); wlen = skb_network_offset(skb); - if (tc_skb_protocol(skb) == htons(ETH_P_IP)) { + switch (skb_protocol(skb, true)) { + case htons(ETH_P_IP): wlen += sizeof(struct iphdr); if (!pskb_may_pull(skb, wlen)) goto out; proto = NFPROTO_IPV4; - } else if (tc_skb_protocol(skb) == htons(ETH_P_IPV6)) { + break; + case htons(ETH_P_IPV6): wlen += sizeof(struct ipv6hdr); if (!pskb_may_pull(skb, wlen)) goto out; proto = NFPROTO_IPV6; - } else { + break; + default: goto out; } diff --git a/net/sched/act_mpls.c b/net/sched/act_mpls.c index db570d2bd0e0..f786775699b5 100644 --- a/net/sched/act_mpls.c +++ b/net/sched/act_mpls.c @@ -82,7 +82,7 @@ static int tcf_mpls_act(struct sk_buff *skb, const struct tc_action *a, goto drop; break; case TCA_MPLS_ACT_PUSH: - new_lse = tcf_mpls_get_lse(NULL, p, !eth_p_mpls(skb->protocol)); + new_lse = tcf_mpls_get_lse(NULL, p, !eth_p_mpls(skb_protocol(skb, true))); if (skb_mpls_push(skb, new_lse, p->tcfm_proto, mac_len, skb->dev && skb->dev->type == ARPHRD_ETHER)) goto drop; diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c index 6a8d3337c577..f98b2791ecec 100644 --- a/net/sched/act_skbedit.c +++ b/net/sched/act_skbedit.c @@ -41,7 +41,7 @@ static int tcf_skbedit_act(struct sk_buff *skb, const struct tc_action *a, if (params->flags & SKBEDIT_F_INHERITDSFIELD) { int wlen = skb_network_offset(skb); - switch (tc_skb_protocol(skb)) { + switch (skb_protocol(skb, true)) { case htons(ETH_P_IP): wlen += sizeof(struct iphdr); if (!pskb_may_pull(skb, wlen)) diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index 68c8fc6f535c..d7604417367d 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -1571,7 +1571,7 @@ int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp, reclassify: #endif for (; tp; tp = rcu_dereference_bh(tp->next)) { - __be16 protocol = tc_skb_protocol(skb); + __be16 protocol = skb_protocol(skb, false); int err; if (tp->protocol != protocol && diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c index 80ae7b9fa90a..ab53a93b2f2b 100644 --- a/net/sched/cls_flow.c +++ b/net/sched/cls_flow.c @@ -80,7 +80,7 @@ static u32 flow_get_dst(const struct sk_buff *skb, const struct flow_keys *flow) if (dst) return ntohl(dst); - return addr_fold(skb_dst(skb)) ^ (__force u16) tc_skb_protocol(skb); + return addr_fold(skb_dst(skb)) ^ (__force u16)skb_protocol(skb, true); } static u32 flow_get_proto(const struct sk_buff *skb, @@ -104,7 +104,7 @@ static u32 flow_get_proto_dst(const struct sk_buff *skb, if 
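The tc changes in this stretch replace skb->protocol and tc_skb_protocol() with skb_protocol(skb, true/false) so classifiers and actions see the encapsulated network-layer protocol even when a VLAN tag is still in the payload. A toy userspace model of that lookup only, not the kernel helper's exact semantics; l3_ethertype() and the ethertype constants are defined locally for the sketch.

    #include <stdint.h>
    #include <stdio.h>

    #define ETH_P_IP     0x0800
    #define ETH_P_8021Q  0x8100
    #define ETH_P_8021AD 0x88a8

    /* Toy model: walk past 802.1Q/802.1ad ethertypes to the first non-VLAN
     * one, which is the job skb_protocol(skb, true) delegates to
     * vlan_get_protocol() in the kernel. */
    static uint16_t l3_ethertype(const uint16_t *types, unsigned int n)
    {
        unsigned int i;

        for (i = 0; i < n; i++) {
            if (types[i] != ETH_P_8021Q && types[i] != ETH_P_8021AD)
                return types[i];   /* first non-VLAN ethertype is the payload */
        }
        return 0;
    }

    int main(void)
    {
        const uint16_t stack[] = { ETH_P_8021AD, ETH_P_8021Q, ETH_P_IP };

        printf("l3=%#x\n", l3_ethertype(stack, 3));  /* 0x800 */
        return 0;
    }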
(flow->ports.ports) return ntohs(flow->ports.dst); - return addr_fold(skb_dst(skb)) ^ (__force u16) tc_skb_protocol(skb); + return addr_fold(skb_dst(skb)) ^ (__force u16)skb_protocol(skb, true); } static u32 flow_get_iif(const struct sk_buff *skb) @@ -151,7 +151,7 @@ static u32 flow_get_nfct(const struct sk_buff *skb) static u32 flow_get_nfct_src(const struct sk_buff *skb, const struct flow_keys *flow) { - switch (tc_skb_protocol(skb)) { + switch (skb_protocol(skb, true)) { case htons(ETH_P_IP): return ntohl(CTTUPLE(skb, src.u3.ip)); case htons(ETH_P_IPV6): @@ -164,7 +164,7 @@ fallback: static u32 flow_get_nfct_dst(const struct sk_buff *skb, const struct flow_keys *flow) { - switch (tc_skb_protocol(skb)) { + switch (skb_protocol(skb, true)) { case htons(ETH_P_IP): return ntohl(CTTUPLE(skb, dst.u3.ip)); case htons(ETH_P_IPV6): diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c index 1d270540e74d..c5a0f2c2635e 100644 --- a/net/sched/cls_flower.c +++ b/net/sched/cls_flower.c @@ -310,7 +310,7 @@ static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp, /* skb_flow_dissect() does not set n_proto in case an unknown * protocol, so do it rather here. */ - skb_key.basic.n_proto = skb->protocol; + skb_key.basic.n_proto = skb_protocol(skb, false); skb_flow_dissect_tunnel_info(skb, &mask->dissector, &skb_key); skb_flow_dissect_ct(skb, &mask->dissector, &skb_key, fl_ct_info_to_flower_map, diff --git a/net/sched/em_ipset.c b/net/sched/em_ipset.c index df00566d327d..c95cf86fb431 100644 --- a/net/sched/em_ipset.c +++ b/net/sched/em_ipset.c @@ -59,7 +59,7 @@ static int em_ipset_match(struct sk_buff *skb, struct tcf_ematch *em, }; int ret, network_offset; - switch (tc_skb_protocol(skb)) { + switch (skb_protocol(skb, true)) { case htons(ETH_P_IP): state.pf = NFPROTO_IPV4; if (!pskb_network_may_pull(skb, sizeof(struct iphdr))) diff --git a/net/sched/em_ipt.c b/net/sched/em_ipt.c index 9fff6480acc6..e2c157df3f8b 100644 --- a/net/sched/em_ipt.c +++ b/net/sched/em_ipt.c @@ -212,7 +212,7 @@ static int em_ipt_match(struct sk_buff *skb, struct tcf_ematch *em, struct nf_hook_state state; int ret; - switch (tc_skb_protocol(skb)) { + switch (skb_protocol(skb, true)) { case htons(ETH_P_IP): if (!pskb_network_may_pull(skb, sizeof(struct iphdr))) return 0; diff --git a/net/sched/em_meta.c b/net/sched/em_meta.c index 3177dcb17316..ad007cdcec97 100644 --- a/net/sched/em_meta.c +++ b/net/sched/em_meta.c @@ -195,7 +195,7 @@ META_COLLECTOR(int_priority) META_COLLECTOR(int_protocol) { /* Let userspace take care of the byte ordering */ - dst->value = tc_skb_protocol(skb); + dst->value = skb_protocol(skb, false); } META_COLLECTOR(int_pkttype) diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c index f4f9b8cdbffb..6385995dc700 100644 --- a/net/sched/sch_atm.c +++ b/net/sched/sch_atm.c @@ -553,16 +553,16 @@ static int atm_tc_init(struct Qdisc *sch, struct nlattr *opt, if (!p->link.q) p->link.q = &noop_qdisc; pr_debug("atm_tc_init: link (%p) qdisc %p\n", &p->link, p->link.q); + p->link.vcc = NULL; + p->link.sock = NULL; + p->link.common.classid = sch->handle; + p->link.ref = 1; err = tcf_block_get(&p->link.block, &p->link.filter_list, sch, extack); if (err) return err; - p->link.vcc = NULL; - p->link.sock = NULL; - p->link.common.classid = sch->handle; - p->link.ref = 1; tasklet_init(&p->task, sch_atm_dequeue, (unsigned long)sch); return 0; } diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c index 2277369feae5..896c0562cb42 100644 --- a/net/sched/sch_cake.c +++ b/net/sched/sch_cake.c @@ -592,7 
+592,7 @@ static void cake_update_flowkeys(struct flow_keys *keys, struct nf_conntrack_tuple tuple = {}; bool rev = !skb->_nfct; - if (tc_skb_protocol(skb) != htons(ETH_P_IP)) + if (skb_protocol(skb, true) != htons(ETH_P_IP)) return; if (!nf_ct_get_tuple_skb(&tuple, skb)) @@ -1515,32 +1515,51 @@ static unsigned int cake_drop(struct Qdisc *sch, struct sk_buff **to_free) return idx + (tin << 16); } -static u8 cake_handle_diffserv(struct sk_buff *skb, u16 wash) +static u8 cake_handle_diffserv(struct sk_buff *skb, bool wash) { - int wlen = skb_network_offset(skb); + const int offset = skb_network_offset(skb); + u16 *buf, buf_; u8 dscp; - switch (tc_skb_protocol(skb)) { + switch (skb_protocol(skb, true)) { case htons(ETH_P_IP): - wlen += sizeof(struct iphdr); - if (!pskb_may_pull(skb, wlen) || - skb_try_make_writable(skb, wlen)) + buf = skb_header_pointer(skb, offset, sizeof(buf_), &buf_); + if (unlikely(!buf)) return 0; - dscp = ipv4_get_dsfield(ip_hdr(skb)) >> 2; - if (wash && dscp) + /* ToS is in the second byte of iphdr */ + dscp = ipv4_get_dsfield((struct iphdr *)buf) >> 2; + + if (wash && dscp) { + const int wlen = offset + sizeof(struct iphdr); + + if (!pskb_may_pull(skb, wlen) || + skb_try_make_writable(skb, wlen)) + return 0; + ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, 0); + } + return dscp; case htons(ETH_P_IPV6): - wlen += sizeof(struct ipv6hdr); - if (!pskb_may_pull(skb, wlen) || - skb_try_make_writable(skb, wlen)) + buf = skb_header_pointer(skb, offset, sizeof(buf_), &buf_); + if (unlikely(!buf)) return 0; - dscp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2; - if (wash && dscp) + /* Traffic class is in the first and second bytes of ipv6hdr */ + dscp = ipv6_get_dsfield((struct ipv6hdr *)buf) >> 2; + + if (wash && dscp) { + const int wlen = offset + sizeof(struct ipv6hdr); + + if (!pskb_may_pull(skb, wlen) || + skb_try_make_writable(skb, wlen)) + return 0; + ipv6_change_dsfield(ipv6_hdr(skb), INET_ECN_MASK, 0); + } + return dscp; case htons(ETH_P_ARP): @@ -1557,14 +1576,17 @@ static struct cake_tin_data *cake_select_tin(struct Qdisc *sch, { struct cake_sched_data *q = qdisc_priv(sch); u32 tin, mark; + bool wash; u8 dscp; /* Tin selection: Default to diffserv-based selection, allow overriding - * using firewall marks or skb->priority. + * using firewall marks or skb->priority. Call DSCP parsing early if + * wash is enabled, otherwise defer to below to skip unneeded parsing. 
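cake_handle_diffserv() above now reads the DSCP through skb_header_pointer(), which returns a direct pointer when the bytes are linear and copies them into a caller-supplied buffer otherwise, and only makes the skb writable in the wash case that actually modifies the field. A simplified sketch of that header-pointer idea; struct frag_buf and header_pointer() are illustrative, not the kernel API.

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct frag_buf {
        const uint8_t *linear;     /* directly addressable bytes */
        size_t         linear_len;
        const uint8_t *frag;       /* remainder lives elsewhere */
        size_t         frag_len;
    };

    /* Return a direct pointer when the requested bytes are linear,
     * otherwise assemble them into 'scratch'. */
    static const void *header_pointer(const struct frag_buf *b, size_t off,
                                      size_t len, void *scratch)
    {
        uint8_t *dst = scratch;
        size_t from_linear;

        if (off + len > b->linear_len + b->frag_len)
            return NULL;                       /* out of bounds */
        if (off + len <= b->linear_len)
            return b->linear + off;            /* fast path: no copy */

        from_linear = off < b->linear_len ? b->linear_len - off : 0;
        if (from_linear)
            memcpy(dst, b->linear + off, from_linear);
        memcpy(dst + from_linear,
               b->frag + (off + from_linear - b->linear_len),
               len - from_linear);
        return scratch;
    }

    int main(void)
    {
        const uint8_t head[2] = { 0x45, 0xb8 };   /* version/IHL, ToS = 0xb8 */
        const uint8_t rest[18] = { 0 };
        struct frag_buf b = { head, sizeof(head), rest, sizeof(rest) };
        uint8_t scratch[2];
        const uint8_t *tos = header_pointer(&b, 0, 2, scratch);

        if (tos)
            printf("dscp=%u\n", tos[1] >> 2);     /* 46 (EF) */
        return 0;
    }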
*/ - dscp = cake_handle_diffserv(skb, - q->rate_flags & CAKE_FLAG_WASH); mark = (skb->mark & q->fwmark_mask) >> q->fwmark_shft; + wash = !!(q->rate_flags & CAKE_FLAG_WASH); + if (wash) + dscp = cake_handle_diffserv(skb, wash); if (q->tin_mode == CAKE_DIFFSERV_BESTEFFORT) tin = 0; @@ -1578,6 +1600,8 @@ static struct cake_tin_data *cake_select_tin(struct Qdisc *sch, tin = q->tin_order[TC_H_MIN(skb->priority) - 1]; else { + if (!wash) + dscp = cake_handle_diffserv(skb, wash); tin = q->tin_index[dscp]; if (unlikely(tin >= q->tin_cnt)) @@ -2679,7 +2703,7 @@ static int cake_init(struct Qdisc *sch, struct nlattr *opt, qdisc_watchdog_init(&q->watchdog, sch); if (opt) { - int err = cake_change(sch, opt, extack); + err = cake_change(sch, opt, extack); if (err) return err; @@ -2996,7 +3020,7 @@ static int cake_dump_class_stats(struct Qdisc *sch, unsigned long cl, PUT_STAT_S32(BLUE_TIMER_US, ktime_to_us( ktime_sub(now, - flow->cvars.blue_timer))); + flow->cvars.blue_timer))); } if (flow->cvars.dropping) { PUT_STAT_S32(DROP_NEXT_US, diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c index 05605b30bef3..2b88710994d7 100644 --- a/net/sched/sch_dsmark.c +++ b/net/sched/sch_dsmark.c @@ -210,7 +210,7 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch, if (p->set_tc_index) { int wlen = skb_network_offset(skb); - switch (tc_skb_protocol(skb)) { + switch (skb_protocol(skb, true)) { case htons(ETH_P_IP): wlen += sizeof(struct iphdr); if (!pskb_may_pull(skb, wlen) || @@ -303,7 +303,7 @@ static struct sk_buff *dsmark_dequeue(struct Qdisc *sch) index = skb->tc_index & (p->indices - 1); pr_debug("index %d->%d\n", skb->tc_index, index); - switch (tc_skb_protocol(skb)) { + switch (skb_protocol(skb, true)) { case htons(ETH_P_IP): ipv4_change_dsfield(ip_hdr(skb), p->mv[index].mask, p->mv[index].value); @@ -320,7 +320,7 @@ static struct sk_buff *dsmark_dequeue(struct Qdisc *sch) */ if (p->mv[index].mask != 0xff || p->mv[index].value) pr_warn("%s: unsupported protocol %d\n", - __func__, ntohs(tc_skb_protocol(skb))); + __func__, ntohs(skb_protocol(skb, true))); break; } diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index f5cc6069131e..7885aef4ae37 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c @@ -476,6 +476,7 @@ void __netdev_watchdog_up(struct net_device *dev) dev_hold(dev); } } +EXPORT_SYMBOL_GPL(__netdev_watchdog_up); static void dev_watchdog_up(struct net_device *dev) { diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c index b1eb12d33b9a..6a5086e586ef 100644 --- a/net/sched/sch_taprio.c +++ b/net/sched/sch_taprio.c @@ -1177,9 +1177,27 @@ static void taprio_offload_config_changed(struct taprio_sched *q) spin_unlock(&q->current_entry_lock); } -static void taprio_sched_to_offload(struct taprio_sched *q, +static u32 tc_map_to_queue_mask(struct net_device *dev, u32 tc_mask) +{ + u32 i, queue_mask = 0; + + for (i = 0; i < dev->num_tc; i++) { + u32 offset, count; + + if (!(tc_mask & BIT(i))) + continue; + + offset = dev->tc_to_txq[i].offset; + count = dev->tc_to_txq[i].count; + + queue_mask |= GENMASK(offset + count - 1, offset); + } + + return queue_mask; +} + +static void taprio_sched_to_offload(struct net_device *dev, struct sched_gate_list *sched, - const struct tc_mqprio_qopt *mqprio, struct tc_taprio_qopt_offload *offload) { struct sched_entry *entry; @@ -1194,7 +1212,8 @@ static void taprio_sched_to_offload(struct taprio_sched *q, e->command = entry->command; e->interval = entry->interval; - e->gate_mask = entry->gate_mask; + e->gate_mask = 
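The tc_map_to_queue_mask() helper above converts a traffic-class bitmask into a TX-queue bitmask using the device's per-class offset/count mapping, so taprio offload hands drivers queue masks rather than class masks. A sketch of that conversion; struct tc_txq, the local GENMASK macro and the example mapping are defined here for illustration.

    #include <stdint.h>
    #include <stdio.h>

    #define GENMASK(h, l) (((~0u) << (l)) & (~0u >> (31 - (h))))

    struct tc_txq { unsigned int offset, count; };

    /* Set one bit per TX queue belonging to each traffic class selected
     * in tc_mask. */
    static uint32_t tc_map_to_queue_mask(const struct tc_txq *map,
                                         unsigned int num_tc, uint32_t tc_mask)
    {
        uint32_t queue_mask = 0;
        unsigned int i;

        for (i = 0; i < num_tc; i++) {
            if (!(tc_mask & (1u << i)))
                continue;
            queue_mask |= GENMASK(map[i].offset + map[i].count - 1,
                                  map[i].offset);
        }
        return queue_mask;
    }

    int main(void)
    {
        /* tc0 -> queues 0-1, tc1 -> queues 2-3 */
        const struct tc_txq map[] = { { 0, 2 }, { 2, 2 } };

        printf("%#x\n", tc_map_to_queue_mask(map, 2, 0x2));  /* tc1 -> 0xc */
        return 0;
    }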
tc_map_to_queue_mask(dev, entry->gate_mask); + i++; } @@ -1202,7 +1221,6 @@ static void taprio_sched_to_offload(struct taprio_sched *q, } static int taprio_enable_offload(struct net_device *dev, - struct tc_mqprio_qopt *mqprio, struct taprio_sched *q, struct sched_gate_list *sched, struct netlink_ext_ack *extack) @@ -1224,7 +1242,7 @@ static int taprio_enable_offload(struct net_device *dev, return -ENOMEM; } offload->enable = 1; - taprio_sched_to_offload(q, sched, mqprio, offload); + taprio_sched_to_offload(dev, sched, offload); err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_TAPRIO, offload); if (err < 0) { @@ -1486,7 +1504,7 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt, } if (FULL_OFFLOAD_IS_ENABLED(q->flags)) - err = taprio_enable_offload(dev, mqprio, q, new_admin, extack); + err = taprio_enable_offload(dev, q, new_admin, extack); else err = taprio_disable_offload(dev, q, extack); if (err) diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c index 689ef6f3ded8..2f1f0a378408 100644 --- a/net/sched/sch_teql.c +++ b/net/sched/sch_teql.c @@ -239,7 +239,7 @@ __teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res, char haddr[MAX_ADDR_LEN]; neigh_ha_snapshot(haddr, n, dev); - err = dev_hard_header(skb, dev, ntohs(tc_skb_protocol(skb)), + err = dev_hard_header(skb, dev, ntohs(skb_protocol(skb, false)), haddr, NULL, skb->len); if (err < 0) diff --git a/net/sctp/associola.c b/net/sctp/associola.c index 41839b85c268..fb6f62264e87 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c @@ -1569,12 +1569,15 @@ void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned int len) int sctp_assoc_set_bind_addr_from_ep(struct sctp_association *asoc, enum sctp_scope scope, gfp_t gfp) { + struct sock *sk = asoc->base.sk; int flags; /* Use scoping rules to determine the subset of addresses from * the endpoint. */ - flags = (PF_INET6 == asoc->base.sk->sk_family) ? SCTP_ADDR6_ALLOWED : 0; + flags = (PF_INET6 == sk->sk_family) ? SCTP_ADDR6_ALLOWED : 0; + if (!inet_v6_ipv6only(sk)) + flags |= SCTP_ADDR4_ALLOWED; if (asoc->peer.ipv4_address) flags |= SCTP_ADDR4_PEERSUPP; if (asoc->peer.ipv6_address) diff --git a/net/sctp/bind_addr.c b/net/sctp/bind_addr.c index 53bc61537f44..701c5a4e441d 100644 --- a/net/sctp/bind_addr.c +++ b/net/sctp/bind_addr.c @@ -461,6 +461,7 @@ static int sctp_copy_one_addr(struct net *net, struct sctp_bind_addr *dest, * well as the remote peer. */ if ((((AF_INET == addr->sa.sa_family) && + (flags & SCTP_ADDR4_ALLOWED) && (flags & SCTP_ADDR4_PEERSUPP))) || (((AF_INET6 == addr->sa.sa_family) && (flags & SCTP_ADDR6_ALLOWED) && diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c index 237c88eeb538..981c7cbca46a 100644 --- a/net/sctp/protocol.c +++ b/net/sctp/protocol.c @@ -148,7 +148,8 @@ int sctp_copy_local_addr_list(struct net *net, struct sctp_bind_addr *bp, * sock as well as the remote peer. */ if (addr->a.sa.sa_family == AF_INET && - !(copy_flags & SCTP_ADDR4_PEERSUPP)) + (!(copy_flags & SCTP_ADDR4_ALLOWED) || + !(copy_flags & SCTP_ADDR4_PEERSUPP))) continue; if (addr->a.sa.sa_family == AF_INET6 && (!(copy_flags & SCTP_ADDR6_ALLOWED) || diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 58fe6556cdf5..3a11212bb4c0 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -8176,8 +8176,6 @@ static int sctp_get_port_local(struct sock *sk, union sctp_addr *addr) pr_debug("%s: begins, snum:%d\n", __func__, snum); - local_bh_disable(); - if (snum == 0) { /* Search for an available port. 
*/ int low, high, remaining, index; @@ -8196,20 +8194,21 @@ static int sctp_get_port_local(struct sock *sk, union sctp_addr *addr) continue; index = sctp_phashfn(sock_net(sk), rover); head = &sctp_port_hashtable[index]; - spin_lock(&head->lock); + spin_lock_bh(&head->lock); sctp_for_each_hentry(pp, &head->chain) if ((pp->port == rover) && net_eq(sock_net(sk), pp->net)) goto next; break; next: - spin_unlock(&head->lock); + spin_unlock_bh(&head->lock); + cond_resched(); } while (--remaining > 0); /* Exhausted local port range during search? */ ret = 1; if (remaining <= 0) - goto fail; + return ret; /* OK, here is the one we will use. HEAD (the port * hash table list entry) is non-NULL and we hold it's @@ -8224,7 +8223,7 @@ static int sctp_get_port_local(struct sock *sk, union sctp_addr *addr) * port iterator, pp being NULL. */ head = &sctp_port_hashtable[sctp_phashfn(sock_net(sk), snum)]; - spin_lock(&head->lock); + spin_lock_bh(&head->lock); sctp_for_each_hentry(pp, &head->chain) { if ((pp->port == snum) && net_eq(pp->net, sock_net(sk))) goto pp_found; @@ -8324,10 +8323,7 @@ success: ret = 0; fail_unlock: - spin_unlock(&head->lock); - -fail: - local_bh_enable(); + spin_unlock_bh(&head->lock); return ret; } diff --git a/net/sctp/stream.c b/net/sctp/stream.c index c1a100d2fed3..cd20638b6151 100644 --- a/net/sctp/stream.c +++ b/net/sctp/stream.c @@ -22,17 +22,11 @@ #include #include -/* Migrates chunks from stream queues to new stream queues if needed, - * but not across associations. Also, removes those chunks to streams - * higher than the new max. - */ -static void sctp_stream_outq_migrate(struct sctp_stream *stream, - struct sctp_stream *new, __u16 outcnt) +static void sctp_stream_shrink_out(struct sctp_stream *stream, __u16 outcnt) { struct sctp_association *asoc; struct sctp_chunk *ch, *temp; struct sctp_outq *outq; - int i; asoc = container_of(stream, struct sctp_association, stream); outq = &asoc->outqueue; @@ -56,6 +50,19 @@ static void sctp_stream_outq_migrate(struct sctp_stream *stream, sctp_chunk_free(ch); } +} + +/* Migrates chunks from stream queues to new stream queues if needed, + * but not across associations. Also, removes those chunks to streams + * higher than the new max. 
+ */ +static void sctp_stream_outq_migrate(struct sctp_stream *stream, + struct sctp_stream *new, __u16 outcnt) +{ + int i; + + if (stream->outcnt > outcnt) + sctp_stream_shrink_out(stream, outcnt); if (new) { /* Here we actually move the old ext stuff into the new @@ -81,12 +88,13 @@ static int sctp_stream_alloc_out(struct sctp_stream *stream, __u16 outcnt, int ret; if (outcnt <= stream->outcnt) - return 0; + goto out; ret = genradix_prealloc(&stream->out, outcnt, gfp); if (ret) return ret; +out: stream->outcnt = outcnt; return 0; } @@ -97,12 +105,13 @@ static int sctp_stream_alloc_in(struct sctp_stream *stream, __u16 incnt, int ret; if (incnt <= stream->incnt) - return 0; + goto out; ret = genradix_prealloc(&stream->in, incnt, gfp); if (ret) return ret; +out: stream->incnt = incnt; return 0; } @@ -1038,11 +1047,13 @@ struct sctp_chunk *sctp_process_strreset_resp( nums = ntohs(addstrm->number_of_streams); number = stream->outcnt - nums; - if (result == SCTP_STRRESET_PERFORMED) + if (result == SCTP_STRRESET_PERFORMED) { for (i = number; i < stream->outcnt; i++) SCTP_SO(stream, i)->state = SCTP_STREAM_OPEN; - else + } else { + sctp_stream_shrink_out(stream, number); stream->outcnt = number; + } *evp = sctp_ulpevent_make_stream_change_event(asoc, flags, 0, nums, GFP_ATOMIC); diff --git a/net/smc/smc_diag.c b/net/smc/smc_diag.c index e1f64f4ba236..da9ba6d1679b 100644 --- a/net/smc/smc_diag.c +++ b/net/smc/smc_diag.c @@ -170,13 +170,15 @@ static int __smc_diag_dump(struct sock *sk, struct sk_buff *skb, (req->diag_ext & (1 << (SMC_DIAG_DMBINFO - 1))) && !list_empty(&smc->conn.lgr->list)) { struct smc_connection *conn = &smc->conn; - struct smcd_diag_dmbinfo dinfo = { - .linkid = *((u32 *)conn->lgr->id), - .peer_gid = conn->lgr->peer_gid, - .my_gid = conn->lgr->smcd->local_gid, - .token = conn->rmb_desc->token, - .peer_token = conn->peer_token - }; + struct smcd_diag_dmbinfo dinfo; + + memset(&dinfo, 0, sizeof(dinfo)); + + dinfo.linkid = *((u32 *)conn->lgr->id); + dinfo.peer_gid = conn->lgr->peer_gid; + dinfo.my_gid = conn->lgr->smcd->local_gid; + dinfo.token = conn->rmb_desc->token; + dinfo.peer_token = conn->peer_token; if (nla_put(skb, SMC_DIAG_DMBINFO, sizeof(dinfo), &dinfo) < 0) goto errout; diff --git a/net/socket.c b/net/socket.c index 432800b39ddb..d1a0264401b7 100644 --- a/net/socket.c +++ b/net/socket.c @@ -485,7 +485,7 @@ static struct socket *sockfd_lookup_light(int fd, int *err, int *fput_needed) if (f.file) { sock = sock_from_file(f.file, err); if (likely(sock)) { - *fput_needed = f.flags; + *fput_needed = f.flags & FDPUT_FPUT; return sock; } fdput(f); diff --git a/net/sunrpc/addr.c b/net/sunrpc/addr.c index d024af4be85e..105d17af4abc 100644 --- a/net/sunrpc/addr.c +++ b/net/sunrpc/addr.c @@ -82,11 +82,11 @@ static size_t rpc_ntop6(const struct sockaddr *sap, rc = snprintf(scopebuf, sizeof(scopebuf), "%c%u", IPV6_SCOPE_DELIMITER, sin6->sin6_scope_id); - if (unlikely((size_t)rc > sizeof(scopebuf))) + if (unlikely((size_t)rc >= sizeof(scopebuf))) return 0; len += rc; - if (unlikely(len > buflen)) + if (unlikely(len >= buflen)) return 0; strcat(buf, scopebuf); diff --git a/net/sunrpc/auth_gss/gss_krb5_wrap.c b/net/sunrpc/auth_gss/gss_krb5_wrap.c index 683755d95075..78ad41656996 100644 --- a/net/sunrpc/auth_gss/gss_krb5_wrap.c +++ b/net/sunrpc/auth_gss/gss_krb5_wrap.c @@ -584,7 +584,7 @@ gss_unwrap_kerberos_v2(struct krb5_ctx *kctx, int offset, int len, buf->head[0].iov_len); memmove(ptr, ptr + GSS_KRB5_TOK_HDR_LEN + headskip, movelen); buf->head[0].iov_len -= GSS_KRB5_TOK_HDR_LEN + 
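The rpc_ntop6() change above tightens two bounds checks from '>' to '>=': snprintf() returns the length it would have produced, so a return value equal to the buffer size already means truncation, and the running length must stay strictly below buflen to leave room for the NUL when strcat() appends the scope suffix. A sketch of the corrected checks; append_scope() and its buffer sizes are illustrative.

    #include <stdio.h>
    #include <string.h>

    /* Append an IPv6 scope suffix "%<id>" to buf with the corrected bounds:
     * rc >= sizeof(scopebuf) means the suffix itself was truncated, and
     * len >= buflen means suffix plus terminator will not fit. */
    static size_t append_scope(char *buf, size_t buflen, unsigned int scope_id)
    {
        char scopebuf[16];
        size_t len = strlen(buf);
        int rc;

        rc = snprintf(scopebuf, sizeof(scopebuf), "%%%u", scope_id);
        if (rc < 0 || (size_t)rc >= sizeof(scopebuf))
            return 0;                   /* scope suffix did not fit */

        len += (size_t)rc;
        if (len >= buflen)
            return 0;                   /* no room for suffix + NUL */

        strcat(buf, scopebuf);
        return len;
    }

    int main(void)
    {
        char buf[16] = "fe80::1";

        printf("len=%zu buf=%s\n", append_scope(buf, sizeof(buf), 3), buf);
        return 0;
    }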
headskip; - buf->len = len - GSS_KRB5_TOK_HDR_LEN + headskip; + buf->len = len - (GSS_KRB5_TOK_HDR_LEN + headskip); /* Trim off the trailing "extra count" and checksum blob */ xdr_buf_trim(buf, ec + GSS_KRB5_TOK_HDR_LEN + tailskip); diff --git a/net/sunrpc/auth_gss/gss_mech_switch.c b/net/sunrpc/auth_gss/gss_mech_switch.c index 8fa924c8e282..9314999bf095 100644 --- a/net/sunrpc/auth_gss/gss_mech_switch.c +++ b/net/sunrpc/auth_gss/gss_mech_switch.c @@ -36,6 +36,8 @@ gss_mech_free(struct gss_api_mech *gm) for (i = 0; i < gm->gm_pf_num; i++) { pf = &gm->gm_pfs[i]; + if (pf->domain) + auth_domain_put(pf->domain); kfree(pf->auth_domain_name); pf->auth_domain_name = NULL; } @@ -58,6 +60,7 @@ make_auth_domain_name(char *name) static int gss_mech_svc_setup(struct gss_api_mech *gm) { + struct auth_domain *dom; struct pf_desc *pf; int i, status; @@ -67,10 +70,13 @@ gss_mech_svc_setup(struct gss_api_mech *gm) status = -ENOMEM; if (pf->auth_domain_name == NULL) goto out; - status = svcauth_gss_register_pseudoflavor(pf->pseudoflavor, - pf->auth_domain_name); - if (status) + dom = svcauth_gss_register_pseudoflavor( + pf->pseudoflavor, pf->auth_domain_name); + if (IS_ERR(dom)) { + status = PTR_ERR(dom); goto out; + } + pf->domain = dom; } return 0; out: diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c index d9f7439e2431..3645cd241d3e 100644 --- a/net/sunrpc/auth_gss/svcauth_gss.c +++ b/net/sunrpc/auth_gss/svcauth_gss.c @@ -800,7 +800,7 @@ u32 svcauth_gss_flavor(struct auth_domain *dom) EXPORT_SYMBOL_GPL(svcauth_gss_flavor); -int +struct auth_domain * svcauth_gss_register_pseudoflavor(u32 pseudoflavor, char * name) { struct gss_domain *new; @@ -817,21 +817,23 @@ svcauth_gss_register_pseudoflavor(u32 pseudoflavor, char * name) new->h.flavour = &svcauthops_gss; new->pseudoflavor = pseudoflavor; - stat = 0; test = auth_domain_lookup(name, &new->h); - if (test != &new->h) { /* Duplicate registration */ + if (test != &new->h) { + pr_warn("svc: duplicate registration of gss pseudo flavour %s.\n", + name); + stat = -EADDRINUSE; auth_domain_put(test); - kfree(new->h.name); - goto out_free_dom; + goto out_free_name; } - return 0; + return test; +out_free_name: + kfree(new->h.name); out_free_dom: kfree(new); out: - return stat; + return ERR_PTR(stat); } - EXPORT_SYMBOL_GPL(svcauth_gss_register_pseudoflavor); static inline int @@ -947,7 +949,6 @@ unwrap_priv_data(struct svc_rqst *rqstp, struct xdr_buf *buf, u32 seq, struct gs maj_stat = gss_unwrap(ctx, 0, priv_len, buf); pad = priv_len - buf->len; - buf->len -= pad; /* The upper layers assume the buffer is aligned on 4-byte boundaries. * In the krb5p case, at least, the data ends up offset, so we need to * move it around. 
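svcauth_gss_register_pseudoflavor() above now returns the auth_domain (or an encoded errno) instead of a bare status, so gss_mech_svc_setup() can store the reference that gss_mech_free() later puts. A plain-C sketch of the ERR_PTR/IS_ERR convention that carries the error in the pointer; the stand-in helpers, struct auth_domain and register_pseudoflavor() are illustrative (the kernel versions live in <linux/err.h> and sunrpc).

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define MAX_ERRNO 4095

    /* Minimal stand-ins for the kernel's ERR_PTR()/PTR_ERR()/IS_ERR(). */
    static void *ERR_PTR(long err) { return (void *)err; }
    static long PTR_ERR(const void *p) { return (long)p; }
    static int IS_ERR(const void *p)
    {
        return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
    }

    struct auth_domain { const char *name; };

    static struct auth_domain existing = { "krb5" };

    /* Return the new domain, or an encoded errno on duplicate registration,
     * so the caller can both detect failure and keep the object it must
     * release later. */
    static struct auth_domain *register_pseudoflavor(const char *name)
    {
        struct auth_domain *dom;

        if (strcmp(name, existing.name) == 0)
            return ERR_PTR(-EADDRINUSE);

        dom = malloc(sizeof(*dom));
        if (!dom)
            return ERR_PTR(-ENOMEM);
        dom->name = name;
        return dom;
    }

    int main(void)
    {
        struct auth_domain *dom = register_pseudoflavor("krb5");

        if (IS_ERR(dom))
            printf("duplicate: err=%ld\n", PTR_ERR(dom));

        dom = register_pseudoflavor("spkm3");
        if (!IS_ERR(dom)) {
            printf("registered %s\n", dom->name);
            free(dom);
        }
        return 0;
    }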
*/ diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c index b71a39ded930..37792675ed57 100644 --- a/net/sunrpc/rpc_pipe.c +++ b/net/sunrpc/rpc_pipe.c @@ -1317,6 +1317,7 @@ rpc_gssd_dummy_populate(struct dentry *root, struct rpc_pipe *pipe_data) q.len = strlen(gssd_dummy_clnt_dir[0].name); clnt_dentry = d_hash_and_lookup(gssd_dentry, &q); if (!clnt_dentry) { + __rpc_depopulate(gssd_dentry, gssd_dummy_clnt_dir, 0, 1); pipe_dentry = ERR_PTR(-ENOENT); goto out; } diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c index 4a020b688860..1db9f62e466d 100644 --- a/net/sunrpc/rpcb_clnt.c +++ b/net/sunrpc/rpcb_clnt.c @@ -988,8 +988,8 @@ static int rpcb_dec_getaddr(struct rpc_rqst *req, struct xdr_stream *xdr, p = xdr_inline_decode(xdr, len); if (unlikely(p == NULL)) goto out_fail; - dprintk("RPC: %5u RPCB_%s reply: %s\n", req->rq_task->tk_pid, - req->rq_task->tk_msg.rpc_proc->p_name, (char *)p); + dprintk("RPC: %5u RPCB_%s reply: %*pE\n", req->rq_task->tk_pid, + req->rq_task->tk_msg.rpc_proc->p_name, len, (char *)p); if (rpc_uaddr2sockaddr(req->rq_xprt->xprt_net, (char *)p, len, sap, sizeof(address)) == 0) diff --git a/net/sunrpc/sunrpc.h b/net/sunrpc/sunrpc.h index c9bacb3c930f..82035fa65b8f 100644 --- a/net/sunrpc/sunrpc.h +++ b/net/sunrpc/sunrpc.h @@ -56,4 +56,5 @@ int svc_send_common(struct socket *sock, struct xdr_buf *xdr, int rpc_clients_notifier_register(void); void rpc_clients_notifier_unregister(void); +void auth_domain_cleanup(void); #endif /* _NET_SUNRPC_SUNRPC_H */ diff --git a/net/sunrpc/sunrpc_syms.c b/net/sunrpc/sunrpc_syms.c index f9edaa9174a4..236fadc4a439 100644 --- a/net/sunrpc/sunrpc_syms.c +++ b/net/sunrpc/sunrpc_syms.c @@ -23,6 +23,7 @@ #include #include +#include "sunrpc.h" #include "netns.h" unsigned int sunrpc_net_id; @@ -131,6 +132,7 @@ cleanup_sunrpc(void) unregister_rpc_pipefs(); rpc_destroy_mempool(); unregister_pernet_subsys(&sunrpc_net_ops); + auth_domain_cleanup(); #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) rpc_unregister_sysctl(); #endif diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c index d11b70552c33..f0dcb6d14bbb 100644 --- a/net/sunrpc/svc.c +++ b/net/sunrpc/svc.c @@ -1634,6 +1634,22 @@ u32 svc_max_payload(const struct svc_rqst *rqstp) } EXPORT_SYMBOL_GPL(svc_max_payload); +/** + * svc_encode_read_payload - mark a range of bytes as a READ payload + * @rqstp: svc_rqst to operate on + * @offset: payload's byte offset in rqstp->rq_res + * @length: size of payload, in bytes + * + * Returns zero on success, or a negative errno if a permanent + * error occurred. + */ +int svc_encode_read_payload(struct svc_rqst *rqstp, unsigned int offset, + unsigned int length) +{ + return rqstp->rq_xprt->xpt_ops->xpo_read_payload(rqstp, offset, length); +} +EXPORT_SYMBOL_GPL(svc_encode_read_payload); + /** * svc_fill_write_vector - Construct data argument for VFS write call * @rqstp: svc_rqst to operate on diff --git a/net/sunrpc/svcauth.c b/net/sunrpc/svcauth.c index 550b214cb001..998b196b6176 100644 --- a/net/sunrpc/svcauth.c +++ b/net/sunrpc/svcauth.c @@ -19,6 +19,10 @@ #include #include +#include + +#include "sunrpc.h" + #define RPCDBG_FACILITY RPCDBG_AUTH @@ -203,3 +207,26 @@ struct auth_domain *auth_domain_find(char *name) return NULL; } EXPORT_SYMBOL_GPL(auth_domain_find); + +/** + * auth_domain_cleanup - check that the auth_domain table is empty + * + * On module unload the auth_domain_table must be empty. To make it + * easier to catch bugs which don't clean up domains properly, we + * warn if anything remains in the table at cleanup time. 
+ * + * Note that we cannot proactively remove the domains at this stage. + * The ->release() function might be in a module that has already been + * unloaded. + */ + +void auth_domain_cleanup(void) +{ + int h; + struct auth_domain *hp; + + for (h = 0; h < DN_HASHMAX; h++) + hlist_for_each_entry(hp, &auth_domain_table[h], hash) + pr_warn("svc: domain %s still present at module unload.\n", + hp->name); +} diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index 4260924ad9db..d52abde51f1b 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c @@ -279,6 +279,12 @@ out: return len; } +static int svc_sock_read_payload(struct svc_rqst *rqstp, unsigned int offset, + unsigned int length) +{ + return 0; +} + /* * Report socket names for nfsdfs */ @@ -655,6 +661,7 @@ static const struct svc_xprt_ops svc_udp_ops = { .xpo_create = svc_udp_create, .xpo_recvfrom = svc_udp_recvfrom, .xpo_sendto = svc_udp_sendto, + .xpo_read_payload = svc_sock_read_payload, .xpo_release_rqst = svc_release_udp_skb, .xpo_detach = svc_sock_detach, .xpo_free = svc_sock_free, @@ -1175,6 +1182,7 @@ static const struct svc_xprt_ops svc_tcp_ops = { .xpo_create = svc_tcp_create, .xpo_recvfrom = svc_tcp_recvfrom, .xpo_sendto = svc_tcp_sendto, + .xpo_read_payload = svc_sock_read_payload, .xpo_release_rqst = svc_release_skb, .xpo_detach = svc_tcp_sock_detach, .xpo_free = svc_sock_free, diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c index 451ca7ec321c..7ef37054071f 100644 --- a/net/sunrpc/xdr.c +++ b/net/sunrpc/xdr.c @@ -1118,6 +1118,7 @@ xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf, base = 0; } else { base -= buf->head[0].iov_len; + subbuf->head[0].iov_base = buf->head[0].iov_base; subbuf->head[0].iov_len = 0; } @@ -1130,6 +1131,8 @@ xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf, base = 0; } else { base -= buf->page_len; + subbuf->pages = buf->pages; + subbuf->page_base = 0; subbuf->page_len = 0; } @@ -1141,6 +1144,7 @@ xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf, base = 0; } else { base -= buf->tail[0].iov_len; + subbuf->tail[0].iov_base = buf->tail[0].iov_base; subbuf->tail[0].iov_len = 0; } diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c index ef5102b60589..21970185485f 100644 --- a/net/sunrpc/xprtrdma/rpc_rdma.c +++ b/net/sunrpc/xprtrdma/rpc_rdma.c @@ -71,7 +71,7 @@ static unsigned int rpcrdma_max_call_header_size(unsigned int maxsegs) size = RPCRDMA_HDRLEN_MIN; /* Maximum Read list size */ - size = maxsegs * rpcrdma_readchunk_maxsz * sizeof(__be32); + size += maxsegs * rpcrdma_readchunk_maxsz * sizeof(__be32); /* Minimal Read chunk size */ size += sizeof(__be32); /* segment count */ @@ -96,7 +96,7 @@ static unsigned int rpcrdma_max_reply_header_size(unsigned int maxsegs) size = RPCRDMA_HDRLEN_MIN; /* Maximum Write list size */ - size = sizeof(__be32); /* segment count */ + size += sizeof(__be32); /* segment count */ size += maxsegs * rpcrdma_segment_maxsz * sizeof(__be32); size += sizeof(__be32); /* list discriminator */ @@ -1246,8 +1246,7 @@ rpcrdma_decode_error(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep, be32_to_cpup(p), be32_to_cpu(rep->rr_xid)); } - r_xprt->rx_stats.bad_reply_count++; - return -EREMOTEIO; + return -EIO; } /* Perform XID lookup, reconstruction of the RPC reply, and @@ -1284,13 +1283,11 @@ out: spin_unlock(&xprt->queue_lock); return; -/* If the incoming reply terminated a pending RPC, the next - * RPC call will post a replacement receive buffer as it is - * being marshaled. 
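The xdr_buf_subsegment() fix above copies the head, page and tail base pointers into the sub-buffer even when a given piece ends up with zero length, so later code can still do pointer arithmetic against each region. A much-simplified carve-out over a two-region buffer shows the same idea; the struct and field names are invented and the tail region is omitted for brevity.

#include <stdio.h>
#include <string.h>
#include <sys/uio.h>

struct flat_buf {
	struct iovec head;    /* inline header bytes */
	char        *pages;   /* bulk data region */
	size_t       page_len;
};

/* Carve [base, base + len) of @buf into @sub. Base pointers are copied even
 * for zero-length pieces, mirroring the xdr_buf_subsegment() fix above.
 * Returns 0 on success, -1 if the range does not fit. */
static int subsegment(const struct flat_buf *buf, struct flat_buf *sub,
		      size_t base, size_t len)
{
	memset(sub, 0, sizeof(*sub));

	sub->head.iov_base = buf->head.iov_base;
	if (base < buf->head.iov_len) {
		sub->head.iov_base = (char *)buf->head.iov_base + base;
		sub->head.iov_len = buf->head.iov_len - base;
		if (sub->head.iov_len > len)
			sub->head.iov_len = len;
		len -= sub->head.iov_len;
		base = 0;
	} else {
		base -= buf->head.iov_len;
	}

	sub->pages = buf->pages;
	if (base < buf->page_len) {
		sub->pages = buf->pages + base;
		sub->page_len = buf->page_len - base;
		if (sub->page_len > len)
			sub->page_len = len;
		len -= sub->page_len;
	}

	return len ? -1 : 0;
}

int main(void)
{
	char head[8] = "rpc-hdr", pages[8] = "payload";
	struct flat_buf buf = {
		.head = { head, sizeof(head) },
		.pages = pages, .page_len = sizeof(pages),
	};
	struct flat_buf sub;

	/* Range entirely inside the pages region: the head piece has zero
	 * length but still carries a valid base pointer. */
	if (subsegment(&buf, &sub, 10, 4) == 0)
		printf("head piece %zu bytes, page piece \"%.*s\"\n",
		       sub.head.iov_len, (int)sub.page_len, sub.pages);
	return 0;
}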
- */ out_badheader: trace_xprtrdma_reply_hdr(rep); r_xprt->rx_stats.bad_reply_count++; + rqst->rq_task->tk_status = status; + status = 0; goto out; } diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c index b8ee91ffedda..d803d814a03a 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c +++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c @@ -193,6 +193,7 @@ svc_rdma_recv_ctxt_get(struct svcxprt_rdma *rdma) out: ctxt->rc_page_count = 0; + ctxt->rc_read_payload_length = 0; return ctxt; out_empty: @@ -264,6 +265,8 @@ static int svc_rdma_post_recv(struct svcxprt_rdma *rdma) { struct svc_rdma_recv_ctxt *ctxt; + if (test_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags)) + return 0; ctxt = svc_rdma_recv_ctxt_get(rdma); if (!ctxt) return -ENOMEM; diff --git a/net/sunrpc/xprtrdma/svc_rdma_rw.c b/net/sunrpc/xprtrdma/svc_rdma_rw.c index a59912e2666d..0bb3f0dca80d 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_rw.c +++ b/net/sunrpc/xprtrdma/svc_rdma_rw.c @@ -481,18 +481,19 @@ static int svc_rdma_send_xdr_kvec(struct svc_rdma_write_info *info, vec->iov_len); } -/* Send an xdr_buf's page list by itself. A Write chunk is - * just the page list. a Reply chunk is the head, page list, - * and tail. This function is shared between the two types - * of chunk. +/* Send an xdr_buf's page list by itself. A Write chunk is just + * the page list. A Reply chunk is @xdr's head, page list, and + * tail. This function is shared between the two types of chunk. */ static int svc_rdma_send_xdr_pagelist(struct svc_rdma_write_info *info, - struct xdr_buf *xdr) + struct xdr_buf *xdr, + unsigned int offset, + unsigned long length) { info->wi_xdr = xdr; - info->wi_next_off = 0; + info->wi_next_off = offset - xdr->head[0].iov_len; return svc_rdma_build_writes(info, svc_rdma_pagelist_to_sg, - xdr->page_len); + length); } /** @@ -500,6 +501,8 @@ static int svc_rdma_send_xdr_pagelist(struct svc_rdma_write_info *info, * @rdma: controlling RDMA transport * @wr_ch: Write chunk provided by client * @xdr: xdr_buf containing the data payload + * @offset: payload's byte offset in @xdr + * @length: size of payload, in bytes * * Returns a non-negative number of bytes the chunk consumed, or * %-E2BIG if the payload was larger than the Write chunk, @@ -509,19 +512,20 @@ static int svc_rdma_send_xdr_pagelist(struct svc_rdma_write_info *info, * %-EIO if rdma_rw initialization failed (DMA mapping, etc). */ int svc_rdma_send_write_chunk(struct svcxprt_rdma *rdma, __be32 *wr_ch, - struct xdr_buf *xdr) + struct xdr_buf *xdr, + unsigned int offset, unsigned long length) { struct svc_rdma_write_info *info; int ret; - if (!xdr->page_len) + if (!length) return 0; info = svc_rdma_write_info_alloc(rdma, wr_ch); if (!info) return -ENOMEM; - ret = svc_rdma_send_xdr_pagelist(info, xdr); + ret = svc_rdma_send_xdr_pagelist(info, xdr, offset, length); if (ret < 0) goto out_err; @@ -530,7 +534,7 @@ int svc_rdma_send_write_chunk(struct svcxprt_rdma *rdma, __be32 *wr_ch, goto out_err; trace_svcrdma_encode_write(xdr->page_len); - return xdr->page_len; + return length; out_err: svc_rdma_write_info_free(info); @@ -570,7 +574,9 @@ int svc_rdma_send_reply_chunk(struct svcxprt_rdma *rdma, __be32 *rp_ch, * client did not provide Write chunks. 
*/ if (!writelist && xdr->page_len) { - ret = svc_rdma_send_xdr_pagelist(info, xdr); + ret = svc_rdma_send_xdr_pagelist(info, xdr, + xdr->head[0].iov_len, + xdr->page_len); if (ret < 0) goto out_err; consumed += xdr->page_len; @@ -671,7 +677,6 @@ static int svc_rdma_build_read_chunk(struct svc_rqst *rqstp, struct svc_rdma_read_info *info, __be32 *p) { - unsigned int i; int ret; ret = -EINVAL; @@ -694,12 +699,6 @@ static int svc_rdma_build_read_chunk(struct svc_rqst *rqstp, info->ri_chunklen += rs_length; } - /* Pages under I/O have been copied to head->rc_pages. - * Prevent their premature release by svc_xprt_release() . - */ - for (i = 0; i < info->ri_readctxt->rc_page_count; i++) - rqstp->rq_pages[i] = NULL; - return ret; } @@ -794,6 +793,26 @@ out: return ret; } +/* Pages under I/O have been copied to head->rc_pages. Ensure they + * are not released by svc_xprt_release() until the I/O is complete. + * + * This has to be done after all Read WRs are constructed to properly + * handle a page that is part of I/O on behalf of two different RDMA + * segments. + * + * Do this only if I/O has been posted. Otherwise, we do indeed want + * svc_xprt_release() to clean things up properly. + */ +static void svc_rdma_save_io_pages(struct svc_rqst *rqstp, + const unsigned int start, + const unsigned int num_pages) +{ + unsigned int i; + + for (i = start; i < num_pages + start; i++) + rqstp->rq_pages[i] = NULL; +} + /** * svc_rdma_recv_read_chunk - Pull a Read chunk from the client * @rdma: controlling RDMA transport @@ -847,6 +866,7 @@ int svc_rdma_recv_read_chunk(struct svcxprt_rdma *rdma, struct svc_rqst *rqstp, ret = svc_rdma_post_chunk_ctxt(&info->ri_cc); if (ret < 0) goto out_err; + svc_rdma_save_io_pages(rqstp, 0, head->rc_page_count); return 0; out_err: diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c index 93ff7967389a..217106c66a13 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c +++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c @@ -856,7 +856,18 @@ int svc_rdma_sendto(struct svc_rqst *rqstp) if (wr_lst) { /* XXX: Presume the client sent only one Write chunk */ - ret = svc_rdma_send_write_chunk(rdma, wr_lst, xdr); + unsigned long offset; + unsigned int length; + + if (rctxt->rc_read_payload_length) { + offset = rctxt->rc_read_payload_offset; + length = rctxt->rc_read_payload_length; + } else { + offset = xdr->head[0].iov_len; + length = xdr->page_len; + } + ret = svc_rdma_send_write_chunk(rdma, wr_lst, xdr, offset, + length); if (ret < 0) goto err2; svc_rdma_xdr_encode_write_list(rdma_resp, wr_lst, ret); @@ -891,3 +902,30 @@ int svc_rdma_sendto(struct svc_rqst *rqstp) set_bit(XPT_CLOSE, &xprt->xpt_flags); return -ENOTCONN; } + +/** + * svc_rdma_read_payload - special processing for a READ payload + * @rqstp: svc_rqst to operate on + * @offset: payload's byte offset in @xdr + * @length: size of payload, in bytes + * + * Returns zero on success. + * + * For the moment, just record the xdr_buf location of the READ + * payload. svc_rdma_sendto will use that location later when + * we actually send the payload. + */ +int svc_rdma_read_payload(struct svc_rqst *rqstp, unsigned int offset, + unsigned int length) +{ + struct svc_rdma_recv_ctxt *rctxt = rqstp->rq_xprt_ctxt; + + /* XXX: Just one READ payload slot for now, since our + * transport implementation currently supports only one + * Write chunk. 
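The new svc_rdma_save_io_pages() above hides pages that are still under RDMA I/O from the generic per-request cleanup by clearing their rq_pages slots, and it runs only after all Read WRs have been built so a page shared by two segments is not hidden too early. A toy version of that ownership handoff over a plain pointer array, with invented names:

#include <stdio.h>
#include <stdlib.h>

#define NR_PAGES 4

/* Clear the caller-owned slots so generic cleanup skips pages that an
 * asynchronous I/O path still holds. */
static void save_io_pages(void **pages, unsigned int start, unsigned int count)
{
	for (unsigned int i = start; i < start + count; i++)
		pages[i] = NULL;
}

/* Generic per-request cleanup: frees whatever is still in the array. */
static void release_pages(void **pages, unsigned int count)
{
	for (unsigned int i = 0; i < count; i++)
		free(pages[i]);          /* free(NULL) is a no-op */
}

int main(void)
{
	void *pages[NR_PAGES];
	void *in_flight[2];

	for (int i = 0; i < NR_PAGES; i++)
		pages[i] = malloc(64);

	in_flight[0] = pages[0];         /* the I/O path keeps its own references */
	in_flight[1] = pages[1];

	save_io_pages(pages, 0, 2);      /* first two pages handed to the I/O path */
	release_pages(pages, NR_PAGES);  /* only the remaining two are freed here */

	/* In real code the I/O completion handler would release these. */
	free(in_flight[0]);
	free(in_flight[1]);
	return 0;
}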
+ */ + rctxt->rc_read_payload_offset = offset; + rctxt->rc_read_payload_length = length; + + return 0; +} diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c index 889220f11a70..89a12676c59d 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_transport.c +++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c @@ -81,6 +81,7 @@ static const struct svc_xprt_ops svc_rdma_ops = { .xpo_create = svc_rdma_create, .xpo_recvfrom = svc_rdma_recvfrom, .xpo_sendto = svc_rdma_sendto, + .xpo_read_payload = svc_rdma_read_payload, .xpo_release_rqst = svc_rdma_release_rqst, .xpo_detach = svc_rdma_detach, .xpo_free = svc_rdma_free, diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c index d4d2928424e2..11be9a84f8de 100644 --- a/net/tipc/netlink_compat.c +++ b/net/tipc/netlink_compat.c @@ -255,8 +255,9 @@ err_out: static int tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd, struct tipc_nl_compat_msg *msg) { - int err; + struct nlmsghdr *nlh; struct sk_buff *arg; + int err; if (msg->req_type && (!msg->req_size || !TLV_CHECK_TYPE(msg->req, msg->req_type))) @@ -285,6 +286,15 @@ static int tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd, return -ENOMEM; } + nlh = nlmsg_put(arg, 0, 0, tipc_genl_family.id, 0, NLM_F_MULTI); + if (!nlh) { + kfree_skb(arg); + kfree_skb(msg->rep); + msg->rep = NULL; + return -EMSGSIZE; + } + nlmsg_end(arg, nlh); + err = __tipc_nl_compat_dumpit(cmd, msg, arg); if (err) { kfree_skb(msg->rep); diff --git a/net/tipc/socket.c b/net/tipc/socket.c index aea951a1f805..5318bb6611ab 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -2616,18 +2616,21 @@ static int tipc_shutdown(struct socket *sock, int how) trace_tipc_sk_shutdown(sk, NULL, TIPC_DUMP_ALL, " "); __tipc_shutdown(sock, TIPC_CONN_SHUTDOWN); - sk->sk_shutdown = SEND_SHUTDOWN; + if (tipc_sk_type_connectionless(sk)) + sk->sk_shutdown = SHUTDOWN_MASK; + else + sk->sk_shutdown = SEND_SHUTDOWN; if (sk->sk_state == TIPC_DISCONNECTING) { /* Discard any unreceived messages */ __skb_queue_purge(&sk->sk_receive_queue); - /* Wake up anyone sleeping in poll */ - sk->sk_state_change(sk); res = 0; } else { res = -ENOTCONN; } + /* Wake up anyone sleeping in poll. */ + sk->sk_state_change(sk); release_sock(sk); return res; diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c index 1adeb1c0473b..25fca390cdcf 100644 --- a/net/tls/tls_device.c +++ b/net/tls/tls_device.c @@ -549,7 +549,7 @@ int tls_device_sendpage(struct sock *sk, struct page *page, { struct tls_context *tls_ctx = tls_get_ctx(sk); struct iov_iter msg_iter; - char *kaddr = kmap(page); + char *kaddr; struct kvec iov; int rc; @@ -564,6 +564,7 @@ int tls_device_sendpage(struct sock *sk, struct page *page, goto out; } + kaddr = kmap(page); iov.iov_base = kaddr + offset; iov.iov_len = size; iov_iter_kvec(&msg_iter, WRITE, &iov, 1, size); diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c index fbf6a496ee8b..70b203e5d5fd 100644 --- a/net/tls/tls_sw.c +++ b/net/tls/tls_sw.c @@ -1737,6 +1737,7 @@ int tls_sw_recvmsg(struct sock *sk, long timeo; bool is_kvec = iov_iter_is_kvec(&msg->msg_iter); bool is_peek = flags & MSG_PEEK; + bool bpf_strp_enabled; int num_async = 0; int pending; @@ -1747,6 +1748,7 @@ int tls_sw_recvmsg(struct sock *sk, psock = sk_psock_get(sk); lock_sock(sk); + bpf_strp_enabled = sk_psock_strp_enabled(psock); /* Process pending decrypted records. 
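The tls_device_sendpage() change above moves the kmap() call below the early sanity checks, so the mapping is never taken on a path that bails out before using it. The same "validate first, acquire last" shape is sketched below with an invented acquire/release pair standing in for kmap()/kunmap() and made-up validation rules.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-ins for kmap()/kunmap(): any acquire/release pair behaves the same. */
static char *acquire_mapping(size_t size) { return malloc(size); }
static void  release_mapping(char *p)     { free(p); }

static int send_buffer(size_t size, int flags)
{
	char *kaddr;

	/* Do all argument validation before acquiring the resource, so the
	 * error paths have nothing to undo. */
	if (flags != 0)
		return -EOPNOTSUPP;
	if (size == 0)
		return 0;

	kaddr = acquire_mapping(size);
	if (!kaddr)
		return -ENOMEM;

	printf("sending %zu bytes via %p\n", size, (void *)kaddr);
	release_mapping(kaddr);
	return 0;
}

int main(void)
{
	printf("rc = %d\n", send_buffer(16, 1));  /* rejected before any mapping */
	printf("rc = %d\n", send_buffer(16, 0));
	return 0;
}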
It must be non-zero-copy */ err = process_rx_list(ctx, msg, &control, &cmsg, 0, len, false, @@ -1800,11 +1802,12 @@ int tls_sw_recvmsg(struct sock *sk, if (to_decrypt <= len && !is_kvec && !is_peek && ctx->control == TLS_RECORD_TYPE_DATA && - prot->version != TLS_1_3_VERSION) + prot->version != TLS_1_3_VERSION && + !bpf_strp_enabled) zc = true; /* Do not use async mode if record is non-data */ - if (ctx->control == TLS_RECORD_TYPE_DATA) + if (ctx->control == TLS_RECORD_TYPE_DATA && !bpf_strp_enabled) async_capable = ctx->async_capable; else async_capable = false; @@ -1854,6 +1857,19 @@ int tls_sw_recvmsg(struct sock *sk, goto pick_next_record; if (!zc) { + if (bpf_strp_enabled) { + err = sk_psock_tls_strp_read(psock, skb); + if (err != __SK_PASS) { + rxm->offset = rxm->offset + rxm->full_len; + rxm->full_len = 0; + if (err == __SK_DROP) + consume_skb(skb); + ctx->recv_pkt = NULL; + __strp_unpause(&ctx->strp); + continue; + } + } + if (rxm->full_len > len) { retain_skb = true; chunk = len; diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c index 082a30936690..861ec9a671f9 100644 --- a/net/vmw_vsock/virtio_transport.c +++ b/net/vmw_vsock/virtio_transport.c @@ -22,7 +22,7 @@ #include static struct workqueue_struct *virtio_vsock_workqueue; -static struct virtio_vsock *the_virtio_vsock; +static struct virtio_vsock __rcu *the_virtio_vsock; static DEFINE_MUTEX(the_virtio_vsock_mutex); /* protects the_virtio_vsock */ struct virtio_vsock { diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index b65180e874fb..ec559dbad56e 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -4798,7 +4798,8 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info) err = nl80211_parse_he_obss_pd( info->attrs[NL80211_ATTR_HE_OBSS_PD], ¶ms.he_obss_pd); - goto out; + if (err) + goto out; } nl80211_calculate_ap_params(¶ms); @@ -12948,13 +12949,13 @@ static int nl80211_vendor_cmd(struct sk_buff *skb, struct genl_info *info) if (!wdev_running(wdev)) return -ENETDOWN; } - - if (!vcmd->doit) - return -EOPNOTSUPP; } else { wdev = NULL; } + if (!vcmd->doit) + return -EOPNOTSUPP; + if (info->attrs[NL80211_ATTR_VENDOR_DATA]) { data = nla_data(info->attrs[NL80211_ATTR_VENDOR_DATA]); len = nla_len(info->attrs[NL80211_ATTR_VENDOR_DATA]); diff --git a/net/wireless/reg.c b/net/wireless/reg.c index b0e7dd04bbe9..7a75db7533f8 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c @@ -2941,6 +2941,9 @@ int regulatory_hint_user(const char *alpha2, if (WARN_ON(!alpha2)) return -EINVAL; + if (!is_world_regdom(alpha2) && !is_an_alpha2(alpha2)) + return -EINVAL; + request = kzalloc(sizeof(struct regulatory_request), GFP_KERNEL); if (!request) return -ENOMEM; diff --git a/net/wireless/util.c b/net/wireless/util.c index 82cf62d5156e..6c430d9df59a 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c @@ -116,11 +116,13 @@ int ieee80211_frequency_to_channel(int freq) return (freq - 2407) / 5; else if (freq >= 4910 && freq <= 4980) return (freq - 4000) / 5; - else if (freq < 5945) + else if (freq < 5925) return (freq - 5000) / 5; + else if (freq == 5935) + return 2; else if (freq <= 45000) /* DMG band lower limit */ - /* see 802.11ax D4.1 27.3.22.2 */ - return (freq - 5940) / 5; + /* see 802.11ax D6.1 27.3.22.2 */ + return (freq - 5950) / 5; else if (freq >= 58320 && freq <= 70200) return (freq - 56160) / 2160; else diff --git a/net/x25/x25_subr.c b/net/x25/x25_subr.c index 8aa415a38814..0285aaa1e93c 100644 --- a/net/x25/x25_subr.c +++ b/net/x25/x25_subr.c @@ -357,6 
+357,12 @@ void x25_disconnect(struct sock *sk, int reason, unsigned char cause, sk->sk_state_change(sk); sock_set_flag(sk, SOCK_DEAD); } + if (x25->neighbour) { + read_lock_bh(&x25_list_lock); + x25_neigh_put(x25->neighbour); + x25->neighbour = NULL; + read_unlock_bh(&x25_list_lock); + } } /* diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c index 7181a30666b4..f9eb5efb237c 100644 --- a/net/xdp/xsk.c +++ b/net/xdp/xsk.c @@ -362,10 +362,8 @@ static int xsk_generic_xmit(struct sock *sk) len = desc.len; skb = sock_alloc_send_skb(sk, len, 1, &err); - if (unlikely(!skb)) { - err = -EAGAIN; + if (unlikely(!skb)) goto out; - } skb_put(skb, len); addr = desc.addr; diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c index c365b918be35..bb2292b5260c 100644 --- a/net/xfrm/xfrm_device.c +++ b/net/xfrm/xfrm_device.c @@ -82,7 +82,7 @@ struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t featur struct xfrm_offload *xo = xfrm_offload(skb); struct sec_path *sp; - if (!xo) + if (!xo || (xo->flags & XFRM_XMIT)) return skb; if (!(features & NETIF_F_HW_ESP)) @@ -103,6 +103,8 @@ struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t featur return skb; } + xo->flags |= XFRM_XMIT; + if (skb_is_gso(skb)) { struct net_device *dev = skb->dev; diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 6a1a21ae47bb..2917711ff8ab 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -1430,14 +1430,10 @@ static void xfrm_policy_requeue(struct xfrm_policy *old, spin_unlock_bh(&pq->hold_queue.lock); } -static bool xfrm_policy_mark_match(struct xfrm_policy *policy, - struct xfrm_policy *pol) +static inline bool xfrm_policy_mark_match(const struct xfrm_mark *mark, + struct xfrm_policy *pol) { - if (policy->mark.v == pol->mark.v && - policy->priority == pol->priority) - return true; - - return false; + return mark->v == pol->mark.v && mark->m == pol->mark.m; } static u32 xfrm_pol_bin_key(const void *data, u32 len, u32 seed) @@ -1500,7 +1496,7 @@ static void xfrm_policy_insert_inexact_list(struct hlist_head *chain, if (pol->type == policy->type && pol->if_id == policy->if_id && !selector_cmp(&pol->selector, &policy->selector) && - xfrm_policy_mark_match(policy, pol) && + xfrm_policy_mark_match(&policy->mark, pol) && xfrm_sec_ctx_match(pol->security, policy->security) && !WARN_ON(delpol)) { delpol = pol; @@ -1535,7 +1531,7 @@ static struct xfrm_policy *xfrm_policy_insert_list(struct hlist_head *chain, if (pol->type == policy->type && pol->if_id == policy->if_id && !selector_cmp(&pol->selector, &policy->selector) && - xfrm_policy_mark_match(policy, pol) && + xfrm_policy_mark_match(&policy->mark, pol) && xfrm_sec_ctx_match(pol->security, policy->security) && !WARN_ON(delpol)) { if (excl) @@ -1607,9 +1603,8 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl) EXPORT_SYMBOL(xfrm_policy_insert); static struct xfrm_policy * -__xfrm_policy_bysel_ctx(struct hlist_head *chain, u32 mark, u32 if_id, - u8 type, int dir, - struct xfrm_selector *sel, +__xfrm_policy_bysel_ctx(struct hlist_head *chain, const struct xfrm_mark *mark, + u32 if_id, u8 type, int dir, struct xfrm_selector *sel, struct xfrm_sec_ctx *ctx) { struct xfrm_policy *pol; @@ -1620,7 +1615,7 @@ __xfrm_policy_bysel_ctx(struct hlist_head *chain, u32 mark, u32 if_id, hlist_for_each_entry(pol, chain, bydst) { if (pol->type == type && pol->if_id == if_id && - (mark & pol->mark.m) == pol->mark.v && + xfrm_policy_mark_match(mark, pol) && !selector_cmp(sel, &pol->selector) && xfrm_sec_ctx_match(ctx, 
pol->security)) return pol; @@ -1629,11 +1624,10 @@ __xfrm_policy_bysel_ctx(struct hlist_head *chain, u32 mark, u32 if_id, return NULL; } -struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u32 if_id, - u8 type, int dir, - struct xfrm_selector *sel, - struct xfrm_sec_ctx *ctx, int delete, - int *err) +struct xfrm_policy * +xfrm_policy_bysel_ctx(struct net *net, const struct xfrm_mark *mark, u32 if_id, + u8 type, int dir, struct xfrm_selector *sel, + struct xfrm_sec_ctx *ctx, int delete, int *err) { struct xfrm_pol_inexact_bin *bin = NULL; struct xfrm_policy *pol, *ret = NULL; @@ -1700,9 +1694,9 @@ struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u32 if_id, } EXPORT_SYMBOL(xfrm_policy_bysel_ctx); -struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u32 if_id, - u8 type, int dir, u32 id, int delete, - int *err) +struct xfrm_policy * +xfrm_policy_byid(struct net *net, const struct xfrm_mark *mark, u32 if_id, + u8 type, int dir, u32 id, int delete, int *err) { struct xfrm_policy *pol, *ret; struct hlist_head *chain; @@ -1717,8 +1711,7 @@ struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u32 if_id, ret = NULL; hlist_for_each_entry(pol, chain, byidx) { if (pol->type == type && pol->index == id && - pol->if_id == if_id && - (mark & pol->mark.m) == pol->mark.v) { + pol->if_id == if_id && xfrm_policy_mark_match(mark, pol)) { xfrm_pol_hold(pol); if (delete) { *err = security_xfrm_policy_delete( diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index e6cfaa680ef3..fbb7d9d06478 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c @@ -1863,7 +1863,6 @@ static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh, struct km_event c; int delete; struct xfrm_mark m; - u32 mark = xfrm_mark_get(attrs, &m); u32 if_id = 0; p = nlmsg_data(nlh); @@ -1880,8 +1879,11 @@ static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh, if (attrs[XFRMA_IF_ID]) if_id = nla_get_u32(attrs[XFRMA_IF_ID]); + xfrm_mark_get(attrs, &m); + if (p->index) - xp = xfrm_policy_byid(net, mark, if_id, type, p->dir, p->index, delete, &err); + xp = xfrm_policy_byid(net, &m, if_id, type, p->dir, + p->index, delete, &err); else { struct nlattr *rt = attrs[XFRMA_SEC_CTX]; struct xfrm_sec_ctx *ctx; @@ -1898,8 +1900,8 @@ static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh, if (err) return err; } - xp = xfrm_policy_bysel_ctx(net, mark, if_id, type, p->dir, &p->sel, - ctx, delete, &err); + xp = xfrm_policy_bysel_ctx(net, &m, if_id, type, p->dir, + &p->sel, ctx, delete, &err); security_xfrm_policy_free(ctx); } if (xp == NULL) @@ -2166,7 +2168,6 @@ static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh, u8 type = XFRM_POLICY_TYPE_MAIN; int err = -ENOENT; struct xfrm_mark m; - u32 mark = xfrm_mark_get(attrs, &m); u32 if_id = 0; err = copy_from_user_policy_type(&type, attrs); @@ -2180,8 +2181,11 @@ static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh, if (attrs[XFRMA_IF_ID]) if_id = nla_get_u32(attrs[XFRMA_IF_ID]); + xfrm_mark_get(attrs, &m); + if (p->index) - xp = xfrm_policy_byid(net, mark, if_id, type, p->dir, p->index, 0, &err); + xp = xfrm_policy_byid(net, &m, if_id, type, p->dir, p->index, + 0, &err); else { struct nlattr *rt = attrs[XFRMA_SEC_CTX]; struct xfrm_sec_ctx *ctx; @@ -2198,7 +2202,7 @@ static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh, if (err) return err; } - xp = xfrm_policy_bysel_ctx(net, mark, if_id, type, p->dir, + xp = xfrm_policy_bysel_ctx(net, &m, if_id, 
type, p->dir, &p->sel, ctx, 0, &err); security_xfrm_policy_free(ctx); } diff --git a/samples/bpf/fds_example.c b/samples/bpf/fds_example.c index 2d4b717726b6..34b3fca788e8 100644 --- a/samples/bpf/fds_example.c +++ b/samples/bpf/fds_example.c @@ -30,6 +30,8 @@ #define BPF_M_MAP 1 #define BPF_M_PROG 2 +char bpf_log_buf[BPF_LOG_BUF_SIZE]; + static void usage(void) { printf("Usage: fds_example [...]\n"); @@ -57,7 +59,6 @@ static int bpf_prog_create(const char *object) BPF_EXIT_INSN(), }; size_t insns_cnt = sizeof(insns) / sizeof(struct bpf_insn); - char bpf_log_buf[BPF_LOG_BUF_SIZE]; struct bpf_object *obj; int prog_fd; diff --git a/samples/bpf/xdp_monitor_user.c b/samples/bpf/xdp_monitor_user.c index dd558cbb2309..ef53b93db573 100644 --- a/samples/bpf/xdp_monitor_user.c +++ b/samples/bpf/xdp_monitor_user.c @@ -509,11 +509,8 @@ static void *alloc_rec_per_cpu(int record_size) { unsigned int nr_cpus = bpf_num_possible_cpus(); void *array; - size_t size; - size = record_size * nr_cpus; - array = malloc(size); - memset(array, 0, size); + array = calloc(nr_cpus, record_size); if (!array) { fprintf(stderr, "Mem alloc error (nr_cpus:%u)\n", nr_cpus); exit(EXIT_FAIL_MEM); @@ -528,8 +525,7 @@ static struct stats_record *alloc_stats_record(void) int i; /* Alloc main stats_record structure */ - rec = malloc(sizeof(*rec)); - memset(rec, 0, sizeof(*rec)); + rec = calloc(1, sizeof(*rec)); if (!rec) { fprintf(stderr, "Mem alloc error\n"); exit(EXIT_FAIL_MEM); diff --git a/samples/bpf/xdp_redirect_cpu_kern.c b/samples/bpf/xdp_redirect_cpu_kern.c index cfcc31e51197..d94a999b4b4b 100644 --- a/samples/bpf/xdp_redirect_cpu_kern.c +++ b/samples/bpf/xdp_redirect_cpu_kern.c @@ -15,7 +15,7 @@ #include "bpf_helpers.h" #include "hash_func01.h" -#define MAX_CPUS 64 /* WARNING - sync with _user.c */ +#define MAX_CPUS NR_CPUS /* Special map type that can XDP_REDIRECT frames to another CPU */ struct { diff --git a/samples/bpf/xdp_redirect_cpu_user.c b/samples/bpf/xdp_redirect_cpu_user.c index 8b862a7a6c6a..0a7672556822 100644 --- a/samples/bpf/xdp_redirect_cpu_user.c +++ b/samples/bpf/xdp_redirect_cpu_user.c @@ -13,6 +13,7 @@ static const char *__doc__ = #include #include #include +#include #include #include #include @@ -24,8 +25,6 @@ static const char *__doc__ = #include #include -#define MAX_CPUS 64 /* WARNING - sync with _kern.c */ - /* How many xdp_progs are defined in _kern.c */ #define MAX_PROG 6 @@ -40,6 +39,7 @@ static char *ifname; static __u32 prog_id; static __u32 xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST; +static int n_cpus; static int cpu_map_fd; static int rx_cnt_map_fd; static int redirect_err_cnt_map_fd; @@ -170,7 +170,7 @@ struct stats_record { struct record redir_err; struct record kthread; struct record exception; - struct record enq[MAX_CPUS]; + struct record enq[]; }; static bool map_collect_percpu(int fd, __u32 key, struct record *rec) @@ -210,11 +210,8 @@ static struct datarec *alloc_record_per_cpu(void) { unsigned int nr_cpus = bpf_num_possible_cpus(); struct datarec *array; - size_t size; - size = sizeof(struct datarec) * nr_cpus; - array = malloc(size); - memset(array, 0, size); + array = calloc(nr_cpus, sizeof(struct datarec)); if (!array) { fprintf(stderr, "Mem alloc error (nr_cpus:%u)\n", nr_cpus); exit(EXIT_FAIL_MEM); @@ -225,19 +222,20 @@ static struct datarec *alloc_record_per_cpu(void) static struct stats_record *alloc_stats_record(void) { struct stats_record *rec; - int i; + int i, size; - rec = malloc(sizeof(*rec)); - memset(rec, 0, sizeof(*rec)); + size = sizeof(*rec) + n_cpus * 
sizeof(struct record); + rec = malloc(size); if (!rec) { fprintf(stderr, "Mem alloc error\n"); exit(EXIT_FAIL_MEM); } + memset(rec, 0, size); rec->rx_cnt.cpu = alloc_record_per_cpu(); rec->redir_err.cpu = alloc_record_per_cpu(); rec->kthread.cpu = alloc_record_per_cpu(); rec->exception.cpu = alloc_record_per_cpu(); - for (i = 0; i < MAX_CPUS; i++) + for (i = 0; i < n_cpus; i++) rec->enq[i].cpu = alloc_record_per_cpu(); return rec; @@ -247,7 +245,7 @@ static void free_stats_record(struct stats_record *r) { int i; - for (i = 0; i < MAX_CPUS; i++) + for (i = 0; i < n_cpus; i++) free(r->enq[i].cpu); free(r->exception.cpu); free(r->kthread.cpu); @@ -350,7 +348,7 @@ static void stats_print(struct stats_record *stats_rec, } /* cpumap enqueue stats */ - for (to_cpu = 0; to_cpu < MAX_CPUS; to_cpu++) { + for (to_cpu = 0; to_cpu < n_cpus; to_cpu++) { char *fmt = "%-15s %3d:%-3d %'-14.0f %'-11.0f %'-10.2f %s\n"; char *fm2 = "%-15s %3s:%-3d %'-14.0f %'-11.0f %'-10.2f %s\n"; char *errstr = ""; @@ -475,7 +473,7 @@ static void stats_collect(struct stats_record *rec) map_collect_percpu(fd, 1, &rec->redir_err); fd = cpumap_enqueue_cnt_map_fd; - for (i = 0; i < MAX_CPUS; i++) + for (i = 0; i < n_cpus; i++) map_collect_percpu(fd, i, &rec->enq[i]); fd = cpumap_kthread_cnt_map_fd; @@ -549,10 +547,10 @@ static int create_cpu_entry(__u32 cpu, __u32 queue_size, */ static void mark_cpus_unavailable(void) { - __u32 invalid_cpu = MAX_CPUS; + __u32 invalid_cpu = n_cpus; int ret, i; - for (i = 0; i < MAX_CPUS; i++) { + for (i = 0; i < n_cpus; i++) { ret = bpf_map_update_elem(cpus_available_map_fd, &i, &invalid_cpu, 0); if (ret) { @@ -688,6 +686,8 @@ int main(int argc, char **argv) int prog_fd; __u32 qsize; + n_cpus = get_nprocs_conf(); + /* Notice: choosing he queue size is very important with the * ixgbe driver, because it's driver page recycling trick is * dependend on pages being returned quickly. 
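The xdp_redirect_cpu changes above drop the compile-time MAX_CPUS array in favour of a flexible array member sized from get_nprocs_conf() at run time, and they also stop zeroing the buffer before the allocation has been checked. A small self-contained version of that allocation pattern:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/sysinfo.h>

struct record {
	unsigned long processed;
	unsigned long dropped;
};

struct stats_record {
	struct record rx_cnt;
	struct record enq[];      /* one slot per possible CPU, sized at run time */
};

static struct stats_record *alloc_stats_record(int n_cpus)
{
	size_t size = sizeof(struct stats_record) + n_cpus * sizeof(struct record);
	struct stats_record *rec = malloc(size);

	if (!rec)
		return NULL;
	memset(rec, 0, size);     /* zero only after the allocation succeeded */
	return rec;
}

int main(void)
{
	int n_cpus = get_nprocs_conf();
	struct stats_record *rec = alloc_stats_record(n_cpus);

	if (!rec)
		return 1;
	rec->enq[n_cpus - 1].processed = 42;
	printf("allocated per-CPU records for %d CPUs\n", n_cpus);
	free(rec);
	return 0;
}

For the fixed-size per-CPU arrays elsewhere in these samples, calloc() gives the same "allocate then zero" behaviour in one call, which is why several malloc()+memset() pairs above become plain calloc() calls.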
The number of @@ -757,7 +757,7 @@ int main(int argc, char **argv) case 'c': /* Add multiple CPUs */ add_cpu = strtoul(optarg, NULL, 0); - if (add_cpu >= MAX_CPUS) { + if (add_cpu >= n_cpus) { fprintf(stderr, "--cpu nr too large for cpumap err(%d):%s\n", errno, strerror(errno)); diff --git a/samples/bpf/xdp_rxq_info_user.c b/samples/bpf/xdp_rxq_info_user.c index b88df17853b8..21d6e5067a83 100644 --- a/samples/bpf/xdp_rxq_info_user.c +++ b/samples/bpf/xdp_rxq_info_user.c @@ -198,11 +198,8 @@ static struct datarec *alloc_record_per_cpu(void) { unsigned int nr_cpus = bpf_num_possible_cpus(); struct datarec *array; - size_t size; - size = sizeof(struct datarec) * nr_cpus; - array = malloc(size); - memset(array, 0, size); + array = calloc(nr_cpus, sizeof(struct datarec)); if (!array) { fprintf(stderr, "Mem alloc error (nr_cpus:%u)\n", nr_cpus); exit(EXIT_FAIL_MEM); @@ -214,11 +211,8 @@ static struct record *alloc_record_per_rxq(void) { unsigned int nr_rxqs = bpf_map__def(rx_queue_index_map)->max_entries; struct record *array; - size_t size; - size = sizeof(struct record) * nr_rxqs; - array = malloc(size); - memset(array, 0, size); + array = calloc(nr_rxqs, sizeof(struct record)); if (!array) { fprintf(stderr, "Mem alloc error (nr_rxqs:%u)\n", nr_rxqs); exit(EXIT_FAIL_MEM); @@ -232,8 +226,7 @@ static struct stats_record *alloc_stats_record(void) struct stats_record *rec; int i; - rec = malloc(sizeof(*rec)); - memset(rec, 0, sizeof(*rec)); + rec = calloc(1, sizeof(struct stats_record)); if (!rec) { fprintf(stderr, "Mem alloc error\n"); exit(EXIT_FAIL_MEM); diff --git a/samples/vfs/test-statx.c b/samples/vfs/test-statx.c index a3d68159fb51..507f09c38b49 100644 --- a/samples/vfs/test-statx.c +++ b/samples/vfs/test-statx.c @@ -23,6 +23,8 @@ #include #define statx foo #define statx_timestamp foo_timestamp +struct statx; +struct statx_timestamp; #include #undef statx #undef statx_timestamp diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include index d1dd4a6b6adb..7da10afc92c6 100644 --- a/scripts/Kbuild.include +++ b/scripts/Kbuild.include @@ -82,20 +82,21 @@ cc-cross-prefix = $(firstword $(foreach c, $(1), \ $(if $(shell command -v -- $(c)gcc 2>/dev/null), $(c)))) # output directory for tests below -TMPOUT := $(if $(KBUILD_EXTMOD),$(firstword $(KBUILD_EXTMOD))/) +TMPOUT = $(if $(KBUILD_EXTMOD),$(firstword $(KBUILD_EXTMOD))/).tmp_$$$$ # try-run # Usage: option = $(call try-run, $(CC)...-o "$$TMP",option-ok,otherwise) # Exit code chooses option. "$$TMP" serves as a temporary file and is # automatically cleaned up. 
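The Kbuild.include change in this hunk gives each try-run invocation its own per-PID temporary directory and removes it via a shell trap, so parallel builds cannot race on a shared temp file and nothing is left behind on exit. Since the examples in this document stick to C, here is the analogous pattern with mkdtemp() and an atexit() handler; the directory template is arbitrary.

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

static char tmpdir[] = "/tmp/tryrun.XXXXXX";

/* Registered with atexit() so the directory is removed however we exit. */
static void cleanup_tmpdir(void)
{
	rmdir(tmpdir);            /* sufficient here because we never put files in it */
}

int main(void)
{
	if (!mkdtemp(tmpdir)) {   /* unique per process, like the $$ suffix in TMPOUT */
		perror("mkdtemp");
		return 1;
	}
	atexit(cleanup_tmpdir);

	printf("scratch directory: %s\n", tmpdir);
	return 0;
}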
try-run = $(shell set -e; \ - TMP="$(TMPOUT).$$$$.tmp"; \ - TMPO="$(TMPOUT).$$$$.o"; \ + TMP=$(TMPOUT)/tmp; \ + TMPO=$(TMPOUT)/tmp.o; \ + mkdir -p $(TMPOUT); \ + trap "rm -rf $(TMPOUT)" EXIT; \ if ($(1)) >/dev/null 2>&1; \ then echo "$(2)"; \ else echo "$(3)"; \ - fi; \ - rm -f "$$TMP" "$$TMPO") + fi) # as-option # Usage: cflags-y += $(call as-option,-Wa$(comma)-isa=foo,) diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib index 342618a2bccb..a6d0044328b1 100644 --- a/scripts/Makefile.lib +++ b/scripts/Makefile.lib @@ -230,7 +230,7 @@ cmd_objcopy = $(OBJCOPY) $(OBJCOPYFLAGS) $(OBJCOPYFLAGS_$(@F)) $< $@ # --------------------------------------------------------------------------- quiet_cmd_gzip = GZIP $@ - cmd_gzip = cat $(real-prereqs) | gzip -n -f -9 > $@ + cmd_gzip = cat $(real-prereqs) | $(KGZIP) -n -f -9 > $@ # DTC # --------------------------------------------------------------------------- @@ -322,19 +322,19 @@ printf "%08x\n" $$dec_size | \ ) quiet_cmd_bzip2 = BZIP2 $@ - cmd_bzip2 = { cat $(real-prereqs) | bzip2 -9; $(size_append); } > $@ + cmd_bzip2 = { cat $(real-prereqs) | $(KBZIP2) -9; $(size_append); } > $@ # Lzma # --------------------------------------------------------------------------- quiet_cmd_lzma = LZMA $@ - cmd_lzma = { cat $(real-prereqs) | lzma -9; $(size_append); } > $@ + cmd_lzma = { cat $(real-prereqs) | $(LZMA) -9; $(size_append); } > $@ quiet_cmd_lzo = LZO $@ - cmd_lzo = { cat $(real-prereqs) | lzop -9; $(size_append); } > $@ + cmd_lzo = { cat $(real-prereqs) | $(KLZOP) -9; $(size_append); } > $@ quiet_cmd_lz4 = LZ4 $@ - cmd_lz4 = { cat $(real-prereqs) | lz4c -l -c1 stdin stdout; \ + cmd_lz4 = { cat $(real-prereqs) | $(LZ4) -l -c1 stdin stdout; \ $(size_append); } > $@ # U-Boot mkimage @@ -381,7 +381,7 @@ quiet_cmd_xzkern = XZKERN $@ $(size_append); } > $@ quiet_cmd_xzmisc = XZMISC $@ - cmd_xzmisc = cat $(real-prereqs) | xz --check=crc32 --lzma2=dict=1MiB > $@ + cmd_xzmisc = cat $(real-prereqs) | $(XZ) --check=crc32 --lzma2=dict=1MiB > $@ # ASM offsets # --------------------------------------------------------------------------- diff --git a/scripts/Makefile.package b/scripts/Makefile.package index 56eadcc48d46..35a617c29611 100644 --- a/scripts/Makefile.package +++ b/scripts/Makefile.package @@ -45,7 +45,7 @@ if test "$(objtree)" != "$(srctree)"; then \ false; \ fi ; \ $(srctree)/scripts/setlocalversion --save-scmversion; \ -tar -cz $(RCS_TAR_IGNORE) -f $(2).tar.gz \ +tar -I $(KGZIP) -c $(RCS_TAR_IGNORE) -f $(2).tar.gz \ --transform 's:^:$(2)/:S' $(TAR_CONTENT) $(3); \ rm -f $(objtree)/.scmversion @@ -127,9 +127,9 @@ util/PERF-VERSION-GEN $(CURDIR)/$(perf-tar)/); \ tar rf $(perf-tar).tar $(perf-tar)/HEAD $(perf-tar)/PERF-VERSION-FILE; \ rm -r $(perf-tar); \ $(if $(findstring tar-src,$@),, \ -$(if $(findstring bz2,$@),bzip2, \ -$(if $(findstring gz,$@),gzip, \ -$(if $(findstring xz,$@),xz, \ +$(if $(findstring bz2,$@),$(KBZIP2), \ +$(if $(findstring gz,$@),$(KGZIP), \ +$(if $(findstring xz,$@),$(XZ), \ $(error unknown target $@)))) \ -f -9 $(perf-tar).tar) diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl index 6fcc66afb088..0c9b11420279 100755 --- a/scripts/checkpatch.pl +++ b/scripts/checkpatch.pl @@ -2576,8 +2576,8 @@ sub process { # Check if the commit log has what seems like a diff which can confuse patch if ($in_commit_log && !$commit_log_has_diff && - (($line =~ m@^\s+diff\b.*a/[\w/]+@ && - $line =~ m@^\s+diff\b.*a/([\w/]+)\s+b/$1\b@) || + (($line =~ m@^\s+diff\b.*a/([\w/]+)@ && + $line =~ m@^\s+diff\b.*a/[\w/]+\s+b/$1\b@) || $line =~ 
m@^\s*(?:\-\-\-\s+a/|\+\+\+\s+b/)@ || $line =~ m/^\s*\@\@ \-\d+,\d+ \+\d+,\d+ \@\@/)) { ERROR("DIFF_IN_COMMIT_MSG", diff --git a/scripts/coccinelle/misc/add_namespace.cocci b/scripts/coccinelle/misc/add_namespace.cocci index 99e93a6c2e24..cbf1614163cb 100644 --- a/scripts/coccinelle/misc/add_namespace.cocci +++ b/scripts/coccinelle/misc/add_namespace.cocci @@ -6,6 +6,7 @@ /// add a missing namespace tag to a module source file. /// +virtual nsdeps virtual report @has_ns_import@ @@ -16,10 +17,15 @@ MODULE_IMPORT_NS(ns); // Add missing imports, but only adjacent to a MODULE_LICENSE statement. // That ensures we are adding it only to the main module source file. -@do_import depends on !has_ns_import@ +@do_import depends on !has_ns_import && nsdeps@ declarer name MODULE_LICENSE; expression license; identifier virtual.ns; @@ MODULE_LICENSE(license); + MODULE_IMPORT_NS(ns); + +// Dummy rule for report mode that would otherwise be empty and make spatch +// fail ("No rules apply.") +@script:python depends on report@ +@@ diff --git a/scripts/decode_stacktrace.sh b/scripts/decode_stacktrace.sh index 13e5fbafdf2f..fe7076fdac8a 100755 --- a/scripts/decode_stacktrace.sh +++ b/scripts/decode_stacktrace.sh @@ -84,8 +84,8 @@ parse_symbol() { return fi - # Strip out the base of the path - code=${code#$basepath/} + # Strip out the base of the path on each line + code=$(while read -r line; do echo "${line#$basepath/}"; done <<< "$code") # In the case of inlines, move everything to same line code=${code//$'\n'/' '} diff --git a/scripts/gdb/linux/symbols.py b/scripts/gdb/linux/symbols.py index be984aa29b75..1be9763cf8bb 100644 --- a/scripts/gdb/linux/symbols.py +++ b/scripts/gdb/linux/symbols.py @@ -96,7 +96,7 @@ lx-symbols command.""" return "" attrs = sect_attrs['attrs'] section_name_to_address = { - attrs[n]['name'].string(): attrs[n]['address'] + attrs[n]['battr']['attr']['name'].string(): attrs[n]['address'] for n in range(int(sect_attrs['nsections']))} args = [] for section_name in [".data", ".data..read_mostly", ".rodata", ".bss", diff --git a/scripts/headers_install.sh b/scripts/headers_install.sh index a07668a5c36b..94a833597a88 100755 --- a/scripts/headers_install.sh +++ b/scripts/headers_install.sh @@ -64,7 +64,7 @@ configs=$(sed -e ' d ' $OUTFILE) -# The entries in the following list are not warned. +# The entries in the following list do not result in an error. # Please do not add a new entry. This list is only for existing ones. # The list will be reduced gradually, and deleted eventually. 
(hopefully) # @@ -98,18 +98,19 @@ include/uapi/linux/raw.h:CONFIG_MAX_RAW_DEVS for c in $configs do - warn=1 + leak_error=1 for ignore in $config_leak_ignores do if echo "$INFILE:$c" | grep -q "$ignore$"; then - warn= + leak_error= break fi done - if [ "$warn" = 1 ]; then - echo "warning: $INFILE: leak $c to user-space" >&2 + if [ "$leak_error" = 1 ]; then + echo "error: $INFILE: leak $c to user-space" >&2 + exit 1 fi done diff --git a/scripts/kconfig/qconf.cc b/scripts/kconfig/qconf.cc index 0f8c77f84711..a94909ad9a53 100644 --- a/scripts/kconfig/qconf.cc +++ b/scripts/kconfig/qconf.cc @@ -869,40 +869,40 @@ void ConfigList::focusInEvent(QFocusEvent *e) void ConfigList::contextMenuEvent(QContextMenuEvent *e) { - if (e->y() <= header()->geometry().bottom()) { - if (!headerPopup) { - QAction *action; + if (!headerPopup) { + QAction *action; - headerPopup = new QMenu(this); - action = new QAction("Show Name", this); - action->setCheckable(true); - connect(action, SIGNAL(toggled(bool)), - parent(), SLOT(setShowName(bool))); - connect(parent(), SIGNAL(showNameChanged(bool)), - action, SLOT(setOn(bool))); - action->setChecked(showName); - headerPopup->addAction(action); - action = new QAction("Show Range", this); - action->setCheckable(true); - connect(action, SIGNAL(toggled(bool)), - parent(), SLOT(setShowRange(bool))); - connect(parent(), SIGNAL(showRangeChanged(bool)), - action, SLOT(setOn(bool))); - action->setChecked(showRange); - headerPopup->addAction(action); - action = new QAction("Show Data", this); - action->setCheckable(true); - connect(action, SIGNAL(toggled(bool)), - parent(), SLOT(setShowData(bool))); - connect(parent(), SIGNAL(showDataChanged(bool)), - action, SLOT(setOn(bool))); - action->setChecked(showData); - headerPopup->addAction(action); - } - headerPopup->exec(e->globalPos()); - e->accept(); - } else - e->ignore(); + headerPopup = new QMenu(this); + action = new QAction("Show Name", this); + action->setCheckable(true); + connect(action, SIGNAL(toggled(bool)), + parent(), SLOT(setShowName(bool))); + connect(parent(), SIGNAL(showNameChanged(bool)), + action, SLOT(setChecked(bool))); + action->setChecked(showName); + headerPopup->addAction(action); + + action = new QAction("Show Range", this); + action->setCheckable(true); + connect(action, SIGNAL(toggled(bool)), + parent(), SLOT(setShowRange(bool))); + connect(parent(), SIGNAL(showRangeChanged(bool)), + action, SLOT(setChecked(bool))); + action->setChecked(showRange); + headerPopup->addAction(action); + + action = new QAction("Show Data", this); + action->setCheckable(true); + connect(action, SIGNAL(toggled(bool)), + parent(), SLOT(setShowData(bool))); + connect(parent(), SIGNAL(showDataChanged(bool)), + action, SLOT(setChecked(bool))); + action->setChecked(showData); + headerPopup->addAction(action); + } + + headerPopup->exec(e->globalPos()); + e->accept(); } ConfigView*ConfigView::viewList; @@ -1228,7 +1228,7 @@ QMenu* ConfigInfoView::createStandardContextMenu(const QPoint & pos) action->setCheckable(true); connect(action, SIGNAL(toggled(bool)), SLOT(setShowDebug(bool))); - connect(this, SIGNAL(showDebugChanged(bool)), action, SLOT(setOn(bool))); + connect(this, SIGNAL(showDebugChanged(bool)), action, SLOT(setChecked(bool))); action->setChecked(showDebug()); popup->addSeparator(); popup->addAction(action); diff --git a/scripts/mksysmap b/scripts/mksysmap index a35acc0d0b82..9aa23d15862a 100755 --- a/scripts/mksysmap +++ b/scripts/mksysmap @@ -41,4 +41,4 @@ # so we just ignore them to let readprofile continue to work. 
# (At least sparc64 has __crc_ in the middle). -$NM -n $1 | grep -v '\( [aNUw] \)\|\(__crc_\)\|\( \$[adt]\)\|\( .L\)' > $2 +$NM -n $1 | grep -v '\( [aNUw] \)\|\(__crc_\)\|\( \$[adt]\)\|\( \.L\)' > $2 diff --git a/scripts/nsdeps b/scripts/nsdeps index 04cea0921673..e547f33b96a6 100644 --- a/scripts/nsdeps +++ b/scripts/nsdeps @@ -23,7 +23,7 @@ fi generate_deps_for_ns() { $SPATCH --very-quiet --in-place --sp-file \ - $srctree/scripts/coccinelle/misc/add_namespace.cocci -D ns=$1 $2 + $srctree/scripts/coccinelle/misc/add_namespace.cocci -D nsdeps -D ns=$1 $2 } generate_deps() { diff --git a/scripts/package/buildtar b/scripts/package/buildtar index 2f66c81e4021..3d541cee16ed 100755 --- a/scripts/package/buildtar +++ b/scripts/package/buildtar @@ -28,15 +28,15 @@ case "${1}" in opts= ;; targz-pkg) - opts=--gzip + opts="-I ${KGZIP}" tarball=${tarball}.gz ;; tarbz2-pkg) - opts=--bzip2 + opts="-I ${KBZIP2}" tarball=${tarball}.bz2 ;; tarxz-pkg) - opts=--xz + opts="-I ${XZ}" tarball=${tarball}.xz ;; *) diff --git a/scripts/recordmcount.c b/scripts/recordmcount.c index 7225107a9aaf..b9c2ee7ab43f 100644 --- a/scripts/recordmcount.c +++ b/scripts/recordmcount.c @@ -42,6 +42,8 @@ #define R_ARM_THM_CALL 10 #define R_ARM_CALL 28 +#define R_AARCH64_CALL26 283 + static int fd_map; /* File descriptor for file being modified. */ static int mmap_failed; /* Boolean flag. */ static char gpfx; /* prefix for global symbol name (sometimes '_') */ @@ -434,6 +436,11 @@ static int arm_is_fake_mcount(Elf32_Rel const *rp) return 1; } +static int arm64_is_fake_mcount(Elf64_Rel const *rp) +{ + return ELF64_R_TYPE(w(rp->r_info)) != R_AARCH64_CALL26; +} + /* 64-bit EM_MIPS has weird ELF64_Rela.r_info. * http://techpubs.sgi.com/library/manuals/4000/007-4658-001/pdf/007-4658-001.pdf * We interpret Table 29 Relocation Operation (Elf64_Rel, Elf64_Rela) [p.40] @@ -547,6 +554,7 @@ static int do_file(char const *const fname) make_nop = make_nop_arm64; rel_type_nop = R_AARCH64_NONE; ideal_nop = ideal_nop4_arm64; + is_fake_mcount64 = arm64_is_fake_mcount; break; case EM_IA_64: reltype = R_IA64_IMM64; break; case EM_MIPS: /* reltype: e_class */ break; diff --git a/scripts/recordmcount.h b/scripts/recordmcount.h index 74eab03e31d4..f9b19524da11 100644 --- a/scripts/recordmcount.h +++ b/scripts/recordmcount.h @@ -29,6 +29,11 @@ #undef has_rel_mcount #undef tot_relsize #undef get_mcountsym +#undef find_symtab +#undef get_shnum +#undef set_shnum +#undef get_shstrndx +#undef get_symindex #undef get_sym_str_and_relp #undef do_func #undef Elf_Addr @@ -58,6 +63,11 @@ # define __has_rel_mcount __has64_rel_mcount # define has_rel_mcount has64_rel_mcount # define tot_relsize tot64_relsize +# define find_symtab find_symtab64 +# define get_shnum get_shnum64 +# define set_shnum set_shnum64 +# define get_shstrndx get_shstrndx64 +# define get_symindex get_symindex64 # define get_sym_str_and_relp get_sym_str_and_relp_64 # define do_func do64 # define get_mcountsym get_mcountsym_64 @@ -91,6 +101,11 @@ # define __has_rel_mcount __has32_rel_mcount # define has_rel_mcount has32_rel_mcount # define tot_relsize tot32_relsize +# define find_symtab find_symtab32 +# define get_shnum get_shnum32 +# define set_shnum set_shnum32 +# define get_shstrndx get_shstrndx32 +# define get_symindex get_symindex32 # define get_sym_str_and_relp get_sym_str_and_relp_32 # define do_func do32 # define get_mcountsym get_mcountsym_32 @@ -173,6 +188,67 @@ static int MIPS_is_fake_mcount(Elf_Rel const *rp) return is_fake; } +static unsigned int get_symindex(Elf_Sym const *sym, 
Elf32_Word const *symtab, + Elf32_Word const *symtab_shndx) +{ + unsigned long offset; + int index; + + if (sym->st_shndx != SHN_XINDEX) + return w2(sym->st_shndx); + + offset = (unsigned long)sym - (unsigned long)symtab; + index = offset / sizeof(*sym); + + return w(symtab_shndx[index]); +} + +static unsigned int get_shnum(Elf_Ehdr const *ehdr, Elf_Shdr const *shdr0) +{ + if (shdr0 && !ehdr->e_shnum) + return w(shdr0->sh_size); + + return w2(ehdr->e_shnum); +} + +static void set_shnum(Elf_Ehdr *ehdr, Elf_Shdr *shdr0, unsigned int new_shnum) +{ + if (new_shnum >= SHN_LORESERVE) { + ehdr->e_shnum = 0; + shdr0->sh_size = w(new_shnum); + } else + ehdr->e_shnum = w2(new_shnum); +} + +static int get_shstrndx(Elf_Ehdr const *ehdr, Elf_Shdr const *shdr0) +{ + if (ehdr->e_shstrndx != SHN_XINDEX) + return w2(ehdr->e_shstrndx); + + return w(shdr0->sh_link); +} + +static void find_symtab(Elf_Ehdr *const ehdr, Elf_Shdr const *shdr0, + unsigned const nhdr, Elf32_Word **symtab, + Elf32_Word **symtab_shndx) +{ + Elf_Shdr const *relhdr; + unsigned k; + + *symtab = NULL; + *symtab_shndx = NULL; + + for (relhdr = shdr0, k = nhdr; k; --k, ++relhdr) { + if (relhdr->sh_type == SHT_SYMTAB) + *symtab = (void *)ehdr + relhdr->sh_offset; + else if (relhdr->sh_type == SHT_SYMTAB_SHNDX) + *symtab_shndx = (void *)ehdr + relhdr->sh_offset; + + if (*symtab && *symtab_shndx) + break; + } +} + /* Append the new shstrtab, Elf_Shdr[], __mcount_loc and its relocations. */ static int append_func(Elf_Ehdr *const ehdr, Elf_Shdr *const shstr, @@ -188,10 +264,12 @@ static int append_func(Elf_Ehdr *const ehdr, char const *mc_name = (sizeof(Elf_Rela) == rel_entsize) ? ".rela__mcount_loc" : ".rel__mcount_loc"; - unsigned const old_shnum = w2(ehdr->e_shnum); uint_t const old_shoff = _w(ehdr->e_shoff); uint_t const old_shstr_sh_size = _w(shstr->sh_size); uint_t const old_shstr_sh_offset = _w(shstr->sh_offset); + Elf_Shdr *const shdr0 = (Elf_Shdr *)(old_shoff + (void *)ehdr); + unsigned int const old_shnum = get_shnum(ehdr, shdr0); + unsigned int const new_shnum = 2 + old_shnum; /* {.rel,}__mcount_loc */ uint_t t = 1 + strlen(mc_name) + _w(shstr->sh_size); uint_t new_e_shoff; @@ -201,6 +279,8 @@ static int append_func(Elf_Ehdr *const ehdr, t += (_align & -t); /* word-byte align */ new_e_shoff = t; + set_shnum(ehdr, shdr0, new_shnum); + /* body for new shstrtab */ if (ulseek(sb.st_size, SEEK_SET) < 0) return -1; @@ -255,7 +335,6 @@ static int append_func(Elf_Ehdr *const ehdr, return -1; ehdr->e_shoff = _w(new_e_shoff); - ehdr->e_shnum = w2(2 + w2(ehdr->e_shnum)); /* {.rel,}__mcount_loc */ if (ulseek(0, SEEK_SET) < 0) return -1; if (uwrite(ehdr, sizeof(*ehdr)) < 0) @@ -434,6 +513,8 @@ static int find_secsym_ndx(unsigned const txtndx, uint_t *const recvalp, unsigned int *sym_index, Elf_Shdr const *const symhdr, + Elf32_Word const *symtab, + Elf32_Word const *symtab_shndx, Elf_Ehdr const *const ehdr) { Elf_Sym const *const sym0 = (Elf_Sym const *)(_w(symhdr->sh_offset) @@ -445,7 +526,7 @@ static int find_secsym_ndx(unsigned const txtndx, for (symp = sym0, t = nsym; t; --t, ++symp) { unsigned int const st_bind = ELF_ST_BIND(symp->st_info); - if (txtndx == w2(symp->st_shndx) + if (txtndx == get_symindex(symp, symtab, symtab_shndx) /* avoid STB_WEAK */ && (STB_LOCAL == st_bind || STB_GLOBAL == st_bind)) { /* function symbols on ARM have quirks, avoid them */ @@ -516,21 +597,23 @@ static unsigned tot_relsize(Elf_Shdr const *const shdr0, return totrelsz; } - /* Overall supervision for Elf32 ET_REL file. 
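The recordmcount helpers above handle ELF objects with SHN_LORESERVE (0xff00) or more sections: in that case e_shnum is 0 and the real count lives in section header 0's sh_size, e_shstrndx is SHN_XINDEX with the real string-table index in sh_link, and symbols carry SHN_XINDEX with their true section index held in a parallel SHT_SYMTAB_SHNDX table. The 64-bit read side of that convention looks roughly like the sketch below (native-endian fields assumed, unlike recordmcount's w()/w2() wrappers, and error handling omitted).

#include <elf.h>
#include <stdio.h>

/* Section count: stored in shdr[0].sh_size when it does not fit e_shnum. */
unsigned long shnum(const Elf64_Ehdr *ehdr, const Elf64_Shdr *shdr0)
{
	if (shdr0 && ehdr->e_shnum == 0)
		return shdr0->sh_size;
	return ehdr->e_shnum;
}

/* Section-name string table index: shdr[0].sh_link when e_shstrndx overflows. */
unsigned long shstrndx(const Elf64_Ehdr *ehdr, const Elf64_Shdr *shdr0)
{
	if (ehdr->e_shstrndx == SHN_XINDEX)
		return shdr0->sh_link;
	return ehdr->e_shstrndx;
}

/* Section index of a symbol: consult the SHT_SYMTAB_SHNDX array for
 * symbols whose st_shndx holds the SHN_XINDEX escape value. */
unsigned long sym_shndx(const Elf64_Sym *sym, unsigned long symidx,
			const Elf32_Word *symtab_shndx)
{
	if (sym->st_shndx == SHN_XINDEX)
		return symtab_shndx[symidx];
	return sym->st_shndx;
}

int main(void)
{
	Elf64_Ehdr ehdr = { .e_shoff = 64, .e_shnum = 0, .e_shstrndx = SHN_XINDEX };
	Elf64_Shdr shdr0 = { .sh_size = 70000, .sh_link = 69999 };

	printf("%lu sections, section names in section %lu\n",
	       shnum(&ehdr, &shdr0), shstrndx(&ehdr, &shdr0));
	return 0;
}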
*/ static int do_func(Elf_Ehdr *const ehdr, char const *const fname, unsigned const reltype) { Elf_Shdr *const shdr0 = (Elf_Shdr *)(_w(ehdr->e_shoff) + (void *)ehdr); - unsigned const nhdr = w2(ehdr->e_shnum); - Elf_Shdr *const shstr = &shdr0[w2(ehdr->e_shstrndx)]; + unsigned const nhdr = get_shnum(ehdr, shdr0); + Elf_Shdr *const shstr = &shdr0[get_shstrndx(ehdr, shdr0)]; char const *const shstrtab = (char const *)(_w(shstr->sh_offset) + (void *)ehdr); Elf_Shdr const *relhdr; unsigned k; + Elf32_Word *symtab; + Elf32_Word *symtab_shndx; + /* Upper bound on space: assume all relevant relocs are for mcount. */ unsigned totrelsz; @@ -561,6 +644,8 @@ static int do_func(Elf_Ehdr *const ehdr, char const *const fname, return -1; } + find_symtab(ehdr, shdr0, nhdr, &symtab, &symtab_shndx); + for (relhdr = shdr0, k = nhdr; k; --k, ++relhdr) { char const *const txtname = has_rel_mcount(relhdr, shdr0, shstrtab, fname); @@ -577,6 +662,7 @@ static int do_func(Elf_Ehdr *const ehdr, char const *const fname, result = find_secsym_ndx(w(relhdr->sh_info), txtname, &recval, &recsym, &shdr0[symsec_sh_link], + symtab, symtab_shndx, ehdr); if (result) goto out; diff --git a/scripts/xz_wrap.sh b/scripts/xz_wrap.sh index 7a2d372f4885..76e9cbcfbeab 100755 --- a/scripts/xz_wrap.sh +++ b/scripts/xz_wrap.sh @@ -20,4 +20,4 @@ case $SRCARCH in sparc) BCJ=--sparc ;; esac -exec xz --check=crc32 $BCJ --lzma2=$LZMA2OPTS,dict=32MiB +exec $XZ --check=crc32 $BCJ --lzma2=$LZMA2OPTS,dict=32MiB diff --git a/security/apparmor/domain.c b/security/apparmor/domain.c index 5dedc0173b02..1a33f490e667 100644 --- a/security/apparmor/domain.c +++ b/security/apparmor/domain.c @@ -935,7 +935,8 @@ int apparmor_bprm_set_creds(struct linux_binprm *bprm) * aways results in a further reduction of permissions. */ if ((bprm->unsafe & LSM_UNSAFE_NO_NEW_PRIVS) && - !unconfined(label) && !aa_label_is_subset(new, ctx->nnp)) { + !unconfined(label) && + !aa_label_is_unconfined_subset(new, ctx->nnp)) { error = -EPERM; info = "no new privs"; goto audit; @@ -1213,7 +1214,7 @@ int aa_change_hat(const char *hats[], int count, u64 token, int flags) * reduce restrictions. */ if (task_no_new_privs(current) && !unconfined(label) && - !aa_label_is_subset(new, ctx->nnp)) { + !aa_label_is_unconfined_subset(new, ctx->nnp)) { /* not an apparmor denial per se, so don't log it */ AA_DEBUG("no_new_privs - change_hat denied"); error = -EPERM; @@ -1234,7 +1235,7 @@ int aa_change_hat(const char *hats[], int count, u64 token, int flags) * reduce restrictions. */ if (task_no_new_privs(current) && !unconfined(label) && - !aa_label_is_subset(previous, ctx->nnp)) { + !aa_label_is_unconfined_subset(previous, ctx->nnp)) { /* not an apparmor denial per se, so don't log it */ AA_DEBUG("no_new_privs - change_hat denied"); error = -EPERM; @@ -1429,7 +1430,7 @@ check: * reduce restrictions. 
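The AppArmor changes above switch the no-new-privs checks to aa_label_is_unconfined_subset(), whose implementation further below tolerates unconfined profiles in the candidate label even when the target stack has no matching entry. Stripped of AppArmor's label iterators, the idea reduces to "every element of sub must be in set, unless that element is unconfined", roughly as in this sketch (the profile struct and lookup are invented):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct profile {
	const char *name;
	bool unconfined;
};

static bool in_set(const struct profile *p,
		   const struct profile *set, int set_len)
{
	for (int i = 0; i < set_len; i++)
		if (strcmp(set[i].name, p->name) == 0)
			return true;
	return false;
}

/* Subset test that tolerates unconfined entries in @sub with no match in @set. */
static bool is_unconfined_subset(const struct profile *set, int set_len,
				 const struct profile *sub, int sub_len)
{
	for (int i = 0; i < sub_len; i++) {
		if (sub[i].unconfined)
			continue;
		if (!in_set(&sub[i], set, set_len))
			return false;
	}
	return true;
}

int main(void)
{
	struct profile set[] = { { "firefox", false } };
	struct profile sub[] = { { "firefox", false }, { "unconfined", true } };

	printf("%s\n", is_unconfined_subset(set, 1, sub, 2) ?
	       "subset" : "not a subset");
	return 0;
}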
*/ if (task_no_new_privs(current) && !unconfined(label) && - !aa_label_is_subset(new, ctx->nnp)) { + !aa_label_is_unconfined_subset(new, ctx->nnp)) { /* not an apparmor denial per se, so don't log it */ AA_DEBUG("no_new_privs - change_hat denied"); error = -EPERM; diff --git a/security/apparmor/include/label.h b/security/apparmor/include/label.h index 47942c4ba7ca..255764ab06e2 100644 --- a/security/apparmor/include/label.h +++ b/security/apparmor/include/label.h @@ -281,6 +281,7 @@ bool aa_label_init(struct aa_label *label, int size, gfp_t gfp); struct aa_label *aa_label_alloc(int size, struct aa_proxy *proxy, gfp_t gfp); bool aa_label_is_subset(struct aa_label *set, struct aa_label *sub); +bool aa_label_is_unconfined_subset(struct aa_label *set, struct aa_label *sub); struct aa_profile *__aa_label_next_not_in_set(struct label_it *I, struct aa_label *set, struct aa_label *sub); diff --git a/security/apparmor/label.c b/security/apparmor/label.c index 470693239e64..5f324d63ceaa 100644 --- a/security/apparmor/label.c +++ b/security/apparmor/label.c @@ -550,6 +550,39 @@ bool aa_label_is_subset(struct aa_label *set, struct aa_label *sub) return __aa_label_next_not_in_set(&i, set, sub) == NULL; } +/** + * aa_label_is_unconfined_subset - test if @sub is a subset of @set + * @set: label to test against + * @sub: label to test if is subset of @set + * + * This checks for subset but taking into account unconfined. IF + * @sub contains an unconfined profile that does not have a matching + * unconfined in @set then this will not cause the test to fail. + * Conversely we don't care about an unconfined in @set that is not in + * @sub + * + * Returns: true if @sub is special_subset of @set + * else false + */ +bool aa_label_is_unconfined_subset(struct aa_label *set, struct aa_label *sub) +{ + struct label_it i = { }; + struct aa_profile *p; + + AA_BUG(!set); + AA_BUG(!sub); + + if (sub == set) + return true; + + do { + p = __aa_label_next_not_in_set(&i, set, sub); + if (p && !profile_unconfined(p)) + break; + } while (p); + + return p == NULL; +} /** @@ -1531,13 +1564,13 @@ static const char *label_modename(struct aa_ns *ns, struct aa_label *label, label_for_each(i, label, profile) { if (aa_ns_visible(ns, profile->ns, flags & FLAG_VIEW_SUBNS)) { - if (profile->mode == APPARMOR_UNCONFINED) + count++; + if (profile == profile->ns->unconfined) /* special case unconfined so stacks with * unconfined don't report as mixed. ie. 
* profile_foo//&:ns1:unconfined (mixed) */ continue; - count++; if (mode == -1) mode = profile->mode; else if (mode != profile->mode) diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c index ec3a928af829..e31965dc6dd1 100644 --- a/security/apparmor/lsm.c +++ b/security/apparmor/lsm.c @@ -791,7 +791,12 @@ static void apparmor_sk_clone_security(const struct sock *sk, struct aa_sk_ctx *ctx = SK_CTX(sk); struct aa_sk_ctx *new = SK_CTX(newsk); + if (new->label) + aa_put_label(new->label); new->label = aa_get_label(ctx->label); + + if (new->peer) + aa_put_label(new->peer); new->peer = aa_get_label(ctx->peer); } diff --git a/security/apparmor/match.c b/security/apparmor/match.c index 6ccd3734a841..43669403f755 100644 --- a/security/apparmor/match.c +++ b/security/apparmor/match.c @@ -97,6 +97,9 @@ static struct table_header *unpack_table(char *blob, size_t bsize) th.td_flags == YYTD_DATA8)) goto out; + /* if we have a table it must have some entries */ + if (th.td_lolen == 0) + goto out; tsize = table_size(th.td_lolen, th.td_flags); if (bsize < tsize) goto out; @@ -198,6 +201,8 @@ static int verify_dfa(struct aa_dfa *dfa) state_count = dfa->tables[YYTD_ID_BASE]->td_lolen; trans_count = dfa->tables[YYTD_ID_NXT]->td_lolen; + if (state_count == 0) + goto out; for (i = 0; i < state_count; i++) { if (!(BASE_TABLE(dfa)[i] & MATCH_FLAG_DIFF_ENCODE) && (DEFAULT_TABLE(dfa)[i] >= state_count)) diff --git a/security/integrity/evm/evm_crypto.c b/security/integrity/evm/evm_crypto.c index fbc2ee6d46fc..ee6bd945f3d6 100644 --- a/security/integrity/evm/evm_crypto.c +++ b/security/integrity/evm/evm_crypto.c @@ -243,7 +243,7 @@ static int evm_calc_hmac_or_hash(struct dentry *dentry, /* Portable EVM signatures must include an IMA hash */ if (type == EVM_XATTR_PORTABLE_DIGSIG && !ima_present) - return -EPERM; + error = -EPERM; out: kfree(xattr_value); kfree(desc); diff --git a/security/integrity/ima/Kconfig b/security/integrity/ima/Kconfig index 838476d780e5..d2054bec4909 100644 --- a/security/integrity/ima/Kconfig +++ b/security/integrity/ima/Kconfig @@ -227,7 +227,7 @@ config IMA_APPRAISE_REQUIRE_POLICY_SIGS config IMA_APPRAISE_BOOTPARAM bool "ima_appraise boot parameter" - depends on IMA_APPRAISE && !IMA_ARCH_POLICY + depends on IMA_APPRAISE default y help This option enables the different "ima_appraise=" modes diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h index 3689081aaf38..8173982e00ab 100644 --- a/security/integrity/ima/ima.h +++ b/security/integrity/ima/ima.h @@ -36,7 +36,7 @@ enum tpm_pcrs { TPM_PCR0 = 0, TPM_PCR8 = 8 }; #define IMA_DIGEST_SIZE SHA1_DIGEST_SIZE #define IMA_EVENT_NAME_LEN_MAX 255 -#define IMA_HASH_BITS 9 +#define IMA_HASH_BITS 10 #define IMA_MEASURE_HTABLE_SIZE (1 << IMA_HASH_BITS) #define IMA_TEMPLATE_FIELD_ID_MAX_LEN 16 @@ -52,6 +52,7 @@ extern int ima_policy_flag; extern int ima_hash_algo; extern int ima_appraise; extern struct tpm_chip *ima_tpm_chip; +extern const char boot_aggregate_name[]; /* IMA event related data */ struct ima_event_data { @@ -140,7 +141,7 @@ int ima_calc_buffer_hash(const void *buf, loff_t len, int ima_calc_field_array_hash(struct ima_field_data *field_data, struct ima_template_desc *desc, int num_fields, struct ima_digest_data *hash); -int __init ima_calc_boot_aggregate(struct ima_digest_data *hash); +int ima_calc_boot_aggregate(struct ima_digest_data *hash); void ima_add_violation(struct file *file, const unsigned char *filename, struct integrity_iint_cache *iint, const char *op, const char *cause); @@ -175,9 +176,10 @@ 
struct ima_h_table { }; extern struct ima_h_table ima_htable; -static inline unsigned long ima_hash_key(u8 *digest) +static inline unsigned int ima_hash_key(u8 *digest) { - return hash_long(*digest, IMA_HASH_BITS); + /* there is no point in taking a hash of part of a digest */ + return (digest[0] | digest[1] << 8) % IMA_MEASURE_HTABLE_SIZE; } #define __ima_hooks(hook) \ @@ -360,6 +362,7 @@ static inline void ima_free_modsig(struct modsig *modsig) #ifdef CONFIG_IMA_LSM_RULES #define security_filter_rule_init security_audit_rule_init +#define security_filter_rule_free security_audit_rule_free #define security_filter_rule_match security_audit_rule_match #else @@ -370,6 +373,10 @@ static inline int security_filter_rule_init(u32 field, u32 op, char *rulestr, return -EINVAL; } +static inline void security_filter_rule_free(void *lsmrule) +{ +} + static inline int security_filter_rule_match(u32 secid, u32 field, u32 op, void *lsmrule) { diff --git a/security/integrity/ima/ima_appraise.c b/security/integrity/ima/ima_appraise.c index 136ae4e0ee92..23b04c6521b2 100644 --- a/security/integrity/ima/ima_appraise.c +++ b/security/integrity/ima/ima_appraise.c @@ -18,6 +18,12 @@ static int __init default_appraise_setup(char *str) { #ifdef CONFIG_IMA_APPRAISE_BOOTPARAM + if (arch_ima_get_secureboot()) { + pr_info("Secure boot enabled: ignoring ima_appraise=%s boot parameter option", + str); + return 1; + } + if (strncmp(str, "off", 3) == 0) ima_appraise = 0; else if (strncmp(str, "log", 3) == 0) diff --git a/security/integrity/ima/ima_crypto.c b/security/integrity/ima/ima_crypto.c index ad6cbbccc8d9..d5ad7b2539c7 100644 --- a/security/integrity/ima/ima_crypto.c +++ b/security/integrity/ima/ima_crypto.c @@ -645,7 +645,7 @@ int ima_calc_buffer_hash(const void *buf, loff_t len, return calc_buffer_shash(buf, len, hash); } -static void __init ima_pcrread(u32 idx, struct tpm_digest *d) +static void ima_pcrread(u32 idx, struct tpm_digest *d) { if (!ima_tpm_chip) return; @@ -655,18 +655,29 @@ static void __init ima_pcrread(u32 idx, struct tpm_digest *d) } /* - * Calculate the boot aggregate hash + * The boot_aggregate is a cumulative hash over TPM registers 0 - 7. With + * TPM 1.2 the boot_aggregate was based on reading the SHA1 PCRs, but with + * TPM 2.0 hash agility, TPM chips could support multiple TPM PCR banks, + * allowing firmware to configure and enable different banks. + * + * Knowing which TPM bank is read to calculate the boot_aggregate digest + * needs to be conveyed to a verifier. For this reason, use the same + * hash algorithm for reading the TPM PCRs as for calculating the boot + * aggregate digest as stored in the measurement list. 
*/ -static int __init ima_calc_boot_aggregate_tfm(char *digest, - struct crypto_shash *tfm) +static int ima_calc_boot_aggregate_tfm(char *digest, u16 alg_id, + struct crypto_shash *tfm) { - struct tpm_digest d = { .alg_id = TPM_ALG_SHA1, .digest = {0} }; + struct tpm_digest d = { .alg_id = alg_id, .digest = {0} }; int rc; u32 i; SHASH_DESC_ON_STACK(shash, tfm); shash->tfm = tfm; + pr_devel("calculating the boot-aggregate based on TPM bank: %04x\n", + d.alg_id); + rc = crypto_shash_init(shash); if (rc != 0) return rc; @@ -675,24 +686,48 @@ static int __init ima_calc_boot_aggregate_tfm(char *digest, for (i = TPM_PCR0; i < TPM_PCR8; i++) { ima_pcrread(i, &d); /* now accumulate with current aggregate */ - rc = crypto_shash_update(shash, d.digest, TPM_DIGEST_SIZE); + rc = crypto_shash_update(shash, d.digest, + crypto_shash_digestsize(tfm)); } if (!rc) crypto_shash_final(shash, digest); return rc; } -int __init ima_calc_boot_aggregate(struct ima_digest_data *hash) +int ima_calc_boot_aggregate(struct ima_digest_data *hash) { struct crypto_shash *tfm; - int rc; + u16 crypto_id, alg_id; + int rc, i, bank_idx = -1; + + for (i = 0; i < ima_tpm_chip->nr_allocated_banks; i++) { + crypto_id = ima_tpm_chip->allocated_banks[i].crypto_id; + if (crypto_id == hash->algo) { + bank_idx = i; + break; + } + + if (crypto_id == HASH_ALGO_SHA256) + bank_idx = i; + + if (bank_idx == -1 && crypto_id == HASH_ALGO_SHA1) + bank_idx = i; + } + + if (bank_idx == -1) { + pr_err("No suitable TPM algorithm for boot aggregate\n"); + return 0; + } + + hash->algo = ima_tpm_chip->allocated_banks[bank_idx].crypto_id; tfm = ima_alloc_tfm(hash->algo); if (IS_ERR(tfm)) return PTR_ERR(tfm); hash->length = crypto_shash_digestsize(tfm); - rc = ima_calc_boot_aggregate_tfm(hash->digest, tfm); + alg_id = ima_tpm_chip->allocated_banks[bank_idx].alg_id; + rc = ima_calc_boot_aggregate_tfm(hash->digest, alg_id, tfm); ima_free_tfm(tfm); diff --git a/security/integrity/ima/ima_init.c b/security/integrity/ima/ima_init.c index 5d55ade5f3b9..a94177042eaa 100644 --- a/security/integrity/ima/ima_init.c +++ b/security/integrity/ima/ima_init.c @@ -21,13 +21,13 @@ #include "ima.h" /* name for boot aggregate entry */ -static const char boot_aggregate_name[] = "boot_aggregate"; +const char boot_aggregate_name[] = "boot_aggregate"; struct tpm_chip *ima_tpm_chip; /* Add the boot aggregate to the IMA measurement list and extend * the PCR register. * - * Calculate the boot aggregate, a SHA1 over tpm registers 0-7, + * Calculate the boot aggregate, a hash over tpm registers 0-7, * assuming a TPM chip exists, and zeroes if the TPM chip does not * exist. Add the boot aggregate measurement to the measurement * list and extend the PCR register. @@ -51,15 +51,27 @@ static int __init ima_add_boot_aggregate(void) int violation = 0; struct { struct ima_digest_data hdr; - char digest[TPM_DIGEST_SIZE]; + char digest[TPM_MAX_DIGEST_SIZE]; } hash; memset(iint, 0, sizeof(*iint)); memset(&hash, 0, sizeof(hash)); iint->ima_hash = &hash.hdr; - iint->ima_hash->algo = HASH_ALGO_SHA1; - iint->ima_hash->length = SHA1_DIGEST_SIZE; + iint->ima_hash->algo = ima_hash_algo; + iint->ima_hash->length = hash_digest_size[ima_hash_algo]; + /* + * With TPM 2.0 hash agility, TPM chips could support multiple TPM + * PCR banks, allowing firmware to configure and enable different + * banks. The SHA1 bank is not necessarily enabled. + * + * Use the same hash algorithm for reading the TPM PCRs as for + * calculating the boot aggregate digest. 
Preference is given to + * the configured IMA default hash algorithm. Otherwise, use the + * TCG required banks - SHA256 for TPM 2.0, SHA1 for TPM 1.2. + * Ultimately select SHA1 also for TPM 2.0 if the SHA256 PCR bank + * is not found. + */ if (ima_tpm_chip) { result = ima_calc_boot_aggregate(&hash.hdr); if (result < 0) { diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c index 60027c643ecd..a768f37a0a4d 100644 --- a/security/integrity/ima/ima_main.c +++ b/security/integrity/ima/ima_main.c @@ -712,6 +712,9 @@ static int __init init_ima(void) error = ima_init(); } + if (error) + return error; + error = register_blocking_lsm_notifier(&ima_lsm_policy_notifier); if (error) pr_warn("Couldn't register LSM notifier, error %d\n", error); diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c index ee9aec5e98f0..e725d4187271 100644 --- a/security/integrity/ima/ima_policy.c +++ b/security/integrity/ima/ima_policy.c @@ -204,7 +204,7 @@ static struct ima_rule_entry *arch_policy_entry __ro_after_init; static LIST_HEAD(ima_default_rules); static LIST_HEAD(ima_policy_rules); static LIST_HEAD(ima_temp_rules); -static struct list_head *ima_rules; +static struct list_head *ima_rules = &ima_default_rules; static int ima_policy __initdata; @@ -254,7 +254,7 @@ static void ima_lsm_free_rule(struct ima_rule_entry *entry) int i; for (i = 0; i < MAX_LSM_RULES; i++) { - kfree(entry->lsm[i].rule); + security_filter_rule_free(entry->lsm[i].rule); kfree(entry->lsm[i].args_p); } kfree(entry); @@ -591,9 +591,12 @@ static void add_rules(struct ima_rule_entry *entries, int count, list_add_tail(&entry->list, &ima_policy_rules); } if (entries[i].action == APPRAISE) { - temp_ima_appraise |= ima_appraise_flag(entries[i].func); - if (entries[i].func == POLICY_CHECK) - temp_ima_appraise |= IMA_APPRAISE_POLICY; + if (entries != build_appraise_rules) + temp_ima_appraise |= + ima_appraise_flag(entries[i].func); + else + build_ima_appraise |= + ima_appraise_flag(entries[i].func); } } } @@ -712,7 +715,6 @@ void __init ima_init_policy(void) ARRAY_SIZE(default_appraise_rules), IMA_DEFAULT_POLICY); - ima_rules = &ima_default_rules; ima_update_policy_flag(); } diff --git a/security/integrity/ima/ima_template_lib.c b/security/integrity/ima/ima_template_lib.c index 32ae05d88257..1be146e17d9f 100644 --- a/security/integrity/ima/ima_template_lib.c +++ b/security/integrity/ima/ima_template_lib.c @@ -288,6 +288,24 @@ int ima_eventdigest_init(struct ima_event_data *event_data, goto out; } + if ((const char *)event_data->filename == boot_aggregate_name) { + if (ima_tpm_chip) { + hash.hdr.algo = HASH_ALGO_SHA1; + result = ima_calc_boot_aggregate(&hash.hdr); + + /* algo can change depending on available PCR banks */ + if (!result && hash.hdr.algo != HASH_ALGO_SHA1) + result = -EINVAL; + + if (result < 0) + memset(&hash, 0, sizeof(hash)); + } + + cur_digest = hash.hdr.digest; + cur_digestsize = hash_digest_size[HASH_ALGO_SHA1]; + goto out; + } + if (!event_data->file) /* missing info to re-calculate the digest */ return -EINVAL; diff --git a/security/lockdown/lockdown.c b/security/lockdown/lockdown.c index b2f87015d6e9..3f38583bed06 100644 --- a/security/lockdown/lockdown.c +++ b/security/lockdown/lockdown.c @@ -177,7 +177,7 @@ static int __init lockdown_secfs_init(void) { struct dentry *dentry; - dentry = securityfs_create_file("lockdown", 0600, NULL, NULL, + dentry = securityfs_create_file("lockdown", 0644, NULL, NULL, &lockdown_ops); return PTR_ERR_OR_ZERO(dentry); } diff --git 
a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c index 1260f5fb766e..dd7aabd94a92 100644 --- a/security/selinux/ss/policydb.c +++ b/security/selinux/ss/policydb.c @@ -2496,6 +2496,7 @@ int policydb_read(struct policydb *p, void *fp) if (rc) goto bad; + rc = -ENOMEM; p->type_attr_map_array = kvcalloc(p->p_types.nprim, sizeof(*p->type_attr_map_array), GFP_KERNEL); diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c index a5813c7629c1..f62adf3cfce8 100644 --- a/security/selinux/ss/services.c +++ b/security/selinux/ss/services.c @@ -2844,8 +2844,12 @@ err: if (*names) { for (i = 0; i < *len; i++) kfree((*names)[i]); + kfree(*names); } kfree(*values); + *len = 0; + *names = NULL; + *values = NULL; goto out; } diff --git a/security/smack/smackfs.c b/security/smack/smackfs.c index c21b656b3263..9c4308077574 100644 --- a/security/smack/smackfs.c +++ b/security/smack/smackfs.c @@ -884,7 +884,7 @@ static ssize_t smk_set_cipso(struct file *file, const char __user *buf, } ret = sscanf(rule, "%d", &maplevel); - if (ret != 1 || maplevel > SMACK_CIPSO_MAXLEVEL) + if (ret != 1 || maplevel < 0 || maplevel > SMACK_CIPSO_MAXLEVEL) goto out; rule += SMK_DIGITLEN; @@ -905,6 +905,10 @@ static ssize_t smk_set_cipso(struct file *file, const char __user *buf, for (i = 0; i < catlen; i++) { rule += SMK_DIGITLEN; + if (rule > data + count) { + rc = -EOVERFLOW; + goto out; + } ret = sscanf(rule, "%u", &cat); if (ret != 1 || cat > SMACK_CIPSO_MAXCATNUM) goto out; @@ -2720,7 +2724,6 @@ static int smk_open_relabel_self(struct inode *inode, struct file *file) static ssize_t smk_write_relabel_self(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { - struct task_smack *tsp = smack_cred(current_cred()); char *data; int rc; LIST_HEAD(list_tmp); @@ -2745,11 +2748,21 @@ static ssize_t smk_write_relabel_self(struct file *file, const char __user *buf, kfree(data); if (!rc || (rc == -EINVAL && list_empty(&list_tmp))) { + struct cred *new; + struct task_smack *tsp; + + new = prepare_creds(); + if (!new) { + rc = -ENOMEM; + goto out; + } + tsp = smack_cred(new); smk_destroy_label_list(&tsp->smk_relabel); list_splice(&list_tmp, &tsp->smk_relabel); + commit_creds(new); return count; } - +out: smk_destroy_label_list(&list_tmp); return rc; } diff --git a/sound/core/compress_offload.c b/sound/core/compress_offload.c index f34ce564d92c..1afa06b80f06 100644 --- a/sound/core/compress_offload.c +++ b/sound/core/compress_offload.c @@ -722,6 +722,9 @@ static int snd_compr_stop(struct snd_compr_stream *stream) retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_STOP); if (!retval) { + /* clear flags and stop any drain wait */ + stream->partial_drain = false; + stream->metadata_set = false; snd_compr_drain_notify(stream); stream->runtime->total_bytes_available = 0; stream->runtime->total_bytes_transferred = 0; @@ -879,6 +882,7 @@ static int snd_compr_partial_drain(struct snd_compr_stream *stream) if (stream->next_track == false) return -EPERM; + stream->partial_drain = true; retval = stream->ops->trigger(stream, SND_COMPR_TRIGGER_PARTIAL_DRAIN); if (retval) { pr_debug("Partial drain returned failure\n"); diff --git a/sound/core/info.c b/sound/core/info.c index e051a029ccfb..f18f4ef6661e 100644 --- a/sound/core/info.c +++ b/sound/core/info.c @@ -608,7 +608,9 @@ int snd_info_get_line(struct snd_info_buffer *buffer, char *line, int len) { int c = -1; - if (snd_BUG_ON(!buffer || !buffer->buffer)) + if (snd_BUG_ON(!buffer)) + return 1; + if (!buffer->buffer) return 1; if (len <= 0 
|| buffer->stop || buffer->error) return 1; diff --git a/sound/core/oss/mulaw.c b/sound/core/oss/mulaw.c index 3788906421a7..fe27034f2846 100644 --- a/sound/core/oss/mulaw.c +++ b/sound/core/oss/mulaw.c @@ -329,8 +329,8 @@ int snd_pcm_plugin_build_mulaw(struct snd_pcm_substream *plug, snd_BUG(); return -EINVAL; } - if (snd_BUG_ON(!snd_pcm_format_linear(format->format))) - return -ENXIO; + if (!snd_pcm_format_linear(format->format)) + return -EINVAL; err = snd_pcm_plugin_build(plug, "Mu-Law<->linear conversion", src_format, dst_format, diff --git a/sound/core/seq/oss/seq_oss.c b/sound/core/seq/oss/seq_oss.c index 17f913657304..c8b9c0b315d8 100644 --- a/sound/core/seq/oss/seq_oss.c +++ b/sound/core/seq/oss/seq_oss.c @@ -168,10 +168,16 @@ static long odev_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct seq_oss_devinfo *dp; + long rc; + dp = file->private_data; if (snd_BUG_ON(!dp)) return -ENXIO; - return snd_seq_oss_ioctl(dp, cmd, arg); + + mutex_lock(&register_mutex); + rc = snd_seq_oss_ioctl(dp, cmd, arg); + mutex_unlock(&register_mutex); + return rc; } #ifdef CONFIG_COMPAT diff --git a/sound/drivers/opl3/opl3_synth.c b/sound/drivers/opl3/opl3_synth.c index e69a4ef0d6bd..08c10ac9d6c8 100644 --- a/sound/drivers/opl3/opl3_synth.c +++ b/sound/drivers/opl3/opl3_synth.c @@ -91,6 +91,8 @@ int snd_opl3_ioctl(struct snd_hwdep * hw, struct file *file, { struct snd_dm_fm_info info; + memset(&info, 0, sizeof(info)); + info.fm_mode = opl3->fm_mode; info.rhythm = opl3->rhythm; if (copy_to_user(argp, &info, sizeof(struct snd_dm_fm_info))) diff --git a/sound/firewire/amdtp-am824.c b/sound/firewire/amdtp-am824.c index 67d735e9a6a4..fea92e148790 100644 --- a/sound/firewire/amdtp-am824.c +++ b/sound/firewire/amdtp-am824.c @@ -82,7 +82,8 @@ int amdtp_am824_set_parameters(struct amdtp_stream *s, unsigned int rate, if (err < 0) return err; - s->ctx_data.rx.fdf = AMDTP_FDF_AM824 | s->sfc; + if (s->direction == AMDTP_OUT_STREAM) + s->ctx_data.rx.fdf = AMDTP_FDF_AM824 | s->sfc; p->pcm_channels = pcm_channels; p->midi_ports = midi_ports; diff --git a/sound/firewire/digi00x/digi00x.c b/sound/firewire/digi00x/digi00x.c index 1f5fc0e7c024..0e4b0eac3015 100644 --- a/sound/firewire/digi00x/digi00x.c +++ b/sound/firewire/digi00x/digi00x.c @@ -14,6 +14,7 @@ MODULE_LICENSE("GPL v2"); #define VENDOR_DIGIDESIGN 0x00a07e #define MODEL_CONSOLE 0x000001 #define MODEL_RACK 0x000002 +#define SPEC_VERSION 0x000001 static int name_card(struct snd_dg00x *dg00x) { @@ -175,14 +176,18 @@ static const struct ieee1394_device_id snd_dg00x_id_table[] = { /* Both of 002/003 use the same ID. */ { .match_flags = IEEE1394_MATCH_VENDOR_ID | + IEEE1394_MATCH_VERSION | IEEE1394_MATCH_MODEL_ID, .vendor_id = VENDOR_DIGIDESIGN, + .version = SPEC_VERSION, .model_id = MODEL_CONSOLE, }, { .match_flags = IEEE1394_MATCH_VENDOR_ID | + IEEE1394_MATCH_VERSION | IEEE1394_MATCH_MODEL_ID, .vendor_id = VENDOR_DIGIDESIGN, + .version = SPEC_VERSION, .model_id = MODEL_RACK, }, {} diff --git a/sound/firewire/tascam/tascam.c b/sound/firewire/tascam/tascam.c index addc464503bc..0175e3e835ea 100644 --- a/sound/firewire/tascam/tascam.c +++ b/sound/firewire/tascam/tascam.c @@ -39,9 +39,6 @@ static const struct snd_tscm_spec model_specs[] = { .midi_capture_ports = 2, .midi_playback_ports = 4, }, - // This kernel module doesn't support FE-8 because the most of features - // can be implemented in userspace without any specific support of this - // module. 
}; static int identify_model(struct snd_tscm *tscm) @@ -211,11 +208,39 @@ static void snd_tscm_remove(struct fw_unit *unit) } static const struct ieee1394_device_id snd_tscm_id_table[] = { + // Tascam, FW-1884. { .match_flags = IEEE1394_MATCH_VENDOR_ID | - IEEE1394_MATCH_SPECIFIER_ID, + IEEE1394_MATCH_SPECIFIER_ID | + IEEE1394_MATCH_VERSION, .vendor_id = 0x00022e, .specifier_id = 0x00022e, + .version = 0x800000, + }, + // Tascam, FE-8 (.version = 0x800001) + // This kernel module doesn't support FE-8 because the most of features + // can be implemented in userspace without any specific support of this + // module. + // + // .version = 0x800002 is unknown. + // + // Tascam, FW-1082. + { + .match_flags = IEEE1394_MATCH_VENDOR_ID | + IEEE1394_MATCH_SPECIFIER_ID | + IEEE1394_MATCH_VERSION, + .vendor_id = 0x00022e, + .specifier_id = 0x00022e, + .version = 0x800003, + }, + // Tascam, FW-1804. + { + .match_flags = IEEE1394_MATCH_VENDOR_ID | + IEEE1394_MATCH_SPECIFIER_ID | + IEEE1394_MATCH_VERSION, + .vendor_id = 0x00022e, + .specifier_id = 0x00022e, + .version = 0x800004, }, {} }; diff --git a/sound/hda/hdac_device.c b/sound/hda/hdac_device.c index c946fd8beebc..b84e12f4f804 100644 --- a/sound/hda/hdac_device.c +++ b/sound/hda/hdac_device.c @@ -127,6 +127,8 @@ EXPORT_SYMBOL_GPL(snd_hdac_device_init); void snd_hdac_device_exit(struct hdac_device *codec) { pm_runtime_put_noidle(&codec->dev); + /* keep balance of runtime PM child_count in parent device */ + pm_runtime_set_suspended(&codec->dev); snd_hdac_bus_remove_device(codec->bus, codec); kfree(codec->vendor_name); kfree(codec->chip_name); diff --git a/sound/isa/wavefront/wavefront_synth.c b/sound/isa/wavefront/wavefront_synth.c index c5b1d5900eed..d6420d224d09 100644 --- a/sound/isa/wavefront/wavefront_synth.c +++ b/sound/isa/wavefront/wavefront_synth.c @@ -1171,7 +1171,10 @@ wavefront_send_alias (snd_wavefront_t *dev, wavefront_patch_info *header) "alias for %d\n", header->number, header->hdr.a.OriginalSample); - + + if (header->number >= WF_MAX_SAMPLE) + return -EINVAL; + munge_int32 (header->number, &alias_hdr[0], 2); munge_int32 (header->hdr.a.OriginalSample, &alias_hdr[2], 2); munge_int32 (*((unsigned int *)&header->hdr.a.sampleStartOffset), @@ -1202,6 +1205,9 @@ wavefront_send_multisample (snd_wavefront_t *dev, wavefront_patch_info *header) int num_samples; unsigned char *msample_hdr; + if (header->number >= WF_MAX_SAMPLE) + return -EINVAL; + msample_hdr = kmalloc(WF_MSAMPLE_BYTES, GFP_KERNEL); if (! msample_hdr) return -ENOMEM; diff --git a/sound/pci/ca0106/ca0106_main.c b/sound/pci/ca0106/ca0106_main.c index 478412e0aa3c..7aedaeb7a196 100644 --- a/sound/pci/ca0106/ca0106_main.c +++ b/sound/pci/ca0106/ca0106_main.c @@ -537,7 +537,8 @@ static int snd_ca0106_pcm_power_dac(struct snd_ca0106 *chip, int channel_id, else /* Power down */ chip->spi_dac_reg[reg] |= bit; - return snd_ca0106_spi_write(chip, chip->spi_dac_reg[reg]); + if (snd_ca0106_spi_write(chip, chip->spi_dac_reg[reg]) != 0) + return -ENXIO; } return 0; } diff --git a/sound/pci/cs46xx/cs46xx_lib.c b/sound/pci/cs46xx/cs46xx_lib.c index 5b888b795f7e..c07a9e735733 100644 --- a/sound/pci/cs46xx/cs46xx_lib.c +++ b/sound/pci/cs46xx/cs46xx_lib.c @@ -766,7 +766,7 @@ static void snd_cs46xx_set_capture_sample_rate(struct snd_cs46xx *chip, unsigned rate = 48000 / 9; /* - * We can not capture at at rate greater than the Input Rate (48000). + * We can not capture at a rate greater than the Input Rate (48000). * Return an error if an attempt is made to stray outside that limit. 
*/ if (rate > 48000) diff --git a/sound/pci/cs46xx/dsp_spos_scb_lib.c b/sound/pci/cs46xx/dsp_spos_scb_lib.c index 715ead59613d..0bef823c5f61 100644 --- a/sound/pci/cs46xx/dsp_spos_scb_lib.c +++ b/sound/pci/cs46xx/dsp_spos_scb_lib.c @@ -1716,7 +1716,7 @@ int cs46xx_iec958_pre_open (struct snd_cs46xx *chip) struct dsp_spos_instance * ins = chip->dsp_spos_instance; if ( ins->spdif_status_out & DSP_SPDIF_STATUS_OUTPUT_ENABLED ) { - /* remove AsynchFGTxSCB and and PCMSerialInput_II */ + /* remove AsynchFGTxSCB and PCMSerialInput_II */ cs46xx_dsp_disable_spdif_out (chip); /* save state */ diff --git a/sound/pci/echoaudio/echoaudio.c b/sound/pci/echoaudio/echoaudio.c index ca9125726be2..8596ae4c2bde 100644 --- a/sound/pci/echoaudio/echoaudio.c +++ b/sound/pci/echoaudio/echoaudio.c @@ -2198,7 +2198,6 @@ static int snd_echo_resume(struct device *dev) if (err < 0) { kfree(commpage_bak); dev_err(dev, "resume init_hw err=%d\n", err); - snd_echo_free(chip); return err; } @@ -2225,7 +2224,6 @@ static int snd_echo_resume(struct device *dev) if (request_irq(pci->irq, snd_echo_interrupt, IRQF_SHARED, KBUILD_MODNAME, chip)) { dev_err(chip->card->dev, "cannot grab irq\n"); - snd_echo_free(chip); return -EBUSY; } chip->irq = pci->irq; diff --git a/sound/pci/hda/hda_auto_parser.c b/sound/pci/hda/hda_auto_parser.c index 2c6d2becfe1a..824f4ac1a8ce 100644 --- a/sound/pci/hda/hda_auto_parser.c +++ b/sound/pci/hda/hda_auto_parser.c @@ -72,6 +72,12 @@ static int compare_input_type(const void *ap, const void *bp) if (a->type != b->type) return (int)(a->type - b->type); + /* If has both hs_mic and hp_mic, pick the hs_mic ahead of hp_mic. */ + if (a->is_headset_mic && b->is_headphone_mic) + return -1; /* don't swap */ + else if (a->is_headphone_mic && b->is_headset_mic) + return 1; /* swap */ + /* In case one has boost and the other one has not, pick the one with boost first. */ return (int)(b->has_boost_on_pin - a->has_boost_on_pin); diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c index 07c03c32715a..103011e7285a 100644 --- a/sound/pci/hda/hda_codec.c +++ b/sound/pci/hda/hda_codec.c @@ -2924,6 +2924,10 @@ static int hda_codec_runtime_suspend(struct device *dev) struct hda_codec *codec = dev_to_hda_codec(dev); unsigned int state; + /* Nothing to do if card registration fails and the component driver never probes */ + if (!codec->card) + return 0; + cancel_delayed_work_sync(&codec->jackpoll_work); state = hda_call_codec_suspend(codec); if (codec->link_down_at_suspend || @@ -2938,6 +2942,10 @@ static int hda_codec_runtime_resume(struct device *dev) { struct hda_codec *codec = dev_to_hda_codec(dev); + /* Nothing to do if card registration fails and the component driver never probes */ + if (!codec->card) + return 0; + codec_display_power(codec, true); snd_hdac_codec_link_up(&codec->core); hda_call_codec_resume(codec); @@ -3412,7 +3420,7 @@ EXPORT_SYMBOL_GPL(snd_hda_set_power_save); * @nid: NID to check / update * * Check whether the given NID is in the amp list. If it's in the list, - * check the current AMP status, and update the the power-status according + * check the current AMP status, and update the power-status according * to the mute status. 
* * This function is supposed to be set or called from the check_power_status diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c index 6815f9dc8545..e1750bdbe51f 100644 --- a/sound/pci/hda/hda_generic.c +++ b/sound/pci/hda/hda_generic.c @@ -813,7 +813,7 @@ static void activate_amp_in(struct hda_codec *codec, struct nid_path *path, } } -/* sync power of each widget in the the given path */ +/* sync power of each widget in the given path */ static hda_nid_t path_power_update(struct hda_codec *codec, struct nid_path *path, bool allow_powerdown) diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 612441508e80..7353d2ec359a 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -2306,7 +2306,6 @@ static int azx_probe_continue(struct azx *chip) if (azx_has_pm_runtime(chip)) { pm_runtime_use_autosuspend(&pci->dev); - pm_runtime_allow(&pci->dev); pm_runtime_put_autosuspend(&pci->dev); } @@ -2433,6 +2432,9 @@ static const struct pci_device_id azx_ids[] = { /* Icelake */ { PCI_DEVICE(0x8086, 0x34c8), .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE}, + /* Icelake-H */ + { PCI_DEVICE(0x8086, 0x3dc8), + .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE}, /* Jasperlake */ { PCI_DEVICE(0x8086, 0x38c8), .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE}, @@ -2441,9 +2443,14 @@ static const struct pci_device_id azx_ids[] = { /* Tigerlake */ { PCI_DEVICE(0x8086, 0xa0c8), .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE}, + /* Tigerlake-H */ + { PCI_DEVICE(0x8086, 0x43c8), + .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE}, /* Elkhart Lake */ { PCI_DEVICE(0x8086, 0x4b55), .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE}, + { PCI_DEVICE(0x8086, 0x4b58), + .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE}, /* Broxton-P(Apollolake) */ { PCI_DEVICE(0x8086, 0x5a98), .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_BROXTON }, diff --git a/sound/pci/hda/hda_tegra.c b/sound/pci/hda/hda_tegra.c index e5191584638a..e378cb33c69d 100644 --- a/sound/pci/hda/hda_tegra.c +++ b/sound/pci/hda/hda_tegra.c @@ -169,6 +169,10 @@ static int __maybe_unused hda_tegra_runtime_suspend(struct device *dev) struct hdac_bus *bus = azx_bus(chip); if (chip && chip->running) { + /* enable controller wake up event */ + azx_writew(chip, WAKEEN, azx_readw(chip, WAKEEN) | + STATESTS_INT_MASK); + azx_stop_chip(chip); synchronize_irq(bus->irq); azx_enter_link_reset(chip); @@ -191,6 +195,9 @@ static int __maybe_unused hda_tegra_runtime_resume(struct device *dev) if (chip && chip->running) { hda_tegra_init(hda); azx_init_chip(chip, 1); + /* disable controller wake up event*/ + azx_writew(chip, WAKEEN, azx_readw(chip, WAKEEN) & + ~STATESTS_INT_MASK); } return 0; diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c index 1e904dd15ab3..6aa39339db0a 100644 --- a/sound/pci/hda/patch_ca0132.c +++ b/sound/pci/hda/patch_ca0132.c @@ -1182,6 +1182,7 @@ static const struct snd_pci_quirk ca0132_quirks[] = { SND_PCI_QUIRK(0x1458, 0xA036, "Gigabyte GA-Z170X-Gaming 7", QUIRK_R3DI), SND_PCI_QUIRK(0x3842, 0x1038, "EVGA X99 Classified", QUIRK_R3DI), SND_PCI_QUIRK(0x1102, 0x0013, "Recon3D", QUIRK_R3D), + SND_PCI_QUIRK(0x1102, 0x0018, "Recon3D", QUIRK_R3D), SND_PCI_QUIRK(0x1102, 0x0051, "Sound Blaster AE-5", QUIRK_AE5), {} }; @@ -4670,7 +4671,7 @@ static int ca0132_alt_select_in(struct hda_codec *codec) tmp = FLOAT_ONE; break; case QUIRK_AE5: - ca0113_mmio_command_set(codec, 0x48, 0x28, 0x00); + ca0113_mmio_command_set(codec, 0x30, 0x28, 
0x00); tmp = FLOAT_THREE; break; default: @@ -4716,7 +4717,7 @@ static int ca0132_alt_select_in(struct hda_codec *codec) r3di_gpio_mic_set(codec, R3DI_REAR_MIC); break; case QUIRK_AE5: - ca0113_mmio_command_set(codec, 0x48, 0x28, 0x00); + ca0113_mmio_command_set(codec, 0x30, 0x28, 0x00); break; default: break; @@ -4755,7 +4756,7 @@ static int ca0132_alt_select_in(struct hda_codec *codec) tmp = FLOAT_ONE; break; case QUIRK_AE5: - ca0113_mmio_command_set(codec, 0x48, 0x28, 0x3f); + ca0113_mmio_command_set(codec, 0x30, 0x28, 0x3f); tmp = FLOAT_THREE; break; default: @@ -5747,6 +5748,11 @@ static int ca0132_switch_get(struct snd_kcontrol *kcontrol, return 0; } + if (nid == ZXR_HEADPHONE_GAIN) { + *valp = spec->zxr_gain_set; + return 0; + } + return 0; } diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c index d41c91468ab3..51798632d334 100644 --- a/sound/pci/hda/patch_hdmi.c +++ b/sound/pci/hda/patch_hdmi.c @@ -176,6 +176,7 @@ struct hdmi_spec { bool use_jack_detect; /* jack detection enabled */ bool use_acomp_notifier; /* use eld_notify callback for hotplug */ bool acomp_registered; /* audio component registered in this driver */ + bool force_connect; /* force connectivity */ struct drm_audio_component_audio_ops drm_audio_ops; int (*port2pin)(struct hda_codec *, int); /* reverse port/pin mapping */ @@ -1711,7 +1712,8 @@ static int hdmi_add_pin(struct hda_codec *codec, hda_nid_t pin_nid) * all device entries on the same pin */ config = snd_hda_codec_get_pincfg(codec, pin_nid); - if (get_defcfg_connect(config) == AC_JACK_PORT_NONE) + if (get_defcfg_connect(config) == AC_JACK_PORT_NONE && + !spec->force_connect) return 0; /* @@ -1815,35 +1817,58 @@ static int hdmi_add_cvt(struct hda_codec *codec, hda_nid_t cvt_nid) return 0; } +static const struct snd_pci_quirk force_connect_list[] = { + SND_PCI_QUIRK(0x103c, 0x870f, "HP", 1), + SND_PCI_QUIRK(0x103c, 0x871a, "HP", 1), + {} +}; + static int hdmi_parse_codec(struct hda_codec *codec) { - hda_nid_t nid; + struct hdmi_spec *spec = codec->spec; + hda_nid_t start_nid; + unsigned int caps; int i, nodes; + const struct snd_pci_quirk *q; - nodes = snd_hda_get_sub_nodes(codec, codec->core.afg, &nid); - if (!nid || nodes < 0) { + nodes = snd_hda_get_sub_nodes(codec, codec->core.afg, &start_nid); + if (!start_nid || nodes < 0) { codec_warn(codec, "HDMI: failed to get afg sub nodes\n"); return -EINVAL; } - for (i = 0; i < nodes; i++, nid++) { - unsigned int caps; - unsigned int type; + q = snd_pci_quirk_lookup(codec->bus->pci, force_connect_list); + + if (q && q->value) + spec->force_connect = true; + + /* + * hdmi_add_pin() assumes total amount of converters to + * be known, so first discover all converters + */ + for (i = 0; i < nodes; i++) { + hda_nid_t nid = start_nid + i; caps = get_wcaps(codec, nid); - type = get_wcaps_type(caps); if (!(caps & AC_WCAP_DIGITAL)) continue; - switch (type) { - case AC_WID_AUD_OUT: + if (get_wcaps_type(caps) == AC_WID_AUD_OUT) hdmi_add_cvt(codec, nid); - break; - case AC_WID_PIN: + } + + /* discover audio pins */ + for (i = 0; i < nodes; i++) { + hda_nid_t nid = start_nid + i; + + caps = get_wcaps(codec, nid); + + if (!(caps & AC_WCAP_DIGITAL)) + continue; + + if (get_wcaps_type(caps) == AC_WID_PIN) hdmi_add_pin(codec, nid); - break; - } } return 0; @@ -2473,6 +2498,7 @@ static void generic_acomp_notifier_set(struct drm_audio_component *acomp, mutex_lock(&spec->bind_lock); spec->use_acomp_notifier = use_acomp; spec->codec->relaxed_resume = use_acomp; + spec->codec->bus->keep_power = 0; /* reprogram each 
jack detection logic depending on the notifier */ if (spec->use_jack_detect) { for (i = 0; i < spec->num_pins; i++) @@ -2568,7 +2594,6 @@ static void generic_acomp_init(struct hda_codec *codec, if (!snd_hdac_acomp_init(&codec->bus->core, &spec->drm_audio_ops, match_bound_vga, 0)) { spec->acomp_registered = true; - codec->bus->keep_power = 0; } } @@ -2773,6 +2798,7 @@ static void i915_pin_cvt_fixup(struct hda_codec *codec, hda_nid_t cvt_nid) { if (per_pin) { + haswell_verify_D0(codec, per_pin->cvt_nid, per_pin->pin_nid); snd_hda_set_dev_select(codec, per_pin->pin_nid, per_pin->dev_id); intel_verify_pin_cvt_connect(codec, per_pin); @@ -3652,6 +3678,7 @@ static int tegra_hdmi_build_pcms(struct hda_codec *codec) static int patch_tegra_hdmi(struct hda_codec *codec) { + struct hdmi_spec *spec; int err; err = patch_generic_hdmi(codec); @@ -3659,6 +3686,10 @@ static int patch_tegra_hdmi(struct hda_codec *codec) return err; codec->patch_ops.build_pcms = tegra_hdmi_build_pcms; + spec = codec->spec; + spec->chmap.ops.chmap_cea_alloc_validate_get_type = + nvhdmi_chmap_cea_alloc_validate_get_type; + spec->chmap.ops.chmap_validate = nvhdmi_chmap_validate; return 0; } @@ -4146,6 +4177,11 @@ HDA_CODEC_ENTRY(0x10de0095, "GPU 95 HDMI/DP", patch_nvhdmi), HDA_CODEC_ENTRY(0x10de0097, "GPU 97 HDMI/DP", patch_nvhdmi), HDA_CODEC_ENTRY(0x10de0098, "GPU 98 HDMI/DP", patch_nvhdmi), HDA_CODEC_ENTRY(0x10de0099, "GPU 99 HDMI/DP", patch_nvhdmi), +HDA_CODEC_ENTRY(0x10de009a, "GPU 9a HDMI/DP", patch_nvhdmi), +HDA_CODEC_ENTRY(0x10de009d, "GPU 9d HDMI/DP", patch_nvhdmi), +HDA_CODEC_ENTRY(0x10de009e, "GPU 9e HDMI/DP", patch_nvhdmi), +HDA_CODEC_ENTRY(0x10de009f, "GPU 9f HDMI/DP", patch_nvhdmi), +HDA_CODEC_ENTRY(0x10de00a0, "GPU a0 HDMI/DP", patch_nvhdmi), HDA_CODEC_ENTRY(0x10de8001, "MCP73 HDMI", patch_nvhdmi_2ch), HDA_CODEC_ENTRY(0x10de8067, "MCP67/68 HDMI", patch_nvhdmi_2ch), HDA_CODEC_ENTRY(0x11069f80, "VX900 HDMI/DP", patch_via_hdmi), @@ -4169,6 +4205,7 @@ HDA_CODEC_ENTRY(0x8086280c, "Cannonlake HDMI", patch_i915_glk_hdmi), HDA_CODEC_ENTRY(0x8086280d, "Geminilake HDMI", patch_i915_glk_hdmi), HDA_CODEC_ENTRY(0x8086280f, "Icelake HDMI", patch_i915_icl_hdmi), HDA_CODEC_ENTRY(0x80862812, "Tigerlake HDMI", patch_i915_tgl_hdmi), +HDA_CODEC_ENTRY(0x80862816, "Rocketlake HDMI", patch_i915_tgl_hdmi), HDA_CODEC_ENTRY(0x8086281a, "Jasperlake HDMI", patch_i915_icl_hdmi), HDA_CODEC_ENTRY(0x80862880, "CedarTrail HDMI", patch_generic_hdmi), HDA_CODEC_ENTRY(0x80862882, "Valleyview2 HDMI", patch_i915_byt_hdmi), diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index df5afac0b600..54346ae47d11 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -81,6 +81,7 @@ struct alc_spec { /* mute LED for HP laptops, see alc269_fixup_mic_mute_hook() */ int mute_led_polarity; + int micmute_led_polarity; hda_nid_t mute_led_nid; hda_nid_t cap_mute_led_nid; @@ -2459,6 +2460,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = { SND_PCI_QUIRK(0x1458, 0xa0b8, "Gigabyte AZ370-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS), SND_PCI_QUIRK(0x1458, 0xa0cd, "Gigabyte X570 Aorus Master", ALC1220_FIXUP_CLEVO_P950), SND_PCI_QUIRK(0x1458, 0xa0ce, "Gigabyte X570 Aorus Xtreme", ALC1220_FIXUP_CLEVO_P950), + SND_PCI_QUIRK(0x1462, 0x11f7, "MSI-GE63", ALC1220_FIXUP_CLEVO_P950), SND_PCI_QUIRK(0x1462, 0x1228, "MSI-GP63", ALC1220_FIXUP_CLEVO_P950), SND_PCI_QUIRK(0x1462, 0x1275, "MSI-GL63", ALC1220_FIXUP_CLEVO_P950), SND_PCI_QUIRK(0x1462, 0x1276, "MSI-GL73", ALC1220_FIXUP_CLEVO_P950), @@ -4080,11 +4082,9 @@ static void 
alc269_fixup_hp_mute_led_mic3(struct hda_codec *codec, /* update LED status via GPIO */ static void alc_update_gpio_led(struct hda_codec *codec, unsigned int mask, - bool enabled) + int polarity, bool enabled) { - struct alc_spec *spec = codec->spec; - - if (spec->mute_led_polarity) + if (polarity) enabled = !enabled; alc_update_gpio_data(codec, mask, !enabled); /* muted -> LED on */ } @@ -4095,7 +4095,8 @@ static void alc_fixup_gpio_mute_hook(void *private_data, int enabled) struct hda_codec *codec = private_data; struct alc_spec *spec = codec->spec; - alc_update_gpio_led(codec, spec->gpio_mute_led_mask, enabled); + alc_update_gpio_led(codec, spec->gpio_mute_led_mask, + spec->mute_led_polarity, enabled); } /* turn on/off mic-mute LED via GPIO per capture hook */ @@ -4104,6 +4105,7 @@ static void alc_gpio_micmute_update(struct hda_codec *codec) struct alc_spec *spec = codec->spec; alc_update_gpio_led(codec, spec->gpio_mic_led_mask, + spec->micmute_led_polarity, spec->gen.micmute_led.led_value); } @@ -4389,6 +4391,7 @@ static void alc233_fixup_lenovo_line2_mic_hotkey(struct hda_codec *codec, { struct alc_spec *spec = codec->spec; + spec->micmute_led_polarity = 1; alc_fixup_hp_gpio_led(codec, action, 0, 0x04); if (action == HDA_FIXUP_ACT_PRE_PROBE) { spec->init_amp = ALC_INIT_DEFAULT; @@ -5808,7 +5811,8 @@ static void alc280_hp_gpio4_automute_hook(struct hda_codec *codec, snd_hda_gen_hp_automute(codec, jack); /* mute_led_polarity is set to 0, so we pass inverted value here */ - alc_update_gpio_led(codec, 0x10, !spec->gen.hp_jack_present); + alc_update_gpio_led(codec, 0x10, spec->mute_led_polarity, + !spec->gen.hp_jack_present); } /* Manage GPIOs for HP EliteBook Folio 9480m. @@ -5845,6 +5849,39 @@ static void alc275_fixup_gpio4_off(struct hda_codec *codec, } } +/* Quirk for Thinkpad X1 7th and 8th Gen + * The following fixed routing needed + * DAC1 (NID 0x02) -> Speaker (NID 0x14); some eq applied secretly + * DAC2 (NID 0x03) -> Bass (NID 0x17) & Headphone (NID 0x21); sharing a DAC + * DAC3 (NID 0x06) -> Unused, due to the lack of volume amp + */ +static void alc285_fixup_thinkpad_x1_gen7(struct hda_codec *codec, + const struct hda_fixup *fix, int action) +{ + static const hda_nid_t conn[] = { 0x02, 0x03 }; /* exclude 0x06 */ + static const hda_nid_t preferred_pairs[] = { + 0x14, 0x02, 0x17, 0x03, 0x21, 0x03, 0 + }; + struct alc_spec *spec = codec->spec; + + switch (action) { + case HDA_FIXUP_ACT_PRE_PROBE: + snd_hda_override_conn_list(codec, 0x17, ARRAY_SIZE(conn), conn); + spec->gen.preferred_dacs = preferred_pairs; + break; + case HDA_FIXUP_ACT_BUILD: + /* The generic parser creates somewhat unintuitive volume ctls + * with the fixed routing above, and the shared DAC2 may be + * confusing for PA. + * Rename those to unique names so that PA doesn't touch them + * and use only Master volume. + */ + rename_ctl(codec, "Front Playback Volume", "DAC1 Playback Volume"); + rename_ctl(codec, "Bass Speaker Playback Volume", "DAC2 Playback Volume"); + break; + } +} + static void alc233_alc662_fixup_lenovo_dual_codecs(struct hda_codec *codec, const struct hda_fixup *fix, int action) @@ -5937,6 +5974,50 @@ static void alc_fixup_disable_mic_vref(struct hda_codec *codec, snd_hda_codec_set_pin_target(codec, 0x19, PIN_VREFHIZ); } + +static void alc294_gx502_toggle_output(struct hda_codec *codec, + struct hda_jack_callback *cb) +{ + /* The Windows driver sets the codec up in a very different way where + * it appears to leave 0x10 = 0x8a20 set. 
For Linux we need to toggle it + */ + if (snd_hda_jack_detect_state(codec, 0x21) == HDA_JACK_PRESENT) + alc_write_coef_idx(codec, 0x10, 0x8a20); + else + alc_write_coef_idx(codec, 0x10, 0x0a20); +} + +static void alc294_fixup_gx502_hp(struct hda_codec *codec, + const struct hda_fixup *fix, int action) +{ + /* Pin 0x21: headphones/headset mic */ + if (!is_jack_detectable(codec, 0x21)) + return; + + switch (action) { + case HDA_FIXUP_ACT_PRE_PROBE: + snd_hda_jack_detect_enable_callback(codec, 0x21, + alc294_gx502_toggle_output); + break; + case HDA_FIXUP_ACT_INIT: + /* Make sure to start in a correct state, i.e. if + * headphones have been plugged in before powering up the system + */ + alc294_gx502_toggle_output(codec, NULL); + break; + } +} + +static void alc285_fixup_hp_gpio_amp_init(struct hda_codec *codec, + const struct hda_fixup *fix, int action) +{ + if (action != HDA_FIXUP_ACT_INIT) + return; + + msleep(100); + alc_write_coef_idx(codec, 0x65, 0x0); +} + /* for hda_fixup_thinkpad_acpi() */ #include "thinkpad_helper.c" @@ -6103,14 +6184,32 @@ enum { ALC289_FIXUP_DUAL_SPK, ALC294_FIXUP_SPK2_TO_DAC1, ALC294_FIXUP_ASUS_DUAL_SPK, + ALC285_FIXUP_THINKPAD_X1_GEN7, ALC285_FIXUP_THINKPAD_HEADSET_JACK, ALC294_FIXUP_ASUS_HPE, ALC294_FIXUP_ASUS_COEF_1B, + ALC294_FIXUP_ASUS_GX502_HP, + ALC294_FIXUP_ASUS_GX502_PINS, + ALC294_FIXUP_ASUS_GX502_VERBS, ALC285_FIXUP_HP_GPIO_LED, ALC285_FIXUP_HP_MUTE_LED, ALC236_FIXUP_HP_MUTE_LED, ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET, ALC295_FIXUP_ASUS_MIC_NO_PRESENCE, + ALC269VC_FIXUP_ACER_VCOPPERBOX_PINS, + ALC269VC_FIXUP_ACER_HEADSET_MIC, + ALC269VC_FIXUP_ACER_MIC_NO_PRESENCE, + ALC289_FIXUP_ASUS_GA401, + ALC289_FIXUP_ASUS_GA502, + ALC256_FIXUP_ACER_MIC_NO_PRESENCE, + ALC285_FIXUP_HP_GPIO_AMP_INIT, + ALC269_FIXUP_CZC_B20, + ALC269_FIXUP_CZC_TMI, + ALC269_FIXUP_CZC_L101, + ALC269_FIXUP_LEMOTE_A1802, + ALC269_FIXUP_LEMOTE_A190X, + ALC256_FIXUP_INTEL_NUC8_RUGGED, + ALC255_FIXUP_XIAOMI_HEADSET_MIC, }; static const struct hda_fixup alc269_fixups[] = { @@ -7076,7 +7175,7 @@ static const struct hda_fixup alc269_fixups[] = { { } }, .chained = true, - .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC + .chain_id = ALC269_FIXUP_HEADSET_MIC }, [ALC294_FIXUP_ASUS_HEADSET_MIC] = { .type = HDA_FIXUP_PINS, @@ -7085,7 +7184,7 @@ static const struct hda_fixup alc269_fixups[] = { { } }, .chained = true, - .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC + .chain_id = ALC269_FIXUP_HEADSET_MIC }, [ALC294_FIXUP_ASUS_SPK] = { .type = HDA_FIXUP_VERBS, @@ -7093,6 +7192,8 @@ static const struct hda_fixup alc269_fixups[] = { /* Set EAPD high */ { 0x20, AC_VERB_SET_COEF_INDEX, 0x40 }, { 0x20, AC_VERB_SET_PROC_COEF, 0x8800 }, + { 0x20, AC_VERB_SET_COEF_INDEX, 0x0f }, + { 0x20, AC_VERB_SET_PROC_COEF, 0x7774 }, { } }, .chained = true, @@ -7233,11 +7334,17 @@ static const struct hda_fixup alc269_fixups[] = { .chained = true, .chain_id = ALC294_FIXUP_SPK2_TO_DAC1 }, + [ALC285_FIXUP_THINKPAD_X1_GEN7] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc285_fixup_thinkpad_x1_gen7, + .chained = true, + .chain_id = ALC269_FIXUP_THINKPAD_ACPI + }, [ALC285_FIXUP_THINKPAD_HEADSET_JACK] = { .type = HDA_FIXUP_FUNC, .v.func = alc_fixup_headset_jack, .chained = true, - .chain_id = ALC285_FIXUP_SPEAKER2_TO_DAC1 + .chain_id = ALC285_FIXUP_THINKPAD_X1_GEN7 }, [ALC294_FIXUP_ASUS_HPE] = { .type = HDA_FIXUP_VERBS, @@ -7250,6 +7357,33 @@ static const struct hda_fixup alc269_fixups[] = { .chained = true, .chain_id = ALC294_FIXUP_ASUS_HEADSET_MIC }, + [ALC294_FIXUP_ASUS_GX502_PINS] = { + .type = HDA_FIXUP_PINS, + .v.pins = 
(const struct hda_pintbl[]) { + { 0x19, 0x03a11050 }, /* front HP mic */ + { 0x1a, 0x01a11830 }, /* rear external mic */ + { 0x21, 0x03211020 }, /* front HP out */ + { } + }, + .chained = true, + .chain_id = ALC294_FIXUP_ASUS_GX502_VERBS + }, + [ALC294_FIXUP_ASUS_GX502_VERBS] = { + .type = HDA_FIXUP_VERBS, + .v.verbs = (const struct hda_verb[]) { + /* set 0x15 to HP-OUT ctrl */ + { 0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, 0xc0 }, + /* unmute the 0x15 amp */ + { 0x15, AC_VERB_SET_AMP_GAIN_MUTE, 0xb000 }, + { } + }, + .chained = true, + .chain_id = ALC294_FIXUP_ASUS_GX502_HP + }, + [ALC294_FIXUP_ASUS_GX502_HP] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc294_fixup_gx502_hp, + }, [ALC294_FIXUP_ASUS_COEF_1B] = { .type = HDA_FIXUP_VERBS, .v.verbs = (const struct hda_verb[]) { @@ -7289,6 +7423,166 @@ static const struct hda_fixup alc269_fixups[] = { .chained = true, .chain_id = ALC269_FIXUP_HEADSET_MODE }, + [ALC269VC_FIXUP_ACER_VCOPPERBOX_PINS] = { + .type = HDA_FIXUP_PINS, + .v.pins = (const struct hda_pintbl[]) { + { 0x14, 0x90100120 }, /* use as internal speaker */ + { 0x18, 0x02a111f0 }, /* use as headset mic, without its own jack detect */ + { 0x1a, 0x01011020 }, /* use as line out */ + { }, + }, + .chained = true, + .chain_id = ALC269_FIXUP_HEADSET_MIC + }, + [ALC269VC_FIXUP_ACER_HEADSET_MIC] = { + .type = HDA_FIXUP_PINS, + .v.pins = (const struct hda_pintbl[]) { + { 0x18, 0x02a11030 }, /* use as headset mic */ + { } + }, + .chained = true, + .chain_id = ALC269_FIXUP_HEADSET_MIC + }, + [ALC269VC_FIXUP_ACER_MIC_NO_PRESENCE] = { + .type = HDA_FIXUP_PINS, + .v.pins = (const struct hda_pintbl[]) { + { 0x18, 0x01a11130 }, /* use as headset mic, without its own jack detect */ + { } + }, + .chained = true, + .chain_id = ALC269_FIXUP_HEADSET_MIC + }, + [ALC289_FIXUP_ASUS_GA401] = { + .type = HDA_FIXUP_PINS, + .v.pins = (const struct hda_pintbl[]) { + { 0x19, 0x03a11020 }, /* headset mic with jack detect */ + { } + }, + }, + [ALC289_FIXUP_ASUS_GA502] = { + .type = HDA_FIXUP_PINS, + .v.pins = (const struct hda_pintbl[]) { + { 0x19, 0x03a11020 }, /* headset mic with jack detect */ + { } + }, + }, + [ALC256_FIXUP_ACER_MIC_NO_PRESENCE] = { + .type = HDA_FIXUP_PINS, + .v.pins = (const struct hda_pintbl[]) { + { 0x19, 0x02a11120 }, /* use as headset mic, without its own jack detect */ + { } + }, + .chained = true, + .chain_id = ALC256_FIXUP_ASUS_HEADSET_MODE + }, + [ALC285_FIXUP_HP_GPIO_AMP_INIT] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc285_fixup_hp_gpio_amp_init, + .chained = true, + .chain_id = ALC285_FIXUP_HP_GPIO_LED + }, + [ALC269_FIXUP_CZC_B20] = { + .type = HDA_FIXUP_PINS, + .v.pins = (const struct hda_pintbl[]) { + { 0x12, 0x411111f0 }, + { 0x14, 0x90170110 }, /* speaker */ + { 0x15, 0x032f1020 }, /* HP out */ + { 0x17, 0x411111f0 }, + { 0x18, 0x03ab1040 }, /* mic */ + { 0x19, 0xb7a7013f }, + { 0x1a, 0x0181305f }, + { 0x1b, 0x411111f0 }, + { 0x1d, 0x411111f0 }, + { 0x1e, 0x411111f0 }, + { } + }, + .chain_id = ALC269_FIXUP_DMIC, + }, + [ALC269_FIXUP_CZC_TMI] = { + .type = HDA_FIXUP_PINS, + .v.pins = (const struct hda_pintbl[]) { + { 0x12, 0x4000c000 }, + { 0x14, 0x90170110 }, /* speaker */ + { 0x15, 0x0421401f }, /* HP out */ + { 0x17, 0x411111f0 }, + { 0x18, 0x04a19020 }, /* mic */ + { 0x19, 0x411111f0 }, + { 0x1a, 0x411111f0 }, + { 0x1b, 0x411111f0 }, + { 0x1d, 0x40448505 }, + { 0x1e, 0x411111f0 }, + { 0x20, 0x8000ffff }, + { } + }, + .chain_id = ALC269_FIXUP_DMIC, + }, + [ALC269_FIXUP_CZC_L101] = { + .type = HDA_FIXUP_PINS, + .v.pins = (const struct hda_pintbl[]) { + { 0x12, 0x40000000 }, + { 
0x14, 0x01014010 }, /* speaker */ + { 0x15, 0x411111f0 }, /* HP out */ + { 0x16, 0x411111f0 }, + { 0x18, 0x01a19020 }, /* mic */ + { 0x19, 0x02a19021 }, + { 0x1a, 0x0181302f }, + { 0x1b, 0x0221401f }, + { 0x1c, 0x411111f0 }, + { 0x1d, 0x4044c601 }, + { 0x1e, 0x411111f0 }, + { } + }, + .chain_id = ALC269_FIXUP_DMIC, + }, + [ALC269_FIXUP_LEMOTE_A1802] = { + .type = HDA_FIXUP_PINS, + .v.pins = (const struct hda_pintbl[]) { + { 0x12, 0x40000000 }, + { 0x14, 0x90170110 }, /* speaker */ + { 0x17, 0x411111f0 }, + { 0x18, 0x03a19040 }, /* mic1 */ + { 0x19, 0x90a70130 }, /* mic2 */ + { 0x1a, 0x411111f0 }, + { 0x1b, 0x411111f0 }, + { 0x1d, 0x40489d2d }, + { 0x1e, 0x411111f0 }, + { 0x20, 0x0003ffff }, + { 0x21, 0x03214020 }, + { } + }, + .chain_id = ALC269_FIXUP_DMIC, + }, + [ALC269_FIXUP_LEMOTE_A190X] = { + .type = HDA_FIXUP_PINS, + .v.pins = (const struct hda_pintbl[]) { + { 0x14, 0x99130110 }, /* speaker */ + { 0x15, 0x0121401f }, /* HP out */ + { 0x18, 0x01a19c20 }, /* rear mic */ + { 0x19, 0x99a3092f }, /* front mic */ + { 0x1b, 0x0201401f }, /* front lineout */ + { } + }, + .chain_id = ALC269_FIXUP_DMIC, + }, + [ALC256_FIXUP_INTEL_NUC8_RUGGED] = { + .type = HDA_FIXUP_PINS, + .v.pins = (const struct hda_pintbl[]) { + { 0x1b, 0x01a1913c }, /* use as headset mic, without its own jack detect */ + { } + }, + .chained = true, + .chain_id = ALC269_FIXUP_HEADSET_MODE + }, + [ALC255_FIXUP_XIAOMI_HEADSET_MIC] = { + .type = HDA_FIXUP_VERBS, + .v.verbs = (const struct hda_verb[]) { + { 0x20, AC_VERB_SET_COEF_INDEX, 0x45 }, + { 0x20, AC_VERB_SET_PROC_COEF, 0x5089 }, + { } + }, + .chained = true, + .chain_id = ALC289_FIXUP_ASUS_GA401 + }, }; static const struct snd_pci_quirk alc269_fixup_tbl[] = { @@ -7304,16 +7598,20 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1025, 0x0775, "Acer Aspire E1-572", ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572), SND_PCI_QUIRK(0x1025, 0x079b, "Acer Aspire V5-573G", ALC282_FIXUP_ASPIRE_V5_PINS), SND_PCI_QUIRK(0x1025, 0x102b, "Acer Aspire C24-860", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1025, 0x1065, "Acer Aspire C20-820", ALC269VC_FIXUP_ACER_HEADSET_MIC), SND_PCI_QUIRK(0x1025, 0x106d, "Acer Cloudbook 14", ALC283_FIXUP_CHROME_BOOK), SND_PCI_QUIRK(0x1025, 0x1099, "Acer Aspire E5-523G", ALC255_FIXUP_ACER_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1025, 0x110e, "Acer Aspire ES1-432", ALC255_FIXUP_ACER_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1025, 0x1246, "Acer Predator Helios 500", ALC299_FIXUP_PREDATOR_SPK), + SND_PCI_QUIRK(0x1025, 0x1247, "Acer vCopperbox", ALC269VC_FIXUP_ACER_VCOPPERBOX_PINS), + SND_PCI_QUIRK(0x1025, 0x1248, "Acer Veriton N4660G", ALC269VC_FIXUP_ACER_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1025, 0x128f, "Acer Veriton Z6860G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC), SND_PCI_QUIRK(0x1025, 0x1290, "Acer Veriton Z4860G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC), SND_PCI_QUIRK(0x1025, 0x1291, "Acer Veriton Z4660G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC), SND_PCI_QUIRK(0x1025, 0x1308, "Acer Aspire Z24-890", ALC286_FIXUP_ACER_AIO_HEADSET_MIC), SND_PCI_QUIRK(0x1025, 0x132a, "Acer TravelMate B114-21", ALC233_FIXUP_ACER_HEADSET_MIC), SND_PCI_QUIRK(0x1025, 0x1330, "Acer TravelMate X514-51T", ALC255_FIXUP_ACER_HEADSET_MIC), + SND_PCI_QUIRK(0x1025, 0x1430, "Acer TravelMate B311R-31", ALC256_FIXUP_ACER_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z), SND_PCI_QUIRK(0x1028, 0x054b, "Dell XPS one 2710", ALC275_FIXUP_DELL_XPS), SND_PCI_QUIRK(0x1028, 0x05bd, "Dell Latitude E6440", ALC292_FIXUP_DELL_E7X), @@ -7433,7 +7731,9 @@ static 
const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x103c, 0x83b9, "HP Spectre x360", ALC269_FIXUP_HP_MUTE_LED_MIC3), SND_PCI_QUIRK(0x103c, 0x8497, "HP Envy x360", ALC269_FIXUP_HP_MUTE_LED_MIC3), SND_PCI_QUIRK(0x103c, 0x84e7, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3), - SND_PCI_QUIRK(0x103c, 0x8736, "HP", ALC285_FIXUP_HP_GPIO_LED), + SND_PCI_QUIRK(0x103c, 0x869d, "HP", ALC236_FIXUP_HP_MUTE_LED), + SND_PCI_QUIRK(0x103c, 0x8729, "HP", ALC285_FIXUP_HP_GPIO_LED), + SND_PCI_QUIRK(0x103c, 0x8736, "HP", ALC285_FIXUP_HP_GPIO_AMP_INIT), SND_PCI_QUIRK(0x103c, 0x877a, "HP", ALC285_FIXUP_HP_MUTE_LED), SND_PCI_QUIRK(0x103c, 0x877d, "HP", ALC236_FIXUP_HP_MUTE_LED), SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC), @@ -7455,6 +7755,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1043, 0x17d1, "ASUS UX431FL", ALC294_FIXUP_ASUS_DUAL_SPK), SND_PCI_QUIRK(0x1043, 0x18b1, "Asus MJ401TA", ALC256_FIXUP_ASUS_HEADSET_MIC), SND_PCI_QUIRK(0x1043, 0x18f1, "Asus FX505DT", ALC256_FIXUP_ASUS_HEADSET_MIC), + SND_PCI_QUIRK(0x1043, 0x194e, "ASUS UX563FD", ALC294_FIXUP_ASUS_HPE), SND_PCI_QUIRK(0x1043, 0x19ce, "ASUS B9450FA", ALC294_FIXUP_ASUS_HPE), SND_PCI_QUIRK(0x1043, 0x19e1, "ASUS UX581LV", ALC295_FIXUP_ASUS_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW), @@ -7464,6 +7765,9 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1043, 0x1bbd, "ASUS Z550MA", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1043, 0x1c23, "Asus X55U", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), SND_PCI_QUIRK(0x1043, 0x1ccd, "ASUS X555UB", ALC256_FIXUP_ASUS_MIC), + SND_PCI_QUIRK(0x1043, 0x1e11, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA502), + SND_PCI_QUIRK(0x1043, 0x1f11, "ASUS Zephyrus G14", ALC289_FIXUP_ASUS_GA401), + SND_PCI_QUIRK(0x1043, 0x1881, "ASUS Zephyrus S/M", ALC294_FIXUP_ASUS_GX502_PINS), SND_PCI_QUIRK(0x1043, 0x3030, "ASUS ZN270IE", ALC256_FIXUP_ASUS_AIO_GPIO2), SND_PCI_QUIRK(0x1043, 0x831a, "ASUS P901", ALC269_FIXUP_STEREO_DMIC), SND_PCI_QUIRK(0x1043, 0x834a, "ASUS S101", ALC269_FIXUP_STEREO_DMIC), @@ -7483,11 +7787,16 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x10cf, 0x1629, "Lifebook U7x7", ALC255_FIXUP_LIFEBOOK_U7x7_HEADSET_MIC), SND_PCI_QUIRK(0x10cf, 0x1845, "Lifebook U904", ALC269_FIXUP_LIFEBOOK_EXTMIC), SND_PCI_QUIRK(0x10ec, 0x10f2, "Intel Reference board", ALC700_FIXUP_INTEL_REFERENCE), + SND_PCI_QUIRK(0x10ec, 0x1230, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK), SND_PCI_QUIRK(0x10f7, 0x8338, "Panasonic CF-SZ6", ALC269_FIXUP_HEADSET_MODE), SND_PCI_QUIRK(0x144d, 0xc109, "Samsung Ativ book 9 (NP900X3G)", ALC269_FIXUP_INV_DMIC), SND_PCI_QUIRK(0x144d, 0xc169, "Samsung Notebook 9 Pen (NP930SBE-K01US)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET), SND_PCI_QUIRK(0x144d, 0xc176, "Samsung Notebook 9 Pro (NP930MBE-K04US)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET), + SND_PCI_QUIRK(0x144d, 0xc189, "Samsung Galaxy Flex Book (NT950QCG-X716)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET), + SND_PCI_QUIRK(0x144d, 0xc18a, "Samsung Galaxy Book Ion (NP930XCJ-K01US)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET), + SND_PCI_QUIRK(0x144d, 0xc830, "Samsung Galaxy Book Ion (NT950XCJ-X716A)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET), SND_PCI_QUIRK(0x144d, 0xc740, "Samsung Ativ book 8 (NP870Z5G)", ALC269_FIXUP_ATIV_BOOK_8), + SND_PCI_QUIRK(0x144d, 0xc812, "Samsung Notebook Pen S (NT950SBE-X58)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET), SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", 
ALC283_FIXUP_HEADSET_MIC), SND_PCI_QUIRK(0x1462, 0xb120, "MSI Cubi MS-B120", ALC283_FIXUP_HEADSET_MIC), SND_PCI_QUIRK(0x1462, 0xb171, "Cubi N 8GL (MS-B171)", ALC283_FIXUP_HEADSET_MIC), @@ -7531,8 +7840,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x17aa, 0x224c, "Thinkpad", ALC298_FIXUP_TPT470_DOCK), SND_PCI_QUIRK(0x17aa, 0x224d, "Thinkpad", ALC298_FIXUP_TPT470_DOCK), SND_PCI_QUIRK(0x17aa, 0x225d, "Thinkpad T480", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), - SND_PCI_QUIRK(0x17aa, 0x2292, "Thinkpad X1 Yoga 7th", ALC285_FIXUP_THINKPAD_HEADSET_JACK), - SND_PCI_QUIRK(0x17aa, 0x2293, "Thinkpad X1 Carbon 7th", ALC285_FIXUP_THINKPAD_HEADSET_JACK), + SND_PCI_QUIRK(0x17aa, 0x2292, "Thinkpad X1 Carbon 7th", ALC285_FIXUP_THINKPAD_HEADSET_JACK), SND_PCI_QUIRK(0x17aa, 0x22be, "Thinkpad X1 Carbon 8th", ALC285_FIXUP_THINKPAD_HEADSET_JACK), SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY), SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY), @@ -7568,9 +7876,16 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K), SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD), SND_PCI_QUIRK(0x19e5, 0x3204, "Huawei MACH-WX9", ALC256_FIXUP_HUAWEI_MACH_WX9_PINS), + SND_PCI_QUIRK(0x1b35, 0x1235, "CZC B20", ALC269_FIXUP_CZC_B20), + SND_PCI_QUIRK(0x1b35, 0x1236, "CZC TMI", ALC269_FIXUP_CZC_TMI), + SND_PCI_QUIRK(0x1b35, 0x1237, "CZC L101", ALC269_FIXUP_CZC_L101), SND_PCI_QUIRK(0x1b7d, 0xa831, "Ordissimo EVE2 ", ALC269VB_FIXUP_ORDISSIMO_EVE2), /* Also known as Malata PC-B1303 */ + SND_PCI_QUIRK(0x1d72, 0x1602, "RedmiBook", ALC255_FIXUP_XIAOMI_HEADSET_MIC), SND_PCI_QUIRK(0x1d72, 0x1901, "RedmiBook 14", ALC256_FIXUP_ASUS_HEADSET_MIC), SND_PCI_QUIRK(0x10ec, 0x118c, "Medion EE4254 MD62100", ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE), + SND_PCI_QUIRK(0x1c06, 0x2013, "Lemote A1802", ALC269_FIXUP_LEMOTE_A1802), + SND_PCI_QUIRK(0x1c06, 0x2015, "Lemote A190X", ALC269_FIXUP_LEMOTE_A190X), + SND_PCI_QUIRK(0x8086, 0x2080, "Intel NUC 8 Rugged", ALC256_FIXUP_INTEL_NUC8_RUGGED), #if 0 /* Below is a quirk table taken from the old code. 
@@ -7742,6 +8057,8 @@ static const struct hda_model_fixup alc269_fixup_models[] = { {.id = ALC299_FIXUP_PREDATOR_SPK, .name = "predator-spk"}, {.id = ALC298_FIXUP_HUAWEI_MBX_STEREO, .name = "huawei-mbx-stereo"}, {.id = ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE, .name = "alc256-medion-headset"}, + {.id = ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET, .name = "alc298-samsung-headphone"}, + {.id = ALC255_FIXUP_XIAOMI_HEADSET_MIC, .name = "alc255-xiaomi-headset"}, {} }; #define ALC225_STANDARD_PINS \ @@ -8855,6 +9172,7 @@ enum { ALC662_FIXUP_LED_GPIO1, ALC662_FIXUP_IDEAPAD, ALC272_FIXUP_MARIO, + ALC662_FIXUP_CZC_ET26, ALC662_FIXUP_CZC_P10T, ALC662_FIXUP_SKU_IGNORE, ALC662_FIXUP_HP_RP5800, @@ -8924,6 +9242,25 @@ static const struct hda_fixup alc662_fixups[] = { .type = HDA_FIXUP_FUNC, .v.func = alc272_fixup_mario, }, + [ALC662_FIXUP_CZC_ET26] = { + .type = HDA_FIXUP_PINS, + .v.pins = (const struct hda_pintbl[]) { + {0x12, 0x403cc000}, + {0x14, 0x90170110}, /* speaker */ + {0x15, 0x411111f0}, + {0x16, 0x411111f0}, + {0x18, 0x01a19030}, /* mic */ + {0x19, 0x90a7013f}, /* int-mic */ + {0x1a, 0x01014020}, + {0x1b, 0x0121401f}, + {0x1c, 0x411111f0}, + {0x1d, 0x411111f0}, + {0x1e, 0x40478e35}, + {} + }, + .chained = true, + .chain_id = ALC662_FIXUP_SKU_IGNORE + }, [ALC662_FIXUP_CZC_P10T] = { .type = HDA_FIXUP_VERBS, .v.verbs = (const struct hda_verb[]) { @@ -9307,6 +9644,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = { SND_PCI_QUIRK(0x1849, 0x5892, "ASRock B150M", ALC892_FIXUP_ASROCK_MOBO), SND_PCI_QUIRK(0x19da, 0xa130, "Zotac Z68", ALC662_FIXUP_ZOTAC_Z68), SND_PCI_QUIRK(0x1b0a, 0x01b8, "ACER Veriton", ALC662_FIXUP_ACER_VERITON), + SND_PCI_QUIRK(0x1b35, 0x1234, "CZC ET26", ALC662_FIXUP_CZC_ET26), SND_PCI_QUIRK(0x1b35, 0x2206, "CZC P10T", ALC662_FIXUP_CZC_P10T), SND_PCI_QUIRK(0x1025, 0x0566, "Acer Aspire Ethos 8951G", ALC669_FIXUP_ACER_ASPIRE_ETHOS), diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c index 4b9300babc7d..bfd3fe5eff31 100644 --- a/sound/pci/hda/patch_sigmatel.c +++ b/sound/pci/hda/patch_sigmatel.c @@ -832,7 +832,7 @@ static int stac_auto_create_beep_ctls(struct hda_codec *codec, static struct snd_kcontrol_new beep_vol_ctl = HDA_CODEC_VOLUME(NULL, 0, 0, 0); - /* check for mute support for the the amp */ + /* check for mute support for the amp */ if ((caps & AC_AMPCAP_MUTE) >> AC_AMPCAP_MUTE_SHIFT) { const struct snd_kcontrol_new *temp; if (spec->anabeep_nid == nid) diff --git a/sound/pci/ice1712/prodigy192.c b/sound/pci/ice1712/prodigy192.c index 98f8ac658796..243f757da3ed 100644 --- a/sound/pci/ice1712/prodigy192.c +++ b/sound/pci/ice1712/prodigy192.c @@ -32,7 +32,7 @@ * Experimentally I found out that only a combination of * OCKS0=1, OCKS1=1 (128fs, 64fs output) and ice1724 - * VT1724_MT_I2S_MCLK_128X=0 (256fs input) yields correct - * sampling rate. That means the the FPGA doubles the + * sampling rate. That means that the FPGA doubles the * MCK01 rate. 
* * Copyright (c) 2003 Takashi Iwai diff --git a/sound/pci/oxygen/xonar_dg.c b/sound/pci/oxygen/xonar_dg.c index c3f8721624cd..b90421a1d909 100644 --- a/sound/pci/oxygen/xonar_dg.c +++ b/sound/pci/oxygen/xonar_dg.c @@ -29,7 +29,7 @@ * GPIO 4 <- headphone detect * GPIO 5 -> enable ADC analog circuit for the left channel * GPIO 6 -> enable ADC analog circuit for the right channel - * GPIO 7 -> switch green rear output jack between CS4245 and and the first + * GPIO 7 -> switch green rear output jack between CS4245 and the first * channel of CS4361 (mechanical relay) * GPIO 8 -> enable output to speakers * diff --git a/sound/soc/codecs/max98373.c b/sound/soc/codecs/max98373.c index cae1def8902d..96718e3a1ad0 100644 --- a/sound/soc/codecs/max98373.c +++ b/sound/soc/codecs/max98373.c @@ -850,8 +850,8 @@ static int max98373_resume(struct device *dev) { struct max98373_priv *max98373 = dev_get_drvdata(dev); - max98373_reset(max98373, dev); regcache_cache_only(max98373->regmap, false); + max98373_reset(max98373, dev); regcache_sync(max98373->regmap); return 0; } diff --git a/sound/soc/codecs/msm8916-wcd-analog.c b/sound/soc/codecs/msm8916-wcd-analog.c index c820d5a386f6..cf6516693e4e 100644 --- a/sound/soc/codecs/msm8916-wcd-analog.c +++ b/sound/soc/codecs/msm8916-wcd-analog.c @@ -19,8 +19,8 @@ #define CDC_D_REVISION1 (0xf000) #define CDC_D_PERPH_SUBTYPE (0xf005) -#define CDC_D_INT_EN_SET (0x015) -#define CDC_D_INT_EN_CLR (0x016) +#define CDC_D_INT_EN_SET (0xf015) +#define CDC_D_INT_EN_CLR (0xf016) #define MBHC_SWITCH_INT BIT(7) #define MBHC_MIC_ELECTRICAL_INS_REM_DET BIT(6) #define MBHC_BUTTON_PRESS_DET BIT(5) diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c index 19662ee330d6..c83f7f5da96b 100644 --- a/sound/soc/codecs/rt5645.c +++ b/sound/soc/codecs/rt5645.c @@ -3625,6 +3625,12 @@ static const struct rt5645_platform_data asus_t100ha_platform_data = { .inv_jd1_1 = true, }; +static const struct rt5645_platform_data asus_t101ha_platform_data = { + .dmic1_data_pin = RT5645_DMIC_DATA_IN2N, + .dmic2_data_pin = RT5645_DMIC2_DISABLE, + .jd_mode = 3, +}; + static const struct rt5645_platform_data lenovo_ideapad_miix_310_pdata = { .jd_mode = 3, .in2_diff = true, @@ -3702,6 +3708,14 @@ static const struct dmi_system_id dmi_platform_data[] = { }, .driver_data = (void *)&asus_t100ha_platform_data, }, + { + .ident = "ASUS T101HA", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "T101HA"), + }, + .driver_data = (void *)&asus_t101ha_platform_data, + }, { .ident = "MINIX Z83-4", .matches = { diff --git a/sound/soc/codecs/rt5670.c b/sound/soc/codecs/rt5670.c index 70fee6849ab0..f21181734170 100644 --- a/sound/soc/codecs/rt5670.c +++ b/sound/soc/codecs/rt5670.c @@ -31,18 +31,19 @@ #include "rt5670.h" #include "rt5670-dsp.h" -#define RT5670_DEV_GPIO BIT(0) -#define RT5670_IN2_DIFF BIT(1) -#define RT5670_DMIC_EN BIT(2) -#define RT5670_DMIC1_IN2P BIT(3) -#define RT5670_DMIC1_GPIO6 BIT(4) -#define RT5670_DMIC1_GPIO7 BIT(5) -#define RT5670_DMIC2_INR BIT(6) -#define RT5670_DMIC2_GPIO8 BIT(7) -#define RT5670_DMIC3_GPIO5 BIT(8) -#define RT5670_JD_MODE1 BIT(9) -#define RT5670_JD_MODE2 BIT(10) -#define RT5670_JD_MODE3 BIT(11) +#define RT5670_DEV_GPIO BIT(0) +#define RT5670_IN2_DIFF BIT(1) +#define RT5670_DMIC_EN BIT(2) +#define RT5670_DMIC1_IN2P BIT(3) +#define RT5670_DMIC1_GPIO6 BIT(4) +#define RT5670_DMIC1_GPIO7 BIT(5) +#define RT5670_DMIC2_INR BIT(6) +#define RT5670_DMIC2_GPIO8 BIT(7) +#define RT5670_DMIC3_GPIO5 BIT(8) +#define RT5670_JD_MODE1 BIT(9) 
+#define RT5670_JD_MODE2 BIT(10) +#define RT5670_JD_MODE3 BIT(11) +#define RT5670_GPIO1_IS_EXT_SPK_EN BIT(12) static unsigned long rt5670_quirk; static unsigned int quirk_override; @@ -1447,6 +1448,33 @@ static int rt5670_hp_event(struct snd_soc_dapm_widget *w, return 0; } +static int rt5670_spk_event(struct snd_soc_dapm_widget *w, + struct snd_kcontrol *kcontrol, int event) +{ + struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm); + struct rt5670_priv *rt5670 = snd_soc_component_get_drvdata(component); + + if (!rt5670->pdata.gpio1_is_ext_spk_en) + return 0; + + switch (event) { + case SND_SOC_DAPM_POST_PMU: + regmap_update_bits(rt5670->regmap, RT5670_GPIO_CTRL2, + RT5670_GP1_OUT_MASK, RT5670_GP1_OUT_HI); + break; + + case SND_SOC_DAPM_PRE_PMD: + regmap_update_bits(rt5670->regmap, RT5670_GPIO_CTRL2, + RT5670_GP1_OUT_MASK, RT5670_GP1_OUT_LO); + break; + + default: + return 0; + } + + return 0; +} + static int rt5670_bst1_event(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol, int event) { @@ -1860,7 +1888,9 @@ static const struct snd_soc_dapm_widget rt5670_specific_dapm_widgets[] = { }; static const struct snd_soc_dapm_widget rt5672_specific_dapm_widgets[] = { - SND_SOC_DAPM_PGA("SPO Amp", SND_SOC_NOPM, 0, 0, NULL, 0), + SND_SOC_DAPM_PGA_E("SPO Amp", SND_SOC_NOPM, 0, 0, NULL, 0, + rt5670_spk_event, SND_SOC_DAPM_PRE_PMD | + SND_SOC_DAPM_POST_PMU), SND_SOC_DAPM_OUTPUT("SPOLP"), SND_SOC_DAPM_OUTPUT("SPOLN"), SND_SOC_DAPM_OUTPUT("SPORP"), @@ -2857,14 +2887,14 @@ static const struct dmi_system_id dmi_platform_intel_quirks[] = { }, { .callback = rt5670_quirk_cb, - .ident = "Lenovo Thinkpad Tablet 10", + .ident = "Lenovo Miix 2 10", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Miix 2 10"), }, .driver_data = (unsigned long *)(RT5670_DMIC_EN | RT5670_DMIC1_IN2P | - RT5670_DEV_GPIO | + RT5670_GPIO1_IS_EXT_SPK_EN | RT5670_JD_MODE2), }, { @@ -2924,6 +2954,10 @@ static int rt5670_i2c_probe(struct i2c_client *i2c, rt5670->pdata.dev_gpio = true; dev_info(&i2c->dev, "quirk dev_gpio\n"); } + if (rt5670_quirk & RT5670_GPIO1_IS_EXT_SPK_EN) { + rt5670->pdata.gpio1_is_ext_spk_en = true; + dev_info(&i2c->dev, "quirk GPIO1 is external speaker enable\n"); + } if (rt5670_quirk & RT5670_IN2_DIFF) { rt5670->pdata.in2_diff = true; dev_info(&i2c->dev, "quirk IN2_DIFF\n"); @@ -3023,6 +3057,13 @@ static int rt5670_i2c_probe(struct i2c_client *i2c, RT5670_GP1_PF_MASK, RT5670_GP1_PF_OUT); } + if (rt5670->pdata.gpio1_is_ext_spk_en) { + regmap_update_bits(rt5670->regmap, RT5670_GPIO_CTRL1, + RT5670_GP1_PIN_MASK, RT5670_GP1_PIN_GPIO1); + regmap_update_bits(rt5670->regmap, RT5670_GPIO_CTRL2, + RT5670_GP1_PF_MASK, RT5670_GP1_PF_OUT); + } + if (rt5670->pdata.jd_mode) { regmap_update_bits(rt5670->regmap, RT5670_GLB_CLK, RT5670_SCLK_SRC_MASK, RT5670_SCLK_SRC_RCCLK); diff --git a/sound/soc/codecs/rt5670.h b/sound/soc/codecs/rt5670.h index a8c3e44770b8..de0203369b7c 100644 --- a/sound/soc/codecs/rt5670.h +++ b/sound/soc/codecs/rt5670.h @@ -757,7 +757,7 @@ #define RT5670_PWR_VREF2_BIT 4 #define RT5670_PWR_FV2 (0x1 << 3) #define RT5670_PWR_FV2_BIT 3 -#define RT5670_LDO_SEL_MASK (0x3) +#define RT5670_LDO_SEL_MASK (0x7) #define RT5670_LDO_SEL_SFT 0 /* Power Management for Analog 2 (0x64) */ diff --git a/sound/soc/codecs/wm8958-dsp2.c b/sound/soc/codecs/wm8958-dsp2.c index 18535b326680..04f23477039a 100644 --- a/sound/soc/codecs/wm8958-dsp2.c +++ b/sound/soc/codecs/wm8958-dsp2.c @@ -416,8 +416,12 @@ int wm8958_aif_ev(struct snd_soc_dapm_widget *w, struct 
snd_kcontrol *kcontrol, int event) { struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm); + struct wm8994 *control = dev_get_drvdata(component->dev->parent); int i; + if (control->type != WM8958) + return 0; + switch (event) { case SND_SOC_DAPM_POST_PMU: case SND_SOC_DAPM_PRE_PMU: diff --git a/sound/soc/fsl/fsl_asrc_dma.c b/sound/soc/fsl/fsl_asrc_dma.c index 3347577ce677..bd435aa6591b 100644 --- a/sound/soc/fsl/fsl_asrc_dma.c +++ b/sound/soc/fsl/fsl_asrc_dma.c @@ -266,6 +266,7 @@ static int fsl_asrc_dma_hw_params(struct snd_pcm_substream *substream, ret = dmaengine_slave_config(pair->dma_chan[dir], &config_be); if (ret) { dev_err(dev, "failed to config DMA channel for Back-End\n"); + dma_release_channel(pair->dma_chan[dir]); return ret; } diff --git a/sound/soc/fsl/fsl_sai.c b/sound/soc/fsl/fsl_sai.c index 6508e2d2bf05..74e63f2e8d6a 100644 --- a/sound/soc/fsl/fsl_sai.c +++ b/sound/soc/fsl/fsl_sai.c @@ -997,10 +997,10 @@ static int fsl_sai_dai_probe(struct snd_soc_dai *cpu_dai) unsigned char offset = sai->soc->reg_offset; regmap_update_bits(sai->regmap, FSL_SAI_TCR1(offset), - sai->soc->fifo_depth - 1, + FSL_SAI_CR1_RFW_MASK(sai->soc->fifo_depth), sai->soc->fifo_depth - FSL_SAI_MAXBURST_TX); regmap_update_bits(sai->regmap, FSL_SAI_RCR1(offset), - sai->soc->fifo_depth - 1, + FSL_SAI_CR1_RFW_MASK(sai->soc->fifo_depth), FSL_SAI_MAXBURST_RX - 1); snd_soc_dai_init_dma_data(cpu_dai, &sai->dma_params_tx, diff --git a/sound/soc/fsl/fsl_sai.h b/sound/soc/fsl/fsl_sai.h index 91e153e88ae2..e59ba6c9c01f 100644 --- a/sound/soc/fsl/fsl_sai.h +++ b/sound/soc/fsl/fsl_sai.h @@ -110,7 +110,7 @@ #define FSL_SAI_CSR_FRDE BIT(0) /* SAI Transmit and Receive Configuration 1 Register */ -#define FSL_SAI_CR1_RFW_MASK 0x1f +#define FSL_SAI_CR1_RFW_MASK(x) ((x) - 1) /* SAI Transmit and Receive Configuration 2 Register */ #define FSL_SAI_CR2_SYNC BIT(30) diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c index ee3df508dbfe..a7d50d60cadb 100755 --- a/sound/soc/fsl/fsl_ssi.c +++ b/sound/soc/fsl/fsl_ssi.c @@ -681,8 +681,9 @@ static int fsl_ssi_set_bclk(struct snd_pcm_substream *substream, struct regmap *regs = ssi->regs; u32 pm = 999, div2, psr, stccr, mask, afreq, factor, i; unsigned long clkrate, baudrate, tmprate; - unsigned int slots = params_channels(hw_params); - unsigned int slot_width = 32; + unsigned int channels = params_channels(hw_params); + unsigned int slot_width = params_width(hw_params); + unsigned int slots = 2; u64 sub, savesub = 100000; unsigned int freq; bool baudclk_is_used; @@ -691,10 +692,14 @@ static int fsl_ssi_set_bclk(struct snd_pcm_substream *substream, /* Override slots and slot_width if being specifically set... 
*/ if (ssi->slots) slots = ssi->slots; - /* ...but keep 32 bits if slots is 2 -- I2S Master mode */ - if (ssi->slot_width && slots != 2) + if (ssi->slot_width) slot_width = ssi->slot_width; + /* ...but force 32 bits for stereo audio using I2S Master Mode */ + if (channels == 2 && + (ssi->i2s_net & SSI_SCR_I2S_MODE_MASK) == SSI_SCR_I2S_MODE_MASTER) + slot_width = 32; + /* Generate bit clock based on the slot number and slot width */ freq = slots * slot_width * params_rate(hw_params); diff --git a/sound/soc/img/img-i2s-in.c b/sound/soc/img/img-i2s-in.c index fdd2c73fd2fa..bb668551dd4b 100644 --- a/sound/soc/img/img-i2s-in.c +++ b/sound/soc/img/img-i2s-in.c @@ -343,8 +343,10 @@ static int img_i2s_in_set_fmt(struct snd_soc_dai *dai, unsigned int fmt) chan_control_mask = IMG_I2S_IN_CH_CTL_CLK_TRANS_MASK; ret = pm_runtime_get_sync(i2s->dev); - if (ret < 0) + if (ret < 0) { + pm_runtime_put_noidle(i2s->dev); return ret; + } for (i = 0; i < i2s->active_channels; i++) img_i2s_in_ch_disable(i2s, i); @@ -482,6 +484,7 @@ static int img_i2s_in_probe(struct platform_device *pdev) if (IS_ERR(rst)) { if (PTR_ERR(rst) == -EPROBE_DEFER) { ret = -EPROBE_DEFER; + pm_runtime_put(&pdev->dev); goto err_suspend; } diff --git a/sound/soc/img/img-parallel-out.c b/sound/soc/img/img-parallel-out.c index 5ddbe3a31c2e..4da49a42e854 100644 --- a/sound/soc/img/img-parallel-out.c +++ b/sound/soc/img/img-parallel-out.c @@ -163,8 +163,10 @@ static int img_prl_out_set_fmt(struct snd_soc_dai *dai, unsigned int fmt) } ret = pm_runtime_get_sync(prl->dev); - if (ret < 0) + if (ret < 0) { + pm_runtime_put_noidle(prl->dev); return ret; + } reg = img_prl_out_readl(prl, IMG_PRL_OUT_CTL); reg = (reg & ~IMG_PRL_OUT_CTL_EDGE_MASK) | control_set; diff --git a/sound/soc/intel/atom/sst-mfld-platform-pcm.c b/sound/soc/intel/atom/sst-mfld-platform-pcm.c index 8cc3cc363eb0..31f1dd6541aa 100644 --- a/sound/soc/intel/atom/sst-mfld-platform-pcm.c +++ b/sound/soc/intel/atom/sst-mfld-platform-pcm.c @@ -331,7 +331,7 @@ static int sst_media_open(struct snd_pcm_substream *substream, ret_val = power_up_sst(stream); if (ret_val < 0) - return ret_val; + goto out_power_up; /* Make sure, that the period size is always even */ snd_pcm_hw_constraint_step(substream->runtime, 0, @@ -340,8 +340,9 @@ static int sst_media_open(struct snd_pcm_substream *substream, return snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS); out_ops: - kfree(stream); mutex_unlock(&sst_lock); +out_power_up: + kfree(stream); return ret_val; } diff --git a/sound/soc/intel/boards/bxt_rt298.c b/sound/soc/intel/boards/bxt_rt298.c index adf416a49b48..60fb87495050 100644 --- a/sound/soc/intel/boards/bxt_rt298.c +++ b/sound/soc/intel/boards/bxt_rt298.c @@ -556,6 +556,7 @@ static int bxt_card_late_probe(struct snd_soc_card *card) /* broxton audio machine driver for SPT + RT298S */ static struct snd_soc_card broxton_rt298 = { .name = "broxton-rt298", + .owner = THIS_MODULE, .dai_link = broxton_rt298_dais, .num_links = ARRAY_SIZE(broxton_rt298_dais), .controls = broxton_controls, @@ -571,6 +572,7 @@ static struct snd_soc_card broxton_rt298 = { static struct snd_soc_card geminilake_rt298 = { .name = "geminilake-rt298", + .owner = THIS_MODULE, .dai_link = broxton_rt298_dais, .num_links = ARRAY_SIZE(broxton_rt298_dais), .controls = broxton_controls, diff --git a/sound/soc/intel/boards/bytcht_es8316.c b/sound/soc/intel/boards/bytcht_es8316.c index 54e97455d7f6..ed332177b0f9 100644 --- a/sound/soc/intel/boards/bytcht_es8316.c +++ b/sound/soc/intel/boards/bytcht_es8316.c @@ -548,8 
+548,10 @@ static int snd_byt_cht_es8316_mc_probe(struct platform_device *pdev) if (cnt) { ret = device_add_properties(codec_dev, props); - if (ret) + if (ret) { + put_device(codec_dev); return ret; + } } devm_acpi_dev_add_driver_gpios(codec_dev, byt_cht_es8316_gpios); diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c index e62e1d7815aa..f7964d1ec486 100644 --- a/sound/soc/intel/boards/bytcr_rt5640.c +++ b/sound/soc/intel/boards/bytcr_rt5640.c @@ -742,6 +742,30 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = { BYT_RT5640_SSP0_AIF1 | BYT_RT5640_MCLK_EN), }, + { /* Toshiba Encore WT8-A */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "TOSHIBA WT8-A"), + }, + .driver_data = (void *)(BYT_RT5640_DMIC1_MAP | + BYT_RT5640_JD_SRC_JD2_IN4N | + BYT_RT5640_OVCD_TH_2000UA | + BYT_RT5640_OVCD_SF_0P75 | + BYT_RT5640_JD_NOT_INV | + BYT_RT5640_MCLK_EN), + }, + { /* Toshiba Encore WT10-A */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "TOSHIBA WT10-A-103"), + }, + .driver_data = (void *)(BYT_RT5640_DMIC1_MAP | + BYT_RT5640_JD_SRC_JD1_IN4P | + BYT_RT5640_OVCD_TH_2000UA | + BYT_RT5640_OVCD_SF_0P75 | + BYT_RT5640_SSP0_AIF2 | + BYT_RT5640_MCLK_EN), + }, { /* Catch-all for generic Insyde tablets, must be last */ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Insyde"), diff --git a/sound/soc/meson/axg-card.c b/sound/soc/meson/axg-card.c index 1f698adde506..7126344017fa 100644 --- a/sound/soc/meson/axg-card.c +++ b/sound/soc/meson/axg-card.c @@ -266,7 +266,7 @@ static int axg_card_add_tdm_loopback(struct snd_soc_card *card, lb = &card->dai_link[*index + 1]; - lb->name = kasprintf(GFP_KERNEL, "%s-lb", pad->name); + lb->name = devm_kasprintf(card->dev, GFP_KERNEL, "%s-lb", pad->name); if (!lb->name) return -ENOMEM; diff --git a/sound/soc/meson/axg-fifo.c b/sound/soc/meson/axg-fifo.c index d286dff3171d..898ef1d5608f 100644 --- a/sound/soc/meson/axg-fifo.c +++ b/sound/soc/meson/axg-fifo.c @@ -244,7 +244,7 @@ static int axg_fifo_pcm_open(struct snd_pcm_substream *ss) /* Enable pclk to access registers and clock the fifo ip */ ret = clk_prepare_enable(fifo->pclk); if (ret) - return ret; + goto free_irq; /* Setup status2 so it reports the memory pointer */ regmap_update_bits(fifo->map, FIFO_CTRL1, @@ -264,8 +264,14 @@ static int axg_fifo_pcm_open(struct snd_pcm_substream *ss) /* Take memory arbitror out of reset */ ret = reset_control_deassert(fifo->arb); if (ret) - clk_disable_unprepare(fifo->pclk); + goto free_clk; + return 0; + +free_clk: + clk_disable_unprepare(fifo->pclk); +free_irq: + free_irq(fifo->irq, ss); return ret; } diff --git a/sound/soc/meson/axg-tdm-formatter.c b/sound/soc/meson/axg-tdm-formatter.c index 358c8c0d861c..f7e8e9da68a0 100644 --- a/sound/soc/meson/axg-tdm-formatter.c +++ b/sound/soc/meson/axg-tdm-formatter.c @@ -70,7 +70,7 @@ EXPORT_SYMBOL_GPL(axg_tdm_formatter_set_channel_masks); static int axg_tdm_formatter_enable(struct axg_tdm_formatter *formatter) { struct axg_tdm_stream *ts = formatter->stream; - bool invert = formatter->drv->quirks->invert_sclk; + bool invert; int ret; /* Do nothing if the formatter is already enabled */ @@ -96,11 +96,12 @@ static int axg_tdm_formatter_enable(struct axg_tdm_formatter *formatter) return ret; /* - * If sclk is inverted, invert it back and provide the inversion - * required by the formatter + * If sclk is inverted, it means the bit should latched on the + * rising edge which is what our HW expects. 
If not, we need to + * invert it before the formatter. */ - invert ^= axg_tdm_sclk_invert(ts->iface->fmt); - ret = clk_set_phase(formatter->sclk, invert ? 180 : 0); + invert = axg_tdm_sclk_invert(ts->iface->fmt); + ret = clk_set_phase(formatter->sclk, invert ? 0 : 180); if (ret) return ret; diff --git a/sound/soc/meson/axg-tdm-formatter.h b/sound/soc/meson/axg-tdm-formatter.h index 9ef98e955cb2..a1f0dcc0ff13 100644 --- a/sound/soc/meson/axg-tdm-formatter.h +++ b/sound/soc/meson/axg-tdm-formatter.h @@ -16,7 +16,6 @@ struct snd_kcontrol; struct axg_tdm_formatter_hw { unsigned int skew_offset; - bool invert_sclk; }; struct axg_tdm_formatter_ops { diff --git a/sound/soc/meson/axg-tdm-interface.c b/sound/soc/meson/axg-tdm-interface.c index d51f3344be7c..e25336f73912 100644 --- a/sound/soc/meson/axg-tdm-interface.c +++ b/sound/soc/meson/axg-tdm-interface.c @@ -119,16 +119,23 @@ static int axg_tdm_iface_set_fmt(struct snd_soc_dai *dai, unsigned int fmt) { struct axg_tdm_iface *iface = snd_soc_dai_get_drvdata(dai); - /* These modes are not supported */ - if (fmt & (SND_SOC_DAIFMT_CBS_CFM | SND_SOC_DAIFMT_CBM_CFS)) { - dev_err(dai->dev, "only CBS_CFS and CBM_CFM are supported\n"); - return -EINVAL; - } + switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { + case SND_SOC_DAIFMT_CBS_CFS: + if (!iface->mclk) { + dev_err(dai->dev, "cpu clock master: mclk missing\n"); + return -ENODEV; + } + break; - /* If the TDM interface is the clock master, it requires mclk */ - if (!iface->mclk && (fmt & SND_SOC_DAIFMT_CBS_CFS)) { - dev_err(dai->dev, "cpu clock master: mclk missing\n"); - return -ENODEV; + case SND_SOC_DAIFMT_CBM_CFM: + break; + + case SND_SOC_DAIFMT_CBS_CFM: + case SND_SOC_DAIFMT_CBM_CFS: + dev_err(dai->dev, "only CBS_CFS and CBM_CFM are supported\n"); + /* Fall-through */ + default: + return -EINVAL; } iface->fmt = fmt; @@ -319,7 +326,8 @@ static int axg_tdm_iface_hw_params(struct snd_pcm_substream *substream, if (ret) return ret; - if (iface->fmt & SND_SOC_DAIFMT_CBS_CFS) { + if ((iface->fmt & SND_SOC_DAIFMT_MASTER_MASK) == + SND_SOC_DAIFMT_CBS_CFS) { ret = axg_tdm_iface_set_sclk(dai, params); if (ret) return ret; diff --git a/sound/soc/meson/axg-tdmin.c b/sound/soc/meson/axg-tdmin.c index 973d4c02ef8d..88ed95ae886b 100644 --- a/sound/soc/meson/axg-tdmin.c +++ b/sound/soc/meson/axg-tdmin.c @@ -228,15 +228,29 @@ static const struct axg_tdm_formatter_driver axg_tdmin_drv = { .regmap_cfg = &axg_tdmin_regmap_cfg, .ops = &axg_tdmin_ops, .quirks = &(const struct axg_tdm_formatter_hw) { - .invert_sclk = false, .skew_offset = 2, }, }; +static const struct axg_tdm_formatter_driver g12a_tdmin_drv = { + .component_drv = &axg_tdmin_component_drv, + .regmap_cfg = &axg_tdmin_regmap_cfg, + .ops = &axg_tdmin_ops, + .quirks = &(const struct axg_tdm_formatter_hw) { + .skew_offset = 3, + }, +}; + static const struct of_device_id axg_tdmin_of_match[] = { { .compatible = "amlogic,axg-tdmin", .data = &axg_tdmin_drv, + }, { + .compatible = "amlogic,g12a-tdmin", + .data = &g12a_tdmin_drv, + }, { + .compatible = "amlogic,sm1-tdmin", + .data = &g12a_tdmin_drv, }, {} }; MODULE_DEVICE_TABLE(of, axg_tdmin_of_match); diff --git a/sound/soc/meson/axg-tdmout.c b/sound/soc/meson/axg-tdmout.c index 418ec314b37d..3ceabddae629 100644 --- a/sound/soc/meson/axg-tdmout.c +++ b/sound/soc/meson/axg-tdmout.c @@ -238,7 +238,6 @@ static const struct axg_tdm_formatter_driver axg_tdmout_drv = { .regmap_cfg = &axg_tdmout_regmap_cfg, .ops = &axg_tdmout_ops, .quirks = &(const struct axg_tdm_formatter_hw) { - .invert_sclk = true, .skew_offset = 1, }, 
}; @@ -248,7 +247,6 @@ static const struct axg_tdm_formatter_driver g12a_tdmout_drv = { .regmap_cfg = &axg_tdmout_regmap_cfg, .ops = &axg_tdmout_ops, .quirks = &(const struct axg_tdm_formatter_hw) { - .invert_sclk = true, .skew_offset = 2, }, }; @@ -309,7 +307,6 @@ static const struct axg_tdm_formatter_driver sm1_tdmout_drv = { .regmap_cfg = &axg_tdmout_regmap_cfg, .ops = &axg_tdmout_ops, .quirks = &(const struct axg_tdm_formatter_hw) { - .invert_sclk = true, .skew_offset = 2, }, }; diff --git a/sound/soc/meson/axg-toddr.c b/sound/soc/meson/axg-toddr.c index ecf41c7549a6..32b9fd59353a 100644 --- a/sound/soc/meson/axg-toddr.c +++ b/sound/soc/meson/axg-toddr.c @@ -18,6 +18,7 @@ #define CTRL0_TODDR_SEL_RESAMPLE BIT(30) #define CTRL0_TODDR_EXT_SIGNED BIT(29) #define CTRL0_TODDR_PP_MODE BIT(28) +#define CTRL0_TODDR_SYNC_CH BIT(27) #define CTRL0_TODDR_TYPE_MASK GENMASK(15, 13) #define CTRL0_TODDR_TYPE(x) ((x) << 13) #define CTRL0_TODDR_MSB_POS_MASK GENMASK(12, 8) @@ -184,10 +185,31 @@ static const struct axg_fifo_match_data axg_toddr_match_data = { .dai_drv = &axg_toddr_dai_drv }; +static int g12a_toddr_dai_startup(struct snd_pcm_substream *substream, + struct snd_soc_dai *dai) +{ + struct axg_fifo *fifo = snd_soc_dai_get_drvdata(dai); + int ret; + + ret = axg_toddr_dai_startup(substream, dai); + if (ret) + return ret; + + /* + * Make sure the first channel ends up in the at beginning of the output + * As weird as it looks, without this the first channel may be misplaced + * in memory, with a random shift of 2 channels. + */ + regmap_update_bits(fifo->map, FIFO_CTRL0, CTRL0_TODDR_SYNC_CH, + CTRL0_TODDR_SYNC_CH); + + return 0; +} + static const struct snd_soc_dai_ops g12a_toddr_ops = { .prepare = g12a_toddr_dai_prepare, .hw_params = axg_toddr_dai_hw_params, - .startup = axg_toddr_dai_startup, + .startup = g12a_toddr_dai_startup, .shutdown = axg_toddr_dai_shutdown, }; diff --git a/sound/soc/qcom/Kconfig b/sound/soc/qcom/Kconfig index 60086858e920..b9d8fe9f996a 100644 --- a/sound/soc/qcom/Kconfig +++ b/sound/soc/qcom/Kconfig @@ -72,7 +72,7 @@ config SND_SOC_QDSP6_ASM_DAI config SND_SOC_QDSP6 tristate "SoC ALSA audio driver for QDSP6" - depends on QCOM_APR && HAS_DMA + depends on QCOM_APR select SND_SOC_QDSP6_COMMON select SND_SOC_QDSP6_CORE select SND_SOC_QDSP6_AFE diff --git a/sound/soc/qcom/apq8016_sbc.c b/sound/soc/qcom/apq8016_sbc.c index ac75838bbfab..15a88020dfab 100644 --- a/sound/soc/qcom/apq8016_sbc.c +++ b/sound/soc/qcom/apq8016_sbc.c @@ -235,6 +235,7 @@ static int apq8016_sbc_platform_probe(struct platform_device *pdev) return -ENOMEM; card->dev = dev; + card->owner = THIS_MODULE; card->dapm_widgets = apq8016_sbc_dapm_widgets; card->num_dapm_widgets = ARRAY_SIZE(apq8016_sbc_dapm_widgets); data = apq8016_sbc_parse_of(card); diff --git a/sound/soc/qcom/apq8096.c b/sound/soc/qcom/apq8096.c index 94363fd6846a..c10c5f2ec29b 100644 --- a/sound/soc/qcom/apq8096.c +++ b/sound/soc/qcom/apq8096.c @@ -114,6 +114,7 @@ static int apq8096_platform_probe(struct platform_device *pdev) return -ENOMEM; card->dev = dev; + card->owner = THIS_MODULE; dev_set_drvdata(dev, card); ret = qcom_snd_parse_of(card); if (ret) { diff --git a/sound/soc/qcom/common.c b/sound/soc/qcom/common.c index 6c20bdd850f3..10322690c0ea 100644 --- a/sound/soc/qcom/common.c +++ b/sound/soc/qcom/common.c @@ -4,6 +4,7 @@ #include #include "common.h" +#include "qdsp6/q6afe.h" int qcom_snd_parse_of(struct snd_soc_card *card) { @@ -44,8 +45,10 @@ int qcom_snd_parse_of(struct snd_soc_card *card) for_each_child_of_node(dev->of_node, np) { 
dlc = devm_kzalloc(dev, 2 * sizeof(*dlc), GFP_KERNEL); - if (!dlc) - return -ENOMEM; + if (!dlc) { + ret = -ENOMEM; + goto err; + } link->cpus = &dlc[0]; link->platforms = &dlc[1]; @@ -101,6 +104,15 @@ int qcom_snd_parse_of(struct snd_soc_card *card) } link->no_pcm = 1; link->ignore_pmdown_time = 1; + + if (q6afe_is_rx_port(link->id)) { + link->dpcm_playback = 1; + link->dpcm_capture = 0; + } else { + link->dpcm_playback = 0; + link->dpcm_capture = 1; + } + } else { dlc = devm_kzalloc(dev, sizeof(*dlc), GFP_KERNEL); if (!dlc) @@ -113,12 +125,12 @@ int qcom_snd_parse_of(struct snd_soc_card *card) link->codecs->dai_name = "snd-soc-dummy-dai"; link->codecs->name = "snd-soc-dummy"; link->dynamic = 1; + link->dpcm_playback = 1; + link->dpcm_capture = 1; } link->ignore_suspend = 1; link->nonatomic = 1; - link->dpcm_playback = 1; - link->dpcm_capture = 1; link->stream_name = link->name; link++; diff --git a/sound/soc/qcom/qdsp6/q6afe-dai.c b/sound/soc/qcom/qdsp6/q6afe-dai.c index 2a5302f1db98..0168af849272 100644 --- a/sound/soc/qcom/qdsp6/q6afe-dai.c +++ b/sound/soc/qcom/qdsp6/q6afe-dai.c @@ -1150,206 +1150,206 @@ static int q6afe_of_xlate_dai_name(struct snd_soc_component *component, } static const struct snd_soc_dapm_widget q6afe_dai_widgets[] = { - SND_SOC_DAPM_AIF_IN("HDMI_RX", NULL, 0, 0, 0, 0), - SND_SOC_DAPM_AIF_IN("SLIMBUS_0_RX", NULL, 0, 0, 0, 0), - SND_SOC_DAPM_AIF_IN("SLIMBUS_1_RX", NULL, 0, 0, 0, 0), - SND_SOC_DAPM_AIF_IN("SLIMBUS_2_RX", NULL, 0, 0, 0, 0), - SND_SOC_DAPM_AIF_IN("SLIMBUS_3_RX", NULL, 0, 0, 0, 0), - SND_SOC_DAPM_AIF_IN("SLIMBUS_4_RX", NULL, 0, 0, 0, 0), - SND_SOC_DAPM_AIF_IN("SLIMBUS_5_RX", NULL, 0, 0, 0, 0), - SND_SOC_DAPM_AIF_IN("SLIMBUS_6_RX", NULL, 0, 0, 0, 0), - SND_SOC_DAPM_AIF_OUT("SLIMBUS_0_TX", NULL, 0, 0, 0, 0), - SND_SOC_DAPM_AIF_OUT("SLIMBUS_1_TX", NULL, 0, 0, 0, 0), - SND_SOC_DAPM_AIF_OUT("SLIMBUS_2_TX", NULL, 0, 0, 0, 0), - SND_SOC_DAPM_AIF_OUT("SLIMBUS_3_TX", NULL, 0, 0, 0, 0), - SND_SOC_DAPM_AIF_OUT("SLIMBUS_4_TX", NULL, 0, 0, 0, 0), - SND_SOC_DAPM_AIF_OUT("SLIMBUS_5_TX", NULL, 0, 0, 0, 0), - SND_SOC_DAPM_AIF_OUT("SLIMBUS_6_TX", NULL, 0, 0, 0, 0), + SND_SOC_DAPM_AIF_IN("HDMI_RX", NULL, 0, SND_SOC_NOPM, 0, 0), + SND_SOC_DAPM_AIF_IN("SLIMBUS_0_RX", NULL, 0, SND_SOC_NOPM, 0, 0), + SND_SOC_DAPM_AIF_IN("SLIMBUS_1_RX", NULL, 0, SND_SOC_NOPM, 0, 0), + SND_SOC_DAPM_AIF_IN("SLIMBUS_2_RX", NULL, 0, SND_SOC_NOPM, 0, 0), + SND_SOC_DAPM_AIF_IN("SLIMBUS_3_RX", NULL, 0, SND_SOC_NOPM, 0, 0), + SND_SOC_DAPM_AIF_IN("SLIMBUS_4_RX", NULL, 0, SND_SOC_NOPM, 0, 0), + SND_SOC_DAPM_AIF_IN("SLIMBUS_5_RX", NULL, 0, SND_SOC_NOPM, 0, 0), + SND_SOC_DAPM_AIF_IN("SLIMBUS_6_RX", NULL, 0, SND_SOC_NOPM, 0, 0), + SND_SOC_DAPM_AIF_OUT("SLIMBUS_0_TX", NULL, 0, SND_SOC_NOPM, 0, 0), + SND_SOC_DAPM_AIF_OUT("SLIMBUS_1_TX", NULL, 0, SND_SOC_NOPM, 0, 0), + SND_SOC_DAPM_AIF_OUT("SLIMBUS_2_TX", NULL, 0, SND_SOC_NOPM, 0, 0), + SND_SOC_DAPM_AIF_OUT("SLIMBUS_3_TX", NULL, 0, SND_SOC_NOPM, 0, 0), + SND_SOC_DAPM_AIF_OUT("SLIMBUS_4_TX", NULL, 0, SND_SOC_NOPM, 0, 0), + SND_SOC_DAPM_AIF_OUT("SLIMBUS_5_TX", NULL, 0, SND_SOC_NOPM, 0, 0), + SND_SOC_DAPM_AIF_OUT("SLIMBUS_6_TX", NULL, 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("QUAT_MI2S_RX", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("QUAT_MI2S_TX", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("TERT_MI2S_RX", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("TERT_MI2S_TX", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("SEC_MI2S_RX", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 
0), SND_SOC_DAPM_AIF_OUT("SEC_MI2S_TX", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("SEC_MI2S_RX_SD1", "Secondary MI2S Playback SD1", - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("PRI_MI2S_RX", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("PRI_MI2S_TX", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_0", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_1", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_2", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_3", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_4", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_5", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_6", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_7", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_0", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_1", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_2", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_3", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_4", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_5", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_6", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_7", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_0", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_1", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_2", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_3", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_4", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_5", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_6", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_7", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_0", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_1", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_2", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_3", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_4", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_5", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_6", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_7", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_0", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_1", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_2", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_3", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_4", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), 
SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_5", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_6", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_7", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_0", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_1", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_2", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_3", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_4", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_5", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_6", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_7", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_0", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_1", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_2", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_3", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_4", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_5", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_6", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_7", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_0", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_1", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_2", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_3", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_4", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_5", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_6", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_7", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_0", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_1", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_2", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_3", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_4", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_5", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_6", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_7", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_0", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_1", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_2", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_3", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_4", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_5", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_6", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), 
SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_7", NULL, - 0, 0, 0, 0), - SND_SOC_DAPM_AIF_OUT("DISPLAY_PORT_RX", "NULL", 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), + SND_SOC_DAPM_AIF_OUT("DISPLAY_PORT_RX", "NULL", 0, SND_SOC_NOPM, 0, 0), }; static const struct snd_soc_component_driver q6afe_dai_component = { diff --git a/sound/soc/qcom/qdsp6/q6afe.c b/sound/soc/qcom/qdsp6/q6afe.c index e0945f7a58c8..0ce4eb60f984 100644 --- a/sound/soc/qcom/qdsp6/q6afe.c +++ b/sound/soc/qcom/qdsp6/q6afe.c @@ -800,6 +800,14 @@ int q6afe_get_port_id(int index) } EXPORT_SYMBOL_GPL(q6afe_get_port_id); +int q6afe_is_rx_port(int index) +{ + if (index < 0 || index >= AFE_PORT_MAX) + return -EINVAL; + + return port_maps[index].is_rx; +} +EXPORT_SYMBOL_GPL(q6afe_is_rx_port); static int afe_apr_send_pkt(struct q6afe *afe, struct apr_pkt *pkt, struct q6afe_port *port) { diff --git a/sound/soc/qcom/qdsp6/q6afe.h b/sound/soc/qcom/qdsp6/q6afe.h index c7ed5422baff..1a0f80a14afe 100644 --- a/sound/soc/qcom/qdsp6/q6afe.h +++ b/sound/soc/qcom/qdsp6/q6afe.h @@ -198,6 +198,7 @@ int q6afe_port_start(struct q6afe_port *port); int q6afe_port_stop(struct q6afe_port *port); void q6afe_port_put(struct q6afe_port *port); int q6afe_get_port_id(int index); +int q6afe_is_rx_port(int index); void q6afe_hdmi_port_prepare(struct q6afe_port *port, struct q6afe_hdmi_cfg *cfg); void q6afe_slim_port_prepare(struct q6afe_port *port, diff --git a/sound/soc/qcom/qdsp6/q6asm-dai.c b/sound/soc/qcom/qdsp6/q6asm-dai.c index 548eb4fa2da6..9f0ffdcef637 100644 --- a/sound/soc/qcom/qdsp6/q6asm-dai.c +++ b/sound/soc/qcom/qdsp6/q6asm-dai.c @@ -171,7 +171,7 @@ static const struct snd_compr_codec_caps q6asm_compr_caps = { }; static void event_handler(uint32_t opcode, uint32_t token, - uint32_t *payload, void *priv) + void *payload, void *priv) { struct q6asm_dai_rtd *prtd = priv; struct snd_pcm_substream *substream = prtd->substream; @@ -494,7 +494,7 @@ static struct snd_pcm_ops q6asm_dai_ops = { }; static void compress_event_handler(uint32_t opcode, uint32_t token, - uint32_t *payload, void *priv) + void *payload, void *priv) { struct q6asm_dai_rtd *prtd = priv; struct snd_compr_stream *substream = prtd->cstream; diff --git a/sound/soc/qcom/qdsp6/q6asm.c b/sound/soc/qcom/qdsp6/q6asm.c index e8141a33a55e..835ac98a789c 100644 --- a/sound/soc/qcom/qdsp6/q6asm.c +++ b/sound/soc/qcom/qdsp6/q6asm.c @@ -25,6 +25,7 @@ #define ASM_STREAM_CMD_FLUSH 0x00010BCE #define ASM_SESSION_CMD_PAUSE 0x00010BD3 #define ASM_DATA_CMD_EOS 0x00010BDB +#define ASM_DATA_EVENT_RENDERED_EOS 0x00010C1C #define ASM_NULL_POPP_TOPOLOGY 0x00010C68 #define ASM_STREAM_CMD_FLUSH_READBUFS 0x00010C09 #define ASM_STREAM_CMD_SET_ENCDEC_PARAM 0x00010C10 @@ -546,9 +547,6 @@ static int32_t q6asm_stream_callback(struct apr_device *adev, case ASM_SESSION_CMD_SUSPEND: client_event = ASM_CLIENT_EVENT_CMD_SUSPEND_DONE; break; - case ASM_DATA_CMD_EOS: - client_event = ASM_CLIENT_EVENT_CMD_EOS_DONE; - break; case ASM_STREAM_CMD_FLUSH: client_event = ASM_CLIENT_EVENT_CMD_FLUSH_DONE; break; @@ -651,6 +649,9 @@ static int32_t q6asm_stream_callback(struct apr_device *adev, spin_unlock_irqrestore(&ac->lock, flags); } + break; + case ASM_DATA_EVENT_RENDERED_EOS: + client_event = ASM_CLIENT_EVENT_CMD_EOS_DONE; break; } diff --git a/sound/soc/qcom/qdsp6/q6routing.c b/sound/soc/qcom/qdsp6/q6routing.c index ddcd9978cf57..745cc9dd14f3 100644 --- a/sound/soc/qcom/qdsp6/q6routing.c +++ b/sound/soc/qcom/qdsp6/q6routing.c @@ -996,6 +996,20 @@ static int msm_routing_probe(struct snd_soc_component *c) return 0; } +static unsigned int 
q6routing_reg_read(struct snd_soc_component *component, + unsigned int reg) +{ + /* default value */ + return 0; +} + +static int q6routing_reg_write(struct snd_soc_component *component, + unsigned int reg, unsigned int val) +{ + /* dummy */ + return 0; +} + static const struct snd_soc_component_driver msm_soc_routing_component = { .ops = &q6pcm_routing_ops, .probe = msm_routing_probe, @@ -1004,6 +1018,8 @@ static const struct snd_soc_component_driver msm_soc_routing_component = { .num_dapm_widgets = ARRAY_SIZE(msm_qdsp6_widgets), .dapm_routes = intercon, .num_dapm_routes = ARRAY_SIZE(intercon), + .read = q6routing_reg_read, + .write = q6routing_reg_write, }; static int q6pcm_routing_probe(struct platform_device *pdev) diff --git a/sound/soc/qcom/sdm845.c b/sound/soc/qcom/sdm845.c index 28f3cef696e6..7e6c41e63d8e 100644 --- a/sound/soc/qcom/sdm845.c +++ b/sound/soc/qcom/sdm845.c @@ -410,6 +410,7 @@ static int sdm845_snd_platform_probe(struct platform_device *pdev) card->dapm_widgets = sdm845_snd_widgets; card->num_dapm_widgets = ARRAY_SIZE(sdm845_snd_widgets); card->dev = dev; + card->owner = THIS_MODULE; dev_set_drvdata(dev, card); ret = qcom_snd_parse_of(card); if (ret) { diff --git a/sound/soc/qcom/storm.c b/sound/soc/qcom/storm.c index e6666e597265..236759179100 100644 --- a/sound/soc/qcom/storm.c +++ b/sound/soc/qcom/storm.c @@ -96,6 +96,7 @@ static int storm_platform_probe(struct platform_device *pdev) return -ENOMEM; card->dev = &pdev->dev; + card->owner = THIS_MODULE; ret = snd_soc_of_parse_card_name(card, "qcom,model"); if (ret) { diff --git a/sound/soc/rockchip/rockchip_pdm.c b/sound/soc/rockchip/rockchip_pdm.c index 7cd42fcfcf38..1707414cfa92 100644 --- a/sound/soc/rockchip/rockchip_pdm.c +++ b/sound/soc/rockchip/rockchip_pdm.c @@ -590,8 +590,10 @@ static int rockchip_pdm_resume(struct device *dev) int ret; ret = pm_runtime_get_sync(dev); - if (ret < 0) + if (ret < 0) { + pm_runtime_put(dev); return ret; + } ret = regcache_sync(pdm->regmap); diff --git a/sound/soc/sh/rcar/gen.c b/sound/soc/sh/rcar/gen.c index af19010b9d88..8bd49c8a9517 100644 --- a/sound/soc/sh/rcar/gen.c +++ b/sound/soc/sh/rcar/gen.c @@ -224,6 +224,14 @@ static int rsnd_gen2_probe(struct rsnd_priv *priv) RSND_GEN_S_REG(SSI_SYS_STATUS5, 0x884), RSND_GEN_S_REG(SSI_SYS_STATUS6, 0x888), RSND_GEN_S_REG(SSI_SYS_STATUS7, 0x88c), + RSND_GEN_S_REG(SSI_SYS_INT_ENABLE0, 0x850), + RSND_GEN_S_REG(SSI_SYS_INT_ENABLE1, 0x854), + RSND_GEN_S_REG(SSI_SYS_INT_ENABLE2, 0x858), + RSND_GEN_S_REG(SSI_SYS_INT_ENABLE3, 0x85c), + RSND_GEN_S_REG(SSI_SYS_INT_ENABLE4, 0x890), + RSND_GEN_S_REG(SSI_SYS_INT_ENABLE5, 0x894), + RSND_GEN_S_REG(SSI_SYS_INT_ENABLE6, 0x898), + RSND_GEN_S_REG(SSI_SYS_INT_ENABLE7, 0x89c), RSND_GEN_S_REG(HDMI0_SEL, 0x9e0), RSND_GEN_S_REG(HDMI1_SEL, 0x9e4), diff --git a/sound/soc/sh/rcar/rsnd.h b/sound/soc/sh/rcar/rsnd.h index ea6cbaa9743e..d47608ff5fac 100644 --- a/sound/soc/sh/rcar/rsnd.h +++ b/sound/soc/sh/rcar/rsnd.h @@ -189,6 +189,14 @@ enum rsnd_reg { SSI_SYS_STATUS5, SSI_SYS_STATUS6, SSI_SYS_STATUS7, + SSI_SYS_INT_ENABLE0, + SSI_SYS_INT_ENABLE1, + SSI_SYS_INT_ENABLE2, + SSI_SYS_INT_ENABLE3, + SSI_SYS_INT_ENABLE4, + SSI_SYS_INT_ENABLE5, + SSI_SYS_INT_ENABLE6, + SSI_SYS_INT_ENABLE7, HDMI0_SEL, HDMI1_SEL, SSI9_BUSIF0_MODE, @@ -237,6 +245,7 @@ enum rsnd_reg { #define SSI9_BUSIF_ADINR(i) (SSI9_BUSIF0_ADINR + (i)) #define SSI9_BUSIF_DALIGN(i) (SSI9_BUSIF0_DALIGN + (i)) #define SSI_SYS_STATUS(i) (SSI_SYS_STATUS0 + (i)) +#define SSI_SYS_INT_ENABLE(i) (SSI_SYS_INT_ENABLE0 + (i)) struct rsnd_priv; diff --git 
a/sound/soc/sh/rcar/ssi.c b/sound/soc/sh/rcar/ssi.c index 4a7d3413917f..47d5ddb526f2 100644 --- a/sound/soc/sh/rcar/ssi.c +++ b/sound/soc/sh/rcar/ssi.c @@ -372,6 +372,9 @@ static void rsnd_ssi_config_init(struct rsnd_mod *mod, u32 wsr = ssi->wsr; int width; int is_tdm, is_tdm_split; + int id = rsnd_mod_id(mod); + int i; + u32 sys_int_enable = 0; is_tdm = rsnd_runtime_is_tdm(io); is_tdm_split = rsnd_runtime_is_tdm_split(io); @@ -447,6 +450,38 @@ static void rsnd_ssi_config_init(struct rsnd_mod *mod, cr_mode = DIEN; /* PIO : enable Data interrupt */ } + /* enable busif buffer over/under run interrupt. */ + if (is_tdm || is_tdm_split) { + switch (id) { + case 0: + case 1: + case 2: + case 3: + case 4: + for (i = 0; i < 4; i++) { + sys_int_enable = rsnd_mod_read(mod, + SSI_SYS_INT_ENABLE(i * 2)); + sys_int_enable |= 0xf << (id * 4); + rsnd_mod_write(mod, + SSI_SYS_INT_ENABLE(i * 2), + sys_int_enable); + } + + break; + case 9: + for (i = 0; i < 4; i++) { + sys_int_enable = rsnd_mod_read(mod, + SSI_SYS_INT_ENABLE((i * 2) + 1)); + sys_int_enable |= 0xf << 4; + rsnd_mod_write(mod, + SSI_SYS_INT_ENABLE((i * 2) + 1), + sys_int_enable); + } + + break; + } + } + init_end: ssi->cr_own = cr_own; ssi->cr_mode = cr_mode; @@ -496,6 +531,13 @@ static int rsnd_ssi_quit(struct rsnd_mod *mod, { struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod); struct device *dev = rsnd_priv_to_dev(priv); + int is_tdm, is_tdm_split; + int id = rsnd_mod_id(mod); + int i; + u32 sys_int_enable = 0; + + is_tdm = rsnd_runtime_is_tdm(io); + is_tdm_split = rsnd_runtime_is_tdm_split(io); if (!rsnd_ssi_is_run_mods(mod, io)) return 0; @@ -517,6 +559,38 @@ static int rsnd_ssi_quit(struct rsnd_mod *mod, ssi->wsr = 0; } + /* disable busif buffer over/under run interrupt. */ + if (is_tdm || is_tdm_split) { + switch (id) { + case 0: + case 1: + case 2: + case 3: + case 4: + for (i = 0; i < 4; i++) { + sys_int_enable = rsnd_mod_read(mod, + SSI_SYS_INT_ENABLE(i * 2)); + sys_int_enable &= ~(0xf << (id * 4)); + rsnd_mod_write(mod, + SSI_SYS_INT_ENABLE(i * 2), + sys_int_enable); + } + + break; + case 9: + for (i = 0; i < 4; i++) { + sys_int_enable = rsnd_mod_read(mod, + SSI_SYS_INT_ENABLE((i * 2) + 1)); + sys_int_enable &= ~(0xf << 4); + rsnd_mod_write(mod, + SSI_SYS_INT_ENABLE((i * 2) + 1), + sys_int_enable); + } + + break; + } + } + return 0; } @@ -622,6 +696,11 @@ static int rsnd_ssi_irq(struct rsnd_mod *mod, int enable) { u32 val = 0; + int is_tdm, is_tdm_split; + int id = rsnd_mod_id(mod); + + is_tdm = rsnd_runtime_is_tdm(io); + is_tdm_split = rsnd_runtime_is_tdm_split(io); if (rsnd_is_gen1(priv)) return 0; @@ -635,6 +714,19 @@ static int rsnd_ssi_irq(struct rsnd_mod *mod, if (enable) val = rsnd_ssi_is_dma_mode(mod) ? 
0x0e000000 : 0x0f000000; + if (is_tdm || is_tdm_split) { + switch (id) { + case 0: + case 1: + case 2: + case 3: + case 4: + case 9: + val |= 0x0000ff00; + break; + } + } + rsnd_mod_write(mod, SSI_INT_ENABLE, val); return 0; @@ -651,6 +743,12 @@ static void __rsnd_ssi_interrupt(struct rsnd_mod *mod, u32 status; bool elapsed = false; bool stop = false; + int id = rsnd_mod_id(mod); + int i; + int is_tdm, is_tdm_split; + + is_tdm = rsnd_runtime_is_tdm(io); + is_tdm_split = rsnd_runtime_is_tdm_split(io); spin_lock(&priv->lock); @@ -672,6 +770,53 @@ static void __rsnd_ssi_interrupt(struct rsnd_mod *mod, stop = true; } + status = 0; + + if (is_tdm || is_tdm_split) { + switch (id) { + case 0: + case 1: + case 2: + case 3: + case 4: + for (i = 0; i < 4; i++) { + status = rsnd_mod_read(mod, + SSI_SYS_STATUS(i * 2)); + status &= 0xf << (id * 4); + + if (status) { + rsnd_dbg_irq_status(dev, + "%s err status : 0x%08x\n", + rsnd_mod_name(mod), status); + rsnd_mod_write(mod, + SSI_SYS_STATUS(i * 2), + 0xf << (id * 4)); + stop = true; + break; + } + } + break; + case 9: + for (i = 0; i < 4; i++) { + status = rsnd_mod_read(mod, + SSI_SYS_STATUS((i * 2) + 1)); + status &= 0xf << 4; + + if (status) { + rsnd_dbg_irq_status(dev, + "%s err status : 0x%08x\n", + rsnd_mod_name(mod), status); + rsnd_mod_write(mod, + SSI_SYS_STATUS((i * 2) + 1), + 0xf << 4); + stop = true; + break; + } + } + break; + } + } + rsnd_ssi_status_clear(mod); rsnd_ssi_interrupt_out: spin_unlock(&priv->lock); diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c index c85b6a7f6aea..a856eabf5f99 100644 --- a/sound/soc/soc-core.c +++ b/sound/soc/soc-core.c @@ -1897,9 +1897,25 @@ match: dai_link->platforms->name = component->name; /* convert non BE into BE */ - dai_link->no_pcm = 1; - dai_link->dpcm_playback = 1; - dai_link->dpcm_capture = 1; + if (!dai_link->no_pcm) { + dai_link->no_pcm = 1; + + if (dai_link->dpcm_playback) + dev_warn(card->dev, + "invalid configuration, dailink %s has flags no_pcm=0 and dpcm_playback=1\n", + dai_link->name); + if (dai_link->dpcm_capture) + dev_warn(card->dev, + "invalid configuration, dailink %s has flags no_pcm=0 and dpcm_capture=1\n", + dai_link->name); + + /* convert normal link into DPCM one */ + if (!(dai_link->dpcm_playback || + dai_link->dpcm_capture)) { + dai_link->dpcm_playback = !dai_link->capture_only; + dai_link->dpcm_capture = !dai_link->playback_only; + } + } /* override any BE fixups */ dai_link->be_hw_params_fixup = diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c index 65c91abb9462..0100f123484e 100644 --- a/sound/soc/soc-topology.c +++ b/sound/soc/soc-topology.c @@ -1284,17 +1284,29 @@ static int soc_tplg_dapm_graph_elems_load(struct soc_tplg *tplg, list_add(&routes[i]->dobj.list, &tplg->comp->dobj_list); ret = soc_tplg_add_route(tplg, routes[i]); - if (ret < 0) + if (ret < 0) { + /* + * this route was added to the list, it will + * be freed in remove_route() so increment the + * counter to skip it in the error handling + * below. + */ + i++; break; + } /* add route, but keep going if some fail */ snd_soc_dapm_add_routes(dapm, routes[i], 1); } - /* free memory allocated for all dapm routes in case of error */ - if (ret < 0) - for (i = 0; i < count ; i++) - kfree(routes[i]); + /* + * free memory allocated for all dapm routes not added to the + * list in case of error + */ + if (ret < 0) { + while (i < count) + kfree(routes[i++]); + } /* * free pointer to array of dapm routes as this is no longer needed. 
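The soc-topology hunk above changes who frees what when adding DAPM routes fails part-way through: entries already handed to the component's dobj list must be left for remove_route(), while the untouched tail still has to be freed locally. The following is a minimal standalone sketch of that ownership pattern (generic names, not the kernel API), showing why the index is stepped past the failing element before the cleanup loop runs:

  #include <stdlib.h>

  struct route { int id; };

  /*
   * Stand-in for soc_tplg_add_route(): once called, the route is owned by
   * the caller's object list and must not be freed here, even on error.
   * This stub simply fails on the third route to exercise the error path.
   */
  static int register_route(struct route *r)
  {
      return r->id == 2 ? -1 : 0;
  }

  static int load_routes(struct route **routes, int count)
  {
      int i, ret = 0;

      for (i = 0; i < count; i++) {
          ret = register_route(routes[i]);
          if (ret < 0) {
              /*
               * routes[i] was already added to the owning list, so step
               * past it; the cleanup below must only touch the tail that
               * was never handed over.
               */
              i++;
              break;
          }
      }

      if (ret < 0)
          while (i < count)
              free(routes[i++]);

      return ret;
  }

  int main(void)
  {
      struct route *r[4];

      for (int i = 0; i < 4; i++) {
          r[i] = malloc(sizeof(*r[i]));
          r[i]->id = i;
      }
      /* r[0..2] stay "owned by the list" here; only r[3] is freed on error. */
      return load_routes(r, 4) ? 1 : 0;
  }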
@@ -1382,7 +1394,6 @@ static struct snd_kcontrol_new *soc_tplg_dapm_widget_dmixer_create( if (err < 0) { dev_err(tplg->dev, "ASoC: failed to init %s\n", mc->hdr.name); - soc_tplg_free_tlv(tplg, &kc[i]); goto err_sm; } } @@ -1390,6 +1401,7 @@ static struct snd_kcontrol_new *soc_tplg_dapm_widget_dmixer_create( err_sm: for (; i >= 0; i--) { + soc_tplg_free_tlv(tplg, &kc[i]); sm = (struct soc_mixer_control *)kc[i].private_value; kfree(sm); kfree(kc[i].name); diff --git a/sound/soc/sof/core.c b/sound/soc/sof/core.c index 6a252f2ebbc4..fa344968986a 100644 --- a/sound/soc/sof/core.c +++ b/sound/soc/sof/core.c @@ -307,6 +307,7 @@ static int sof_probe_continue(struct snd_sof_dev *sdev) /* init the IPC */ sdev->ipc = snd_sof_ipc_init(sdev); if (!sdev->ipc) { + ret = -ENOMEM; dev_err(sdev->dev, "error: failed to init DSP IPC %d\n", ret); goto ipc_err; } diff --git a/sound/soc/sof/imx/Kconfig b/sound/soc/sof/imx/Kconfig index 30232ff20cd3..805e8b43088e 100644 --- a/sound/soc/sof/imx/Kconfig +++ b/sound/soc/sof/imx/Kconfig @@ -14,7 +14,7 @@ if SND_SOC_SOF_IMX_TOPLEVEL config SND_SOC_SOF_IMX8_SUPPORT tristate "SOF support for i.MX8" depends on IMX_SCU - depends on IMX_DSP + select IMX_DSP help This adds support for Sound Open Firmware for NXP i.MX8 platforms Say Y if you have such a device. diff --git a/sound/soc/sof/nocodec.c b/sound/soc/sof/nocodec.c index 2233146386cc..849c3bcdca9e 100644 --- a/sound/soc/sof/nocodec.c +++ b/sound/soc/sof/nocodec.c @@ -14,6 +14,7 @@ static struct snd_soc_card sof_nocodec_card = { .name = "nocodec", /* the sof- prefix is added by the core */ + .owner = THIS_MODULE }; static int sof_nocodec_bes_setup(struct device *dev, @@ -52,8 +53,10 @@ static int sof_nocodec_bes_setup(struct device *dev, links[i].platforms->name = dev_name(dev); links[i].codecs->dai_name = "snd-soc-dummy-dai"; links[i].codecs->name = "snd-soc-dummy"; - links[i].dpcm_playback = 1; - links[i].dpcm_capture = 1; + if (ops->drv[i].playback.channels_min) + links[i].dpcm_playback = 1; + if (ops->drv[i].capture.channels_min) + links[i].dpcm_capture = 1; } card->dai_link = links; diff --git a/sound/soc/sof/sof-pci-dev.c b/sound/soc/sof/sof-pci-dev.c index a42f43b40042..6b6663744b04 100644 --- a/sound/soc/sof/sof-pci-dev.c +++ b/sound/soc/sof/sof-pci-dev.c @@ -432,6 +432,8 @@ static const struct pci_device_id sof_pci_ids[] = { #if IS_ENABLED(CONFIG_SND_SOC_SOF_COMETLAKE_H) { PCI_DEVICE(0x8086, 0x06c8), .driver_data = (unsigned long)&cml_desc}, + { PCI_DEVICE(0x8086, 0xa3f0), /* CML-S */ + .driver_data = (unsigned long)&cml_desc}, #endif #if IS_ENABLED(CONFIG_SND_SOC_SOF_TIGERLAKE) { PCI_DEVICE(0x8086, 0xa0c8), diff --git a/sound/soc/tegra/tegra30_ahub.c b/sound/soc/tegra/tegra30_ahub.c index 635eacbd28d4..156e3b9d613c 100644 --- a/sound/soc/tegra/tegra30_ahub.c +++ b/sound/soc/tegra/tegra30_ahub.c @@ -643,8 +643,10 @@ static int tegra30_ahub_resume(struct device *dev) int ret; ret = pm_runtime_get_sync(dev); - if (ret < 0) + if (ret < 0) { + pm_runtime_put(dev); return ret; + } ret = regcache_sync(ahub->regmap_ahub); ret |= regcache_sync(ahub->regmap_apbif); pm_runtime_put(dev); diff --git a/sound/soc/tegra/tegra30_i2s.c b/sound/soc/tegra/tegra30_i2s.c index e6d548fa980b..8894b7c16a01 100644 --- a/sound/soc/tegra/tegra30_i2s.c +++ b/sound/soc/tegra/tegra30_i2s.c @@ -538,8 +538,10 @@ static int tegra30_i2s_resume(struct device *dev) int ret; ret = pm_runtime_get_sync(dev); - if (ret < 0) + if (ret < 0) { + pm_runtime_put(dev); return ret; + } ret = regcache_sync(i2s->regmap); pm_runtime_put(dev); diff --git 
a/sound/soc/tegra/tegra_wm8903.c b/sound/soc/tegra/tegra_wm8903.c index 6211dfda2195..0fa01cacfec9 100644 --- a/sound/soc/tegra/tegra_wm8903.c +++ b/sound/soc/tegra/tegra_wm8903.c @@ -159,6 +159,7 @@ static int tegra_wm8903_init(struct snd_soc_pcm_runtime *rtd) struct snd_soc_component *component = codec_dai->component; struct snd_soc_card *card = rtd->card; struct tegra_wm8903 *machine = snd_soc_card_get_drvdata(card); + int shrt = 0; if (gpio_is_valid(machine->gpio_hp_det)) { tegra_wm8903_hp_jack_gpio.gpio = machine->gpio_hp_det; @@ -171,12 +172,15 @@ static int tegra_wm8903_init(struct snd_soc_pcm_runtime *rtd) &tegra_wm8903_hp_jack_gpio); } + if (of_property_read_bool(card->dev->of_node, "nvidia,headset")) + shrt = SND_JACK_MICROPHONE; + snd_soc_card_jack_new(rtd->card, "Mic Jack", SND_JACK_MICROPHONE, &tegra_wm8903_mic_jack, tegra_wm8903_mic_jack_pins, ARRAY_SIZE(tegra_wm8903_mic_jack_pins)); wm8903_mic_detect(component, &tegra_wm8903_mic_jack, SND_JACK_MICROPHONE, - 0); + shrt); snd_soc_dapm_force_enable_pin(&card->dapm, "MICBIAS"); diff --git a/sound/soc/ti/davinci-mcasp.c b/sound/soc/ti/davinci-mcasp.c index 7aa3c32e4a49..0541071f454b 100644 --- a/sound/soc/ti/davinci-mcasp.c +++ b/sound/soc/ti/davinci-mcasp.c @@ -1875,8 +1875,10 @@ static int davinci_mcasp_get_dma_type(struct davinci_mcasp *mcasp) PTR_ERR(chan)); return PTR_ERR(chan); } - if (WARN_ON(!chan->device || !chan->device->dev)) + if (WARN_ON(!chan->device || !chan->device->dev)) { + dma_release_channel(chan); return -EINVAL; + } if (chan->device->dev->of_node) ret = of_property_read_string(chan->device->dev->of_node, diff --git a/sound/soc/ti/omap-mcbsp.c b/sound/soc/ti/omap-mcbsp.c index 26b503bbdb5f..3273b317fa3b 100644 --- a/sound/soc/ti/omap-mcbsp.c +++ b/sound/soc/ti/omap-mcbsp.c @@ -686,7 +686,7 @@ static int omap_mcbsp_init(struct platform_device *pdev) mcbsp->dma_data[1].addr = omap_mcbsp_dma_reg_params(mcbsp, SNDRV_PCM_STREAM_CAPTURE); - mcbsp->fclk = clk_get(&pdev->dev, "fck"); + mcbsp->fclk = devm_clk_get(&pdev->dev, "fck"); if (IS_ERR(mcbsp->fclk)) { ret = PTR_ERR(mcbsp->fclk); dev_err(mcbsp->dev, "unable to get fck: %d\n", ret); @@ -711,7 +711,7 @@ static int omap_mcbsp_init(struct platform_device *pdev) if (ret) { dev_err(mcbsp->dev, "Unable to create additional controls\n"); - goto err_thres; + return ret; } } @@ -724,8 +724,6 @@ static int omap_mcbsp_init(struct platform_device *pdev) err_st: if (mcbsp->pdata->buffer_size) sysfs_remove_group(&mcbsp->dev->kobj, &additional_attr_group); -err_thres: - clk_put(mcbsp->fclk); return ret; } @@ -1442,8 +1440,6 @@ static int asoc_mcbsp_remove(struct platform_device *pdev) omap_mcbsp_st_cleanup(pdev); - clk_put(mcbsp->fclk); - return 0; } diff --git a/sound/soc/ux500/mop500.c b/sound/soc/ux500/mop500.c index 2873e8e6f02b..cdae1190b930 100644 --- a/sound/soc/ux500/mop500.c +++ b/sound/soc/ux500/mop500.c @@ -63,10 +63,11 @@ static void mop500_of_node_put(void) { int i; - for (i = 0; i < 2; i++) { + for (i = 0; i < 2; i++) of_node_put(mop500_dai_links[i].cpus->of_node); - of_node_put(mop500_dai_links[i].codecs->of_node); - } + + /* Both links use the same codec, which is refcounted only once */ + of_node_put(mop500_dai_links[0].codecs->of_node); } static int mop500_of_probe(struct platform_device *pdev, @@ -81,7 +82,9 @@ static int mop500_of_probe(struct platform_device *pdev, if (!(msp_np[0] && msp_np[1] && codec_np)) { dev_err(&pdev->dev, "Phandle missing or invalid\n"); - mop500_of_node_put(); + for (i = 0; i < 2; i++) + of_node_put(msp_np[i]); + 
of_node_put(codec_np); return -EINVAL; } diff --git a/sound/usb/card.c b/sound/usb/card.c index f9a64e9526f5..230d862cfa3a 100644 --- a/sound/usb/card.c +++ b/sound/usb/card.c @@ -659,10 +659,14 @@ static int usb_audio_probe(struct usb_interface *intf, goto __error; } - /* we are allowed to call snd_card_register() many times */ - err = snd_card_register(chip->card); - if (err < 0) - goto __error; + /* we are allowed to call snd_card_register() many times, but first + * check to see if a device needs to skip it or do anything special + */ + if (!snd_usb_registration_quirk(chip, ifnum)) { + err = snd_card_register(chip->card); + if (err < 0) + goto __error; + } if (quirk && quirk->shares_media_device) { /* don't want to fail when snd_media_device_create() fails */ diff --git a/sound/usb/card.h b/sound/usb/card.h index 395403a2d33f..d8ec5caf464d 100644 --- a/sound/usb/card.h +++ b/sound/usb/card.h @@ -104,6 +104,7 @@ struct snd_usb_endpoint { int iface, altsetting; int skip_packets; /* quirks for devices to ignore the first n packets in a stream */ + bool is_implicit_feedback; /* This endpoint is used as implicit feedback */ spinlock_t lock; struct list_head list; @@ -132,6 +133,7 @@ struct snd_usb_substream { unsigned int tx_length_quirk:1; /* add length specifier to transfers */ unsigned int fmt_type; /* USB audio format type (1-3) */ unsigned int pkt_offset_adj; /* Bytes to drop from beginning of packets (for non-compliant devices) */ + unsigned int stream_offset_adj; /* Bytes to drop from beginning of stream (for non-compliant devices) */ unsigned int running: 1; /* running status */ diff --git a/sound/usb/clock.c b/sound/usb/clock.c index a48313dfa967..b118cf97607f 100644 --- a/sound/usb/clock.c +++ b/sound/usb/clock.c @@ -151,16 +151,15 @@ static int uac_clock_selector_set_val(struct snd_usb_audio *chip, int selector_i return ret; } -/* - * Assume the clock is valid if clock source supports only one single sample - * rate, the terminal is connected directly to it (there is no clock selector) - * and clock type is internal. This is to deal with some Denon DJ controllers - * that always reports that clock is invalid. - */ static bool uac_clock_source_is_valid_quirk(struct snd_usb_audio *chip, struct audioformat *fmt, int source_id) { + bool ret = false; + int count; + unsigned char data; + struct usb_device *dev = chip->dev; + if (fmt->protocol == UAC_VERSION_2) { struct uac_clock_source_descriptor *cs_desc = snd_usb_find_clock_source(chip->ctrl_intf, source_id); @@ -168,13 +167,51 @@ static bool uac_clock_source_is_valid_quirk(struct snd_usb_audio *chip, if (!cs_desc) return false; - return (fmt->nr_rates == 1 && - (fmt->clock & 0xff) == cs_desc->bClockID && - (cs_desc->bmAttributes & 0x3) != - UAC_CLOCK_SOURCE_TYPE_EXT); + /* + * Assume the clock is valid if clock source supports only one + * single sample rate, the terminal is connected directly to it + * (there is no clock selector) and clock type is internal. + * This is to deal with some Denon DJ controllers that always + * reports that clock is invalid. + */ + if (fmt->nr_rates == 1 && + (fmt->clock & 0xff) == cs_desc->bClockID && + (cs_desc->bmAttributes & 0x3) != + UAC_CLOCK_SOURCE_TYPE_EXT) + return true; } - return false; + /* + * MOTU MicroBook IIc + * Sample rate changes takes more than 2 seconds for this device. Clock + * validity request returns false during that period. 
+ */ + if (chip->usb_id == USB_ID(0x07fd, 0x0004)) { + count = 0; + + while ((!ret) && (count < 50)) { + int err; + + msleep(100); + + err = snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0), UAC2_CS_CUR, + USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN, + UAC2_CS_CONTROL_CLOCK_VALID << 8, + snd_usb_ctrl_intf(chip) | (source_id << 8), + &data, sizeof(data)); + if (err < 0) { + dev_warn(&dev->dev, + "%s(): cannot get clock validity for id %d\n", + __func__, source_id); + return false; + } + + ret = !!data; + count++; + } + } + + return ret; } static bool uac_clock_source_is_valid(struct snd_usb_audio *chip, diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c index 4a9a2f6ef5a4..87cc249a31b9 100644 --- a/sound/usb/endpoint.c +++ b/sound/usb/endpoint.c @@ -321,17 +321,17 @@ static void queue_pending_output_urbs(struct snd_usb_endpoint *ep) ep->next_packet_read_pos %= MAX_URBS; /* take URB out of FIFO */ - if (!list_empty(&ep->ready_playback_urbs)) + if (!list_empty(&ep->ready_playback_urbs)) { ctx = list_first_entry(&ep->ready_playback_urbs, struct snd_urb_ctx, ready_list); + list_del_init(&ctx->ready_list); + } } spin_unlock_irqrestore(&ep->lock, flags); if (ctx == NULL) return; - list_del_init(&ctx->ready_list); - /* copy over the length information */ for (i = 0; i < packet->packets; i++) ctx->packet_size[i] = packet->packet_size[i]; @@ -497,6 +497,8 @@ struct snd_usb_endpoint *snd_usb_add_endpoint(struct snd_usb_audio *chip, list_add_tail(&ep->list, &chip->ep_list); + ep->is_implicit_feedback = 0; + __exit_unlock: mutex_unlock(&chip->mutex); @@ -596,6 +598,178 @@ static void release_urbs(struct snd_usb_endpoint *ep, int force) ep->nurbs = 0; } +/* + * Check data endpoint for format differences + */ +static bool check_ep_params(struct snd_usb_endpoint *ep, + snd_pcm_format_t pcm_format, + unsigned int channels, + unsigned int period_bytes, + unsigned int frames_per_period, + unsigned int periods_per_buffer, + struct audioformat *fmt, + struct snd_usb_endpoint *sync_ep) +{ + unsigned int maxsize, minsize, packs_per_ms, max_packs_per_urb; + unsigned int max_packs_per_period, urbs_per_period, urb_packs; + unsigned int max_urbs; + int frame_bits = snd_pcm_format_physical_width(pcm_format) * channels; + int tx_length_quirk = (ep->chip->tx_length_quirk && + usb_pipeout(ep->pipe)); + bool ret = 1; + + if (pcm_format == SNDRV_PCM_FORMAT_DSD_U16_LE && fmt->dsd_dop) { + /* + * When operating in DSD DOP mode, the size of a sample frame + * in hardware differs from the actual physical format width + * because we need to make room for the DOP markers. + */ + frame_bits += channels << 3; + } + + ret = ret && (ep->datainterval == fmt->datainterval); + ret = ret && (ep->stride == frame_bits >> 3); + + switch (pcm_format) { + case SNDRV_PCM_FORMAT_U8: + ret = ret && (ep->silence_value == 0x80); + break; + case SNDRV_PCM_FORMAT_DSD_U8: + case SNDRV_PCM_FORMAT_DSD_U16_LE: + case SNDRV_PCM_FORMAT_DSD_U32_LE: + case SNDRV_PCM_FORMAT_DSD_U16_BE: + case SNDRV_PCM_FORMAT_DSD_U32_BE: + ret = ret && (ep->silence_value == 0x69); + break; + default: + ret = ret && (ep->silence_value == 0); + } + + /* assume max. frequency is 50% higher than nominal */ + ret = ret && (ep->freqmax == ep->freqn + (ep->freqn >> 1)); + /* Round up freqmax to nearest integer in order to calculate maximum + * packet size, which must represent a whole number of frames. + * This is accomplished by adding 0x0.ffff before converting the + * Q16.16 format into integer. 
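As a quick check of the rounding described in this comment (and of the 41 kHz figures quoted just below), the Q16.16 arithmetic can be reproduced with a standalone sketch. Two assumptions here: the 8000 packets-per-second divisor corresponds to USB high speed, and the comment's "data interval" of 1 and 2 is taken to map to shifts of 0 and 1 in the code.

#include <stdio.h>

int main(void)
{
	/* 41 kHz over 8000 packets/s = 5.125 frames per packet, in Q16.16 */
	unsigned int freqmax = (41000u << 16) / 8000;		/* 0x52000 */

	/* adding 0x0.ffff before truncating rounds any fractional frame up */
	unsigned int one = ((freqmax << 0) + 0xffff) >> 16;	/* 6  (5.125 rounded up) */
	unsigned int two = ((freqmax << 1) + 0xffff) >> 16;	/* 11 (10.25 rounded up) */

	printf("data interval 1: %u frames, data interval 2: %u frames\n", one, two);
	return 0;
}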
+ * In order to accurately calculate the maximum packet size when + * the data interval is more than 1 (i.e. ep->datainterval > 0), + * multiply by the data interval prior to rounding. For instance, + * a freqmax of 41 kHz will result in a max packet size of 6 (5.125) + * frames with a data interval of 1, but 11 (10.25) frames with a + * data interval of 2. + * (ep->freqmax << ep->datainterval overflows at 8.192 MHz for the + * maximum datainterval value of 3, at USB full speed, higher for + * USB high speed, noting that ep->freqmax is in units of + * frames per packet in Q16.16 format.) + */ + maxsize = (((ep->freqmax << ep->datainterval) + 0xffff) >> 16) * + (frame_bits >> 3); + if (tx_length_quirk) + maxsize += sizeof(__le32); /* Space for length descriptor */ + /* but wMaxPacketSize might reduce this */ + if (ep->maxpacksize && ep->maxpacksize < maxsize) { + /* whatever fits into a max. size packet */ + unsigned int data_maxsize = maxsize = ep->maxpacksize; + + if (tx_length_quirk) + /* Need to remove the length descriptor to calc freq */ + data_maxsize -= sizeof(__le32); + ret = ret && (ep->freqmax == (data_maxsize / (frame_bits >> 3)) + << (16 - ep->datainterval)); + } + + if (ep->fill_max) + ret = ret && (ep->curpacksize == ep->maxpacksize); + else + ret = ret && (ep->curpacksize == maxsize); + + if (snd_usb_get_speed(ep->chip->dev) != USB_SPEED_FULL) { + packs_per_ms = 8 >> ep->datainterval; + max_packs_per_urb = MAX_PACKS_HS; + } else { + packs_per_ms = 1; + max_packs_per_urb = MAX_PACKS; + } + if (sync_ep && !snd_usb_endpoint_implicit_feedback_sink(ep)) + max_packs_per_urb = min(max_packs_per_urb, + 1U << sync_ep->syncinterval); + max_packs_per_urb = max(1u, max_packs_per_urb >> ep->datainterval); + + /* + * Capture endpoints need to use small URBs because there's no way + * to tell in advance where the next period will end, and we don't + * want the next URB to complete much after the period ends. + * + * Playback endpoints with implicit sync much use the same parameters + * as their corresponding capture endpoint. + */ + if (usb_pipein(ep->pipe) || + snd_usb_endpoint_implicit_feedback_sink(ep)) { + + urb_packs = packs_per_ms; + /* + * Wireless devices can poll at a max rate of once per 4ms. + * For dataintervals less than 5, increase the packet count to + * allow the host controller to use bursting to fill in the + * gaps. + */ + if (snd_usb_get_speed(ep->chip->dev) == USB_SPEED_WIRELESS) { + int interval = ep->datainterval; + + while (interval < 5) { + urb_packs <<= 1; + ++interval; + } + } + /* make capture URBs <= 1 ms and smaller than a period */ + urb_packs = min(max_packs_per_urb, urb_packs); + while (urb_packs > 1 && urb_packs * maxsize >= period_bytes) + urb_packs >>= 1; + ret = ret && (ep->nurbs == MAX_URBS); + + /* + * Playback endpoints without implicit sync are adjusted so that + * a period fits as evenly as possible in the smallest number of + * URBs. The total number of URBs is adjusted to the size of the + * ALSA buffer, subject to the MAX_URBS and MAX_QUEUE limits. + */ + } else { + /* determine how small a packet can be */ + minsize = (ep->freqn >> (16 - ep->datainterval)) * + (frame_bits >> 3); + /* with sync from device, assume it can be 12% lower */ + if (sync_ep) + minsize -= minsize >> 3; + minsize = max(minsize, 1u); + + /* how many packets will contain an entire ALSA period? */ + max_packs_per_period = DIV_ROUND_UP(period_bytes, minsize); + + /* how many URBs will contain a period? 
*/ + urbs_per_period = DIV_ROUND_UP(max_packs_per_period, + max_packs_per_urb); + /* how many packets are needed in each URB? */ + urb_packs = DIV_ROUND_UP(max_packs_per_period, urbs_per_period); + + /* limit the number of frames in a single URB */ + ret = ret && (ep->max_urb_frames == + DIV_ROUND_UP(frames_per_period, urbs_per_period)); + + /* try to use enough URBs to contain an entire ALSA buffer */ + max_urbs = min((unsigned) MAX_URBS, + MAX_QUEUE * packs_per_ms / urb_packs); + ret = ret && (ep->nurbs == min(max_urbs, + urbs_per_period * periods_per_buffer)); + } + + ret = ret && (ep->datainterval == fmt->datainterval); + ret = ret && (ep->maxpacksize == fmt->maxpacksize); + ret = ret && + (ep->fill_max == !!(fmt->attributes & UAC_EP_CS_ATTR_FILL_MAX)); + + return ret; +} + /* * configure a data endpoint */ @@ -861,10 +1035,23 @@ int snd_usb_endpoint_set_params(struct snd_usb_endpoint *ep, int err; if (ep->use_count != 0) { - usb_audio_warn(ep->chip, - "Unable to change format on ep #%x: already in use\n", - ep->ep_num); - return -EBUSY; + bool check = ep->is_implicit_feedback && + check_ep_params(ep, pcm_format, + channels, period_bytes, + period_frames, buffer_periods, + fmt, sync_ep); + + if (!check) { + usb_audio_warn(ep->chip, + "Unable to change format on ep #%x: already in use\n", + ep->ep_num); + return -EBUSY; + } + + usb_audio_dbg(ep->chip, + "Ep #%x already in use as implicit feedback but format not changed\n", + ep->ep_num); + return 0; } /* release old buffers, if any */ diff --git a/sound/usb/line6/capture.c b/sound/usb/line6/capture.c index 82abef3fe90d..4b6e99e055dc 100644 --- a/sound/usb/line6/capture.c +++ b/sound/usb/line6/capture.c @@ -287,6 +287,8 @@ int line6_create_audio_in_urbs(struct snd_line6_pcm *line6pcm) urb->interval = LINE6_ISO_INTERVAL; urb->error_count = 0; urb->complete = audio_in_callback; + if (usb_urb_ep_type_check(urb)) + return -EINVAL; } return 0; diff --git a/sound/usb/line6/driver.c b/sound/usb/line6/driver.c index 4f096685ed65..0caf53f5764c 100644 --- a/sound/usb/line6/driver.c +++ b/sound/usb/line6/driver.c @@ -820,7 +820,7 @@ void line6_disconnect(struct usb_interface *interface) if (WARN_ON(usbdev != line6->usbdev)) return; - cancel_delayed_work(&line6->startup_work); + cancel_delayed_work_sync(&line6->startup_work); if (line6->urb_listen != NULL) line6_stop_listen(line6); diff --git a/sound/usb/line6/playback.c b/sound/usb/line6/playback.c index 2e8ead3f9bc2..797ced329b79 100644 --- a/sound/usb/line6/playback.c +++ b/sound/usb/line6/playback.c @@ -432,6 +432,8 @@ int line6_create_audio_out_urbs(struct snd_line6_pcm *line6pcm) urb->interval = LINE6_ISO_INTERVAL; urb->error_count = 0; urb->complete = audio_out_callback; + if (usb_urb_ep_type_check(urb)) + return -EINVAL; } return 0; diff --git a/sound/usb/midi.c b/sound/usb/midi.c index b737f0ec77d0..0cb4142b05f6 100644 --- a/sound/usb/midi.c +++ b/sound/usb/midi.c @@ -1499,6 +1499,8 @@ void snd_usbmidi_disconnect(struct list_head *p) spin_unlock_irq(&umidi->disc_lock); up_write(&umidi->disc_rwsem); + del_timer_sync(&umidi->error_timer); + for (i = 0; i < MIDI_MAX_ENDPOINTS; ++i) { struct snd_usb_midi_endpoint *ep = &umidi->endpoints[i]; if (ep->out) @@ -1525,7 +1527,6 @@ void snd_usbmidi_disconnect(struct list_head *p) ep->in = NULL; } } - del_timer_sync(&umidi->error_timer); } EXPORT_SYMBOL(snd_usbmidi_disconnect); @@ -2282,16 +2283,22 @@ void snd_usbmidi_input_stop(struct list_head *p) } EXPORT_SYMBOL(snd_usbmidi_input_stop); -static void snd_usbmidi_input_start_ep(struct 
snd_usb_midi_in_endpoint *ep) +static void snd_usbmidi_input_start_ep(struct snd_usb_midi *umidi, + struct snd_usb_midi_in_endpoint *ep) { unsigned int i; + unsigned long flags; if (!ep) return; for (i = 0; i < INPUT_URBS; ++i) { struct urb *urb = ep->urbs[i]; - urb->dev = ep->umidi->dev; - snd_usbmidi_submit_urb(urb, GFP_KERNEL); + spin_lock_irqsave(&umidi->disc_lock, flags); + if (!atomic_read(&urb->use_count)) { + urb->dev = ep->umidi->dev; + snd_usbmidi_submit_urb(urb, GFP_ATOMIC); + } + spin_unlock_irqrestore(&umidi->disc_lock, flags); } } @@ -2307,7 +2314,7 @@ void snd_usbmidi_input_start(struct list_head *p) if (umidi->input_running || !umidi->opened[1]) return; for (i = 0; i < MIDI_MAX_ENDPOINTS; ++i) - snd_usbmidi_input_start_ep(umidi->endpoints[i].in); + snd_usbmidi_input_start_ep(umidi, umidi->endpoints[i].in); umidi->input_running = 1; } EXPORT_SYMBOL(snd_usbmidi_input_start); diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c index f55afe3a98e3..9079c380228f 100644 --- a/sound/usb/mixer.c +++ b/sound/usb/mixer.c @@ -576,8 +576,9 @@ static int check_matrix_bitmap(unsigned char *bmap, * if failed, give up and free the control instance. */ -int snd_usb_mixer_add_control(struct usb_mixer_elem_list *list, - struct snd_kcontrol *kctl) +int snd_usb_mixer_add_list(struct usb_mixer_elem_list *list, + struct snd_kcontrol *kctl, + bool is_std_info) { struct usb_mixer_interface *mixer = list->mixer; int err; @@ -591,6 +592,7 @@ int snd_usb_mixer_add_control(struct usb_mixer_elem_list *list, return err; } list->kctl = kctl; + list->is_std_info = is_std_info; list->next_id_elem = mixer->id_elems[list->id]; mixer->id_elems[list->id] = list; return 0; @@ -3213,8 +3215,11 @@ void snd_usb_mixer_notify_id(struct usb_mixer_interface *mixer, int unitid) unitid = delegate_notify(mixer, unitid, NULL, NULL); for_each_mixer_elem(list, mixer, unitid) { - struct usb_mixer_elem_info *info = - mixer_elem_list_to_info(list); + struct usb_mixer_elem_info *info; + + if (!list->is_std_info) + continue; + info = mixer_elem_list_to_info(list); /* invalidate cache, so the value is read from the device */ info->cached = 0; snd_ctl_notify(mixer->chip->card, SNDRV_CTL_EVENT_MASK_VALUE, @@ -3294,6 +3299,8 @@ static void snd_usb_mixer_interrupt_v2(struct usb_mixer_interface *mixer, if (!list->kctl) continue; + if (!list->is_std_info) + continue; info = mixer_elem_list_to_info(list); if (count > 1 && info->control != control) diff --git a/sound/usb/mixer.h b/sound/usb/mixer.h index 8e0fb7fdf1a0..01b5e5cc2221 100644 --- a/sound/usb/mixer.h +++ b/sound/usb/mixer.h @@ -66,6 +66,7 @@ struct usb_mixer_elem_list { struct usb_mixer_elem_list *next_id_elem; /* list of controls with same id */ struct snd_kcontrol *kctl; unsigned int id; + bool is_std_info; usb_mixer_elem_dump_func_t dump; usb_mixer_elem_resume_func_t resume; }; @@ -103,8 +104,12 @@ void snd_usb_mixer_notify_id(struct usb_mixer_interface *mixer, int unitid); int snd_usb_mixer_set_ctl_value(struct usb_mixer_elem_info *cval, int request, int validx, int value_set); -int snd_usb_mixer_add_control(struct usb_mixer_elem_list *list, - struct snd_kcontrol *kctl); +int snd_usb_mixer_add_list(struct usb_mixer_elem_list *list, + struct snd_kcontrol *kctl, + bool is_std_info); + +#define snd_usb_mixer_add_control(list, kctl) \ + snd_usb_mixer_add_list(list, kctl, true) void snd_usb_mixer_elem_init_std(struct usb_mixer_elem_list *list, struct usb_mixer_interface *mixer, diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c index dc181066c799..49f0dc0e3e4d 100644 
--- a/sound/usb/mixer_quirks.c +++ b/sound/usb/mixer_quirks.c @@ -157,7 +157,8 @@ static int add_single_ctl_with_resume(struct usb_mixer_interface *mixer, return -ENOMEM; } kctl->private_free = snd_usb_mixer_elem_free; - return snd_usb_mixer_add_control(list, kctl); + /* don't use snd_usb_mixer_add_control() here, this is a special list element */ + return snd_usb_mixer_add_list(list, kctl, false); } /* @@ -183,6 +184,7 @@ static const struct rc_config { { USB_ID(0x041e, 0x3042), 0, 1, 1, 1, 1, 0x000d }, /* Usb X-Fi S51 */ { USB_ID(0x041e, 0x30df), 0, 1, 1, 1, 1, 0x000d }, /* Usb X-Fi S51 Pro */ { USB_ID(0x041e, 0x3237), 0, 1, 1, 1, 1, 0x000d }, /* Usb X-Fi S51 Pro */ + { USB_ID(0x041e, 0x3263), 0, 1, 1, 1, 1, 0x000d }, /* Usb X-Fi S51 Pro */ { USB_ID(0x041e, 0x3048), 2, 2, 6, 6, 2, 0x6e91 }, /* Toshiba SB0500 */ }; diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c index ad8f38380aa3..878f1201aad6 100644 --- a/sound/usb/pcm.c +++ b/sound/usb/pcm.c @@ -344,11 +344,20 @@ static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs, ep = 0x81; ifnum = 1; goto add_sync_ep_from_ifnum; - case USB_ID(0x07fd, 0x0004): /* MOTU MicroBook II */ + case USB_ID(0x07fd, 0x0004): /* MOTU MicroBook II/IIc */ + /* MicroBook IIc */ + if (altsd->bInterfaceClass == USB_CLASS_AUDIO) + return 0; + + /* MicroBook II */ ep = 0x84; ifnum = 0; goto add_sync_ep_from_ifnum; case USB_ID(0x07fd, 0x0008): /* MOTU M Series */ + case USB_ID(0x31e9, 0x0001): /* Solid State Logic SSL2 */ + case USB_ID(0x31e9, 0x0002): /* Solid State Logic SSL2+ */ + case USB_ID(0x0499, 0x172f): /* Steinberg UR22C */ + case USB_ID(0x0d9a, 0x00df): /* RTX6001 */ ep = 0x81; ifnum = 2; goto add_sync_ep_from_ifnum; @@ -386,6 +395,8 @@ add_sync_ep: if (!subs->sync_endpoint) return -EINVAL; + subs->sync_endpoint->is_implicit_feedback = 1; + subs->data_endpoint->sync_master = subs->sync_endpoint; return 1; @@ -484,12 +495,15 @@ static int set_sync_endpoint(struct snd_usb_substream *subs, implicit_fb ? 
SND_USB_ENDPOINT_TYPE_DATA : SND_USB_ENDPOINT_TYPE_SYNC); + if (!subs->sync_endpoint) { if (is_playback && attr == USB_ENDPOINT_SYNC_NONE) return 0; return -EINVAL; } + subs->sync_endpoint->is_implicit_feedback = implicit_fb; + subs->data_endpoint->sync_master = subs->sync_endpoint; return 0; @@ -1404,6 +1418,12 @@ static void retire_capture_urb(struct snd_usb_substream *subs, // continue; } bytes = urb->iso_frame_desc[i].actual_length; + if (subs->stream_offset_adj > 0) { + unsigned int adj = min(subs->stream_offset_adj, bytes); + cp += adj; + bytes -= adj; + subs->stream_offset_adj -= adj; + } frames = bytes / stride; if (!subs->txfr_quirk) bytes = frames * stride; @@ -1771,6 +1791,7 @@ static int snd_usb_substream_capture_trigger(struct snd_pcm_substream *substream return 0; case SNDRV_PCM_TRIGGER_STOP: stop_endpoints(subs, false); + subs->data_endpoint->retire_data_urb = NULL; subs->running = 0; return 0; case SNDRV_PCM_TRIGGER_PAUSE_PUSH: diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h index 042a5e8eb79d..8c3b3a291ddb 100644 --- a/sound/usb/quirks-table.h +++ b/sound/usb/quirks-table.h @@ -2695,6 +2695,10 @@ YAMAHA_DEVICE(0x7010, "UB99"), .ifnum = QUIRK_ANY_INTERFACE, .type = QUIRK_COMPOSITE, .data = (const struct snd_usb_audio_quirk[]) { + { + .ifnum = 0, + .type = QUIRK_AUDIO_STANDARD_MIXER, + }, { .ifnum = 0, .type = QUIRK_AUDIO_FIXED_ENDPOINT, @@ -2707,6 +2711,32 @@ YAMAHA_DEVICE(0x7010, "UB99"), .attributes = UAC_EP_CS_ATTR_SAMPLE_RATE, .endpoint = 0x01, .ep_attr = USB_ENDPOINT_XFER_ISOC, + .datainterval = 1, + .maxpacksize = 0x024c, + .rates = SNDRV_PCM_RATE_44100 | + SNDRV_PCM_RATE_48000, + .rate_min = 44100, + .rate_max = 48000, + .nr_rates = 2, + .rate_table = (unsigned int[]) { + 44100, 48000 + } + } + }, + { + .ifnum = 0, + .type = QUIRK_AUDIO_FIXED_ENDPOINT, + .data = &(const struct audioformat) { + .formats = SNDRV_PCM_FMTBIT_S24_3LE, + .channels = 2, + .iface = 0, + .altsetting = 1, + .altset_idx = 1, + .attributes = 0, + .endpoint = 0x82, + .ep_attr = USB_ENDPOINT_XFER_ISOC, + .datainterval = 1, + .maxpacksize = 0x0126, .rates = SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000, .rate_min = 44100, @@ -2776,90 +2806,6 @@ YAMAHA_DEVICE(0x7010, "UB99"), .type = QUIRK_MIDI_NOVATION } }, -{ - /* - * Focusrite Scarlett Solo 2nd generation - * Reports that playback should use Synch: Synchronous - * while still providing a feedback endpoint. Synchronous causes - * snapping on some sample rates. - * Force it to use Synch: Asynchronous. 
- */ - USB_DEVICE(0x1235, 0x8205), - .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) { - .ifnum = QUIRK_ANY_INTERFACE, - .type = QUIRK_COMPOSITE, - .data = (const struct snd_usb_audio_quirk[]) { - { - .ifnum = 1, - .type = QUIRK_AUDIO_FIXED_ENDPOINT, - .data = & (const struct audioformat) { - .formats = SNDRV_PCM_FMTBIT_S32_LE, - .channels = 2, - .iface = 1, - .altsetting = 1, - .altset_idx = 1, - .attributes = 0, - .endpoint = 0x01, - .ep_attr = USB_ENDPOINT_XFER_ISOC | - USB_ENDPOINT_SYNC_ASYNC, - .protocol = UAC_VERSION_2, - .rates = SNDRV_PCM_RATE_44100 | - SNDRV_PCM_RATE_48000 | - SNDRV_PCM_RATE_88200 | - SNDRV_PCM_RATE_96000 | - SNDRV_PCM_RATE_176400 | - SNDRV_PCM_RATE_192000, - .rate_min = 44100, - .rate_max = 192000, - .nr_rates = 6, - .rate_table = (unsigned int[]) { - 44100, 48000, 88200, - 96000, 176400, 192000 - }, - .clock = 41 - } - }, - { - .ifnum = 2, - .type = QUIRK_AUDIO_FIXED_ENDPOINT, - .data = & (const struct audioformat) { - .formats = SNDRV_PCM_FMTBIT_S32_LE, - .channels = 2, - .iface = 2, - .altsetting = 1, - .altset_idx = 1, - .attributes = 0, - .endpoint = 0x82, - .ep_attr = USB_ENDPOINT_XFER_ISOC | - USB_ENDPOINT_SYNC_ASYNC | - USB_ENDPOINT_USAGE_IMPLICIT_FB, - .protocol = UAC_VERSION_2, - .rates = SNDRV_PCM_RATE_44100 | - SNDRV_PCM_RATE_48000 | - SNDRV_PCM_RATE_88200 | - SNDRV_PCM_RATE_96000 | - SNDRV_PCM_RATE_176400 | - SNDRV_PCM_RATE_192000, - .rate_min = 44100, - .rate_max = 192000, - .nr_rates = 6, - .rate_table = (unsigned int[]) { - 44100, 48000, 88200, - 96000, 176400, 192000 - }, - .clock = 41 - } - }, - { - .ifnum = 3, - .type = QUIRK_IGNORE_INTERFACE - }, - { - .ifnum = -1 - } - } - } -}, /* Access Music devices */ { @@ -3492,7 +3438,7 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"), }, /* MOTU Microbook II */ { - USB_DEVICE(0x07fd, 0x0004), + USB_DEVICE_VENDOR_SPEC(0x07fd, 0x0004), .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) { .vendor_name = "MOTU", .product_name = "MicroBookII", @@ -3654,6 +3600,62 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"), } } }, +{ + /* + * PIONEER DJ DDJ-RB + * PCM is 4 channels out, 2 dummy channels in @ 44.1 fixed + * The feedback for the output is the dummy input. 
+ */ + USB_DEVICE_VENDOR_SPEC(0x2b73, 0x000e), + .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) { + .ifnum = QUIRK_ANY_INTERFACE, + .type = QUIRK_COMPOSITE, + .data = (const struct snd_usb_audio_quirk[]) { + { + .ifnum = 0, + .type = QUIRK_AUDIO_FIXED_ENDPOINT, + .data = &(const struct audioformat) { + .formats = SNDRV_PCM_FMTBIT_S24_3LE, + .channels = 4, + .iface = 0, + .altsetting = 1, + .altset_idx = 1, + .endpoint = 0x01, + .ep_attr = USB_ENDPOINT_XFER_ISOC| + USB_ENDPOINT_SYNC_ASYNC, + .rates = SNDRV_PCM_RATE_44100, + .rate_min = 44100, + .rate_max = 44100, + .nr_rates = 1, + .rate_table = (unsigned int[]) { 44100 } + } + }, + { + .ifnum = 0, + .type = QUIRK_AUDIO_FIXED_ENDPOINT, + .data = &(const struct audioformat) { + .formats = SNDRV_PCM_FMTBIT_S24_3LE, + .channels = 2, + .iface = 0, + .altsetting = 1, + .altset_idx = 1, + .endpoint = 0x82, + .ep_attr = USB_ENDPOINT_XFER_ISOC| + USB_ENDPOINT_SYNC_ASYNC| + USB_ENDPOINT_USAGE_IMPLICIT_FB, + .rates = SNDRV_PCM_RATE_44100, + .rate_min = 44100, + .rate_max = 44100, + .nr_rates = 1, + .rate_table = (unsigned int[]) { 44100 } + } + }, + { + .ifnum = -1 + } + } + } +}, #define ALC1220_VB_DESKTOP(vend, prod) { \ USB_DEVICE(vend, prod), \ @@ -3695,4 +3697,62 @@ ALC1220_VB_DESKTOP(0x26ce, 0x0a01), /* Asrock TRX40 Creator */ } }, +/* + * MacroSilicon MS2109 based HDMI capture cards + * + * These claim 96kHz 1ch in the descriptors, but are actually 48kHz 2ch. + * They also need QUIRK_AUDIO_ALIGN_TRANSFER, which makes one wonder if + * they pretend to be 96kHz mono as a workaround for stereo being broken + * by that... + * + * They also have an issue with initial stream alignment that causes the + * channels to be swapped and out of phase, which is dealt with in quirks.c. + */ +{ + .match_flags = USB_DEVICE_ID_MATCH_DEVICE | + USB_DEVICE_ID_MATCH_INT_CLASS | + USB_DEVICE_ID_MATCH_INT_SUBCLASS, + .idVendor = 0x534d, + .idProduct = 0x2109, + .bInterfaceClass = USB_CLASS_AUDIO, + .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL, + .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) { + .vendor_name = "MacroSilicon", + .product_name = "MS2109", + .ifnum = QUIRK_ANY_INTERFACE, + .type = QUIRK_COMPOSITE, + .data = &(const struct snd_usb_audio_quirk[]) { + { + .ifnum = 2, + .type = QUIRK_AUDIO_ALIGN_TRANSFER, + }, + { + .ifnum = 2, + .type = QUIRK_AUDIO_STANDARD_MIXER, + }, + { + .ifnum = 3, + .type = QUIRK_AUDIO_FIXED_ENDPOINT, + .data = &(const struct audioformat) { + .formats = SNDRV_PCM_FMTBIT_S16_LE, + .channels = 2, + .iface = 3, + .altsetting = 1, + .altset_idx = 1, + .attributes = 0, + .endpoint = 0x82, + .ep_attr = USB_ENDPOINT_XFER_ISOC | + USB_ENDPOINT_SYNC_ASYNC, + .rates = SNDRV_PCM_RATE_CONTINUOUS, + .rate_min = 48000, + .rate_max = 48000, + } + }, + { + .ifnum = -1 + } + } + } +}, + #undef USB_DEVICE_VENDOR_SPEC diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c index 092720ce2c55..a756f50d9f07 100644 --- a/sound/usb/quirks.c +++ b/sound/usb/quirks.c @@ -1316,7 +1316,15 @@ int snd_usb_apply_boot_quirk(struct usb_device *dev, case USB_ID(0x2466, 0x8010): /* Fractal Audio Axe-Fx 3 */ return snd_usb_axefx3_boot_quirk(dev); case USB_ID(0x07fd, 0x0004): /* MOTU MicroBook II */ - return snd_usb_motu_microbookii_boot_quirk(dev); + /* + * For some reason interface 3 with vendor-spec class is + * detected on MicroBook IIc. 
+ */ + if (get_iface_desc(intf->altsetting)->bInterfaceClass == + USB_CLASS_VENDOR_SPEC && + get_iface_desc(intf->altsetting)->bInterfaceNumber < 3) + return snd_usb_motu_microbookii_boot_quirk(dev); + break; } return 0; @@ -1424,6 +1432,9 @@ void snd_usb_set_format_quirk(struct snd_usb_substream *subs, case USB_ID(0x041e, 0x3f19): /* E-Mu 0204 USB */ set_format_emu_quirk(subs, fmt); break; + case USB_ID(0x534d, 0x2109): /* MacroSilicon MS2109 */ + subs->stream_offset_adj = 2; + break; } } @@ -1461,6 +1472,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip) static bool is_itf_usb_dsd_dac(unsigned int id) { switch (id) { + case USB_ID(0x154e, 0x1002): /* Denon DCD-1500RE */ case USB_ID(0x154e, 0x1003): /* Denon DA-300USB */ case USB_ID(0x154e, 0x3005): /* Marantz HD-DAC1 */ case USB_ID(0x154e, 0x3006): /* Marantz SA-14S1 */ @@ -1602,6 +1614,14 @@ void snd_usb_ctl_msg_quirk(struct usb_device *dev, unsigned int pipe, chip->usb_id == USB_ID(0x0951, 0x16ad)) && (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS) usleep_range(1000, 2000); + + /* + * Samsung USBC Headset (AKG) need a tiny delay after each + * class compliant request. (Model number: AAM625R or AAM627R) + */ + if (chip->usb_id == USB_ID(0x04e8, 0xa051) && + (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS) + usleep_range(5000, 6000); } /* @@ -1755,5 +1775,62 @@ void snd_usb_audioformat_attributes_quirk(struct snd_usb_audio *chip, else fp->ep_attr |= USB_ENDPOINT_SYNC_SYNC; break; + case USB_ID(0x07fd, 0x0004): /* MOTU MicroBook IIc */ + /* + * MaxPacketsOnly attribute is erroneously set in endpoint + * descriptors. As a result this card produces noise with + * all sample rates other than 96 KHz. + */ + fp->attributes &= ~UAC_EP_CS_ATTR_FILL_MAX; + break; + case USB_ID(0x1235, 0x8202): /* Focusrite Scarlett 2i2 2nd gen */ + case USB_ID(0x1235, 0x8205): /* Focusrite Scarlett Solo 2nd gen */ + /* + * Reports that playback should use Synch: Synchronous + * while still providing a feedback endpoint. + * Synchronous causes snapping on some sample rates. + * Force it to use Synch: Asynchronous. + */ + if (stream == SNDRV_PCM_STREAM_PLAYBACK) { + fp->ep_attr &= ~USB_ENDPOINT_SYNCTYPE; + fp->ep_attr |= USB_ENDPOINT_SYNC_ASYNC; + } + break; } } + +/* + * registration quirk: + * the registration is skipped if a device matches with the given ID, + * unless the interface reaches to the defined one. This is for delaying + * the registration until the last known interface, so that the card and + * devices appear at the same time. 
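To make the effect of the registration quirk concrete, here is a small sketch of the expected results for the first entry in the table defined below (Kingston HyperX AMP, 0x0951:0x16d8, registered on interface 2); the chip variable is only a placeholder for a snd_usb_audio instance carrying that USB ID, not code from the patch:

/* sketch: 'chip' is assumed to have usb_id == USB_ID(0x0951, 0x16d8) */
snd_usb_registration_quirk(chip, 0);	/* true  -> usb_audio_probe() skips snd_card_register() */
snd_usb_registration_quirk(chip, 1);	/* true  -> still waiting for the last known interface */
snd_usb_registration_quirk(chip, 2);	/* false -> the card is registered, all devices appear together */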
+ */ + +struct registration_quirk { + unsigned int usb_id; /* composed via USB_ID() */ + unsigned int interface; /* the interface to trigger register */ +}; + +#define REG_QUIRK_ENTRY(vendor, product, iface) \ + { .usb_id = USB_ID(vendor, product), .interface = (iface) } + +static const struct registration_quirk registration_quirks[] = { + REG_QUIRK_ENTRY(0x0951, 0x16d8, 2), /* Kingston HyperX AMP */ + REG_QUIRK_ENTRY(0x0951, 0x16ed, 2), /* Kingston HyperX Cloud Alpha S */ + REG_QUIRK_ENTRY(0x0951, 0x16ea, 2), /* Kingston HyperX Cloud Flight S */ + { 0 } /* terminator */ +}; + +/* return true if skipping registration */ +bool snd_usb_registration_quirk(struct snd_usb_audio *chip, int iface) +{ + const struct registration_quirk *q; + + for (q = registration_quirks; q->usb_id; q++) + if (chip->usb_id == q->usb_id) + return iface != q->interface; + + /* Register as normal */ + return false; +} diff --git a/sound/usb/quirks.h b/sound/usb/quirks.h index df0355843a4c..c76cf24a640a 100644 --- a/sound/usb/quirks.h +++ b/sound/usb/quirks.h @@ -51,4 +51,6 @@ void snd_usb_audioformat_attributes_quirk(struct snd_usb_audio *chip, struct audioformat *fp, int stream); +bool snd_usb_registration_quirk(struct snd_usb_audio *chip, int iface); + #endif /* __USBAUDIO_QUIRKS_H */ diff --git a/sound/usb/stream.c b/sound/usb/stream.c index 11785f9652ad..d01edd5da6cf 100644 --- a/sound/usb/stream.c +++ b/sound/usb/stream.c @@ -94,6 +94,7 @@ static void snd_usb_init_substream(struct snd_usb_stream *as, subs->tx_length_quirk = as->chip->tx_length_quirk; subs->speed = snd_usb_get_speed(subs->dev); subs->pkt_offset_adj = 0; + subs->stream_offset_adj = 0; snd_usb_set_pcm_ops(as->pcm, stream); diff --git a/tools/bpf/bpftool/btf.c b/tools/bpf/bpftool/btf.c index 9a9376d1d3df..66765f970bc5 100644 --- a/tools/bpf/bpftool/btf.c +++ b/tools/bpf/bpftool/btf.c @@ -510,7 +510,7 @@ static int do_dump(int argc, char **argv) goto done; } if (!btf) { - err = ENOENT; + err = -ENOENT; p_err("can't find btf with ID (%u)", btf_id); goto done; } diff --git a/tools/build/Build.include b/tools/build/Build.include index 9ec01f4454f9..585486e40995 100644 --- a/tools/build/Build.include +++ b/tools/build/Build.include @@ -74,7 +74,8 @@ dep-cmd = $(if $(wildcard $(fixdep)), # dependencies in the cmd file if_changed_dep = $(if $(strip $(any-prereq) $(arg-check)), \ @set -e; \ - $(echo-cmd) $(cmd_$(1)) && $(dep-cmd)) + $(echo-cmd) $(cmd_$(1)); \ + $(dep-cmd)) # if_changed - execute command if any prerequisite is newer than # target, or command line has changed diff --git a/tools/build/Makefile.feature b/tools/build/Makefile.feature index 8a19753cc26a..8c6e1ea67f21 100644 --- a/tools/build/Makefile.feature +++ b/tools/build/Makefile.feature @@ -8,7 +8,7 @@ endif feature_check = $(eval $(feature_check_code)) define feature_check_code - feature-$(1) := $(shell $(MAKE) OUTPUT=$(OUTPUT_FEATURES) CFLAGS="$(EXTRA_CFLAGS) $(FEATURE_CHECK_CFLAGS-$(1))" CXXFLAGS="$(EXTRA_CXXFLAGS) $(FEATURE_CHECK_CXXFLAGS-$(1))" LDFLAGS="$(LDFLAGS) $(FEATURE_CHECK_LDFLAGS-$(1))" -C $(feature_dir) $(OUTPUT_FEATURES)test-$1.bin >/dev/null 2>/dev/null && echo 1 || echo 0) + feature-$(1) := $(shell $(MAKE) OUTPUT=$(OUTPUT_FEATURES) CC="$(CC)" CXX="$(CXX)" CFLAGS="$(EXTRA_CFLAGS) $(FEATURE_CHECK_CFLAGS-$(1))" CXXFLAGS="$(EXTRA_CXXFLAGS) $(FEATURE_CHECK_CXXFLAGS-$(1))" LDFLAGS="$(LDFLAGS) $(FEATURE_CHECK_LDFLAGS-$(1))" -C $(feature_dir) $(OUTPUT_FEATURES)test-$1.bin >/dev/null 2>/dev/null && echo 1 || echo 0) endef feature_set = $(eval $(feature_set_code)) diff --git 
a/tools/build/feature/Makefile b/tools/build/feature/Makefile index 8499385365c0..054e09ab4a9e 100644 --- a/tools/build/feature/Makefile +++ b/tools/build/feature/Makefile @@ -70,8 +70,6 @@ FILES= \ FILES := $(addprefix $(OUTPUT),$(FILES)) -CC ?= $(CROSS_COMPILE)gcc -CXX ?= $(CROSS_COMPILE)g++ PKG_CONFIG ?= $(CROSS_COMPILE)pkg-config LLVM_CONFIG ?= llvm-config diff --git a/tools/cgroup/iocost_monitor.py b/tools/cgroup/iocost_monitor.py index 7e344a78a627..b8c082c9fd7d 100644 --- a/tools/cgroup/iocost_monitor.py +++ b/tools/cgroup/iocost_monitor.py @@ -112,14 +112,14 @@ class IocStat: def dict(self, now): return { 'device' : devname, - 'timestamp' : str(now), - 'enabled' : str(int(self.enabled)), - 'running' : str(int(self.running)), - 'period_ms' : str(self.period_ms), - 'period_at' : str(self.period_at), - 'period_vtime_at' : str(self.vperiod_at), - 'busy_level' : str(self.busy_level), - 'vrate_pct' : str(self.vrate_pct), } + 'timestamp' : now, + 'enabled' : self.enabled, + 'running' : self.running, + 'period_ms' : self.period_ms, + 'period_at' : self.period_at, + 'period_vtime_at' : self.vperiod_at, + 'busy_level' : self.busy_level, + 'vrate_pct' : self.vrate_pct, } def table_preamble_str(self): state = ('RUN' if self.running else 'IDLE') if self.enabled else 'OFF' @@ -179,19 +179,19 @@ class IocgStat: def dict(self, now, path): out = { 'cgroup' : path, - 'timestamp' : str(now), - 'is_active' : str(int(self.is_active)), - 'weight' : str(self.weight), - 'weight_active' : str(self.active), - 'weight_inuse' : str(self.inuse), - 'hweight_active_pct' : str(self.hwa_pct), - 'hweight_inuse_pct' : str(self.hwi_pct), - 'inflight_pct' : str(self.inflight_pct), - 'debt_ms' : str(self.debt_ms), - 'use_delay' : str(self.use_delay), - 'delay_ms' : str(self.delay_ms), - 'usage_pct' : str(self.usage), - 'address' : str(hex(self.address)) } + 'timestamp' : now, + 'is_active' : self.is_active, + 'weight' : self.weight, + 'weight_active' : self.active, + 'weight_inuse' : self.inuse, + 'hweight_active_pct' : self.hwa_pct, + 'hweight_inuse_pct' : self.hwi_pct, + 'inflight_pct' : self.inflight_pct, + 'debt_ms' : self.debt_ms, + 'use_delay' : self.use_delay, + 'delay_ms' : self.delay_ms, + 'usage_pct' : self.usage, + 'address' : self.address } for i in range(len(self.usages)): out[f'usage_pct_{i}'] = str(self.usages[i]) return out diff --git a/tools/include/uapi/linux/perf_event.h b/tools/include/uapi/linux/perf_event.h index bb7b271397a6..fabe5aeaa351 100644 --- a/tools/include/uapi/linux/perf_event.h +++ b/tools/include/uapi/linux/perf_event.h @@ -1131,7 +1131,7 @@ union perf_mem_data_src { #define PERF_MEM_SNOOPX_FWD 0x01 /* forward */ /* 1 free */ -#define PERF_MEM_SNOOPX_SHIFT 37 +#define PERF_MEM_SNOOPX_SHIFT 38 /* locked instruction */ #define PERF_MEM_LOCK_NA 0x01 /* not available */ diff --git a/tools/lib/api/fs/fs.c b/tools/lib/api/fs/fs.c index bd021a0eeef8..4cc69675c2a9 100644 --- a/tools/lib/api/fs/fs.c +++ b/tools/lib/api/fs/fs.c @@ -90,6 +90,7 @@ struct fs { const char * const *mounts; char path[PATH_MAX]; bool found; + bool checked; long magic; }; @@ -111,31 +112,37 @@ static struct fs fs__entries[] = { .name = "sysfs", .mounts = sysfs__fs_known_mountpoints, .magic = SYSFS_MAGIC, + .checked = false, }, [FS__PROCFS] = { .name = "proc", .mounts = procfs__known_mountpoints, .magic = PROC_SUPER_MAGIC, + .checked = false, }, [FS__DEBUGFS] = { .name = "debugfs", .mounts = debugfs__known_mountpoints, .magic = DEBUGFS_MAGIC, + .checked = false, }, [FS__TRACEFS] = { .name = "tracefs", .mounts = 
tracefs__known_mountpoints, .magic = TRACEFS_MAGIC, + .checked = false, }, [FS__HUGETLBFS] = { .name = "hugetlbfs", .mounts = hugetlbfs__known_mountpoints, .magic = HUGETLBFS_MAGIC, + .checked = false, }, [FS__BPF_FS] = { .name = "bpf", .mounts = bpf_fs__known_mountpoints, .magic = BPF_FS_MAGIC, + .checked = false, }, }; @@ -158,6 +165,7 @@ static bool fs__read_mounts(struct fs *fs) } fclose(fp); + fs->checked = true; return fs->found = found; } @@ -220,6 +228,7 @@ static bool fs__env_override(struct fs *fs) return false; fs->found = true; + fs->checked = true; strncpy(fs->path, override_path, sizeof(fs->path) - 1); fs->path[sizeof(fs->path) - 1] = '\0'; return true; @@ -246,6 +255,14 @@ static const char *fs__mountpoint(int idx) if (fs->found) return (const char *)fs->path; + /* the mount point was already checked for the mount point + * but and did not exist, so return NULL to avoid scanning again. + * This makes the found and not found paths cost equivalent + * in case of multiple calls. + */ + if (fs->checked) + return NULL; + return fs__get_mountpoint(fs); } diff --git a/tools/lib/api/fs/fs.h b/tools/lib/api/fs/fs.h index 92d03b8396b1..3b70003e7cfb 100644 --- a/tools/lib/api/fs/fs.h +++ b/tools/lib/api/fs/fs.h @@ -18,6 +18,18 @@ const char *name##__mount(void); \ bool name##__configured(void); \ +/* + * The xxxx__mountpoint() entry points find the first match mount point for each + * filesystems listed below, where xxxx is the filesystem type. + * + * The interface is as follows: + * + * - If a mount point is found on first call, it is cached and used for all + * subsequent calls. + * + * - If a mount point is not found, NULL is returned on first call and all + * subsequent calls. + */ FS(sysfs) FS(procfs) FS(debugfs) diff --git a/tools/lib/bpf/btf_dump.c b/tools/lib/bpf/btf_dump.c index 87f27e2664c5..d9e386b8f47e 100644 --- a/tools/lib/bpf/btf_dump.c +++ b/tools/lib/bpf/btf_dump.c @@ -1141,6 +1141,20 @@ static void btf_dump_emit_mods(struct btf_dump *d, struct id_stack *decl_stack) } } +static void btf_dump_drop_mods(struct btf_dump *d, struct id_stack *decl_stack) +{ + const struct btf_type *t; + __u32 id; + + while (decl_stack->cnt) { + id = decl_stack->ids[decl_stack->cnt - 1]; + t = btf__type_by_id(d->btf, id); + if (!btf_is_mod(t)) + return; + decl_stack->cnt--; + } +} + static void btf_dump_emit_name(const struct btf_dump *d, const char *name, bool last_was_ptr) { @@ -1239,14 +1253,7 @@ static void btf_dump_emit_type_chain(struct btf_dump *d, * a const/volatile modifier for array, so we are * going to silently skip them here. */ - while (decls->cnt) { - next_id = decls->ids[decls->cnt - 1]; - next_t = btf__type_by_id(d->btf, next_id); - if (btf_is_mod(next_t)) - decls->cnt--; - else - break; - } + btf_dump_drop_mods(d, decls); if (decls->cnt == 0) { btf_dump_emit_name(d, fname, last_was_ptr); @@ -1274,7 +1281,15 @@ static void btf_dump_emit_type_chain(struct btf_dump *d, __u16 vlen = btf_vlen(t); int i; - btf_dump_emit_mods(d, decls); + /* + * GCC emits extra volatile qualifier for + * __attribute__((noreturn)) function pointers. Clang + * doesn't do it. It's a GCC quirk for backwards + * compatibility with code written for GCC <2.5. So, + * similarly to extra qualifiers for array, just drop + * them, instead of handling them. 
+ */ + btf_dump_drop_mods(d, decls); if (decls->cnt) { btf_dump_printf(d, " ("); btf_dump_emit_type_chain(d, decls, fname, lvl); diff --git a/tools/lib/bpf/hashmap.c b/tools/lib/bpf/hashmap.c index 6122272943e6..9ef9f6201d8b 100644 --- a/tools/lib/bpf/hashmap.c +++ b/tools/lib/bpf/hashmap.c @@ -56,7 +56,14 @@ struct hashmap *hashmap__new(hashmap_hash_fn hash_fn, void hashmap__clear(struct hashmap *map) { + struct hashmap_entry *cur, *tmp; + int bkt; + + hashmap__for_each_entry_safe(map, cur, tmp, bkt) { + free(cur); + } free(map->buckets); + map->buckets = NULL; map->cap = map->cap_bits = map->sz = 0; } diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 281cc65276e0..2a1dbf52fc9a 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -5358,9 +5358,12 @@ void perf_buffer__free(struct perf_buffer *pb) if (!pb) return; if (pb->cpu_bufs) { - for (i = 0; i < pb->cpu_cnt && pb->cpu_bufs[i]; i++) { + for (i = 0; i < pb->cpu_cnt; i++) { struct perf_cpu_buf *cpu_buf = pb->cpu_bufs[i]; + if (!cpu_buf) + continue; + bpf_map_delete_elem(pb->map_fd, &cpu_buf->map_key); perf_buffer__free_cpu_buf(pb, cpu_buf); } diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c index d948475585ce..4559a15e6657 100644 --- a/tools/lib/traceevent/event-parse.c +++ b/tools/lib/traceevent/event-parse.c @@ -1425,13 +1425,28 @@ static unsigned int type_size(const char *name) return 0; } +static int append(char **buf, const char *delim, const char *str) +{ + char *new_buf; + + new_buf = realloc(*buf, strlen(*buf) + strlen(delim) + strlen(str) + 1); + if (!new_buf) + return -1; + strcat(new_buf, delim); + strcat(new_buf, str); + *buf = new_buf; + return 0; +} + static int event_read_fields(struct tep_event *event, struct tep_format_field **fields) { struct tep_format_field *field = NULL; enum tep_event_type type; char *token; char *last_token; + char *delim = " "; int count = 0; + int ret; do { unsigned int size_dynamic = 0; @@ -1490,24 +1505,51 @@ static int event_read_fields(struct tep_event *event, struct tep_format_field ** field->flags |= TEP_FIELD_IS_POINTER; if (field->type) { - char *new_type; - new_type = realloc(field->type, - strlen(field->type) + - strlen(last_token) + 2); - if (!new_type) { - free(last_token); - goto fail; - } - field->type = new_type; - strcat(field->type, " "); - strcat(field->type, last_token); + ret = append(&field->type, delim, last_token); free(last_token); + if (ret < 0) + goto fail; } else field->type = last_token; last_token = token; + delim = " "; continue; } + /* Handle __attribute__((user)) */ + if ((type == TEP_EVENT_DELIM) && + strcmp("__attribute__", last_token) == 0 && + token[0] == '(') { + int depth = 1; + int ret; + + ret = append(&field->type, " ", last_token); + ret |= append(&field->type, "", "("); + if (ret < 0) + goto fail; + + delim = " "; + while ((type = read_token(&token)) != TEP_EVENT_NONE) { + if (type == TEP_EVENT_DELIM) { + if (token[0] == '(') + depth++; + else if (token[0] == ')') + depth--; + if (!depth) + break; + ret = append(&field->type, "", token); + delim = ""; + } else { + ret = append(&field->type, delim, token); + delim = " "; + } + if (ret < 0) + goto fail; + free(last_token); + last_token = token; + } + continue; + } break; } @@ -1523,8 +1565,6 @@ static int event_read_fields(struct tep_event *event, struct tep_format_field ** if (strcmp(token, "[") == 0) { enum tep_event_type last_type = type; char *brackets = token; - char *new_brackets; - int len; field->flags |= TEP_FIELD_IS_ARRAY; @@ 
-1536,29 +1576,27 @@ static int event_read_fields(struct tep_event *event, struct tep_format_field ** field->arraylen = 0; while (strcmp(token, "]") != 0) { + const char *delim; + if (last_type == TEP_EVENT_ITEM && type == TEP_EVENT_ITEM) - len = 2; + delim = " "; else - len = 1; + delim = ""; + last_type = type; - new_brackets = realloc(brackets, - strlen(brackets) + - strlen(token) + len); - if (!new_brackets) { + ret = append(&brackets, delim, token); + if (ret < 0) { free(brackets); goto fail; } - brackets = new_brackets; - if (len == 2) - strcat(brackets, " "); - strcat(brackets, token); /* We only care about the last token */ field->arraylen = strtoul(token, NULL, 0); free_token(token); type = read_token(&token); if (type == TEP_EVENT_NONE) { + free(brackets); do_warning_event(event, "failed to find token"); goto fail; } @@ -1566,13 +1604,11 @@ static int event_read_fields(struct tep_event *event, struct tep_format_field ** free_token(token); - new_brackets = realloc(brackets, strlen(brackets) + 2); - if (!new_brackets) { + ret = append(&brackets, "", "]"); + if (ret < 0) { free(brackets); goto fail; } - brackets = new_brackets; - strcat(brackets, "]"); /* add brackets to type */ @@ -1582,34 +1618,23 @@ static int event_read_fields(struct tep_event *event, struct tep_format_field ** * the format: type [] item; */ if (type == TEP_EVENT_ITEM) { - char *new_type; - new_type = realloc(field->type, - strlen(field->type) + - strlen(field->name) + - strlen(brackets) + 2); - if (!new_type) { + ret = append(&field->type, " ", field->name); + if (ret < 0) { free(brackets); goto fail; } - field->type = new_type; - strcat(field->type, " "); - strcat(field->type, field->name); + ret = append(&field->type, "", brackets); + size_dynamic = type_size(field->name); free_token(field->name); - strcat(field->type, brackets); field->name = field->alias = token; type = read_token(&token); } else { - char *new_type; - new_type = realloc(field->type, - strlen(field->type) + - strlen(brackets) + 1); - if (!new_type) { + ret = append(&field->type, "", brackets); + if (ret < 0) { free(brackets); goto fail; } - field->type = new_type; - strcat(field->type, brackets); } free(brackets); } @@ -2046,19 +2071,16 @@ process_op(struct tep_event *event, struct tep_print_arg *arg, char **tok) /* could just be a type pointer */ if ((strcmp(arg->op.op, "*") == 0) && type == TEP_EVENT_DELIM && (strcmp(token, ")") == 0)) { - char *new_atom; + int ret; if (left->type != TEP_PRINT_ATOM) { do_warning_event(event, "bad pointer type"); goto out_free; } - new_atom = realloc(left->atom.atom, - strlen(left->atom.atom) + 3); - if (!new_atom) + ret = append(&left->atom.atom, " ", "*"); + if (ret < 0) goto out_warn_free; - left->atom.atom = new_atom; - strcat(left->atom.atom, " *"); free(arg->op.op); *arg = *left; free(left); @@ -2839,6 +2861,7 @@ process_dynamic_array_len(struct tep_event *event, struct tep_print_arg *arg, if (read_expected(TEP_EVENT_DELIM, ")") < 0) goto out_err; + free_token(token); type = read_token(&token); *tok = token; @@ -3151,18 +3174,15 @@ process_arg_token(struct tep_event *event, struct tep_print_arg *arg, } /* atoms can be more than one token long */ while (type == TEP_EVENT_ITEM) { - char *new_atom; - new_atom = realloc(atom, - strlen(atom) + strlen(token) + 2); - if (!new_atom) { + int ret; + + ret = append(&atom, " ", token); + if (ret < 0) { free(atom); *tok = NULL; free_token(token); return TEP_EVENT_ERROR; } - atom = new_atom; - strcat(atom, " "); - strcat(atom, token); free_token(token); type = 
read_token_item(&token); } diff --git a/tools/lib/traceevent/plugins/Makefile b/tools/lib/traceevent/plugins/Makefile index f440989fa55e..23c3535bcbd6 100644 --- a/tools/lib/traceevent/plugins/Makefile +++ b/tools/lib/traceevent/plugins/Makefile @@ -196,7 +196,7 @@ define do_generate_dynamic_list_file xargs echo "U w W" | tr 'w ' 'W\n' | sort -u | xargs echo`;\ if [ "$$symbol_type" = "U W" ];then \ (echo '{'; \ - $(NM) -u -D $1 | awk 'NF>1 {print "\t"$$2";"}' | sort -u;\ + $(NM) -u -D $1 | awk 'NF>1 {sub("@.*", "", $$2); print "\t"$$2";"}' | sort -u;\ echo '};'; \ ) > $2; \ else \ diff --git a/tools/objtool/Makefile b/tools/objtool/Makefile index ee08aeff30a1..f591c4d1b6fe 100644 --- a/tools/objtool/Makefile +++ b/tools/objtool/Makefile @@ -3,9 +3,15 @@ include ../scripts/Makefile.include include ../scripts/Makefile.arch # always use the host compiler +ifneq ($(LLVM),) +HOSTAR ?= llvm-ar +HOSTCC ?= clang +HOSTLD ?= ld.lld +else HOSTAR ?= ar HOSTCC ?= gcc HOSTLD ?= ld +endif AR = $(HOSTAR) CC = $(HOSTCC) LD = $(HOSTLD) diff --git a/tools/objtool/check.c b/tools/objtool/check.c index fcc6cd404f56..48b234d8f251 100644 --- a/tools/objtool/check.c +++ b/tools/objtool/check.c @@ -865,6 +865,12 @@ static int add_special_section_alts(struct objtool_file *file) } if (special_alt->group) { + if (!special_alt->orig_len) { + WARN_FUNC("empty alternative entry", + orig_insn->sec, orig_insn->offset); + continue; + } + ret = handle_group_alt(file, special_alt, orig_insn, &new_insn); if (ret) diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt index c6f9f31b6039..15fd108afbe6 100644 --- a/tools/perf/Documentation/perf-record.txt +++ b/tools/perf/Documentation/perf-record.txt @@ -33,6 +33,10 @@ OPTIONS - a raw PMU event (eventsel+umask) in the form of rNNN where NNN is a hexadecimal event descriptor. + - a symbolic or raw PMU event followed by an optional colon + and a list of event modifiers, e.g., cpu-cycles:p. See the + linkperf:perf-list[1] man page for details on event modifiers. + - a symbolically formed PMU event like 'pmu/param1=0x3,param2/' where 'param1', 'param2', etc are defined as formats for the PMU in /sys/bus/event_source/devices//format/*. diff --git a/tools/perf/Documentation/perf-stat.txt b/tools/perf/Documentation/perf-stat.txt index 1948e956656c..936e69f886d7 100644 --- a/tools/perf/Documentation/perf-stat.txt +++ b/tools/perf/Documentation/perf-stat.txt @@ -39,6 +39,10 @@ report:: - a raw PMU event (eventsel+umask) in the form of rNNN where NNN is a hexadecimal event descriptor. + - a symbolic or raw PMU event followed by an optional colon + and a list of event modifiers, e.g., cpu-cycles:p. See the + linkperf:perf-list[1] man page for details on event modifiers. 
+ - a symbolically formed event like 'pmu/param1=0x3,param2/' where param1 and param2 are defined as formats for the PMU in /sys/bus/event_source/devices//format/* diff --git a/tools/perf/arch/arm/util/auxtrace.c b/tools/perf/arch/arm/util/auxtrace.c index 0a6e75b8777a..28a5d0c18b1d 100644 --- a/tools/perf/arch/arm/util/auxtrace.c +++ b/tools/perf/arch/arm/util/auxtrace.c @@ -56,7 +56,7 @@ struct auxtrace_record struct perf_pmu *cs_etm_pmu; struct evsel *evsel; bool found_etm = false; - bool found_spe = false; + struct perf_pmu *found_spe = NULL; static struct perf_pmu **arm_spe_pmus = NULL; static int nr_spes = 0; int i = 0; @@ -74,12 +74,12 @@ struct auxtrace_record evsel->core.attr.type == cs_etm_pmu->type) found_etm = true; - if (!nr_spes) + if (!nr_spes || found_spe) continue; for (i = 0; i < nr_spes; i++) { if (evsel->core.attr.type == arm_spe_pmus[i]->type) { - found_spe = true; + found_spe = arm_spe_pmus[i]; break; } } @@ -96,7 +96,7 @@ struct auxtrace_record #if defined(__aarch64__) if (found_spe) - return arm_spe_recording_init(err, arm_spe_pmus[i]); + return arm_spe_recording_init(err, found_spe); #endif /* diff --git a/tools/perf/arch/x86/util/intel-pt.c b/tools/perf/arch/x86/util/intel-pt.c index d43f9dec6998..e768c02ef2ab 100644 --- a/tools/perf/arch/x86/util/intel-pt.c +++ b/tools/perf/arch/x86/util/intel-pt.c @@ -596,6 +596,7 @@ static int intel_pt_recording_options(struct auxtrace_record *itr, } evsel->core.attr.freq = 0; evsel->core.attr.sample_period = 1; + evsel->no_aux_samples = true; intel_pt_evsel = evsel; opts->full_auxtrace = true; } diff --git a/tools/perf/bench/bench.h b/tools/perf/bench/bench.h index fddb3ced9db6..4aa6de1aa67d 100644 --- a/tools/perf/bench/bench.h +++ b/tools/perf/bench/bench.h @@ -2,6 +2,10 @@ #ifndef BENCH_H #define BENCH_H +#include + +extern struct timeval bench__start, bench__end, bench__runtime; + /* * The madvise transparent hugepage constants were added in glibc * 2.13. 
For compatibility with older versions of glibc, define these diff --git a/tools/perf/bench/epoll-ctl.c b/tools/perf/bench/epoll-ctl.c index bb617e568841..a7526c05df38 100644 --- a/tools/perf/bench/epoll-ctl.c +++ b/tools/perf/bench/epoll-ctl.c @@ -35,7 +35,6 @@ static unsigned int nthreads = 0; static unsigned int nsecs = 8; -struct timeval start, end, runtime; static bool done, __verbose, randomize; /* @@ -94,8 +93,8 @@ static void toggle_done(int sig __maybe_unused, { /* inform all threads that we're done for the day */ done = true; - gettimeofday(&end, NULL); - timersub(&end, &start, &runtime); + gettimeofday(&bench__end, NULL); + timersub(&bench__end, &bench__start, &bench__runtime); } static void nest_epollfd(void) @@ -361,7 +360,7 @@ int bench_epoll_ctl(int argc, const char **argv) threads_starting = nthreads; - gettimeofday(&start, NULL); + gettimeofday(&bench__start, NULL); do_threads(worker, cpu); diff --git a/tools/perf/bench/epoll-wait.c b/tools/perf/bench/epoll-wait.c index 7af694437f4e..d1c5cb526b9f 100644 --- a/tools/perf/bench/epoll-wait.c +++ b/tools/perf/bench/epoll-wait.c @@ -90,7 +90,6 @@ static unsigned int nthreads = 0; static unsigned int nsecs = 8; -struct timeval start, end, runtime; static bool wdone, done, __verbose, randomize, nonblocking; /* @@ -276,8 +275,8 @@ static void toggle_done(int sig __maybe_unused, { /* inform all threads that we're done for the day */ done = true; - gettimeofday(&end, NULL); - timersub(&end, &start, &runtime); + gettimeofday(&bench__end, NULL); + timersub(&bench__end, &bench__start, &bench__runtime); } static void print_summary(void) @@ -287,7 +286,7 @@ static void print_summary(void) printf("\nAveraged %ld operations/sec (+- %.2f%%), total secs = %d\n", avg, rel_stddev_stats(stddev, avg), - (int) runtime.tv_sec); + (int)bench__runtime.tv_sec); } static int do_threads(struct worker *worker, struct perf_cpu_map *cpu) @@ -479,7 +478,7 @@ int bench_epoll_wait(int argc, const char **argv) threads_starting = nthreads; - gettimeofday(&start, NULL); + gettimeofday(&bench__start, NULL); do_threads(worker, cpu); @@ -519,7 +518,7 @@ int bench_epoll_wait(int argc, const char **argv) qsort(worker, nthreads, sizeof(struct worker), cmpworker); for (i = 0; i < nthreads; i++) { - unsigned long t = worker[i].ops/runtime.tv_sec; + unsigned long t = worker[i].ops / bench__runtime.tv_sec; update_stats(&throughput_stats, t); diff --git a/tools/perf/bench/futex-hash.c b/tools/perf/bench/futex-hash.c index 8ba0c3330a9a..21776862e940 100644 --- a/tools/perf/bench/futex-hash.c +++ b/tools/perf/bench/futex-hash.c @@ -37,7 +37,7 @@ static unsigned int nfutexes = 1024; static bool fshared = false, done = false, silent = false; static int futex_flag = 0; -struct timeval start, end, runtime; +struct timeval bench__start, bench__end, bench__runtime; static pthread_mutex_t thread_lock; static unsigned int threads_starting; static struct stats throughput_stats; @@ -103,8 +103,8 @@ static void toggle_done(int sig __maybe_unused, { /* inform all threads that we're done for the day */ done = true; - gettimeofday(&end, NULL); - timersub(&end, &start, &runtime); + gettimeofday(&bench__end, NULL); + timersub(&bench__end, &bench__start, &bench__runtime); } static void print_summary(void) @@ -114,7 +114,7 @@ static void print_summary(void) printf("%sAveraged %ld operations/sec (+- %.2f%%), total secs = %d\n", !silent ? 
"\n" : "", avg, rel_stddev_stats(stddev, avg), - (int) runtime.tv_sec); + (int)bench__runtime.tv_sec); } int bench_futex_hash(int argc, const char **argv) @@ -161,7 +161,7 @@ int bench_futex_hash(int argc, const char **argv) threads_starting = nthreads; pthread_attr_init(&thread_attr); - gettimeofday(&start, NULL); + gettimeofday(&bench__start, NULL); for (i = 0; i < nthreads; i++) { worker[i].tid = i; worker[i].futex = calloc(nfutexes, sizeof(*worker[i].futex)); @@ -204,7 +204,7 @@ int bench_futex_hash(int argc, const char **argv) pthread_mutex_destroy(&thread_lock); for (i = 0; i < nthreads; i++) { - unsigned long t = worker[i].ops/runtime.tv_sec; + unsigned long t = worker[i].ops / bench__runtime.tv_sec; update_stats(&throughput_stats, t); if (!silent) { if (nfutexes == 1) diff --git a/tools/perf/bench/futex-lock-pi.c b/tools/perf/bench/futex-lock-pi.c index d0cae8125423..30d97121dc4f 100644 --- a/tools/perf/bench/futex-lock-pi.c +++ b/tools/perf/bench/futex-lock-pi.c @@ -37,7 +37,6 @@ static bool silent = false, multi = false; static bool done = false, fshared = false; static unsigned int nthreads = 0; static int futex_flag = 0; -struct timeval start, end, runtime; static pthread_mutex_t thread_lock; static unsigned int threads_starting; static struct stats throughput_stats; @@ -64,7 +63,7 @@ static void print_summary(void) printf("%sAveraged %ld operations/sec (+- %.2f%%), total secs = %d\n", !silent ? "\n" : "", avg, rel_stddev_stats(stddev, avg), - (int) runtime.tv_sec); + (int)bench__runtime.tv_sec); } static void toggle_done(int sig __maybe_unused, @@ -73,8 +72,8 @@ static void toggle_done(int sig __maybe_unused, { /* inform all threads that we're done for the day */ done = true; - gettimeofday(&end, NULL); - timersub(&end, &start, &runtime); + gettimeofday(&bench__end, NULL); + timersub(&bench__end, &bench__start, &bench__runtime); } static void *workerfn(void *arg) @@ -185,7 +184,7 @@ int bench_futex_lock_pi(int argc, const char **argv) threads_starting = nthreads; pthread_attr_init(&thread_attr); - gettimeofday(&start, NULL); + gettimeofday(&bench__start, NULL); create_threads(worker, thread_attr, cpu); pthread_attr_destroy(&thread_attr); @@ -211,7 +210,7 @@ int bench_futex_lock_pi(int argc, const char **argv) pthread_mutex_destroy(&thread_lock); for (i = 0; i < nthreads; i++) { - unsigned long t = worker[i].ops/runtime.tv_sec; + unsigned long t = worker[i].ops / bench__runtime.tv_sec; update_stats(&throughput_stats, t); if (!silent) diff --git a/tools/perf/bench/mem-functions.c b/tools/perf/bench/mem-functions.c index 9235b76501be..19d45c377ac1 100644 --- a/tools/perf/bench/mem-functions.c +++ b/tools/perf/bench/mem-functions.c @@ -223,12 +223,8 @@ static int bench_mem_common(int argc, const char **argv, struct bench_mem_info * return 0; } -static u64 do_memcpy_cycles(const struct function *r, size_t size, void *src, void *dst) +static void memcpy_prefault(memcpy_t fn, size_t size, void *src, void *dst) { - u64 cycle_start = 0ULL, cycle_end = 0ULL; - memcpy_t fn = r->fn.memcpy; - int i; - /* Make sure to always prefault zero pages even if MMAP_THRESH is crossed: */ memset(src, 0, size); @@ -237,6 +233,15 @@ static u64 do_memcpy_cycles(const struct function *r, size_t size, void *src, vo * to not measure page fault overhead: */ fn(dst, src, size); +} + +static u64 do_memcpy_cycles(const struct function *r, size_t size, void *src, void *dst) +{ + u64 cycle_start = 0ULL, cycle_end = 0ULL; + memcpy_t fn = r->fn.memcpy; + int i; + + memcpy_prefault(fn, size, src, dst); cycle_start 
= get_cycles(); for (i = 0; i < nr_loops; ++i) @@ -252,11 +257,7 @@ static double do_memcpy_gettimeofday(const struct function *r, size_t size, void memcpy_t fn = r->fn.memcpy; int i; - /* - * We prefault the freshly allocated memory range here, - * to not measure page fault overhead: - */ - fn(dst, src, size); + memcpy_prefault(fn, size, src, dst); BUG_ON(gettimeofday(&tv_start, NULL)); for (i = 0; i < nr_loops; ++i) diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c index 26bc5923e6b5..2f05f59e9758 100644 --- a/tools/perf/builtin-probe.c +++ b/tools/perf/builtin-probe.c @@ -364,6 +364,9 @@ static int perf_add_probe_events(struct perf_probe_event *pevs, int npevs) for (k = 0; k < pev->ntevs; k++) { struct probe_trace_event *tev = &pev->tevs[k]; + /* Skipped events have no event name */ + if (!tev->event) + continue; /* We use tev's name for showing new events */ show_perf_probe_event(tev->group, tev->event, pev, diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index 23332861de6e..454e275cd5df 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -2137,7 +2137,7 @@ static struct option __record_options[] = { OPT_BOOLEAN(0, "tail-synthesize", &record.opts.tail_synthesize, "synthesize non-sample events at the end of output"), OPT_BOOLEAN(0, "overwrite", &record.opts.overwrite, "use overwrite mode"), - OPT_BOOLEAN(0, "no-bpf-event", &record.opts.no_bpf_event, "record bpf events"), + OPT_BOOLEAN(0, "no-bpf-event", &record.opts.no_bpf_event, "do not record bpf events"), OPT_BOOLEAN(0, "strict-freq", &record.opts.strict_freq, "Fail if the specified frequency can't be used"), OPT_CALLBACK('F', "freq", &record.opts, "freq or 'max'", diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index 4d8db41b949a..d3c0b04e2e22 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -462,8 +462,7 @@ static size_t hists__fprintf_nr_sample_events(struct hists *hists, struct report if (rep->time_str) ret += fprintf(fp, " (time slices: %s)", rep->time_str); - if (symbol_conf.show_ref_callgraph && - strstr(evname, "call-graph=no")) { + if (symbol_conf.show_ref_callgraph && evname && strstr(evname, "call-graph=no")) { ret += fprintf(fp, ", show reference callgraph"); } diff --git a/tools/perf/pmu-events/jevents.c b/tools/perf/pmu-events/jevents.c index d5d71c9ac99f..fa5c2e215bcc 100644 --- a/tools/perf/pmu-events/jevents.c +++ b/tools/perf/pmu-events/jevents.c @@ -137,7 +137,7 @@ static char *fixregex(char *s) return s; /* allocate space for a new string */ - fixed = (char *) malloc(len + 1); + fixed = (char *) malloc(len + esc_count + 1); if (!fixed) return NULL; diff --git a/tools/perf/scripts/python/export-to-postgresql.py b/tools/perf/scripts/python/export-to-postgresql.py index 7bd73a904b4e..d187e46c2683 100644 --- a/tools/perf/scripts/python/export-to-postgresql.py +++ b/tools/perf/scripts/python/export-to-postgresql.py @@ -1055,7 +1055,7 @@ def cbr(id, raw_buf): cbr = data[0] MHz = (data[4] + 500) / 1000 percent = ((cbr * 1000 / data[2]) + 5) / 10 - value = struct.pack("!hiqiiiiii", 4, 8, id, 4, cbr, 4, MHz, 4, percent) + value = struct.pack("!hiqiiiiii", 4, 8, id, 4, cbr, 4, int(MHz), 4, int(percent)) cbr_file.write(value) def mwait(id, raw_buf): diff --git a/tools/perf/scripts/python/exported-sql-viewer.py b/tools/perf/scripts/python/exported-sql-viewer.py index 4b28c9d08d5a..04217e8f535a 100755 --- a/tools/perf/scripts/python/exported-sql-viewer.py +++ b/tools/perf/scripts/python/exported-sql-viewer.py 
@@ -756,7 +756,8 @@ class CallGraphModel(CallGraphModelBase): " FROM calls" " INNER JOIN call_paths ON calls.call_path_id = call_paths.id" " INNER JOIN symbols ON call_paths.symbol_id = symbols.id" - " WHERE symbols.name" + match + + " WHERE calls.id <> 0" + " AND symbols.name" + match + " GROUP BY comm_id, thread_id, call_path_id" " ORDER BY comm_id, thread_id, call_path_id") @@ -950,7 +951,8 @@ class CallTreeModel(CallGraphModelBase): " FROM calls" " INNER JOIN call_paths ON calls.call_path_id = call_paths.id" " INNER JOIN symbols ON call_paths.symbol_id = symbols.id" - " WHERE symbols.name" + match + + " WHERE calls.id <> 0" + " AND symbols.name" + match + " ORDER BY comm_id, thread_id, call_time, calls.id") def FindPath(self, query): @@ -1016,6 +1018,7 @@ class TreeWindowBase(QMdiSubWindow): child = self.model.index(row, 0, parent) if child.internalPointer().dbid == dbid: found = True + self.view.setExpanded(parent, True) self.view.setCurrentIndex(child) parent = child break diff --git a/tools/perf/tests/bp_account.c b/tools/perf/tests/bp_account.c index 016bba2c142d..55a9de311d7b 100644 --- a/tools/perf/tests/bp_account.c +++ b/tools/perf/tests/bp_account.c @@ -23,7 +23,7 @@ #include "../perf-sys.h" #include "cloexec.h" -volatile long the_var; +static volatile long the_var; static noinline int test_function(void) { diff --git a/tools/perf/tests/bp_signal.c b/tools/perf/tests/bp_signal.c index 166f411568a5..b5cdedd13cbc 100644 --- a/tools/perf/tests/bp_signal.c +++ b/tools/perf/tests/bp_signal.c @@ -45,10 +45,13 @@ volatile long the_var; #if defined (__x86_64__) extern void __test_function(volatile long *ptr); asm ( + ".pushsection .text;" ".globl __test_function\n" + ".type __test_function, @function;" "__test_function:\n" "incq (%rdi)\n" - "ret\n"); + "ret\n" + ".popsection\n"); #else static void __test_function(volatile long *ptr) { diff --git a/tools/perf/tests/pmu.c b/tools/perf/tests/pmu.c index 74379ff1f7fa..46cd1db85bd0 100644 --- a/tools/perf/tests/pmu.c +++ b/tools/perf/tests/pmu.c @@ -173,6 +173,7 @@ int test__pmu(struct test *test __maybe_unused, int subtest __maybe_unused) ret = 0; } while (0); + perf_pmu__del_formats(&formats); test_format_dir_put(format); return ret; } diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c index 88c3df24b748..514cef3a17b4 100644 --- a/tools/perf/ui/browsers/hists.c +++ b/tools/perf/ui/browsers/hists.c @@ -2224,6 +2224,11 @@ static struct thread *hist_browser__selected_thread(struct hist_browser *browser return browser->he_selection->thread; } +static struct res_sample *hist_browser__selected_res_sample(struct hist_browser *browser) +{ + return browser->he_selection ? 
browser->he_selection->res_samples : NULL; +} + /* Check whether the browser is for 'top' or 'report' */ static inline bool is_report_browser(void *timer) { @@ -3170,16 +3175,16 @@ skip_annotation: &options[nr_options], NULL, NULL, evsel); nr_options += add_res_sample_opt(browser, &actions[nr_options], &options[nr_options], - hist_browser__selected_entry(browser)->res_samples, - evsel, A_NORMAL); + hist_browser__selected_res_sample(browser), + evsel, A_NORMAL); nr_options += add_res_sample_opt(browser, &actions[nr_options], &options[nr_options], - hist_browser__selected_entry(browser)->res_samples, - evsel, A_ASM); + hist_browser__selected_res_sample(browser), + evsel, A_ASM); nr_options += add_res_sample_opt(browser, &actions[nr_options], &options[nr_options], - hist_browser__selected_entry(browser)->res_samples, - evsel, A_SOURCE); + hist_browser__selected_res_sample(browser), + evsel, A_SOURCE); nr_options += add_switch_opt(browser, &actions[nr_options], &options[nr_options]); skip_scripting: diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c index e11ddf86f2b3..ab2e130dc07a 100644 --- a/tools/perf/util/dso.c +++ b/tools/perf/util/dso.c @@ -47,6 +47,7 @@ char dso__symtab_origin(const struct dso *dso) [DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO] = 'D', [DSO_BINARY_TYPE__FEDORA_DEBUGINFO] = 'f', [DSO_BINARY_TYPE__UBUNTU_DEBUGINFO] = 'u', + [DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO] = 'x', [DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO] = 'o', [DSO_BINARY_TYPE__BUILDID_DEBUGINFO] = 'b', [DSO_BINARY_TYPE__SYSTEM_PATH_DSO] = 'd', @@ -129,6 +130,21 @@ int dso__read_binary_type_filename(const struct dso *dso, snprintf(filename + len, size - len, "%s", dso->long_name); break; + case DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO: + /* + * Ubuntu can mixup /usr/lib with /lib, putting debuginfo in + * /usr/lib/debug/lib when it is expected to be in + * /usr/lib/debug/usr/lib + */ + if (strlen(dso->long_name) < 9 || + strncmp(dso->long_name, "/usr/lib/", 9)) { + ret = -1; + break; + } + len = __symbol__join_symfs(filename, size, "/usr/lib/debug"); + snprintf(filename + len, size - len, "%s", dso->long_name + 4); + break; + case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO: { const char *last_slash; diff --git a/tools/perf/util/dso.h b/tools/perf/util/dso.h index e4dddb76770d..69bb77d19164 100644 --- a/tools/perf/util/dso.h +++ b/tools/perf/util/dso.h @@ -30,6 +30,7 @@ enum dso_binary_type { DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO, DSO_BINARY_TYPE__FEDORA_DEBUGINFO, DSO_BINARY_TYPE__UBUNTU_DEBUGINFO, + DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO, DSO_BINARY_TYPE__BUILDID_DEBUGINFO, DSO_BINARY_TYPE__SYSTEM_PATH_DSO, DSO_BINARY_TYPE__GUEST_KMODULE, diff --git a/tools/perf/util/env.c b/tools/perf/util/env.c index 3baca06786fb..018ecf7b6da9 100644 --- a/tools/perf/util/env.c +++ b/tools/perf/util/env.c @@ -326,11 +326,11 @@ static const char *normalize_arch(char *arch) const char *perf_env__arch(struct perf_env *env) { - struct utsname uts; char *arch_name; if (!env || !env->arch) { /* Assume local operation */ - if (uname(&uts) < 0) + static struct utsname uts = { .machine[0] = '\0', }; + if (uts.machine[0] == '\0' && uname(&uts) < 0) return NULL; arch_name = uts.machine; } else diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c index de79c735e441..505b890ac85c 100644 --- a/tools/perf/util/evlist.c +++ b/tools/perf/util/evlist.c @@ -976,6 +976,10 @@ int perf_evlist__create_maps(struct evlist *evlist, struct target *target) perf_evlist__set_maps(&evlist->core, cpus, threads); + /* as evlist now 
has references, put count here */ + perf_cpu_map__put(cpus); + perf_thread_map__put(threads); + return 0; out_delete_threads: @@ -1230,11 +1234,12 @@ static int perf_evlist__create_syswide_maps(struct evlist *evlist) goto out_put; perf_evlist__set_maps(&evlist->core, cpus, threads); -out: - return err; + + perf_thread_map__put(threads); out_put: perf_cpu_map__put(cpus); - goto out; +out: + return err; } int evlist__open(struct evlist *evlist) diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index abc7fda4a0fe..a844715a352d 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c @@ -1028,12 +1028,12 @@ void perf_evsel__config(struct evsel *evsel, struct record_opts *opts, if (callchain && callchain->enabled && !evsel->no_aux_samples) perf_evsel__config_callchain(evsel, opts, callchain); - if (opts->sample_intr_regs) { + if (opts->sample_intr_regs && !evsel->no_aux_samples) { attr->sample_regs_intr = opts->sample_intr_regs; perf_evsel__set_sample_bit(evsel, REGS_INTR); } - if (opts->sample_user_regs) { + if (opts->sample_user_regs && !evsel->no_aux_samples) { attr->sample_regs_user |= opts->sample_user_regs; perf_evsel__set_sample_bit(evsel, REGS_USER); } diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c index f8ccfd6be0ee..7ffcbd6fcd1a 100644 --- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c +++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c @@ -1164,6 +1164,7 @@ static int intel_pt_walk_fup(struct intel_pt_decoder *decoder) return 0; if (err == -EAGAIN || intel_pt_fup_with_nlip(decoder, &intel_pt_insn, ip, err)) { + decoder->pkt_state = INTEL_PT_STATE_IN_SYNC; if (intel_pt_fup_event(decoder)) return 0; return -EAGAIN; @@ -1942,17 +1943,13 @@ next: } if (decoder->set_fup_mwait) no_tip = true; + if (no_tip) + decoder->pkt_state = INTEL_PT_STATE_FUP_NO_TIP; + else + decoder->pkt_state = INTEL_PT_STATE_FUP; err = intel_pt_walk_fup(decoder); - if (err != -EAGAIN) { - if (err) - return err; - if (no_tip) - decoder->pkt_state = - INTEL_PT_STATE_FUP_NO_TIP; - else - decoder->pkt_state = INTEL_PT_STATE_FUP; - return 0; - } + if (err != -EAGAIN) + return err; if (no_tip) { no_tip = false; break; @@ -1980,8 +1977,10 @@ next: * possibility of another CBR change that gets caught up * in the PSB+. 
*/ - if (decoder->cbr != decoder->cbr_seen) + if (decoder->cbr != decoder->cbr_seen) { + decoder->state.type = 0; return 0; + } break; case INTEL_PT_PIP: @@ -2022,8 +2021,10 @@ next: case INTEL_PT_CBR: intel_pt_calc_cbr(decoder); - if (decoder->cbr != decoder->cbr_seen) + if (decoder->cbr != decoder->cbr_seen) { + decoder->state.type = 0; return 0; + } break; case INTEL_PT_MODE_EXEC: @@ -2599,15 +2600,11 @@ const struct intel_pt_state *intel_pt_decode(struct intel_pt_decoder *decoder) err = intel_pt_walk_tip(decoder); break; case INTEL_PT_STATE_FUP: - decoder->pkt_state = INTEL_PT_STATE_IN_SYNC; err = intel_pt_walk_fup(decoder); if (err == -EAGAIN) err = intel_pt_walk_fup_tip(decoder); - else if (!err) - decoder->pkt_state = INTEL_PT_STATE_FUP; break; case INTEL_PT_STATE_FUP_NO_TIP: - decoder->pkt_state = INTEL_PT_STATE_IN_SYNC; err = intel_pt_walk_fup(decoder); if (err == -EAGAIN) err = intel_pt_walk_trace(decoder); diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c index a1c9eb6d4f40..c5cce3a60476 100644 --- a/tools/perf/util/intel-pt.c +++ b/tools/perf/util/intel-pt.c @@ -1707,6 +1707,7 @@ static int intel_pt_synth_pebs_sample(struct intel_pt_queue *ptq) u64 sample_type = evsel->core.attr.sample_type; u64 id = evsel->core.id[0]; u8 cpumode; + u64 regs[8 * sizeof(sample.intr_regs.mask)]; if (intel_pt_skip_event(pt)) return 0; @@ -1756,8 +1757,8 @@ static int intel_pt_synth_pebs_sample(struct intel_pt_queue *ptq) } if (sample_type & PERF_SAMPLE_REGS_INTR && - items->mask[INTEL_PT_GP_REGS_POS]) { - u64 regs[sizeof(sample.intr_regs.mask)]; + (items->mask[INTEL_PT_GP_REGS_POS] || + items->mask[INTEL_PT_XMM_POS])) { u64 regs_mask = evsel->core.attr.sample_regs_intr; u64 *pos; diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c index 422ad1888e74..759a99f723fc 100644 --- a/tools/perf/util/parse-events.c +++ b/tools/perf/util/parse-events.c @@ -370,7 +370,7 @@ static int add_event_tool(struct list_head *list, int *idx, return -ENOMEM; evsel->tool_event = tool_event; if (tool_event == PERF_TOOL_DURATION_TIME) - evsel->unit = strdup("ns"); + evsel->unit = "ns"; return 0; } diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c index 5608da82ad23..628a6d5a5b38 100644 --- a/tools/perf/util/pmu.c +++ b/tools/perf/util/pmu.c @@ -1294,6 +1294,17 @@ void perf_pmu__set_format(unsigned long *bits, long from, long to) set_bit(b, bits); } +void perf_pmu__del_formats(struct list_head *formats) +{ + struct perf_pmu_format *fmt, *tmp; + + list_for_each_entry_safe(fmt, tmp, formats, list) { + list_del(&fmt->list); + free(fmt->name); + free(fmt); + } +} + static int sub_non_neg(int a, int b) { if (b > a) diff --git a/tools/perf/util/pmu.h b/tools/perf/util/pmu.h index f36ade6df76d..9570d9b26250 100644 --- a/tools/perf/util/pmu.h +++ b/tools/perf/util/pmu.h @@ -81,6 +81,7 @@ int perf_pmu__new_format(struct list_head *list, char *name, int config, unsigned long *bits); void perf_pmu__set_format(unsigned long *bits, long from, long to); int perf_pmu__format_parse(char *dir, struct list_head *head); +void perf_pmu__del_formats(struct list_head *formats); struct perf_pmu *perf_pmu__scan(struct perf_pmu *pmu); diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c index 92b07be0b48b..a5cb1a3a1064 100644 --- a/tools/perf/util/probe-event.c +++ b/tools/perf/util/probe-event.c @@ -102,7 +102,7 @@ void exit_probe_symbol_maps(void) symbol__exit(); } -static struct ref_reloc_sym *kernel_get_ref_reloc_sym(void) +static struct ref_reloc_sym 
*kernel_get_ref_reloc_sym(struct map **pmap) { /* kmap->ref_reloc_sym should be set if host_machine is initialized */ struct kmap *kmap; @@ -114,6 +114,10 @@ static struct ref_reloc_sym *kernel_get_ref_reloc_sym(void) kmap = map__kmap(map); if (!kmap) return NULL; + + if (pmap) + *pmap = map; + return kmap->ref_reloc_sym; } @@ -125,7 +129,7 @@ static int kernel_get_symbol_address_by_name(const char *name, u64 *addr, struct map *map; /* ref_reloc_sym is just a label. Need a special fix*/ - reloc_sym = kernel_get_ref_reloc_sym(); + reloc_sym = kernel_get_ref_reloc_sym(NULL); if (reloc_sym && strcmp(name, reloc_sym->name) == 0) *addr = (reloc) ? reloc_sym->addr : reloc_sym->unrelocated_addr; else { @@ -232,21 +236,22 @@ static void clear_probe_trace_events(struct probe_trace_event *tevs, int ntevs) static bool kprobe_blacklist__listed(unsigned long address); static bool kprobe_warn_out_range(const char *symbol, unsigned long address) { - u64 etext_addr = 0; - int ret; + struct map *map; + bool ret = false; - /* Get the address of _etext for checking non-probable text symbol */ - ret = kernel_get_symbol_address_by_name("_etext", &etext_addr, - false, false); - - if (ret == 0 && etext_addr < address) - pr_warning("%s is out of .text, skip it.\n", symbol); - else if (kprobe_blacklist__listed(address)) + map = kernel_get_module_map(NULL); + if (map) { + ret = address <= map->start || map->end < address; + if (ret) + pr_warning("%s is out of .text, skip it.\n", symbol); + map__put(map); + } + if (!ret && kprobe_blacklist__listed(address)) { pr_warning("%s is blacklisted function, skip it.\n", symbol); - else - return false; + ret = true; + } - return true; + return ret; } /* @@ -745,6 +750,7 @@ post_process_kernel_probe_trace_events(struct probe_trace_event *tevs, int ntevs) { struct ref_reloc_sym *reloc_sym; + struct map *map; char *tmp; int i, skipped = 0; @@ -753,7 +759,7 @@ post_process_kernel_probe_trace_events(struct probe_trace_event *tevs, return post_process_offline_probe_trace_events(tevs, ntevs, symbol_conf.vmlinux_name); - reloc_sym = kernel_get_ref_reloc_sym(); + reloc_sym = kernel_get_ref_reloc_sym(&map); if (!reloc_sym) { pr_warning("Relocated base symbol is not found!\n"); return -EINVAL; @@ -764,9 +770,13 @@ post_process_kernel_probe_trace_events(struct probe_trace_event *tevs, continue; if (tevs[i].point.retprobe && !kretprobe_offset_is_supported()) continue; - /* If we found a wrong one, mark it by NULL symbol */ + /* + * If we found a wrong one, mark it by NULL symbol. + * Since addresses in debuginfo are the same as in objdump, we need + * to convert them to addresses in memory.
+ */ if (kprobe_warn_out_range(tevs[i].point.symbol, - tevs[i].point.address)) { + map__objdump_2mem(map, tevs[i].point.address))) { tmp = NULL; skipped++; } else { @@ -2922,7 +2932,7 @@ static int find_probe_trace_events_from_map(struct perf_probe_event *pev, /* Note that the symbols in the kmodule are not relocated */ if (!pev->uprobes && !pev->target && (!pp->retprobe || kretprobe_offset_is_supported())) { - reloc_sym = kernel_get_ref_reloc_sym(); + reloc_sym = kernel_get_ref_reloc_sym(NULL); if (!reloc_sym) { pr_warning("Relocated base symbol is not found!\n"); ret = -EINVAL; diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c index aaf3b24fffa4..849d8d2e5976 100644 --- a/tools/perf/util/probe-finder.c +++ b/tools/perf/util/probe-finder.c @@ -101,6 +101,7 @@ enum dso_binary_type distro_dwarf_types[] = { DSO_BINARY_TYPE__UBUNTU_DEBUGINFO, DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO, DSO_BINARY_TYPE__BUILDID_DEBUGINFO, + DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO, DSO_BINARY_TYPE__NOT_FOUND, }; @@ -1361,7 +1362,7 @@ int debuginfo__find_trace_events(struct debuginfo *dbg, tf.ntevs = 0; ret = debuginfo__find_probes(dbg, &tf.pf); - if (ret < 0) { + if (ret < 0 || tf.ntevs == 0) { for (i = 0; i < tf.ntevs; i++) clear_probe_trace_event(&tf.tevs[i]); zfree(tevs); diff --git a/tools/perf/util/stat-display.c b/tools/perf/util/stat-display.c index ed3b0ac2f785..373e399e57d2 100644 --- a/tools/perf/util/stat-display.c +++ b/tools/perf/util/stat-display.c @@ -661,7 +661,7 @@ static void print_aggr(struct perf_stat_config *config, int s; bool first; - if (!(config->aggr_map || config->aggr_get_id)) + if (!config->aggr_map || !config->aggr_get_id) return; aggr_update_shadow(config, evlist); @@ -1140,7 +1140,7 @@ static void print_percore(struct perf_stat_config *config, int s; bool first = true; - if (!(config->aggr_map || config->aggr_get_id)) + if (!config->aggr_map || !config->aggr_get_id) return; for (s = 0; s < config->aggr_map->nr; s++) { diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c index ebdd130557fb..5156aa971fbb 100644 --- a/tools/perf/util/stat.c +++ b/tools/perf/util/stat.c @@ -367,8 +367,10 @@ int perf_stat_process_counter(struct perf_stat_config *config, * interval mode, otherwise overall avg running * averages will be shown for each interval. 
*/ - if (config->interval) - init_stats(ps->res_stats); + if (config->interval) { + for (i = 0; i < 3; i++) + init_stats(&ps->res_stats[i]); + } if (counter->per_pkg) zero_per_pkg(counter); diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index a8f80e427674..901ad7f6f4dc 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -79,6 +79,7 @@ static enum dso_binary_type binary_type_symtab[] = { DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE, DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP, DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO, + DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO, DSO_BINARY_TYPE__NOT_FOUND, }; @@ -1220,6 +1221,7 @@ int map_groups__merge_in(struct map_groups *kmaps, struct map *new_map) m->end = old_map->start; list_add_tail(&m->node, &merged); + new_map->pgoff += old_map->end - new_map->start; new_map->start = old_map->end; } } else { @@ -1240,6 +1242,7 @@ int map_groups__merge_in(struct map_groups *kmaps, struct map *new_map) * |new......| -> |new...| * |old....| -> |old....| */ + new_map->pgoff += old_map->end - new_map->start; new_map->start = old_map->end; } } @@ -1530,6 +1533,7 @@ static bool dso__is_compatible_symtab_type(struct dso *dso, bool kmod, case DSO_BINARY_TYPE__SYSTEM_PATH_DSO: case DSO_BINARY_TYPE__FEDORA_DEBUGINFO: case DSO_BINARY_TYPE__UBUNTU_DEBUGINFO: + case DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO: case DSO_BINARY_TYPE__BUILDID_DEBUGINFO: case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO: return !kmod && dso->kernel == DSO_TYPE_USER; diff --git a/tools/testing/selftests/bpf/config b/tools/testing/selftests/bpf/config index 5dc109f4c097..b9601f13cf03 100644 --- a/tools/testing/selftests/bpf/config +++ b/tools/testing/selftests/bpf/config @@ -25,6 +25,7 @@ CONFIG_XDP_SOCKETS=y CONFIG_FTRACE_SYSCALLS=y CONFIG_IPV6_TUNNEL=y CONFIG_IPV6_GRE=y +CONFIG_IPV6_SEG6_BPF=y CONFIG_NET_FOU=m CONFIG_NET_FOU_IP_TUNNELS=y CONFIG_IPV6_FOU=m diff --git a/tools/testing/selftests/bpf/prog_tests/flow_dissector.c b/tools/testing/selftests/bpf/prog_tests/flow_dissector.c index 92563898867c..9f3634c9971d 100644 --- a/tools/testing/selftests/bpf/prog_tests/flow_dissector.c +++ b/tools/testing/selftests/bpf/prog_tests/flow_dissector.c @@ -523,6 +523,7 @@ void test_flow_dissector(void) CHECK_ATTR(err, tests[i].name, "bpf_map_delete_elem %d\n", err); } + close(tap_fd); bpf_prog_detach(prog_fd, BPF_FLOW_DISSECTOR); bpf_object__close(obj); } diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c index e1f1becda529..1c4219ceced2 100644 --- a/tools/testing/selftests/bpf/test_maps.c +++ b/tools/testing/selftests/bpf/test_maps.c @@ -793,19 +793,19 @@ static void test_sockmap(unsigned int tasks, void *data) } err = bpf_prog_detach(fd, BPF_SK_SKB_STREAM_PARSER); - if (err) { + if (!err) { printf("Failed empty parser prog detach\n"); goto out_sockmap; } err = bpf_prog_detach(fd, BPF_SK_SKB_STREAM_VERDICT); - if (err) { + if (!err) { printf("Failed empty verdict prog detach\n"); goto out_sockmap; } err = bpf_prog_detach(fd, BPF_SK_MSG_VERDICT); - if (err) { + if (!err) { printf("Failed empty msg verdict prog detach\n"); goto out_sockmap; } @@ -1094,19 +1094,19 @@ static void test_sockmap(unsigned int tasks, void *data) assert(status == 0); } - err = bpf_prog_detach(map_fd_rx, __MAX_BPF_ATTACH_TYPE); + err = bpf_prog_detach2(parse_prog, map_fd_rx, __MAX_BPF_ATTACH_TYPE); if (!err) { printf("Detached an invalid prog type.\n"); goto out_sockmap; } - err = bpf_prog_detach(map_fd_rx, BPF_SK_SKB_STREAM_PARSER); + err = bpf_prog_detach2(parse_prog, 
map_fd_rx, BPF_SK_SKB_STREAM_PARSER); if (err) { printf("Failed parser prog detach\n"); goto out_sockmap; } - err = bpf_prog_detach(map_fd_rx, BPF_SK_SKB_STREAM_VERDICT); + err = bpf_prog_detach2(verdict_prog, map_fd_rx, BPF_SK_SKB_STREAM_VERDICT); if (err) { printf("Failed parser prog detach\n"); goto out_sockmap; @@ -1282,6 +1282,8 @@ static void __run_parallel(unsigned int tasks, pid_t pid[tasks]; int i; + fflush(stdout); + for (i = 0; i < tasks; i++) { pid[i] = fork(); if (pid[i] == 0) { diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c index 3bf18364c67c..48bbe8e0ce48 100644 --- a/tools/testing/selftests/bpf/test_progs.c +++ b/tools/testing/selftests/bpf/test_progs.c @@ -7,6 +7,8 @@ #include #include +#define EXIT_NO_TEST 2 + /* defined in test_progs.h */ struct test_env env; @@ -293,6 +295,7 @@ int extract_build_id(char *build_id, size_t size) len = size; memcpy(build_id, line, len); build_id[len] = '\0'; + free(line); return 0; err: fclose(fp); @@ -583,5 +586,8 @@ int main(int argc, char **argv) free(env.test_selector.num_set); free(env.subtest_selector.num_set); + if (env.succ_cnt + env.fail_cnt + env.skip_cnt == 0) + return EXIT_NO_TEST; + return env.fail_cnt ? EXIT_FAILURE : EXIT_SUCCESS; } diff --git a/tools/testing/selftests/cgroup/cgroup_util.c b/tools/testing/selftests/cgroup/cgroup_util.c index bdb69599c4bd..5e939ff1e3f9 100644 --- a/tools/testing/selftests/cgroup/cgroup_util.c +++ b/tools/testing/selftests/cgroup/cgroup_util.c @@ -105,7 +105,7 @@ int cg_read_strcmp(const char *cgroup, const char *control, /* Handle the case of comparing against empty string */ if (!expected) - size = 32; + return -1; else size = strlen(expected) + 1; diff --git a/tools/testing/selftests/net/fib_nexthop_multiprefix.sh b/tools/testing/selftests/net/fib_nexthop_multiprefix.sh index 9dc35a16e415..51df5e305855 100755 --- a/tools/testing/selftests/net/fib_nexthop_multiprefix.sh +++ b/tools/testing/selftests/net/fib_nexthop_multiprefix.sh @@ -144,7 +144,7 @@ setup() cleanup() { - for n in h1 r1 h2 h3 h4 + for n in h0 r1 h1 h2 h3 do ip netns del ${n} 2>/dev/null done diff --git a/tools/testing/selftests/net/fib_nexthops.sh b/tools/testing/selftests/net/fib_nexthops.sh index 6560ed796ac4..09830b88ec8c 100755 --- a/tools/testing/selftests/net/fib_nexthops.sh +++ b/tools/testing/selftests/net/fib_nexthops.sh @@ -512,6 +512,19 @@ ipv6_fcnal_runtime() run_cmd "$IP nexthop add id 86 via 2001:db8:91::2 dev veth1" run_cmd "$IP ro add 2001:db8:101::1/128 nhid 81" + # rpfilter and default route + $IP nexthop flush >/dev/null 2>&1 + run_cmd "ip netns exec me ip6tables -t mangle -I PREROUTING 1 -m rpfilter --invert -j DROP" + run_cmd "$IP nexthop add id 91 via 2001:db8:91::2 dev veth1" + run_cmd "$IP nexthop add id 92 via 2001:db8:92::2 dev veth3" + run_cmd "$IP nexthop add id 93 group 91/92" + run_cmd "$IP -6 ro add default nhid 91" + run_cmd "ip netns exec me ping -c1 -w1 2001:db8:101::1" + log_test $? 0 "Nexthop with default route and rpfilter" + run_cmd "$IP -6 ro replace default nhid 93" + run_cmd "ip netns exec me ping -c1 -w1 2001:db8:101::1" + log_test $? 
0 "Nexthop with multipath default route and rpfilter" + # TO-DO: # existing route with old nexthop; append route with new nexthop # existing route with old nexthop; replace route with new diff --git a/tools/testing/selftests/net/icmp_redirect.sh b/tools/testing/selftests/net/icmp_redirect.sh index 18c5de53558a..bf361f30d6ef 100755 --- a/tools/testing/selftests/net/icmp_redirect.sh +++ b/tools/testing/selftests/net/icmp_redirect.sh @@ -180,6 +180,8 @@ setup() ;; r[12]) ip netns exec $ns sysctl -q -w net.ipv4.ip_forward=1 ip netns exec $ns sysctl -q -w net.ipv4.conf.all.send_redirects=1 + ip netns exec $ns sysctl -q -w net.ipv4.conf.default.rp_filter=0 + ip netns exec $ns sysctl -q -w net.ipv4.conf.all.rp_filter=0 ip netns exec $ns sysctl -q -w net.ipv6.conf.all.forwarding=1 ip netns exec $ns sysctl -q -w net.ipv6.route.mtu_expires=10 diff --git a/tools/testing/selftests/net/ip_defrag.sh b/tools/testing/selftests/net/ip_defrag.sh index 15d3489ecd9c..ceb7ad4dbd94 100755 --- a/tools/testing/selftests/net/ip_defrag.sh +++ b/tools/testing/selftests/net/ip_defrag.sh @@ -6,6 +6,8 @@ set +x set -e +modprobe -q nf_defrag_ipv6 + readonly NETNS="ns-$(mktemp -u XXXXXX)" setup() { diff --git a/tools/testing/selftests/net/msg_zerocopy.c b/tools/testing/selftests/net/msg_zerocopy.c index 4b02933cab8a..bdc03a2097e8 100644 --- a/tools/testing/selftests/net/msg_zerocopy.c +++ b/tools/testing/selftests/net/msg_zerocopy.c @@ -125,9 +125,8 @@ static int do_setcpu(int cpu) CPU_ZERO(&mask); CPU_SET(cpu, &mask); if (sched_setaffinity(0, sizeof(mask), &mask)) - error(1, 0, "setaffinity %d", cpu); - - if (cfg_verbose) + fprintf(stderr, "cpu: unable to pin, may increase variance.\n"); + else if (cfg_verbose) fprintf(stderr, "cpu: %u\n", cpu); return 0; diff --git a/tools/testing/selftests/net/psock_fanout.c b/tools/testing/selftests/net/psock_fanout.c index 8c8c7d79c38d..2c522f7a0aec 100644 --- a/tools/testing/selftests/net/psock_fanout.c +++ b/tools/testing/selftests/net/psock_fanout.c @@ -350,7 +350,8 @@ static int test_datapath(uint16_t typeflags, int port_off, int fds[2], fds_udp[2][2], ret; fprintf(stderr, "\ntest: datapath 0x%hx ports %hu,%hu\n", - typeflags, PORT_BASE, PORT_BASE + port_off); + typeflags, (uint16_t)PORT_BASE, + (uint16_t)(PORT_BASE + port_off)); fds[0] = sock_fanout_open(typeflags, 0); fds[1] = sock_fanout_open(typeflags, 0); diff --git a/tools/testing/selftests/net/so_txtime.c b/tools/testing/selftests/net/so_txtime.c index 383bac05ac32..3155fbbf644b 100644 --- a/tools/testing/selftests/net/so_txtime.c +++ b/tools/testing/selftests/net/so_txtime.c @@ -15,8 +15,9 @@ #include #include #include +#include #include -#include +#include #include #include #include @@ -120,7 +121,7 @@ static bool do_recv_one(int fdr, struct timed_send *ts) if (rbuf[0] != ts->data) error(1, 0, "payload mismatch. 
expected %c", ts->data); - if (labs(tstop - texpect) > cfg_variance_us) + if (llabs(tstop - texpect) > cfg_variance_us) error(1, 0, "exceeds variance (%d us)", cfg_variance_us); return false; @@ -140,8 +141,8 @@ static void do_recv_errqueue_timeout(int fdt) { char control[CMSG_SPACE(sizeof(struct sock_extended_err)) + CMSG_SPACE(sizeof(struct sockaddr_in6))] = {0}; - char data[sizeof(struct ipv6hdr) + - sizeof(struct tcphdr) + 1]; + char data[sizeof(struct ethhdr) + sizeof(struct ipv6hdr) + + sizeof(struct udphdr) + 1]; struct sock_extended_err *err; struct msghdr msg = {0}; struct iovec iov = {0}; @@ -159,6 +160,8 @@ static void do_recv_errqueue_timeout(int fdt) msg.msg_controllen = sizeof(control); while (1) { + const char *reason; + ret = recvmsg(fdt, &msg, MSG_ERRQUEUE); if (ret == -1 && errno == EAGAIN) break; @@ -176,14 +179,30 @@ static void do_recv_errqueue_timeout(int fdt) err = (struct sock_extended_err *)CMSG_DATA(cm); if (err->ee_origin != SO_EE_ORIGIN_TXTIME) error(1, 0, "errqueue: origin 0x%x\n", err->ee_origin); - if (err->ee_code != ECANCELED) - error(1, 0, "errqueue: code 0x%x\n", err->ee_code); + + switch (err->ee_errno) { + case ECANCELED: + if (err->ee_code != SO_EE_CODE_TXTIME_MISSED) + error(1, 0, "errqueue: unknown ECANCELED %u\n", + err->ee_code); + reason = "missed txtime"; + break; + case EINVAL: + if (err->ee_code != SO_EE_CODE_TXTIME_INVALID_PARAM) + error(1, 0, "errqueue: unknown EINVAL %u\n", + err->ee_code); + reason = "invalid txtime"; + break; + default: + error(1, 0, "errqueue: errno %u code %u\n", + err->ee_errno, err->ee_code); + }; tstamp = ((int64_t) err->ee_data) << 32 | err->ee_info; tstamp -= (int64_t) glob_tstart; tstamp /= 1000 * 1000; - fprintf(stderr, "send: pkt %c at %" PRId64 "ms dropped\n", - data[ret - 1], tstamp); + fprintf(stderr, "send: pkt %c at %" PRId64 "ms dropped: %s\n", + data[ret - 1], tstamp, reason); msg.msg_flags = 0; msg.msg_controllen = sizeof(control); diff --git a/tools/testing/selftests/networking/timestamping/rxtimestamp.c b/tools/testing/selftests/networking/timestamping/rxtimestamp.c index 422e7761254d..bcb79ba1f214 100644 --- a/tools/testing/selftests/networking/timestamping/rxtimestamp.c +++ b/tools/testing/selftests/networking/timestamping/rxtimestamp.c @@ -329,8 +329,7 @@ int main(int argc, char **argv) bool all_tests = true; int arg_index = 0; int failures = 0; - int s, t; - char opt; + int s, t, opt; while ((opt = getopt_long(argc, argv, "", long_options, &arg_index)) != -1) { diff --git a/tools/testing/selftests/networking/timestamping/timestamping.c b/tools/testing/selftests/networking/timestamping/timestamping.c index aca3491174a1..f4bb4fef0f39 100644 --- a/tools/testing/selftests/networking/timestamping/timestamping.c +++ b/tools/testing/selftests/networking/timestamping/timestamping.c @@ -313,10 +313,16 @@ int main(int argc, char **argv) int val; socklen_t len; struct timeval next; + size_t if_len; if (argc < 2) usage(0); interface = argv[1]; + if_len = strlen(interface); + if (if_len >= IFNAMSIZ) { + printf("interface name exceeds IFNAMSIZ\n"); + exit(1); + } for (i = 2; i < argc; i++) { if (!strcasecmp(argv[i], "SO_TIMESTAMP")) @@ -350,12 +356,12 @@ int main(int argc, char **argv) bail("socket"); memset(&device, 0, sizeof(device)); - strncpy(device.ifr_name, interface, sizeof(device.ifr_name)); + memcpy(device.ifr_name, interface, if_len + 1); if (ioctl(sock, SIOCGIFADDR, &device) < 0) bail("getting interface IP address"); memset(&hwtstamp, 0, sizeof(hwtstamp)); - strncpy(hwtstamp.ifr_name, interface, 
sizeof(hwtstamp.ifr_name)); + memcpy(hwtstamp.ifr_name, interface, if_len + 1); hwtstamp.ifr_data = (void *)&hwconfig; memset(&hwconfig, 0, sizeof(hwconfig)); hwconfig.tx_type = diff --git a/tools/testing/selftests/ntb/ntb_test.sh b/tools/testing/selftests/ntb/ntb_test.sh index 9c60337317c6..020137b61407 100755 --- a/tools/testing/selftests/ntb/ntb_test.sh +++ b/tools/testing/selftests/ntb/ntb_test.sh @@ -241,7 +241,7 @@ function get_files_count() split_remote $LOC if [[ "$REMOTE" == "" ]]; then - echo $(ls -1 "$LOC"/${NAME}* 2>/dev/null | wc -l) + echo $(ls -1 "$VPATH"/${NAME}* 2>/dev/null | wc -l) else echo $(ssh "$REMOTE" "ls -1 \"$VPATH\"/${NAME}* | \ wc -l" 2> /dev/null) diff --git a/tools/testing/selftests/powerpc/benchmarks/context_switch.c b/tools/testing/selftests/powerpc/benchmarks/context_switch.c index a2e8c9da7fa5..d50cc05df495 100644 --- a/tools/testing/selftests/powerpc/benchmarks/context_switch.c +++ b/tools/testing/selftests/powerpc/benchmarks/context_switch.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include #include @@ -104,8 +105,9 @@ static void start_thread_on(void *(*fn)(void *), void *arg, unsigned long cpu) static void start_process_on(void *(*fn)(void *), void *arg, unsigned long cpu) { - int pid; - cpu_set_t cpuset; + int pid, ncpus; + cpu_set_t *cpuset; + size_t size; pid = fork(); if (pid == -1) { @@ -116,14 +118,23 @@ static void start_process_on(void *(*fn)(void *), void *arg, unsigned long cpu) if (pid) return; - CPU_ZERO(&cpuset); - CPU_SET(cpu, &cpuset); + ncpus = get_nprocs(); + size = CPU_ALLOC_SIZE(ncpus); + cpuset = CPU_ALLOC(ncpus); + if (!cpuset) { + perror("malloc"); + exit(1); + } + CPU_ZERO_S(size, cpuset); + CPU_SET_S(cpu, size, cpuset); - if (sched_setaffinity(0, sizeof(cpuset), &cpuset)) { + if (sched_setaffinity(0, size, cpuset)) { perror("sched_setaffinity"); + CPU_FREE(cpuset); exit(1); } + CPU_FREE(cpuset); fn(arg); exit(0); diff --git a/tools/testing/selftests/powerpc/eeh/eeh-functions.sh b/tools/testing/selftests/powerpc/eeh/eeh-functions.sh index f52ed92b53e7..00dc32c0ed75 100755 --- a/tools/testing/selftests/powerpc/eeh/eeh-functions.sh +++ b/tools/testing/selftests/powerpc/eeh/eeh-functions.sh @@ -5,12 +5,17 @@ pe_ok() { local dev="$1" local path="/sys/bus/pci/devices/$dev/eeh_pe_state" - if ! [ -e "$path" ] ; then + # if a driver doesn't support the error handling callbacks then the + # device is recovered by removing and re-probing it. This causes the + # sysfs directory to disappear so read the PE state once and squash + # any potential error messages + local eeh_state="$(cat $path 2>/dev/null)" + if [ -z "$eeh_state" ]; then return 1; fi - local fw_state="$(cut -d' ' -f1 < $path)" - local sw_state="$(cut -d' ' -f2 < $path)" + local fw_state="$(echo $eeh_state | cut -d' ' -f1)" + local sw_state="$(echo $eeh_state | cut -d' ' -f2)" # If EEH_PE_ISOLATED or EEH_PE_RECOVERING are set then the PE is in an # error state or being recovered. Either way, not ok. 
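The context_switch.c hunk above (and the powerpc utils.c hunk further below) replaces a fixed cpu_set_t with a dynamically sized CPU set, so affinity calls keep working on machines with more CPUs than CPU_SETSIZE. What follows is a minimal standalone userspace sketch of that pattern, assuming glibc's CPU_ALLOC family; the pin_to_cpu() helper name is an illustration only and is not part of the patch.

/*
 * Sketch: pin the current task to one CPU using a dynamically sized set
 * (CPU_ALLOC/CPU_SET_S) instead of a fixed cpu_set_t, as the selftest
 * changes above do. Error handling is deliberately minimal.
 */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/sysinfo.h>

static int pin_to_cpu(unsigned long cpu)
{
        int ncpus = get_nprocs_conf();       /* configured CPUs on this system */
        size_t size = CPU_ALLOC_SIZE(ncpus); /* bytes needed for a set that large */
        cpu_set_t *set = CPU_ALLOC(ncpus);

        if (!set) {
                perror("CPU_ALLOC");
                return -1;
        }

        CPU_ZERO_S(size, set);
        CPU_SET_S(cpu, size, set);

        if (sched_setaffinity(0, size, set)) {
                perror("sched_setaffinity");
                CPU_FREE(set);
                return -1;
        }

        CPU_FREE(set);
        return 0;
}

int main(void)
{
        return pin_to_cpu(0) ? EXIT_FAILURE : EXIT_SUCCESS;
}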
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/back_to_back_ebbs_test.c b/tools/testing/selftests/powerpc/pmu/ebb/back_to_back_ebbs_test.c index a2d7b0e3dca9..a26ac122c759 100644 --- a/tools/testing/selftests/powerpc/pmu/ebb/back_to_back_ebbs_test.c +++ b/tools/testing/selftests/powerpc/pmu/ebb/back_to_back_ebbs_test.c @@ -91,8 +91,6 @@ int back_to_back_ebbs(void) ebb_global_disable(); ebb_freeze_pmcs(); - count_pmc(1, sample_period); - dump_ebb_state(); event_close(&event); diff --git a/tools/testing/selftests/powerpc/pmu/ebb/cycles_test.c b/tools/testing/selftests/powerpc/pmu/ebb/cycles_test.c index bc893813483e..bb9f587fa76e 100644 --- a/tools/testing/selftests/powerpc/pmu/ebb/cycles_test.c +++ b/tools/testing/selftests/powerpc/pmu/ebb/cycles_test.c @@ -42,8 +42,6 @@ int cycles(void) ebb_global_disable(); ebb_freeze_pmcs(); - count_pmc(1, sample_period); - dump_ebb_state(); event_close(&event); diff --git a/tools/testing/selftests/powerpc/pmu/ebb/cycles_with_freeze_test.c b/tools/testing/selftests/powerpc/pmu/ebb/cycles_with_freeze_test.c index dcd351d20328..9ae795ce314e 100644 --- a/tools/testing/selftests/powerpc/pmu/ebb/cycles_with_freeze_test.c +++ b/tools/testing/selftests/powerpc/pmu/ebb/cycles_with_freeze_test.c @@ -99,8 +99,6 @@ int cycles_with_freeze(void) ebb_global_disable(); ebb_freeze_pmcs(); - count_pmc(1, sample_period); - dump_ebb_state(); printf("EBBs while frozen %d\n", ebbs_while_frozen); diff --git a/tools/testing/selftests/powerpc/pmu/ebb/cycles_with_mmcr2_test.c b/tools/testing/selftests/powerpc/pmu/ebb/cycles_with_mmcr2_test.c index 94c99c12c0f2..4b45a2e70f62 100644 --- a/tools/testing/selftests/powerpc/pmu/ebb/cycles_with_mmcr2_test.c +++ b/tools/testing/selftests/powerpc/pmu/ebb/cycles_with_mmcr2_test.c @@ -71,8 +71,6 @@ int cycles_with_mmcr2(void) ebb_global_disable(); ebb_freeze_pmcs(); - count_pmc(1, sample_period); - dump_ebb_state(); event_close(&event); diff --git a/tools/testing/selftests/powerpc/pmu/ebb/ebb.c b/tools/testing/selftests/powerpc/pmu/ebb/ebb.c index dfbc5c3ad52d..21537d6eb6b7 100644 --- a/tools/testing/selftests/powerpc/pmu/ebb/ebb.c +++ b/tools/testing/selftests/powerpc/pmu/ebb/ebb.c @@ -396,8 +396,6 @@ int ebb_child(union pipe read_pipe, union pipe write_pipe) ebb_global_disable(); ebb_freeze_pmcs(); - count_pmc(1, sample_period); - dump_ebb_state(); event_close(&event); diff --git a/tools/testing/selftests/powerpc/pmu/ebb/ebb_on_willing_child_test.c b/tools/testing/selftests/powerpc/pmu/ebb/ebb_on_willing_child_test.c index ca2f7d729155..b208bf6ad58d 100644 --- a/tools/testing/selftests/powerpc/pmu/ebb/ebb_on_willing_child_test.c +++ b/tools/testing/selftests/powerpc/pmu/ebb/ebb_on_willing_child_test.c @@ -38,8 +38,6 @@ static int victim_child(union pipe read_pipe, union pipe write_pipe) ebb_global_disable(); ebb_freeze_pmcs(); - count_pmc(1, sample_period); - dump_ebb_state(); FAIL_IF(ebb_state.stats.ebb_count == 0); diff --git a/tools/testing/selftests/powerpc/pmu/ebb/lost_exception_test.c b/tools/testing/selftests/powerpc/pmu/ebb/lost_exception_test.c index ac3e6e182614..ba2681a12cc7 100644 --- a/tools/testing/selftests/powerpc/pmu/ebb/lost_exception_test.c +++ b/tools/testing/selftests/powerpc/pmu/ebb/lost_exception_test.c @@ -75,7 +75,6 @@ static int test_body(void) ebb_freeze_pmcs(); ebb_global_disable(); - count_pmc(4, sample_period); mtspr(SPRN_PMC4, 0xdead); dump_summary_ebb_state(); diff --git a/tools/testing/selftests/powerpc/pmu/ebb/multi_counter_test.c b/tools/testing/selftests/powerpc/pmu/ebb/multi_counter_test.c index 
b8242e9d97d2..791d37ba327b 100644 --- a/tools/testing/selftests/powerpc/pmu/ebb/multi_counter_test.c +++ b/tools/testing/selftests/powerpc/pmu/ebb/multi_counter_test.c @@ -70,13 +70,6 @@ int multi_counter(void) ebb_global_disable(); ebb_freeze_pmcs(); - count_pmc(1, sample_period); - count_pmc(2, sample_period); - count_pmc(3, sample_period); - count_pmc(4, sample_period); - count_pmc(5, sample_period); - count_pmc(6, sample_period); - dump_ebb_state(); for (i = 0; i < 6; i++) diff --git a/tools/testing/selftests/powerpc/pmu/ebb/multi_ebb_procs_test.c b/tools/testing/selftests/powerpc/pmu/ebb/multi_ebb_procs_test.c index a05c0e18ded6..9b0f70d59702 100644 --- a/tools/testing/selftests/powerpc/pmu/ebb/multi_ebb_procs_test.c +++ b/tools/testing/selftests/powerpc/pmu/ebb/multi_ebb_procs_test.c @@ -61,8 +61,6 @@ static int cycles_child(void) ebb_global_disable(); ebb_freeze_pmcs(); - count_pmc(1, sample_period); - dump_summary_ebb_state(); event_close(&event); diff --git a/tools/testing/selftests/powerpc/pmu/ebb/pmae_handling_test.c b/tools/testing/selftests/powerpc/pmu/ebb/pmae_handling_test.c index 153ebc92234f..2904c741e04e 100644 --- a/tools/testing/selftests/powerpc/pmu/ebb/pmae_handling_test.c +++ b/tools/testing/selftests/powerpc/pmu/ebb/pmae_handling_test.c @@ -82,8 +82,6 @@ static int test_body(void) ebb_global_disable(); ebb_freeze_pmcs(); - count_pmc(1, sample_period); - dump_ebb_state(); if (mmcr0_mismatch) diff --git a/tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c b/tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c index eadad75ed7e6..b29f8ba22d1e 100644 --- a/tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c +++ b/tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c @@ -76,8 +76,6 @@ int pmc56_overflow(void) ebb_global_disable(); ebb_freeze_pmcs(); - count_pmc(2, sample_period); - dump_ebb_state(); printf("PMC5/6 overflow %d\n", pmc56_overflowed); diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-pkey.c b/tools/testing/selftests/powerpc/ptrace/ptrace-pkey.c index bdbbbe8431e0..3694613f418f 100644 --- a/tools/testing/selftests/powerpc/ptrace/ptrace-pkey.c +++ b/tools/testing/selftests/powerpc/ptrace/ptrace-pkey.c @@ -44,7 +44,7 @@ struct shared_info { unsigned long amr2; /* AMR value that ptrace should refuse to write to the child. */ - unsigned long amr3; + unsigned long invalid_amr; /* IAMR value the parent expects to read from the child. */ unsigned long expected_iamr; @@ -57,8 +57,8 @@ struct shared_info { * (even though they're valid ones) because userspace doesn't have * access to those registers. */ - unsigned long new_iamr; - unsigned long new_uamor; + unsigned long invalid_iamr; + unsigned long invalid_uamor; }; static int sys_pkey_alloc(unsigned long flags, unsigned long init_access_rights) @@ -66,11 +66,6 @@ static int sys_pkey_alloc(unsigned long flags, unsigned long init_access_rights) return syscall(__NR_pkey_alloc, flags, init_access_rights); } -static int sys_pkey_free(int pkey) -{ - return syscall(__NR_pkey_free, pkey); -} - static int child(struct shared_info *info) { unsigned long reg; @@ -100,28 +95,32 @@ static int child(struct shared_info *info) info->amr1 |= 3ul << pkeyshift(pkey1); info->amr2 |= 3ul << pkeyshift(pkey2); - info->amr3 |= info->amr2 | 3ul << pkeyshift(pkey3); + /* + * invalid amr value where we try to force write + * things which are denied by a uamor setting.
+ */ + info->invalid_amr = info->amr2 | (~0x0UL & ~info->expected_uamor); + /* + * if PKEY_DISABLE_EXECUTE succeeded we should update the expected_iamr + */ if (disable_execute) info->expected_iamr |= 1ul << pkeyshift(pkey1); else info->expected_iamr &= ~(1ul << pkeyshift(pkey1)); - info->expected_iamr &= ~(1ul << pkeyshift(pkey2) | 1ul << pkeyshift(pkey3)); - - info->expected_uamor |= 3ul << pkeyshift(pkey1) | - 3ul << pkeyshift(pkey2); - info->new_iamr |= 1ul << pkeyshift(pkey1) | 1ul << pkeyshift(pkey2); - info->new_uamor |= 3ul << pkeyshift(pkey1); + /* + * We allocated pkey2 and pkey 3 above. Clear the IAMR bits. + */ + info->expected_iamr &= ~(1ul << pkeyshift(pkey2)); + info->expected_iamr &= ~(1ul << pkeyshift(pkey3)); /* - * We won't use pkey3. We just want a plausible but invalid key to test - * whether ptrace will let us write to AMR bits we are not supposed to. - * - * This also tests whether the kernel restores the UAMOR permissions - * after a key is freed. + * Create an IAMR value different from expected value. + * Kernel will reject an IAMR and UAMOR change. */ - sys_pkey_free(pkey3); + info->invalid_iamr = info->expected_iamr | (1ul << pkeyshift(pkey1) | 1ul << pkeyshift(pkey2)); + info->invalid_uamor = info->expected_uamor & ~(0x3ul << pkeyshift(pkey1)); printf("%-30s AMR: %016lx pkey1: %d pkey2: %d pkey3: %d\n", user_write, info->amr1, pkey1, pkey2, pkey3); @@ -196,9 +195,9 @@ static int parent(struct shared_info *info, pid_t pid) PARENT_SKIP_IF_UNSUPPORTED(ret, &info->child_sync); PARENT_FAIL_IF(ret, &info->child_sync); - info->amr1 = info->amr2 = info->amr3 = regs[0]; - info->expected_iamr = info->new_iamr = regs[1]; - info->expected_uamor = info->new_uamor = regs[2]; + info->amr1 = info->amr2 = regs[0]; + info->expected_iamr = regs[1]; + info->expected_uamor = regs[2]; /* Wake up child so that it can set itself up. */ ret = prod_child(&info->child_sync); @@ -234,10 +233,10 @@ static int parent(struct shared_info *info, pid_t pid) return ret; /* Write invalid AMR value in child. */ - ret = ptrace_write_regs(pid, NT_PPC_PKEY, &info->amr3, 1); + ret = ptrace_write_regs(pid, NT_PPC_PKEY, &info->invalid_amr, 1); PARENT_FAIL_IF(ret, &info->child_sync); - printf("%-30s AMR: %016lx\n", ptrace_write_running, info->amr3); + printf("%-30s AMR: %016lx\n", ptrace_write_running, info->invalid_amr); /* Wake up child so that it can verify it didn't change. */ ret = prod_child(&info->child_sync); @@ -249,7 +248,7 @@ static int parent(struct shared_info *info, pid_t pid) /* Try to write to IAMR. */ regs[0] = info->amr1; - regs[1] = info->new_iamr; + regs[1] = info->invalid_iamr; ret = ptrace_write_regs(pid, NT_PPC_PKEY, regs, 2); PARENT_FAIL_IF(!ret, &info->child_sync); @@ -257,7 +256,7 @@ static int parent(struct shared_info *info, pid_t pid) ptrace_write_running, regs[0], regs[1]); /* Try to write to IAMR and UAMOR. 
*/ - regs[2] = info->new_uamor; + regs[2] = info->invalid_uamor; ret = ptrace_write_regs(pid, NT_PPC_PKEY, regs, 3); PARENT_FAIL_IF(!ret, &info->child_sync); diff --git a/tools/testing/selftests/powerpc/utils.c b/tools/testing/selftests/powerpc/utils.c index c02d24835db4..176102eca994 100644 --- a/tools/testing/selftests/powerpc/utils.c +++ b/tools/testing/selftests/powerpc/utils.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include #include @@ -88,28 +89,40 @@ void *get_auxv_entry(int type) int pick_online_cpu(void) { - cpu_set_t mask; - int cpu; + int ncpus, cpu = -1; + cpu_set_t *mask; + size_t size; - CPU_ZERO(&mask); - - if (sched_getaffinity(0, sizeof(mask), &mask)) { - perror("sched_getaffinity"); + ncpus = get_nprocs_conf(); + size = CPU_ALLOC_SIZE(ncpus); + mask = CPU_ALLOC(ncpus); + if (!mask) { + perror("malloc"); return -1; } + CPU_ZERO_S(size, mask); + + if (sched_getaffinity(0, size, mask)) { + perror("sched_getaffinity"); + goto done; + } + /* We prefer a primary thread, but skip 0 */ - for (cpu = 8; cpu < CPU_SETSIZE; cpu += 8) - if (CPU_ISSET(cpu, &mask)) - return cpu; + for (cpu = 8; cpu < ncpus; cpu += 8) + if (CPU_ISSET_S(cpu, size, mask)) + goto done; /* Search for anything, but in reverse */ - for (cpu = CPU_SETSIZE - 1; cpu >= 0; cpu--) - if (CPU_ISSET(cpu, &mask)) - return cpu; + for (cpu = ncpus - 1; cpu >= 0; cpu--) + if (CPU_ISSET_S(cpu, size, mask)) + goto done; printf("No cpus in affinity mask?!\n"); - return -1; + +done: + CPU_FREE(mask); + return cpu; } bool is_ppc64le(void) diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c index 96bbda4f10fc..19c7351eeb74 100644 --- a/tools/testing/selftests/seccomp/seccomp_bpf.c +++ b/tools/testing/selftests/seccomp/seccomp_bpf.c @@ -177,7 +177,7 @@ struct seccomp_metadata { #define SECCOMP_IOCTL_NOTIF_RECV SECCOMP_IOWR(0, struct seccomp_notif) #define SECCOMP_IOCTL_NOTIF_SEND SECCOMP_IOWR(1, \ struct seccomp_notif_resp) -#define SECCOMP_IOCTL_NOTIF_ID_VALID SECCOMP_IOR(2, __u64) +#define SECCOMP_IOCTL_NOTIF_ID_VALID SECCOMP_IOW(2, __u64) struct seccomp_notif { __u64 id; diff --git a/tools/testing/selftests/timers/Makefile b/tools/testing/selftests/timers/Makefile index 7656c7ce79d9..0e73a16874c4 100644 --- a/tools/testing/selftests/timers/Makefile +++ b/tools/testing/selftests/timers/Makefile @@ -13,6 +13,7 @@ DESTRUCTIVE_TESTS = alarmtimer-suspend valid-adjtimex adjtick change_skew \ TEST_GEN_PROGS_EXTENDED = $(DESTRUCTIVE_TESTS) +TEST_FILES := settings include ../lib.mk diff --git a/tools/testing/selftests/timers/settings b/tools/testing/selftests/timers/settings new file mode 100644 index 000000000000..e7b9417537fb --- /dev/null +++ b/tools/testing/selftests/timers/settings @@ -0,0 +1 @@ +timeout=0 diff --git a/tools/testing/selftests/tpm2/test_smoke.sh b/tools/testing/selftests/tpm2/test_smoke.sh index 80521d46220c..31fb8265f643 100755 --- a/tools/testing/selftests/tpm2/test_smoke.sh +++ b/tools/testing/selftests/tpm2/test_smoke.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/bin/sh # SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) python -m unittest -v tpm2_tests.SmokeTest diff --git a/tools/testing/selftests/tpm2/test_space.sh b/tools/testing/selftests/tpm2/test_space.sh index a6f5e346635e..3ded3011b642 100755 --- a/tools/testing/selftests/tpm2/test_space.sh +++ b/tools/testing/selftests/tpm2/test_space.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/bin/sh # SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) python -m unittest -v tpm2_tests.SpaceTest diff 
--git a/tools/testing/selftests/vm/map_hugetlb.c b/tools/testing/selftests/vm/map_hugetlb.c index 6af951900aa3..312889edb84a 100644 --- a/tools/testing/selftests/vm/map_hugetlb.c +++ b/tools/testing/selftests/vm/map_hugetlb.c @@ -83,7 +83,7 @@ int main(int argc, char **argv) } if (shift) - printf("%u kB hugepages\n", 1 << shift); + printf("%u kB hugepages\n", 1 << (shift - 10)); else printf("Default size hugepages\n"); printf("Mapping %lu Mbytes\n", (unsigned long)length >> 20); diff --git a/tools/testing/selftests/x86/protection_keys.c b/tools/testing/selftests/x86/protection_keys.c index 480995bceefa..47191af46617 100644 --- a/tools/testing/selftests/x86/protection_keys.c +++ b/tools/testing/selftests/x86/protection_keys.c @@ -24,6 +24,7 @@ #define _GNU_SOURCE #include #include +#include #include #include #include @@ -612,10 +613,10 @@ int alloc_random_pkey(void) int nr_alloced = 0; int random_index; memset(alloced_pkeys, 0, sizeof(alloced_pkeys)); + srand((unsigned int)time(NULL)); /* allocate every possible key and make a note of which ones we got */ max_nr_pkey_allocs = NR_PKEYS; - max_nr_pkey_allocs = 1; for (i = 0; i < max_nr_pkey_allocs; i++) { int new_pkey = alloc_pkey(); if (new_pkey < 0) diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c index 830e9143f9b2..d82b93493b57 100644 --- a/virt/kvm/arm/mmu.c +++ b/virt/kvm/arm/mmu.c @@ -332,7 +332,8 @@ static void unmap_stage2_puds(struct kvm *kvm, pgd_t *pgd, * destroying the VM), otherwise another faulting VCPU may come in and mess * with things behind our backs. */ -static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size) +static void __unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size, + bool may_block) { pgd_t *pgd; phys_addr_t addr = start, end = start + size; @@ -357,11 +358,16 @@ static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size) * If the range is too large, release the kvm->mmu_lock * to prevent starvation and lockup detector warnings. */ - if (next != end) + if (may_block && next != end) cond_resched_lock(&kvm->mmu_lock); } while (pgd++, addr = next, addr != end); } +static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size) +{ + __unmap_stage2_range(kvm, start, size, true); +} + static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr, phys_addr_t end) { @@ -1199,7 +1205,7 @@ static bool stage2_get_leaf_entry(struct kvm *kvm, phys_addr_t addr, return true; } -static bool stage2_is_exec(struct kvm *kvm, phys_addr_t addr) +static bool stage2_is_exec(struct kvm *kvm, phys_addr_t addr, unsigned long sz) { pud_t *pudp; pmd_t *pmdp; @@ -1211,11 +1217,11 @@ static bool stage2_is_exec(struct kvm *kvm, phys_addr_t addr) return false; if (pudp) - return kvm_s2pud_exec(pudp); + return sz <= PUD_SIZE && kvm_s2pud_exec(pudp); else if (pmdp) - return kvm_s2pmd_exec(pmdp); + return sz <= PMD_SIZE && kvm_s2pmd_exec(pmdp); else - return kvm_s2pte_exec(ptep); + return sz == PAGE_SIZE && kvm_s2pte_exec(ptep); } static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache, @@ -1851,9 +1857,15 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, * execute permissions, and we preserve whatever we have. */ needs_exec = exec_fault || - (fault_status == FSC_PERM && stage2_is_exec(kvm, fault_ipa)); + (fault_status == FSC_PERM && + stage2_is_exec(kvm, fault_ipa, vma_pagesize)); - if (vma_pagesize == PUD_SIZE) { + /* + * If PUD_SIZE == PMD_SIZE, there is no real PUD level, and + * all we have is a 2-level page table. 
Trying to map a PUD in + * this case would be fatally wrong. + */ + if (PUD_SIZE != PMD_SIZE && vma_pagesize == PUD_SIZE) { pud_t new_pud = kvm_pfn_pud(pfn, mem_type); new_pud = kvm_pud_mkhuge(new_pud); @@ -2090,18 +2102,21 @@ static int handle_hva_to_gpa(struct kvm *kvm, static int kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data) { - unmap_stage2_range(kvm, gpa, size); + unsigned flags = *(unsigned *)data; + bool may_block = flags & MMU_NOTIFIER_RANGE_BLOCKABLE; + + __unmap_stage2_range(kvm, gpa, size, may_block); return 0; } int kvm_unmap_hva_range(struct kvm *kvm, - unsigned long start, unsigned long end) + unsigned long start, unsigned long end, unsigned flags) { if (!kvm->arch.pgd) return 0; trace_kvm_unmap_hva_range(start, end); - handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL); + handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, &flags); return 0; } diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index d5d4cd581af3..4eed7fd8db93 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -425,7 +425,8 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn, * count is also read inside the mmu_lock critical section. */ kvm->mmu_notifier_count++; - need_tlb_flush = kvm_unmap_hva_range(kvm, range->start, range->end); + need_tlb_flush = kvm_unmap_hva_range(kvm, range->start, range->end, + range->flags); need_tlb_flush |= kvm->tlbs_dirty; /* we've to flush the tlb before the pages can be freed */ if (need_tlb_flush) @@ -4009,7 +4010,7 @@ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, struct kvm_io_device *dev) { - int i; + int i, j; struct kvm_io_bus *new_bus, *bus; bus = kvm_get_bus(kvm, bus_idx); @@ -4026,17 +4027,20 @@ void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, new_bus = kmalloc(struct_size(bus, range, bus->dev_count - 1), GFP_KERNEL_ACCOUNT); - if (!new_bus) { + if (new_bus) { + memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range)); + new_bus->dev_count--; + memcpy(new_bus->range + i, bus->range + i + 1, + (new_bus->dev_count - i) * sizeof(struct kvm_io_range)); + } else { pr_err("kvm: failed to shrink bus, removing it completely\n"); - goto broken; + for (j = 0; j < bus->dev_count; j++) { + if (j == i) + continue; + kvm_iodevice_destructor(bus->range[j].dev); + } } - memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range)); - new_bus->dev_count--; - memcpy(new_bus->range + i, bus->range + i + 1, - (new_bus->dev_count - i) * sizeof(struct kvm_io_range)); - -broken: rcu_assign_pointer(kvm->buses[bus_idx], new_bus); synchronize_srcu_expedited(&kvm->srcu); kfree(bus);
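The kvm_io_bus_unregister_dev() change above shrinks the bus by allocating a new array one element shorter, copying the entries in front of the removed slot and then the entries behind it, and only falls back to removing the bus completely (destroying every device still on it other than the one being unregistered) when that allocation fails. Below is a minimal userspace sketch of the same copy-around-the-removed-index pattern, assuming a plain int array stands in for struct kvm_io_range and using a hypothetical remove_at() helper; it is illustrative only and not the kernel implementation.

/*
 * Sketch: build a copy of an array with one element removed by copying the
 * prefix up to the removed index and then the suffix after it.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int *remove_at(const int *src, size_t count, size_t idx, size_t *new_count)
{
        int *dst;

        if (idx >= count)
                return NULL;

        dst = malloc((count - 1) * sizeof(*dst));
        if (!dst)
                return NULL;

        memcpy(dst, src, idx * sizeof(*dst));                  /* entries before idx */
        memcpy(dst + idx, src + idx + 1,
               (count - idx - 1) * sizeof(*dst));              /* entries after idx  */
        *new_count = count - 1;
        return dst;
}

int main(void)
{
        int bus[] = { 10, 20, 30, 40 };
        size_t n;
        int *shrunk = remove_at(bus, 4, 1, &n);

        if (!shrunk)
                return EXIT_FAILURE;
        for (size_t i = 0; i < n; i++)
                printf("%d ", shrunk[i]);    /* prints: 10 30 40 */
        printf("\n");
        free(shrunk);
        return EXIT_SUCCESS;
}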