diff --git a/Documentation/devicetree/bindings/sound/allwinner,sun4i-a10-spdif.yaml b/Documentation/devicetree/bindings/sound/allwinner,sun4i-a10-spdif.yaml index e0284d8c3b63..38d4cede0860 100644 --- a/Documentation/devicetree/bindings/sound/allwinner,sun4i-a10-spdif.yaml +++ b/Documentation/devicetree/bindings/sound/allwinner,sun4i-a10-spdif.yaml @@ -70,7 +70,9 @@ allOf: properties: compatible: contains: - const: allwinner,sun8i-h3-spdif + enum: + - allwinner,sun8i-h3-spdif + - allwinner,sun50i-h6-spdif then: properties: diff --git a/Documentation/sound/hd-audio/models.rst b/Documentation/sound/hd-audio/models.rst index 7d7c191102a7..11298f0ce44d 100644 --- a/Documentation/sound/hd-audio/models.rst +++ b/Documentation/sound/hd-audio/models.rst @@ -260,6 +260,9 @@ alc295-hp-x360 HP Spectre X360 fixups alc-sense-combo Headset button support for Chrome platform +huawei-mbx-stereo + Enable initialization verbs for Huawei MBX stereo speakers; + might be risky, try this at your own risk ALC66x/67x/892 ============== diff --git a/Makefile b/Makefile index a5f4e184b552..fa11c1d89acf 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 VERSION = 5 PATCHLEVEL = 3 -SUBLEVEL = 3 +SUBLEVEL = 4 EXTRAVERSION = NAME = Bobtail Squid diff --git a/arch/arm/boot/dts/am3517-evm.dts b/arch/arm/boot/dts/am3517-evm.dts index ebfe28c2f544..a1fd3e63e86e 100644 --- a/arch/arm/boot/dts/am3517-evm.dts +++ b/arch/arm/boot/dts/am3517-evm.dts @@ -124,10 +124,11 @@ }; lcd0: display@0 { - compatible = "panel-dpi"; + /* This isn't the exact LCD, but the timings meet spec */ + /* To make it work, set CONFIG_OMAP2_DSS_MIN_FCK_PER_PCK=4 */ + compatible = "newhaven,nhd-4.3-480272ef-atxl"; label = "15"; - status = "okay"; - pinctrl-names = "default"; + backlight = <&bl>; enable-gpios = <&gpio6 16 GPIO_ACTIVE_HIGH>; /* gpio176, lcd INI */ vcc-supply = <&vdd_io_reg>; @@ -136,22 +137,6 @@ remote-endpoint = <&dpi_out>; }; }; - - panel-timing { - clock-frequency = <9000000>; - hactive = <480>; - vactive = <272>; - hfront-porch = <3>; - hback-porch = <2>; - hsync-len = <42>; - vback-porch = <3>; - vfront-porch = <4>; - vsync-len = <11>; - hsync-active = <0>; - vsync-active = <0>; - de-active = <1>; - pixelclk-active = <1>; - }; }; bl: backlight { diff --git a/arch/arm/boot/dts/exynos5420-peach-pit.dts b/arch/arm/boot/dts/exynos5420-peach-pit.dts index f78db6809cca..9eb48cabcca4 100644 --- a/arch/arm/boot/dts/exynos5420-peach-pit.dts +++ b/arch/arm/boot/dts/exynos5420-peach-pit.dts @@ -440,6 +440,7 @@ regulator-name = "vdd_ldo10"; regulator-min-microvolt = <1800000>; regulator-max-microvolt = <1800000>; + regulator-always-on; regulator-state-mem { regulator-off-in-suspend; }; diff --git a/arch/arm/boot/dts/exynos5800-peach-pi.dts b/arch/arm/boot/dts/exynos5800-peach-pi.dts index e0f470fe54c8..4398f2d1fe88 100644 --- a/arch/arm/boot/dts/exynos5800-peach-pi.dts +++ b/arch/arm/boot/dts/exynos5800-peach-pi.dts @@ -440,6 +440,7 @@ regulator-name = "vdd_ldo10"; regulator-min-microvolt = <1800000>; regulator-max-microvolt = <1800000>; + regulator-always-on; regulator-state-mem { regulator-off-in-suspend; }; diff --git a/arch/arm/boot/dts/imx7-colibri.dtsi b/arch/arm/boot/dts/imx7-colibri.dtsi index 895fbde4d433..c1ed83131b49 100644 --- a/arch/arm/boot/dts/imx7-colibri.dtsi +++ b/arch/arm/boot/dts/imx7-colibri.dtsi @@ -323,6 +323,7 @@ vmmc-supply = <®_module_3v3>; vqmmc-supply = <®_DCDC3>; non-removable; + sdhci-caps-mask = <0x80000000 0x0>; }; &iomuxc { diff --git a/arch/arm/boot/dts/imx7d-cl-som-imx7.dts 
b/arch/arm/boot/dts/imx7d-cl-som-imx7.dts index e61567437d73..62d5e9a4a781 100644 --- a/arch/arm/boot/dts/imx7d-cl-som-imx7.dts +++ b/arch/arm/boot/dts/imx7d-cl-som-imx7.dts @@ -44,7 +44,7 @@ <&clks IMX7D_ENET1_TIME_ROOT_CLK>; assigned-clock-parents = <&clks IMX7D_PLL_ENET_MAIN_100M_CLK>; assigned-clock-rates = <0>, <100000000>; - phy-mode = "rgmii"; + phy-mode = "rgmii-id"; phy-handle = <ðphy0>; fsl,magic-packet; status = "okay"; @@ -70,7 +70,7 @@ <&clks IMX7D_ENET2_TIME_ROOT_CLK>; assigned-clock-parents = <&clks IMX7D_PLL_ENET_MAIN_100M_CLK>; assigned-clock-rates = <0>, <100000000>; - phy-mode = "rgmii"; + phy-mode = "rgmii-id"; phy-handle = <ðphy1>; fsl,magic-packet; status = "okay"; diff --git a/arch/arm/boot/dts/logicpd-torpedo-baseboard.dtsi b/arch/arm/boot/dts/logicpd-torpedo-baseboard.dtsi index 642e809e757a..449cc7616da6 100644 --- a/arch/arm/boot/dts/logicpd-torpedo-baseboard.dtsi +++ b/arch/arm/boot/dts/logicpd-torpedo-baseboard.dtsi @@ -108,7 +108,6 @@ &dss { status = "ok"; vdds_dsi-supply = <&vpll2>; - vdda_video-supply = <&video_reg>; pinctrl-names = "default"; pinctrl-0 = <&dss_dpi_pins1>; port { @@ -124,44 +123,20 @@ display0 = &lcd0; }; - video_reg: video_reg { - pinctrl-names = "default"; - pinctrl-0 = <&panel_pwr_pins>; - compatible = "regulator-fixed"; - regulator-name = "fixed-supply"; - regulator-min-microvolt = <3300000>; - regulator-max-microvolt = <3300000>; - gpio = <&gpio5 27 GPIO_ACTIVE_HIGH>; /* gpio155, lcd INI */ - }; - lcd0: display { - compatible = "panel-dpi"; + /* This isn't the exact LCD, but the timings meet spec */ + /* To make it work, set CONFIG_OMAP2_DSS_MIN_FCK_PER_PCK=4 */ + compatible = "newhaven,nhd-4.3-480272ef-atxl"; label = "15"; - status = "okay"; - /* default-on; */ pinctrl-names = "default"; - + pinctrl-0 = <&panel_pwr_pins>; + backlight = <&bl>; + enable-gpios = <&gpio5 27 GPIO_ACTIVE_HIGH>; port { lcd_in: endpoint { remote-endpoint = <&dpi_out>; }; }; - - panel-timing { - clock-frequency = <9000000>; - hactive = <480>; - vactive = <272>; - hfront-porch = <3>; - hback-porch = <2>; - hsync-len = <42>; - vback-porch = <3>; - vfront-porch = <4>; - vsync-len = <11>; - hsync-active = <0>; - vsync-active = <0>; - de-active = <1>; - pixelclk-active = <1>; - }; }; bl: backlight { diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig index c7bf9c493646..64eb896907bf 100644 --- a/arch/arm/configs/omap2plus_defconfig +++ b/arch/arm/configs/omap2plus_defconfig @@ -363,6 +363,7 @@ CONFIG_DRM_OMAP_PANEL_TPO_TD028TTEC1=m CONFIG_DRM_OMAP_PANEL_TPO_TD043MTEA1=m CONFIG_DRM_OMAP_PANEL_NEC_NL8048HL11=m CONFIG_DRM_TILCDC=m +CONFIG_DRM_PANEL_SIMPLE=m CONFIG_FB=y CONFIG_FIRMWARE_EDID=y CONFIG_FB_MODE_HELPERS=y diff --git a/arch/arm/mach-at91/.gitignore b/arch/arm/mach-at91/.gitignore new file mode 100644 index 000000000000..2ecd6f51c8a9 --- /dev/null +++ b/arch/arm/mach-at91/.gitignore @@ -0,0 +1 @@ +pm_data-offsets.h diff --git a/arch/arm/mach-at91/Makefile b/arch/arm/mach-at91/Makefile index 31b61f0e1c07..de64301dcff2 100644 --- a/arch/arm/mach-at91/Makefile +++ b/arch/arm/mach-at91/Makefile @@ -19,9 +19,10 @@ ifeq ($(CONFIG_PM_DEBUG),y) CFLAGS_pm.o += -DDEBUG endif -include/generated/at91_pm_data-offsets.h: arch/arm/mach-at91/pm_data-offsets.s FORCE +$(obj)/pm_data-offsets.h: $(obj)/pm_data-offsets.s FORCE $(call filechk,offsets,__PM_DATA_OFFSETS_H__) -arch/arm/mach-at91/pm_suspend.o: include/generated/at91_pm_data-offsets.h +$(obj)/pm_suspend.o: $(obj)/pm_data-offsets.h targets += pm_data-offsets.s +clean-files += 
pm_data-offsets.h diff --git a/arch/arm/mach-at91/pm_suspend.S b/arch/arm/mach-at91/pm_suspend.S index c751f047b116..ed57c879d4e1 100644 --- a/arch/arm/mach-at91/pm_suspend.S +++ b/arch/arm/mach-at91/pm_suspend.S @@ -10,7 +10,7 @@ #include <linux/linkage.h> #include <linux/clk/at91_pmc.h> #include "pm.h" -#include "generated/at91_pm_data-offsets.h" +#include "pm_data-offsets.h" #define SRAMC_SELF_FRESH_ACTIVE 0x01 #define SRAMC_SELF_FRESH_EXIT 0x00 diff --git a/arch/arm/mach-ep93xx/edb93xx.c b/arch/arm/mach-ep93xx/edb93xx.c index 1f0da76a39de..7b7280c21ee0 100644 --- a/arch/arm/mach-ep93xx/edb93xx.c +++ b/arch/arm/mach-ep93xx/edb93xx.c @@ -103,7 +103,7 @@ static struct spi_board_info edb93xx_spi_board_info[] __initdata = { }; static struct gpiod_lookup_table edb93xx_spi_cs_gpio_table = { - .dev_id = "ep93xx-spi.0", + .dev_id = "spi0", .table = { GPIO_LOOKUP("A", 6, "cs", GPIO_ACTIVE_LOW), { }, diff --git a/arch/arm/mach-ep93xx/simone.c b/arch/arm/mach-ep93xx/simone.c index e2658e22bba1..8a53b74dc4b2 100644 --- a/arch/arm/mach-ep93xx/simone.c +++ b/arch/arm/mach-ep93xx/simone.c @@ -73,7 +73,7 @@ static struct spi_board_info simone_spi_devices[] __initdata = { * v1.3 parts will still work, since the signal on SFRMOUT is automatic. */ static struct gpiod_lookup_table simone_spi_cs_gpio_table = { - .dev_id = "ep93xx-spi.0", + .dev_id = "spi0", .table = { GPIO_LOOKUP("A", 1, "cs", GPIO_ACTIVE_LOW), { }, diff --git a/arch/arm/mach-ep93xx/ts72xx.c b/arch/arm/mach-ep93xx/ts72xx.c index 582e06e104fd..e0e1b11032f1 100644 --- a/arch/arm/mach-ep93xx/ts72xx.c +++ b/arch/arm/mach-ep93xx/ts72xx.c @@ -267,7 +267,7 @@ static struct spi_board_info bk3_spi_board_info[] __initdata = { * goes through CPLD */ static struct gpiod_lookup_table bk3_spi_cs_gpio_table = { - .dev_id = "ep93xx-spi.0", + .dev_id = "spi0", .table = { GPIO_LOOKUP("F", 3, "cs", GPIO_ACTIVE_LOW), { }, @@ -316,7 +316,7 @@ static struct spi_board_info ts72xx_spi_devices[] __initdata = { }; static struct gpiod_lookup_table ts72xx_spi_cs_gpio_table = { - .dev_id = "ep93xx-spi.0", + .dev_id = "spi0", .table = { /* DIO_17 */ GPIO_LOOKUP("F", 2, "cs", GPIO_ACTIVE_LOW), diff --git a/arch/arm/mach-ep93xx/vision_ep9307.c b/arch/arm/mach-ep93xx/vision_ep9307.c index a88a1d807b32..cbcba3136d74 100644 --- a/arch/arm/mach-ep93xx/vision_ep9307.c +++ b/arch/arm/mach-ep93xx/vision_ep9307.c @@ -242,7 +242,7 @@ static struct spi_board_info vision_spi_board_info[] __initdata = { }; static struct gpiod_lookup_table vision_spi_cs_gpio_table = { - .dev_id = "ep93xx-spi.0", + .dev_id = "spi0", .table = { GPIO_LOOKUP_IDX("A", 6, "cs", 0, GPIO_ACTIVE_LOW), GPIO_LOOKUP_IDX("A", 7, "cs", 1, GPIO_ACTIVE_LOW), diff --git a/arch/arm/mach-omap2/.gitignore b/arch/arm/mach-omap2/.gitignore new file mode 100644 index 000000000000..79a8d6ea7152 --- /dev/null +++ b/arch/arm/mach-omap2/.gitignore @@ -0,0 +1 @@ +pm-asm-offsets.h diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile index 600650551621..21c6d4bca3c0 100644 --- a/arch/arm/mach-omap2/Makefile +++ b/arch/arm/mach-omap2/Makefile @@ -223,9 +223,10 @@ obj-y += omap_phy_internal.o obj-$(CONFIG_MACH_OMAP2_TUSB6010) += usb-tusb6010.o -include/generated/ti-pm-asm-offsets.h: arch/arm/mach-omap2/pm-asm-offsets.s FORCE +$(obj)/pm-asm-offsets.h: $(obj)/pm-asm-offsets.s FORCE $(call filechk,offsets,__TI_PM_ASM_OFFSETS_H__) -$(obj)/sleep33xx.o $(obj)/sleep43xx.o: include/generated/ti-pm-asm-offsets.h +$(obj)/sleep33xx.o $(obj)/sleep43xx.o: $(obj)/pm-asm-offsets.h targets += pm-asm-offsets.s +clean-files += 
pm-asm-offsets.h diff --git a/arch/arm/mach-omap2/sleep33xx.S b/arch/arm/mach-omap2/sleep33xx.S index 68fee339d3f1..dc221249bc22 100644 --- a/arch/arm/mach-omap2/sleep33xx.S +++ b/arch/arm/mach-omap2/sleep33xx.S @@ -6,7 +6,6 @@ * Dave Gerlach, Vaibhav Bedia */ -#include <generated/ti-pm-asm-offsets.h> #include <linux/linkage.h> #include <linux/platform_data/pm33xx.h> #include <linux/ti-emif-sram.h> @@ -15,6 +14,7 @@ #include "iomap.h" #include "cm33xx.h" +#include "pm-asm-offsets.h" #define AM33XX_CM_CLKCTRL_MODULESTATE_DISABLED 0x00030000 #define AM33XX_CM_CLKCTRL_MODULEMODE_DISABLE 0x0003 diff --git a/arch/arm/mach-omap2/sleep43xx.S b/arch/arm/mach-omap2/sleep43xx.S index c1f4e4852644..90d2907a2eb2 100644 --- a/arch/arm/mach-omap2/sleep43xx.S +++ b/arch/arm/mach-omap2/sleep43xx.S @@ -6,7 +6,6 @@ * Dave Gerlach, Vaibhav Bedia */ -#include <generated/ti-pm-asm-offsets.h> #include <linux/linkage.h> #include <linux/ti-emif-sram.h> #include <linux/platform_data/pm33xx.h> @@ -19,6 +18,7 @@ #include "iomap.h" #include "omap-secure.h" #include "omap44xx.h" +#include "pm-asm-offsets.h" #include "prm33xx.h" #include "prcm43xx.h" diff --git a/arch/arm/mach-zynq/platsmp.c b/arch/arm/mach-zynq/platsmp.c index a7cfe07156f4..e65ee8180c35 100644 --- a/arch/arm/mach-zynq/platsmp.c +++ b/arch/arm/mach-zynq/platsmp.c @@ -57,7 +57,7 @@ int zynq_cpun_start(u32 address, int cpu) * 0x4: Jump by mov instruction * 0x8: Jumping address */ - memcpy((__force void *)zero, &zynq_secondary_trampoline, + memcpy_toio(zero, &zynq_secondary_trampoline, trampoline_size); writel(address, zero + trampoline_size); diff --git a/arch/arm/mm/copypage-xscale.c b/arch/arm/mm/copypage-xscale.c index 61d834157bc0..382e1c2855e8 100644 --- a/arch/arm/mm/copypage-xscale.c +++ b/arch/arm/mm/copypage-xscale.c @@ -42,6 +42,7 @@ static void mc_copy_user_page(void *from, void *to) * when prefetching destination as well. 
(NP) */ asm volatile ("\ +.arch xscale \n\ pld [%0, #0] \n\ pld [%0, #32] \n\ pld [%1, #0] \n\ @@ -106,8 +107,9 @@ void xscale_mc_clear_user_highpage(struct page *page, unsigned long vaddr) { void *ptr, *kaddr = kmap_atomic(page); - asm volatile( - "mov r1, %2 \n\ + asm volatile("\ +.arch xscale \n\ + mov r1, %2 \n\ mov r2, #0 \n\ mov r3, #0 \n\ 1: mov ip, %0 \n\ diff --git a/arch/arm/plat-samsung/watchdog-reset.c b/arch/arm/plat-samsung/watchdog-reset.c index ce42cc640a61..71d85ff323f7 100644 --- a/arch/arm/plat-samsung/watchdog-reset.c +++ b/arch/arm/plat-samsung/watchdog-reset.c @@ -62,6 +62,7 @@ void samsung_wdt_reset(void) #ifdef CONFIG_OF static const struct of_device_id s3c2410_wdt_match[] = { { .compatible = "samsung,s3c2410-wdt" }, + { .compatible = "samsung,s3c6410-wdt" }, {}, }; diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b-odroid-n2.dts b/arch/arm64/boot/dts/amlogic/meson-g12b-odroid-n2.dts index 4e916e1f71f7..1c2a9ca491c0 100644 --- a/arch/arm64/boot/dts/amlogic/meson-g12b-odroid-n2.dts +++ b/arch/arm64/boot/dts/amlogic/meson-g12b-odroid-n2.dts @@ -66,8 +66,8 @@ gpios = <&gpio_ao GPIOAO_9 GPIO_ACTIVE_HIGH>; gpios-states = <0>; - states = <3300000 0 - 1800000 1>; + states = <3300000 0>, + <1800000 1>; }; flash_1v8: regulator-flash_1v8 { diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts index b636912a2715..afcf8a9f667b 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts @@ -75,8 +75,8 @@ gpios-states = <1>; /* Based on P200 schematics, signal CARD_1.8V/3.3V_CTR */ - states = <1800000 0 - 3300000 1>; + states = <1800000 0>, + <3300000 1>; }; vddio_boot: regulator-vddio_boot { diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts index 9972b1515da6..6039adda12ee 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts @@ -77,8 +77,8 @@ gpios = <&gpio_ao GPIOAO_3 GPIO_ACTIVE_HIGH>; gpios-states = <0>; - states = <3300000 0 - 1800000 1>; + states = <3300000 0>, + <1800000 1>; }; vcc1v8: regulator-vcc1v8 { diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi index e8f925871edf..89f7b41b0e9e 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi @@ -46,8 +46,8 @@ gpios-states = <1>; /* Based on P200 schematics, signal CARD_1.8V/3.3V_CTR */ - states = <1800000 0 - 3300000 1>; + states = <1800000 0>, + <3300000 1>; regulator-settling-time-up-us = <10000>; regulator-settling-time-down-us = <150000>; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-hwacom-amazetv.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-hwacom-amazetv.dts index 796baea7a0bf..c8d74e61dec1 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-hwacom-amazetv.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-hwacom-amazetv.dts @@ -38,8 +38,8 @@ gpios-states = <1>; /* Based on P200 schematics, signal CARD_1.8V/3.3V_CTR */ - states = <1800000 0 - 3300000 1>; + states = <1800000 0>, + <3300000 1>; }; vddio_boot: regulator-vddio_boot { diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts index 26907ac82930..c433a031841f 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts +++ 
b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts @@ -38,8 +38,8 @@ gpios-states = <1>; /* Based on P200 schematics, signal CARD_1.8V/3.3V_CTR */ - states = <1800000 0 - 3300000 1>; + states = <1800000 0>, + <3300000 1>; }; vddio_boot: regulator-vddio_boot { diff --git a/arch/arm64/boot/dts/freescale/imx8mq.dtsi b/arch/arm64/boot/dts/freescale/imx8mq.dtsi index 52aae341d0da..d1f4eb197af2 100644 --- a/arch/arm64/boot/dts/freescale/imx8mq.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8mq.dtsi @@ -169,15 +169,14 @@ opp-1300000000 { opp-hz = /bits/ 64 <1300000000>; opp-microvolt = <1000000>; - opp-supported-hw = <0xc>, <0x7>; + opp-supported-hw = <0xc>, <0x4>; clock-latency-ns = <150000>; }; opp-1500000000 { opp-hz = /bits/ 64 <1500000000>; opp-microvolt = <1000000>; - /* Consumer only but rely on speed grading */ - opp-supported-hw = <0x8>, <0x7>; + opp-supported-hw = <0x8>, <0x3>; clock-latency-ns = <150000>; }; }; diff --git a/arch/arm64/boot/dts/qcom/qcs404-evb.dtsi b/arch/arm64/boot/dts/qcom/qcs404-evb.dtsi index 11c0a7137823..db6df76e97a1 100644 --- a/arch/arm64/boot/dts/qcom/qcs404-evb.dtsi +++ b/arch/arm64/boot/dts/qcom/qcs404-evb.dtsi @@ -61,7 +61,9 @@ protected-clocks = <GCC_BIMC_CDSP_CLK>, <GCC_CDSP_CFG_AHB_CLK>, <GCC_CDSP_BIMC_CLK_SRC>, - <GCC_CDSP_TBU_CLK>; + <GCC_CDSP_TBU_CLK>, + <141>, /* GCC_WCSS_Q6_AHB_CLK */ + <142>; /* GCC_WCSS_Q6_AXIM_CLK */ }; &pms405_spmi_regulators { diff --git a/arch/arm64/boot/dts/rockchip/rk3328.dtsi b/arch/arm64/boot/dts/rockchip/rk3328.dtsi index e9fefd8a7e02..f0f2c555033b 100644 --- a/arch/arm64/boot/dts/rockchip/rk3328.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3328.dtsi @@ -801,6 +801,7 @@ <&cru SCLK_SDMMC_DRV>, <&cru SCLK_SDMMC_SAMPLE>; clock-names = "biu", "ciu", "ciu-drive", "ciu-sample"; fifo-depth = <0x100>; + max-frequency = <150000000>; status = "disabled"; }; @@ -812,6 +813,7 @@ <&cru SCLK_SDIO_DRV>, <&cru SCLK_SDIO_SAMPLE>; clock-names = "biu", "ciu", "ciu-drive", "ciu-sample"; fifo-depth = <0x100>; + max-frequency = <150000000>; status = "disabled"; }; @@ -823,6 +825,7 @@ <&cru SCLK_EMMC_DRV>, <&cru SCLK_EMMC_SAMPLE>; clock-names = "biu", "ciu", "ciu-drive", "ciu-sample"; fifo-depth = <0x100>; + max-frequency = <150000000>; status = "disabled"; }; diff --git a/arch/arm64/include/asm/atomic_ll_sc.h b/arch/arm64/include/asm/atomic_ll_sc.h index c8c850bc3dfb..6dd011e0b434 100644 --- a/arch/arm64/include/asm/atomic_ll_sc.h +++ b/arch/arm64/include/asm/atomic_ll_sc.h @@ -26,7 +26,7 @@ * (the optimize attribute silently ignores these options). 
*/ -#define ATOMIC_OP(op, asm_op) \ +#define ATOMIC_OP(op, asm_op, constraint) \ __LL_SC_INLINE void \ __LL_SC_PREFIX(arch_atomic_##op(int i, atomic_t *v)) \ { \ @@ -40,11 +40,11 @@ __LL_SC_PREFIX(arch_atomic_##op(int i, atomic_t *v)) \ " stxr %w1, %w0, %2\n" \ " cbnz %w1, 1b" \ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \ - : "Ir" (i)); \ + : #constraint "r" (i)); \ } \ __LL_SC_EXPORT(arch_atomic_##op); -#define ATOMIC_OP_RETURN(name, mb, acq, rel, cl, op, asm_op) \ +#define ATOMIC_OP_RETURN(name, mb, acq, rel, cl, op, asm_op, constraint)\ __LL_SC_INLINE int \ __LL_SC_PREFIX(arch_atomic_##op##_return##name(int i, atomic_t *v)) \ { \ @@ -59,14 +59,14 @@ __LL_SC_PREFIX(arch_atomic_##op##_return##name(int i, atomic_t *v)) \ " cbnz %w1, 1b\n" \ " " #mb \ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \ - : "Ir" (i) \ + : #constraint "r" (i) \ : cl); \ \ return result; \ } \ __LL_SC_EXPORT(arch_atomic_##op##_return##name); -#define ATOMIC_FETCH_OP(name, mb, acq, rel, cl, op, asm_op) \ +#define ATOMIC_FETCH_OP(name, mb, acq, rel, cl, op, asm_op, constraint) \ __LL_SC_INLINE int \ __LL_SC_PREFIX(arch_atomic_fetch_##op##name(int i, atomic_t *v)) \ { \ @@ -81,7 +81,7 @@ __LL_SC_PREFIX(arch_atomic_fetch_##op##name(int i, atomic_t *v)) \ " cbnz %w2, 1b\n" \ " " #mb \ : "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter) \ - : "Ir" (i) \ + : #constraint "r" (i) \ : cl); \ \ return result; \ @@ -99,8 +99,8 @@ __LL_SC_EXPORT(arch_atomic_fetch_##op##name); ATOMIC_FETCH_OP (_acquire, , a, , "memory", __VA_ARGS__)\ ATOMIC_FETCH_OP (_release, , , l, "memory", __VA_ARGS__) -ATOMIC_OPS(add, add) -ATOMIC_OPS(sub, sub) +ATOMIC_OPS(add, add, I) +ATOMIC_OPS(sub, sub, J) #undef ATOMIC_OPS #define ATOMIC_OPS(...) \ @@ -110,17 +110,17 @@ ATOMIC_OPS(sub, sub) ATOMIC_FETCH_OP (_acquire, , a, , "memory", __VA_ARGS__)\ ATOMIC_FETCH_OP (_release, , , l, "memory", __VA_ARGS__) -ATOMIC_OPS(and, and) -ATOMIC_OPS(andnot, bic) -ATOMIC_OPS(or, orr) -ATOMIC_OPS(xor, eor) +ATOMIC_OPS(and, and, ) +ATOMIC_OPS(andnot, bic, ) +ATOMIC_OPS(or, orr, ) +ATOMIC_OPS(xor, eor, ) #undef ATOMIC_OPS #undef ATOMIC_FETCH_OP #undef ATOMIC_OP_RETURN #undef ATOMIC_OP -#define ATOMIC64_OP(op, asm_op) \ +#define ATOMIC64_OP(op, asm_op, constraint) \ __LL_SC_INLINE void \ __LL_SC_PREFIX(arch_atomic64_##op(s64 i, atomic64_t *v)) \ { \ @@ -134,11 +134,11 @@ __LL_SC_PREFIX(arch_atomic64_##op(s64 i, atomic64_t *v)) \ " stxr %w1, %0, %2\n" \ " cbnz %w1, 1b" \ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \ - : "Ir" (i)); \ + : #constraint "r" (i)); \ } \ __LL_SC_EXPORT(arch_atomic64_##op); -#define ATOMIC64_OP_RETURN(name, mb, acq, rel, cl, op, asm_op) \ +#define ATOMIC64_OP_RETURN(name, mb, acq, rel, cl, op, asm_op, constraint)\ __LL_SC_INLINE s64 \ __LL_SC_PREFIX(arch_atomic64_##op##_return##name(s64 i, atomic64_t *v))\ { \ @@ -153,14 +153,14 @@ __LL_SC_PREFIX(arch_atomic64_##op##_return##name(s64 i, atomic64_t *v))\ " cbnz %w1, 1b\n" \ " " #mb \ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \ - : "Ir" (i) \ + : #constraint "r" (i) \ : cl); \ \ return result; \ } \ __LL_SC_EXPORT(arch_atomic64_##op##_return##name); -#define ATOMIC64_FETCH_OP(name, mb, acq, rel, cl, op, asm_op) \ +#define ATOMIC64_FETCH_OP(name, mb, acq, rel, cl, op, asm_op, constraint)\ __LL_SC_INLINE s64 \ __LL_SC_PREFIX(arch_atomic64_fetch_##op##name(s64 i, atomic64_t *v)) \ { \ @@ -175,7 +175,7 @@ __LL_SC_PREFIX(arch_atomic64_fetch_##op##name(s64 i, atomic64_t *v)) \ " cbnz %w2, 1b\n" \ " " #mb \ : "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter) \ - 
: "Ir" (i) \ + : #constraint "r" (i) \ : cl); \ \ return result; \ @@ -193,8 +193,8 @@ __LL_SC_EXPORT(arch_atomic64_fetch_##op##name); ATOMIC64_FETCH_OP (_acquire,, a, , "memory", __VA_ARGS__) \ ATOMIC64_FETCH_OP (_release,, , l, "memory", __VA_ARGS__) -ATOMIC64_OPS(add, add) -ATOMIC64_OPS(sub, sub) +ATOMIC64_OPS(add, add, I) +ATOMIC64_OPS(sub, sub, J) #undef ATOMIC64_OPS #define ATOMIC64_OPS(...) \ @@ -204,10 +204,10 @@ ATOMIC64_OPS(sub, sub) ATOMIC64_FETCH_OP (_acquire,, a, , "memory", __VA_ARGS__) \ ATOMIC64_FETCH_OP (_release,, , l, "memory", __VA_ARGS__) -ATOMIC64_OPS(and, and) -ATOMIC64_OPS(andnot, bic) -ATOMIC64_OPS(or, orr) -ATOMIC64_OPS(xor, eor) +ATOMIC64_OPS(and, and, L) +ATOMIC64_OPS(andnot, bic, ) +ATOMIC64_OPS(or, orr, L) +ATOMIC64_OPS(xor, eor, L) #undef ATOMIC64_OPS #undef ATOMIC64_FETCH_OP @@ -237,7 +237,7 @@ __LL_SC_PREFIX(arch_atomic64_dec_if_positive(atomic64_t *v)) } __LL_SC_EXPORT(arch_atomic64_dec_if_positive); -#define __CMPXCHG_CASE(w, sfx, name, sz, mb, acq, rel, cl) \ +#define __CMPXCHG_CASE(w, sfx, name, sz, mb, acq, rel, cl, constraint) \ __LL_SC_INLINE u##sz \ __LL_SC_PREFIX(__cmpxchg_case_##name##sz(volatile void *ptr, \ unsigned long old, \ @@ -265,29 +265,34 @@ __LL_SC_PREFIX(__cmpxchg_case_##name##sz(volatile void *ptr, \ "2:" \ : [tmp] "=&r" (tmp), [oldval] "=&r" (oldval), \ [v] "+Q" (*(u##sz *)ptr) \ - : [old] "Kr" (old), [new] "r" (new) \ + : [old] #constraint "r" (old), [new] "r" (new) \ : cl); \ \ return oldval; \ } \ __LL_SC_EXPORT(__cmpxchg_case_##name##sz); -__CMPXCHG_CASE(w, b, , 8, , , , ) -__CMPXCHG_CASE(w, h, , 16, , , , ) -__CMPXCHG_CASE(w, , , 32, , , , ) -__CMPXCHG_CASE( , , , 64, , , , ) -__CMPXCHG_CASE(w, b, acq_, 8, , a, , "memory") -__CMPXCHG_CASE(w, h, acq_, 16, , a, , "memory") -__CMPXCHG_CASE(w, , acq_, 32, , a, , "memory") -__CMPXCHG_CASE( , , acq_, 64, , a, , "memory") -__CMPXCHG_CASE(w, b, rel_, 8, , , l, "memory") -__CMPXCHG_CASE(w, h, rel_, 16, , , l, "memory") -__CMPXCHG_CASE(w, , rel_, 32, , , l, "memory") -__CMPXCHG_CASE( , , rel_, 64, , , l, "memory") -__CMPXCHG_CASE(w, b, mb_, 8, dmb ish, , l, "memory") -__CMPXCHG_CASE(w, h, mb_, 16, dmb ish, , l, "memory") -__CMPXCHG_CASE(w, , mb_, 32, dmb ish, , l, "memory") -__CMPXCHG_CASE( , , mb_, 64, dmb ish, , l, "memory") +/* + * Earlier versions of GCC (no later than 8.1.0) appear to incorrectly + * handle the 'K' constraint for the value 4294967295 - thus we use no + * constraint for 32 bit operations. 
+ */ +__CMPXCHG_CASE(w, b, , 8, , , , , ) +__CMPXCHG_CASE(w, h, , 16, , , , , ) +__CMPXCHG_CASE(w, , , 32, , , , , ) +__CMPXCHG_CASE( , , , 64, , , , , L) +__CMPXCHG_CASE(w, b, acq_, 8, , a, , "memory", ) +__CMPXCHG_CASE(w, h, acq_, 16, , a, , "memory", ) +__CMPXCHG_CASE(w, , acq_, 32, , a, , "memory", ) +__CMPXCHG_CASE( , , acq_, 64, , a, , "memory", L) +__CMPXCHG_CASE(w, b, rel_, 8, , , l, "memory", ) +__CMPXCHG_CASE(w, h, rel_, 16, , , l, "memory", ) +__CMPXCHG_CASE(w, , rel_, 32, , , l, "memory", ) +__CMPXCHG_CASE( , , rel_, 64, , , l, "memory", L) +__CMPXCHG_CASE(w, b, mb_, 8, dmb ish, , l, "memory", ) +__CMPXCHG_CASE(w, h, mb_, 16, dmb ish, , l, "memory", ) +__CMPXCHG_CASE(w, , mb_, 32, dmb ish, , l, "memory", ) +__CMPXCHG_CASE( , , mb_, 64, dmb ish, , l, "memory", L) #undef __CMPXCHG_CASE diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h index e7d46631cc42..b1454d117cd2 100644 --- a/arch/arm64/include/asm/cputype.h +++ b/arch/arm64/include/asm/cputype.h @@ -51,14 +51,6 @@ #define MIDR_CPU_MODEL_MASK (MIDR_IMPLEMENTOR_MASK | MIDR_PARTNUM_MASK | \ MIDR_ARCHITECTURE_MASK) -#define MIDR_IS_CPU_MODEL_RANGE(midr, model, rv_min, rv_max) \ -({ \ - u32 _model = (midr) & MIDR_CPU_MODEL_MASK; \ - u32 rv = (midr) & (MIDR_REVISION_MASK | MIDR_VARIANT_MASK); \ - \ - _model == (model) && rv >= (rv_min) && rv <= (rv_max); \ - }) - #define ARM_CPU_IMP_ARM 0x41 #define ARM_CPU_IMP_APM 0x50 #define ARM_CPU_IMP_CAVIUM 0x43 @@ -159,10 +151,19 @@ struct midr_range { #define MIDR_REV(m, v, r) MIDR_RANGE(m, v, r, v, r) #define MIDR_ALL_VERSIONS(m) MIDR_RANGE(m, 0, 0, 0xf, 0xf) +static inline bool midr_is_cpu_model_range(u32 midr, u32 model, u32 rv_min, + u32 rv_max) +{ + u32 _model = midr & MIDR_CPU_MODEL_MASK; + u32 rv = midr & (MIDR_REVISION_MASK | MIDR_VARIANT_MASK); + + return _model == model && rv >= rv_min && rv <= rv_max; +} + static inline bool is_midr_in_range(u32 midr, struct midr_range const *range) { - return MIDR_IS_CPU_MODEL_RANGE(midr, range->model, - range->rv_min, range->rv_max); + return midr_is_cpu_model_range(midr, range->model, + range->rv_min, range->rv_max); } static inline bool diff --git a/arch/arm64/include/asm/exception.h b/arch/arm64/include/asm/exception.h index ed57b760f38c..a17393ff6677 100644 --- a/arch/arm64/include/asm/exception.h +++ b/arch/arm64/include/asm/exception.h @@ -30,4 +30,6 @@ static inline u32 disr_to_esr(u64 disr) return esr; } +asmlinkage void enter_from_user_mode(void); + #endif /* __ASM_EXCEPTION_H */ diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h index 8af7a85f76bd..bc3949064725 100644 --- a/arch/arm64/include/asm/tlbflush.h +++ b/arch/arm64/include/asm/tlbflush.h @@ -251,6 +251,7 @@ static inline void __flush_tlb_kernel_pgtable(unsigned long kaddr) dsb(ishst); __tlbi(vaae1is, addr); dsb(ish); + isb(); } #endif diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index b1fdc486aed8..9323bcc40a58 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -894,7 +894,7 @@ static bool has_no_hw_prefetch(const struct arm64_cpu_capabilities *entry, int _ u32 midr = read_cpuid_id(); /* Cavium ThunderX pass 1.x and 2.x */ - return MIDR_IS_CPU_MODEL_RANGE(midr, MIDR_THUNDERX, + return midr_is_cpu_model_range(midr, MIDR_THUNDERX, MIDR_CPU_VAR_REV(0, 0), MIDR_CPU_VAR_REV(1, MIDR_REVISION_MASK)); } diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index 320a30dbe35e..84a822748c84 100644 --- a/arch/arm64/kernel/entry.S +++ 
b/arch/arm64/kernel/entry.S @@ -30,9 +30,9 @@ * Context tracking subsystem. Used to instrument transitions * between user and kernel mode. */ - .macro ct_user_exit + .macro ct_user_exit_irqoff #ifdef CONFIG_CONTEXT_TRACKING - bl context_tracking_user_exit + bl enter_from_user_mode #endif .endm @@ -792,8 +792,8 @@ el0_cp15: /* * Trapped CP15 (MRC, MCR, MRRC, MCRR) instructions */ + ct_user_exit_irqoff enable_daif - ct_user_exit mov x0, x25 mov x1, sp bl do_cp15instr @@ -805,8 +805,8 @@ el0_da: * Data abort handling */ mrs x26, far_el1 + ct_user_exit_irqoff enable_daif - ct_user_exit clear_address_tag x0, x26 mov x1, x25 mov x2, sp @@ -818,11 +818,11 @@ el0_ia: */ mrs x26, far_el1 gic_prio_kentry_setup tmp=x0 + ct_user_exit_irqoff enable_da_f #ifdef CONFIG_TRACE_IRQFLAGS bl trace_hardirqs_off #endif - ct_user_exit mov x0, x26 mov x1, x25 mov x2, sp @@ -832,8 +832,8 @@ el0_fpsimd_acc: /* * Floating Point or Advanced SIMD access */ + ct_user_exit_irqoff enable_daif - ct_user_exit mov x0, x25 mov x1, sp bl do_fpsimd_acc @@ -842,8 +842,8 @@ el0_sve_acc: /* * Scalable Vector Extension access */ + ct_user_exit_irqoff enable_daif - ct_user_exit mov x0, x25 mov x1, sp bl do_sve_acc @@ -852,8 +852,8 @@ el0_fpsimd_exc: /* * Floating Point, Advanced SIMD or SVE exception */ + ct_user_exit_irqoff enable_daif - ct_user_exit mov x0, x25 mov x1, sp bl do_fpsimd_exc @@ -868,11 +868,11 @@ el0_sp_pc: * Stack or PC alignment exception handling */ gic_prio_kentry_setup tmp=x0 + ct_user_exit_irqoff enable_da_f #ifdef CONFIG_TRACE_IRQFLAGS bl trace_hardirqs_off #endif - ct_user_exit mov x0, x26 mov x1, x25 mov x2, sp @@ -882,8 +882,8 @@ el0_undef: /* * Undefined instruction */ + ct_user_exit_irqoff enable_daif - ct_user_exit mov x0, sp bl do_undefinstr b ret_to_user @@ -891,8 +891,8 @@ el0_sys: /* * System instructions, for trapped cache maintenance instructions */ + ct_user_exit_irqoff enable_daif - ct_user_exit mov x0, x25 mov x1, sp bl do_sysinstr @@ -902,17 +902,18 @@ el0_dbg: * Debug exception handling */ tbnz x24, #0, el0_inv // EL0 only + mrs x24, far_el1 gic_prio_kentry_setup tmp=x3 - mrs x0, far_el1 + ct_user_exit_irqoff + mov x0, x24 mov x1, x25 mov x2, sp bl do_debug_exception enable_da_f - ct_user_exit b ret_to_user el0_inv: + ct_user_exit_irqoff enable_daif - ct_user_exit mov x0, sp mov x1, #BAD_SYNC mov x2, x25 @@ -925,13 +926,13 @@ el0_irq: kernel_entry 0 el0_irq_naked: gic_prio_irq_setup pmr=x20, tmp=x0 + ct_user_exit_irqoff enable_da_f #ifdef CONFIG_TRACE_IRQFLAGS bl trace_hardirqs_off #endif - ct_user_exit #ifdef CONFIG_HARDEN_BRANCH_PREDICTOR tbz x22, #55, 1f bl do_el0_irq_bp_hardening @@ -958,13 +959,14 @@ ENDPROC(el1_error) el0_error: kernel_entry 0 el0_error_naked: - mrs x1, esr_el1 + mrs x25, esr_el1 gic_prio_kentry_setup tmp=x2 + ct_user_exit_irqoff enable_dbg mov x0, sp + mov x1, x25 bl do_serror enable_da_f - ct_user_exit b ret_to_user ENDPROC(el0_error) diff --git a/arch/arm64/kernel/image-vars.h b/arch/arm64/kernel/image-vars.h new file mode 100644 index 000000000000..25a2a9b479c2 --- /dev/null +++ b/arch/arm64/kernel/image-vars.h @@ -0,0 +1,51 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Linker script variables to be set after section resolution, as + * ld.lld does not like variables assigned before SECTIONS is processed. 
+ */ +#ifndef __ARM64_KERNEL_IMAGE_VARS_H +#define __ARM64_KERNEL_IMAGE_VARS_H + +#ifndef LINKER_SCRIPT +#error This file should only be included in vmlinux.lds.S +#endif + +#ifdef CONFIG_EFI + +__efistub_stext_offset = stext - _text; + +/* + * The EFI stub has its own symbol namespace prefixed by __efistub_, to + * isolate it from the kernel proper. The following symbols are legally + * accessed by the stub, so provide some aliases to make them accessible. + * Only include data symbols here, or text symbols of functions that are + * guaranteed to be safe when executed at another offset than they were + * linked at. The routines below are all implemented in assembler in a + * position independent manner + */ +__efistub_memcmp = __pi_memcmp; +__efistub_memchr = __pi_memchr; +__efistub_memcpy = __pi_memcpy; +__efistub_memmove = __pi_memmove; +__efistub_memset = __pi_memset; +__efistub_strlen = __pi_strlen; +__efistub_strnlen = __pi_strnlen; +__efistub_strcmp = __pi_strcmp; +__efistub_strncmp = __pi_strncmp; +__efistub_strrchr = __pi_strrchr; +__efistub___flush_dcache_area = __pi___flush_dcache_area; + +#ifdef CONFIG_KASAN +__efistub___memcpy = __pi_memcpy; +__efistub___memmove = __pi_memmove; +__efistub___memset = __pi_memset; +#endif + +__efistub__text = _text; +__efistub__end = _end; +__efistub__edata = _edata; +__efistub_screen_info = screen_info; + +#endif + +#endif /* __ARM64_KERNEL_IMAGE_VARS_H */ diff --git a/arch/arm64/kernel/image.h b/arch/arm64/kernel/image.h index 2b85c0d6fa3d..c7d38c660372 100644 --- a/arch/arm64/kernel/image.h +++ b/arch/arm64/kernel/image.h @@ -65,46 +65,4 @@ DEFINE_IMAGE_LE64(_kernel_offset_le, TEXT_OFFSET); \ DEFINE_IMAGE_LE64(_kernel_flags_le, __HEAD_FLAGS); -#ifdef CONFIG_EFI - -/* - * Use ABSOLUTE() to avoid ld.lld treating this as a relative symbol: - * https://github.com/ClangBuiltLinux/linux/issues/561 - */ -__efistub_stext_offset = ABSOLUTE(stext - _text); - -/* - * The EFI stub has its own symbol namespace prefixed by __efistub_, to - * isolate it from the kernel proper. The following symbols are legally - * accessed by the stub, so provide some aliases to make them accessible. - * Only include data symbols here, or text symbols of functions that are - * guaranteed to be safe when executed at another offset than they were - * linked at. 
The routines below are all implemented in assembler in a - * position independent manner - */ -__efistub_memcmp = __pi_memcmp; -__efistub_memchr = __pi_memchr; -__efistub_memcpy = __pi_memcpy; -__efistub_memmove = __pi_memmove; -__efistub_memset = __pi_memset; -__efistub_strlen = __pi_strlen; -__efistub_strnlen = __pi_strnlen; -__efistub_strcmp = __pi_strcmp; -__efistub_strncmp = __pi_strncmp; -__efistub_strrchr = __pi_strrchr; -__efistub___flush_dcache_area = __pi___flush_dcache_area; - -#ifdef CONFIG_KASAN -__efistub___memcpy = __pi_memcpy; -__efistub___memmove = __pi_memmove; -__efistub___memset = __pi_memset; -#endif - -__efistub__text = _text; -__efistub__end = _end; -__efistub__edata = _edata; -__efistub_screen_info = screen_info; - -#endif - #endif /* __ARM64_KERNEL_IMAGE_H */ diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index 32893b3d9164..742a636861e7 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c @@ -7,9 +7,11 @@ */ #include <linux/bug.h> +#include <linux/context_tracking.h> #include <linux/signal.h> #include <linux/personality.h> #include <linux/kallsyms.h> +#include <linux/kprobes.h> #include <linux/spinlock.h> #include <linux/uaccess.h> #include <linux/hardirq.h> @@ -900,6 +902,13 @@ asmlinkage void do_serror(struct pt_regs *regs, unsigned int esr) nmi_exit(); } +asmlinkage void enter_from_user_mode(void) +{ + CT_WARN_ON(ct_state() != CONTEXT_USER); + user_exit_irqoff(); +} +NOKPROBE_SYMBOL(enter_from_user_mode); + void __pte_error(const char *file, int line, unsigned long val) { pr_err("%s:%d: bad pte %016lx.\n", file, line, val); diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S index 7fa008374907..803b24d2464a 100644 --- a/arch/arm64/kernel/vmlinux.lds.S +++ b/arch/arm64/kernel/vmlinux.lds.S @@ -245,6 +245,8 @@ SECTIONS HEAD_SYMBOLS } +#include "image-vars.h" + /* * The HYP init code and ID map text can't be longer than a page each, * and should not cross a page boundary. diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index f3c795278def..b1ee6cb4b17f 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c @@ -570,8 +570,12 @@ void free_initmem(void) #ifdef CONFIG_BLK_DEV_INITRD void __init free_initrd_mem(unsigned long start, unsigned long end) { + unsigned long aligned_start, aligned_end; + + aligned_start = __virt_to_phys(start) & PAGE_MASK; + aligned_end = PAGE_ALIGN(__virt_to_phys(end)); + memblock_free(aligned_start, aligned_end - aligned_start); free_reserved_area((void *)start, (void *)end, 0, "initrd"); - memblock_free(__virt_to_phys(start), end - start); } #endif diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S index 7dbf2be470f6..28a8f7b87ff0 100644 --- a/arch/arm64/mm/proc.S +++ b/arch/arm64/mm/proc.S @@ -286,6 +286,15 @@ skip_pgd: msr sctlr_el1, x18 isb + /* + * Invalidate the local I-cache so that any instructions fetched + * speculatively from the PoC are discarded, since they may have + * been dynamically patched at the PoU. 
+ */ + ic iallu + dsb nsh + isb + /* Set the flag to zero to indicate that we're all done */ str wzr, [flag_ptr] ret diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c index 326448f9df16..1a42ba885188 100644 --- a/arch/ia64/kernel/module.c +++ b/arch/ia64/kernel/module.c @@ -914,10 +914,14 @@ module_finalize (const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, struct module *mo void module_arch_cleanup (struct module *mod) { - if (mod->arch.init_unw_table) + if (mod->arch.init_unw_table) { unw_remove_unwind_table(mod->arch.init_unw_table); - if (mod->arch.core_unw_table) + mod->arch.init_unw_table = NULL; + } + if (mod->arch.core_unw_table) { unw_remove_unwind_table(mod->arch.core_unw_table); + mod->arch.core_unw_table = NULL; + } } void *dereference_module_function_descriptor(struct module *mod, void *ptr) diff --git a/arch/m68k/include/asm/atarihw.h b/arch/m68k/include/asm/atarihw.h index 533008262b69..5e5601c382b8 100644 --- a/arch/m68k/include/asm/atarihw.h +++ b/arch/m68k/include/asm/atarihw.h @@ -22,7 +22,6 @@ #include <linux/types.h> #include <asm/bootinfo-atari.h> -#include <asm/raw_io.h> #include <asm/kmap.h> extern u_long atari_mch_cookie; @@ -132,14 +131,6 @@ extern struct atari_hw_present atari_hw_present; */ -#define atari_readb raw_inb -#define atari_writeb raw_outb - -#define atari_inb_p raw_inb -#define atari_outb_p raw_outb - - - #include <linux/mm.h> #include <asm/cacheflush.h> diff --git a/arch/m68k/include/asm/io_mm.h b/arch/m68k/include/asm/io_mm.h index 6c03ca5bc436..819f611dccf2 100644 --- a/arch/m68k/include/asm/io_mm.h +++ b/arch/m68k/include/asm/io_mm.h @@ -29,7 +29,11 @@ #include <asm-generic/iomap.h> #ifdef CONFIG_ATARI -#include <asm/atarihw.h> +#define atari_readb raw_inb +#define atari_writeb raw_outb + +#define atari_inb_p raw_inb +#define atari_outb_p raw_outb #endif diff --git a/arch/m68k/include/asm/macintosh.h b/arch/m68k/include/asm/macintosh.h index d9a08bed4b12..f653b60f2afc 100644 --- a/arch/m68k/include/asm/macintosh.h +++ b/arch/m68k/include/asm/macintosh.h @@ -4,6 +4,7 @@ #include <linux/seq_file.h> #include <linux/interrupt.h> +#include <linux/irq.h> #include <asm/bootinfo-mac.h> diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile index c345b79414a9..403f7e193833 100644 --- a/arch/powerpc/Makefile +++ b/arch/powerpc/Makefile @@ -39,13 +39,11 @@ endif uname := $(shell uname -m) KBUILD_DEFCONFIG := $(if $(filter ppc%,$(uname)),$(uname),ppc64)_defconfig -ifdef CONFIG_PPC64 new_nm := $(shell if $(NM) --help 2>&1 | grep -- '--synthetic' > /dev/null; then echo y; else echo n; fi) ifeq ($(new_nm),y) NM := $(NM) --synthetic endif -endif # BITS is used as extension for files which are available in a 32 bit # and a 64 bit version to simplify shared Makefiles. 
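
[Editor's note] The ia64 module_arch_cleanup() hunk above clears mod->arch.init_unw_table and mod->arch.core_unw_table right after unregistering them, so a repeated cleanup pass cannot hand a stale pointer back to unw_remove_unwind_table(). A minimal, self-contained userspace sketch of that idempotent-teardown pattern follows; the structure and function names here are illustrative stand-ins, not the kernel API.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for a registered resource such as an unwind table. */
struct unw_table_stub {
	int id;
};

struct mod_arch_stub {
	struct unw_table_stub *init_table;
	struct unw_table_stub *core_table;
};

static void unregister_table(struct unw_table_stub *t)
{
	printf("unregistering table %d\n", t->id);
	free(t);
}

/* Safe to call more than once: each pointer is cleared after it is released. */
static void arch_cleanup(struct mod_arch_stub *arch)
{
	if (arch->init_table) {
		unregister_table(arch->init_table);
		arch->init_table = NULL;
	}
	if (arch->core_table) {
		unregister_table(arch->core_table);
		arch->core_table = NULL;
	}
}

int main(void)
{
	struct mod_arch_stub arch = {
		.init_table = malloc(sizeof(struct unw_table_stub)),
		.core_table = malloc(sizeof(struct unw_table_stub)),
	};

	arch.init_table->id = 1;
	arch.core_table->id = 2;

	arch_cleanup(&arch);
	arch_cleanup(&arch);	/* second call is now a harmless no-op */
	return 0;
}
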
diff --git a/arch/powerpc/platforms/powernv/opal-imc.c b/arch/powerpc/platforms/powernv/opal-imc.c index 186109bdd41b..e04b20625cb9 100644 --- a/arch/powerpc/platforms/powernv/opal-imc.c +++ b/arch/powerpc/platforms/powernv/opal-imc.c @@ -53,9 +53,9 @@ static void export_imc_mode_and_cmd(struct device_node *node, struct imc_pmu *pmu_ptr) { static u64 loc, *imc_mode_addr, *imc_cmd_addr; - int chip = 0, nid; char mode[16], cmd[16]; u32 cb_offset; + struct imc_mem_info *ptr = pmu_ptr->mem_info; imc_debugfs_parent = debugfs_create_dir("imc", powerpc_debugfs_root); @@ -69,20 +69,20 @@ static void export_imc_mode_and_cmd(struct device_node *node, if (of_property_read_u32(node, "cb_offset", &cb_offset)) cb_offset = IMC_CNTL_BLK_OFFSET; - for_each_node(nid) { - loc = (u64)(pmu_ptr->mem_info[chip].vbase) + cb_offset; + while (ptr->vbase != NULL) { + loc = (u64)(ptr->vbase) + cb_offset; imc_mode_addr = (u64 *)(loc + IMC_CNTL_BLK_MODE_OFFSET); - sprintf(mode, "imc_mode_%d", nid); + sprintf(mode, "imc_mode_%d", (u32)(ptr->id)); if (!imc_debugfs_create_x64(mode, 0600, imc_debugfs_parent, imc_mode_addr)) goto err; imc_cmd_addr = (u64 *)(loc + IMC_CNTL_BLK_CMD_OFFSET); - sprintf(cmd, "imc_cmd_%d", nid); + sprintf(cmd, "imc_cmd_%d", (u32)(ptr->id)); if (!imc_debugfs_create_x64(cmd, 0600, imc_debugfs_parent, imc_cmd_addr)) goto err; - chip++; + ptr++; } return; diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c index d00f84add5f4..6d2dbb5089d5 100644 --- a/arch/s390/crypto/aes_s390.c +++ b/arch/s390/crypto/aes_s390.c @@ -586,6 +586,9 @@ static int xts_aes_encrypt(struct blkcipher_desc *desc, struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm); struct blkcipher_walk walk; + if (!nbytes) + return -EINVAL; + if (unlikely(!xts_ctx->fc)) return xts_fallback_encrypt(desc, dst, src, nbytes); @@ -600,6 +603,9 @@ static int xts_aes_decrypt(struct blkcipher_desc *desc, struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm); struct blkcipher_walk walk; + if (!nbytes) + return -EINVAL; + if (unlikely(!xts_ctx->fc)) return xts_fallback_decrypt(desc, dst, src, nbytes); diff --git a/arch/s390/include/asm/string.h b/arch/s390/include/asm/string.h index 70d87db54e62..4c0690fc5167 100644 --- a/arch/s390/include/asm/string.h +++ b/arch/s390/include/asm/string.h @@ -71,11 +71,16 @@ extern void *__memmove(void *dest, const void *src, size_t n); #define memcpy(dst, src, len) __memcpy(dst, src, len) #define memmove(dst, src, len) __memmove(dst, src, len) #define memset(s, c, n) __memset(s, c, n) +#define strlen(s) __strlen(s) + +#define __no_sanitize_prefix_strfunc(x) __##x #ifndef __NO_FORTIFY #define __NO_FORTIFY /* FORTIFY_SOURCE uses __builtin_memcpy, etc. 
*/ #endif +#else +#define __no_sanitize_prefix_strfunc(x) x #endif /* defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__) */ void *__memset16(uint16_t *s, uint16_t v, size_t count); @@ -163,8 +168,8 @@ static inline char *strcpy(char *dst, const char *src) } #endif -#ifdef __HAVE_ARCH_STRLEN -static inline size_t strlen(const char *s) +#if defined(__HAVE_ARCH_STRLEN) || (defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)) +static inline size_t __no_sanitize_prefix_strfunc(strlen)(const char *s) { register unsigned long r0 asm("0") = 0; const char *tmp = s; diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h index fe7c205233f1..9ae1c0f05fd2 100644 --- a/arch/x86/include/asm/intel-family.h +++ b/arch/x86/include/asm/intel-family.h @@ -73,6 +73,9 @@ #define INTEL_FAM6_ICELAKE_MOBILE 0x7E #define INTEL_FAM6_ICELAKE_NNPI 0x9D +#define INTEL_FAM6_TIGERLAKE_L 0x8C +#define INTEL_FAM6_TIGERLAKE 0x8D + /* "Small Core" Processors (Atom) */ #define INTEL_FAM6_ATOM_BONNELL 0x1C /* Diamondville, Pineview */ diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index bdc16b0aa7c6..dd0ca154a958 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1583,6 +1583,13 @@ bool kvm_intr_is_single_vcpu(struct kvm *kvm, struct kvm_lapic_irq *irq, void kvm_set_msi_irq(struct kvm *kvm, struct kvm_kernel_irq_routing_entry *e, struct kvm_lapic_irq *irq); +static inline bool kvm_irq_is_postable(struct kvm_lapic_irq *irq) +{ + /* We can only post Fixed and LowPrio IRQs */ + return (irq->delivery_mode == dest_Fixed || + irq->delivery_mode == dest_LowestPrio); +} + static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) { if (kvm_x86_ops->vcpu_blocking) diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c index d63e63b7d1d9..251c795b4eb3 100644 --- a/arch/x86/kernel/amd_nb.c +++ b/arch/x86/kernel/amd_nb.c @@ -21,6 +21,7 @@ #define PCI_DEVICE_ID_AMD_17H_DF_F4 0x1464 #define PCI_DEVICE_ID_AMD_17H_M10H_DF_F4 0x15ec #define PCI_DEVICE_ID_AMD_17H_M30H_DF_F4 0x1494 +#define PCI_DEVICE_ID_AMD_17H_M70H_DF_F4 0x1444 /* Protect the PCI config register pairs used for SMN and DF indirect access. 
*/ static DEFINE_MUTEX(smn_mutex); @@ -50,6 +51,7 @@ const struct pci_device_id amd_nb_misc_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F3) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F3) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) }, + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F3) }, {} }; EXPORT_SYMBOL_GPL(amd_nb_misc_ids); @@ -63,6 +65,7 @@ static const struct pci_device_id amd_nb_link_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F4) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F4) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F4) }, + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F4) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F4) }, {} }; diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index 08fb79f37793..ad0d5ced82b3 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c @@ -1495,54 +1495,72 @@ static void lapic_setup_esr(void) oldvalue, value); } -static void apic_pending_intr_clear(void) +#define APIC_IR_REGS APIC_ISR_NR +#define APIC_IR_BITS (APIC_IR_REGS * 32) +#define APIC_IR_MAPSIZE (APIC_IR_BITS / BITS_PER_LONG) + +union apic_ir { + unsigned long map[APIC_IR_MAPSIZE]; + u32 regs[APIC_IR_REGS]; +}; + +static bool apic_check_and_ack(union apic_ir *irr, union apic_ir *isr) { - long long max_loops = cpu_khz ? cpu_khz : 1000000; - unsigned long long tsc = 0, ntsc; - unsigned int queued; - unsigned long value; - int i, j, acked = 0; + int i, bit; + + /* Read the IRRs */ + for (i = 0; i < APIC_IR_REGS; i++) + irr->regs[i] = apic_read(APIC_IRR + i * 0x10); + + /* Read the ISRs */ + for (i = 0; i < APIC_IR_REGS; i++) + isr->regs[i] = apic_read(APIC_ISR + i * 0x10); - if (boot_cpu_has(X86_FEATURE_TSC)) - tsc = rdtsc(); /* - * After a crash, we no longer service the interrupts and a pending - * interrupt from previous kernel might still have ISR bit set. - * - * Most probably by now CPU has serviced that pending interrupt and - * it might not have done the ack_APIC_irq() because it thought, - * interrupt came from i8259 as ExtInt. LAPIC did not get EOI so it - * does not clear the ISR bit and cpu thinks it has already serivced - * the interrupt. Hence a vector might get locked. It was noticed - * for timer irq (vector 0x31). Issue an extra EOI to clear ISR. + * If the ISR map is not empty. ACK the APIC and run another round + * to verify whether a pending IRR has been unblocked and turned + * into a ISR. */ - do { - queued = 0; - for (i = APIC_ISR_NR - 1; i >= 0; i--) - queued |= apic_read(APIC_IRR + i*0x10); - - for (i = APIC_ISR_NR - 1; i >= 0; i--) { - value = apic_read(APIC_ISR + i*0x10); - for_each_set_bit(j, &value, 32) { - ack_APIC_irq(); - acked++; - } - } - if (acked > 256) { - pr_err("LAPIC pending interrupts after %d EOI\n", acked); - break; - } - if (queued) { - if (boot_cpu_has(X86_FEATURE_TSC) && cpu_khz) { - ntsc = rdtsc(); - max_loops = (long long)cpu_khz << 10; - max_loops -= ntsc - tsc; - } else { - max_loops--; - } - } - } while (queued && max_loops > 0); - WARN_ON(max_loops <= 0); + if (!bitmap_empty(isr->map, APIC_IR_BITS)) { + /* + * There can be multiple ISR bits set when a high priority + * interrupt preempted a lower priority one. Issue an ACK + * per set bit. 
+ */ + for_each_set_bit(bit, isr->map, APIC_IR_BITS) + ack_APIC_irq(); + return true; + } + + return !bitmap_empty(irr->map, APIC_IR_BITS); +} + +/* + * After a crash, we no longer service the interrupts and a pending + * interrupt from previous kernel might still have ISR bit set. + * + * Most probably by now the CPU has serviced that pending interrupt and it + * might not have done the ack_APIC_irq() because it thought, interrupt + * came from i8259 as ExtInt. LAPIC did not get EOI so it does not clear + * the ISR bit and cpu thinks it has already serivced the interrupt. Hence + * a vector might get locked. It was noticed for timer irq (vector + * 0x31). Issue an extra EOI to clear ISR. + * + * If there are pending IRR bits they turn into ISR bits after a higher + * priority ISR bit has been acked. + */ +static void apic_pending_intr_clear(void) +{ + union apic_ir irr, isr; + unsigned int i; + + /* 512 loops are way oversized and give the APIC a chance to obey. */ + for (i = 0; i < 512; i++) { + if (!apic_check_and_ack(&irr, &isr)) + return; + } + /* Dump the IRR/ISR content if that failed */ + pr_warn("APIC: Stale IRR: %256pb ISR: %256pb\n", irr.map, isr.map); } /** @@ -1565,6 +1583,14 @@ static void setup_local_APIC(void) return; } + /* + * If this comes from kexec/kcrash the APIC might be enabled in + * SPIV. Soft disable it before doing further initialization. + */ + value = apic_read(APIC_SPIV); + value &= ~APIC_SPIV_APIC_ENABLED; + apic_write(APIC_SPIV, value); + #ifdef CONFIG_X86_32 /* Pound the ESR really hard over the head with a big hammer - mbligh */ if (lapic_is_integrated() && apic->disable_esr) { @@ -1610,6 +1636,7 @@ static void setup_local_APIC(void) value &= ~APIC_TPRI_MASK; apic_write(APIC_TASKPRI, value); + /* Clear eventually stale ISR/IRR bits */ apic_pending_intr_clear(); /* diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c index fdacb864c3dd..2c5676b0a6e7 100644 --- a/arch/x86/kernel/apic/vector.c +++ b/arch/x86/kernel/apic/vector.c @@ -398,6 +398,17 @@ static int activate_reserved(struct irq_data *irqd) if (!irqd_can_reserve(irqd)) apicd->can_reserve = false; } + + /* + * Check to ensure that the effective affinity mask is a subset + * the user supplied affinity mask, and warn the user if it is not + */ + if (!cpumask_subset(irq_data_get_effective_affinity_mask(irqd), + irq_data_get_affinity_mask(irqd))) { + pr_warn("irq %u: Affinity broken due to vector space exhaustion.\n", + irqd->irq); + } + return ret; } diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c index 96421f97e75c..231fa230ebc7 100644 --- a/arch/x86/kernel/smp.c +++ b/arch/x86/kernel/smp.c @@ -179,6 +179,12 @@ asmlinkage __visible void smp_reboot_interrupt(void) irq_exit(); } +static int register_stop_handler(void) +{ + return register_nmi_handler(NMI_LOCAL, smp_stop_nmi_callback, + NMI_FLAG_FIRST, "smp_stop"); +} + static void native_stop_other_cpus(int wait) { unsigned long flags; @@ -212,39 +218,41 @@ static void native_stop_other_cpus(int wait) apic->send_IPI_allbutself(REBOOT_VECTOR); /* - * Don't wait longer than a second if the caller - * didn't ask us to wait. + * Don't wait longer than a second for IPI completion. The + * wait request is not checked here because that would + * prevent an NMI shutdown attempt in case that not all + * CPUs reach shutdown state. 
*/ timeout = USEC_PER_SEC; - while (num_online_cpus() > 1 && (wait || timeout--)) + while (num_online_cpus() > 1 && timeout--) udelay(1); } - - /* if the REBOOT_VECTOR didn't work, try with the NMI */ - if ((num_online_cpus() > 1) && (!smp_no_nmi_ipi)) { - if (register_nmi_handler(NMI_LOCAL, smp_stop_nmi_callback, - NMI_FLAG_FIRST, "smp_stop")) - /* Note: we ignore failures here */ - /* Hope the REBOOT_IRQ is good enough */ - goto finish; - - /* sync above data before sending IRQ */ - wmb(); - pr_emerg("Shutting down cpus with NMI\n"); + /* if the REBOOT_VECTOR didn't work, try with the NMI */ + if (num_online_cpus() > 1) { + /* + * If NMI IPI is enabled, try to register the stop handler + * and send the IPI. In any case try to wait for the other + * CPUs to stop. + */ + if (!smp_no_nmi_ipi && !register_stop_handler()) { + /* Sync above data before sending IRQ */ + wmb(); - apic->send_IPI_allbutself(NMI_VECTOR); + pr_emerg("Shutting down cpus with NMI\n"); + apic->send_IPI_allbutself(NMI_VECTOR); + } /* - * Don't wait longer than a 10 ms if the caller - * didn't ask us to wait. + * Don't wait longer than 10 ms if the caller didn't + * reqeust it. If wait is true, the machine hangs here if + * one or more CPUs do not reach shutdown state. */ timeout = USEC_PER_MSEC * 10; while (num_online_cpus() > 1 && (wait || timeout--)) udelay(1); } -finish: local_irq_save(flags); disable_local_APIC(); mcheck_cpu_clear(this_cpu_ptr(&cpu_info)); diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index 22c2720cd948..e7d25f436466 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -304,7 +304,13 @@ static void do_host_cpuid(struct kvm_cpuid_entry2 *entry, u32 function, case 7: case 0xb: case 0xd: + case 0xf: + case 0x10: + case 0x12: case 0x14: + case 0x17: + case 0x18: + case 0x1f: case 0x8000001d: entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX; break; diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index 718f7d9afedc..3b971026a653 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -5395,6 +5395,8 @@ int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len) ctxt->memopp->addr.mem.ea + ctxt->_eip); done: + if (rc == X86EMUL_PROPAGATE_FAULT) + ctxt->have_exception = true; return (rc != X86EMUL_CONTINUE) ? 
EMULATION_FAILED : EMULATION_OK; } diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index a63964e7cec7..94aa6102010d 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -395,8 +395,6 @@ static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn, mask |= (gpa & shadow_nonpresent_or_rsvd_mask) << shadow_nonpresent_or_rsvd_mask_len; - page_header(__pa(sptep))->mmio_cached = true; - trace_mark_mmio_spte(sptep, gfn, access, gen); mmu_spte_set(sptep, mask); } @@ -5611,13 +5609,13 @@ slot_handle_leaf(struct kvm *kvm, struct kvm_memory_slot *memslot, PT_PAGE_TABLE_LEVEL, lock_flush_tlb); } -static void free_mmu_pages(struct kvm_vcpu *vcpu) +static void free_mmu_pages(struct kvm_mmu *mmu) { - free_page((unsigned long)vcpu->arch.mmu->pae_root); - free_page((unsigned long)vcpu->arch.mmu->lm_root); + free_page((unsigned long)mmu->pae_root); + free_page((unsigned long)mmu->lm_root); } -static int alloc_mmu_pages(struct kvm_vcpu *vcpu) +static int alloc_mmu_pages(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu) { struct page *page; int i; @@ -5638,9 +5636,9 @@ static int alloc_mmu_pages(struct kvm_vcpu *vcpu) if (!page) return -ENOMEM; - vcpu->arch.mmu->pae_root = page_address(page); + mmu->pae_root = page_address(page); for (i = 0; i < 4; ++i) - vcpu->arch.mmu->pae_root[i] = INVALID_PAGE; + mmu->pae_root[i] = INVALID_PAGE; return 0; } @@ -5648,6 +5646,7 @@ static int alloc_mmu_pages(struct kvm_vcpu *vcpu) int kvm_mmu_create(struct kvm_vcpu *vcpu) { uint i; + int ret; vcpu->arch.mmu = &vcpu->arch.root_mmu; vcpu->arch.walk_mmu = &vcpu->arch.root_mmu; @@ -5665,7 +5664,19 @@ int kvm_mmu_create(struct kvm_vcpu *vcpu) vcpu->arch.guest_mmu.prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID; vcpu->arch.nested_mmu.translate_gpa = translate_nested_gpa; - return alloc_mmu_pages(vcpu); + + ret = alloc_mmu_pages(vcpu, &vcpu->arch.guest_mmu); + if (ret) + return ret; + + ret = alloc_mmu_pages(vcpu, &vcpu->arch.root_mmu); + if (ret) + goto fail_allocate_root; + + return ret; + fail_allocate_root: + free_mmu_pages(&vcpu->arch.guest_mmu); + return ret; } @@ -5943,7 +5954,7 @@ void kvm_mmu_slot_set_dirty(struct kvm *kvm, } EXPORT_SYMBOL_GPL(kvm_mmu_slot_set_dirty); -static void __kvm_mmu_zap_all(struct kvm *kvm, bool mmio_only) +void kvm_mmu_zap_all(struct kvm *kvm) { struct kvm_mmu_page *sp, *node; LIST_HEAD(invalid_list); @@ -5952,14 +5963,10 @@ static void __kvm_mmu_zap_all(struct kvm *kvm, bool mmio_only) spin_lock(&kvm->mmu_lock); restart: list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link) { - if (mmio_only && !sp->mmio_cached) - continue; if (sp->role.invalid && sp->root_count) continue; - if (__kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list, &ign)) { - WARN_ON_ONCE(mmio_only); + if (__kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list, &ign)) goto restart; - } if (cond_resched_lock(&kvm->mmu_lock)) goto restart; } @@ -5968,11 +5975,6 @@ static void __kvm_mmu_zap_all(struct kvm *kvm, bool mmio_only) spin_unlock(&kvm->mmu_lock); } -void kvm_mmu_zap_all(struct kvm *kvm) -{ - return __kvm_mmu_zap_all(kvm, false); -} - void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen) { WARN_ON(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS); @@ -5994,7 +5996,7 @@ void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen) */ if (unlikely(gen == 0)) { kvm_debug_ratelimited("kvm: zapping shadow pages for mmio generation wraparound\n"); - __kvm_mmu_zap_all(kvm, true); + kvm_mmu_zap_all_fast(kvm); } } @@ -6168,7 +6170,8 @@ unsigned long kvm_mmu_calculate_default_mmu_pages(struct kvm *kvm) void 
kvm_mmu_destroy(struct kvm_vcpu *vcpu) { kvm_mmu_unload(vcpu); - free_mmu_pages(vcpu); + free_mmu_pages(&vcpu->arch.root_mmu); + free_mmu_pages(&vcpu->arch.guest_mmu); mmu_free_memory_caches(vcpu); } diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index e0368076a1ef..45e425c5e6f5 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -5274,7 +5274,8 @@ get_pi_vcpu_info(struct kvm *kvm, struct kvm_kernel_irq_routing_entry *e, kvm_set_msi_irq(kvm, e, &irq); - if (!kvm_intr_is_single_vcpu(kvm, &irq, &vcpu)) { + if (!kvm_intr_is_single_vcpu(kvm, &irq, &vcpu) || + !kvm_irq_is_postable(&irq)) { pr_debug("SVM: %s: use legacy intr remap mode for irq %u\n", __func__, irq.vector); return -1; @@ -5328,6 +5329,7 @@ static int svm_update_pi_irte(struct kvm *kvm, unsigned int host_irq, * 1. When cannot target interrupt to a specific vcpu. * 2. Unsetting posted interrupt. * 3. APIC virtialization is disabled for the vcpu. + * 4. IRQ has incompatible delivery mode (SMI, INIT, etc) */ if (!get_pi_vcpu_info(kvm, e, &vcpu_info, &svm) && set && kvm_vcpu_apicv_active(&svm->vcpu)) { diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index c030c96fc81a..1d11bf4bab8b 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -7369,10 +7369,14 @@ static int vmx_update_pi_irte(struct kvm *kvm, unsigned int host_irq, * irqbalance to make the interrupts single-CPU. * * We will support full lowest-priority interrupt later. + * + * In addition, we can only inject generic interrupts using + * the PI mechanism, refuse to route others through it. */ kvm_set_msi_irq(kvm, e, &irq); - if (!kvm_intr_is_single_vcpu(kvm, &irq, &vcpu)) { + if (!kvm_intr_is_single_vcpu(kvm, &irq, &vcpu) || + !kvm_irq_is_postable(&irq)) { /* * Make sure the IRTE is in remapped mode if * we don't handle it in posted mode. diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 91602d310a3f..350adc83eb50 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -674,8 +674,14 @@ static int kvm_read_nested_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, data, offset, len, access); } +static inline u64 pdptr_rsvd_bits(struct kvm_vcpu *vcpu) +{ + return rsvd_bits(cpuid_maxphyaddr(vcpu), 63) | rsvd_bits(5, 8) | + rsvd_bits(1, 2); +} + /* - * Load the pae pdptrs. Return true is they are all valid. + * Load the pae pdptrs. Return 1 if they are all valid, 0 otherwise. */ int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3) { @@ -694,8 +700,7 @@ int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3) } for (i = 0; i < ARRAY_SIZE(pdpte); ++i) { if ((pdpte[i] & PT_PRESENT_MASK) && - (pdpte[i] & - vcpu->arch.mmu->guest_rsvd_check.rsvd_bits_mask[0][2])) { + (pdpte[i] & pdptr_rsvd_bits(vcpu))) { ret = 0; goto out; } @@ -6528,8 +6533,16 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, if (reexecute_instruction(vcpu, cr2, write_fault_to_spt, emulation_type)) return EMULATE_DONE; - if (ctxt->have_exception && inject_emulated_exception(vcpu)) + if (ctxt->have_exception) { + /* + * #UD should result in just EMULATION_FAILED, and trap-like + * exception should not be encountered during decode. 
+ */ + WARN_ON_ONCE(ctxt->exception.vector == UD_VECTOR || + exception_type(ctxt->exception.vector) == EXCPT_TRAP); + inject_emulated_exception(vcpu); return EMULATE_DONE; + } if (emulation_type & EMULTYPE_SKIP) return EMULATE_FAIL; return handle_emulation_failure(vcpu, emulation_type); diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c index e6dad600614c..4123100e0eaf 100644 --- a/arch/x86/mm/numa.c +++ b/arch/x86/mm/numa.c @@ -861,9 +861,9 @@ void numa_remove_cpu(int cpu) */ const struct cpumask *cpumask_of_node(int node) { - if (node >= nr_node_ids) { + if ((unsigned)node >= nr_node_ids) { printk(KERN_WARNING - "cpumask_of_node(%d): node > nr_node_ids(%u)\n", + "cpumask_of_node(%d): (unsigned)node >= nr_node_ids(%u)\n", node, nr_node_ids); dump_stack(); return cpu_none_mask; diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c index b196524759ec..7f2140414440 100644 --- a/arch/x86/mm/pti.c +++ b/arch/x86/mm/pti.c @@ -330,13 +330,15 @@ pti_clone_pgtable(unsigned long start, unsigned long end, pud = pud_offset(p4d, addr); if (pud_none(*pud)) { - addr += PUD_SIZE; + WARN_ON_ONCE(addr & ~PUD_MASK); + addr = round_up(addr + 1, PUD_SIZE); continue; } pmd = pmd_offset(pud, addr); if (pmd_none(*pmd)) { - addr += PMD_SIZE; + WARN_ON_ONCE(addr & ~PMD_MASK); + addr = round_up(addr + 1, PMD_SIZE); continue; } @@ -666,6 +668,8 @@ void __init pti_init(void) */ void pti_finalize(void) { + if (!boot_cpu_has(X86_FEATURE_PTI)) + return; /* * We need to clone everything (again) that maps parts of the * kernel image. diff --git a/arch/x86/platform/intel/iosf_mbi.c b/arch/x86/platform/intel/iosf_mbi.c index 2e796b54cbde..9e2444500428 100644 --- a/arch/x86/platform/intel/iosf_mbi.c +++ b/arch/x86/platform/intel/iosf_mbi.c @@ -17,6 +17,7 @@ #include <linux/debugfs.h> #include <linux/capability.h> #include <linux/pm_qos.h> +#include <linux/wait.h> #include <asm/iosf_mbi.h> @@ -201,23 +202,45 @@ EXPORT_SYMBOL(iosf_mbi_available); #define PUNIT_SEMAPHORE_BIT BIT(0) #define PUNIT_SEMAPHORE_ACQUIRE BIT(1) -static DEFINE_MUTEX(iosf_mbi_punit_mutex); -static DEFINE_MUTEX(iosf_mbi_block_punit_i2c_access_count_mutex); +static DEFINE_MUTEX(iosf_mbi_pmic_access_mutex); static BLOCKING_NOTIFIER_HEAD(iosf_mbi_pmic_bus_access_notifier); -static u32 iosf_mbi_block_punit_i2c_access_count; +static DECLARE_WAIT_QUEUE_HEAD(iosf_mbi_pmic_access_waitq); +static u32 iosf_mbi_pmic_punit_access_count; +static u32 iosf_mbi_pmic_i2c_access_count; static u32 iosf_mbi_sem_address; static unsigned long iosf_mbi_sem_acquired; static struct pm_qos_request iosf_mbi_pm_qos; void iosf_mbi_punit_acquire(void) { - mutex_lock(&iosf_mbi_punit_mutex); + /* Wait for any I2C PMIC accesses from in kernel drivers to finish. */ + mutex_lock(&iosf_mbi_pmic_access_mutex); + while (iosf_mbi_pmic_i2c_access_count != 0) { + mutex_unlock(&iosf_mbi_pmic_access_mutex); + wait_event(iosf_mbi_pmic_access_waitq, + iosf_mbi_pmic_i2c_access_count == 0); + mutex_lock(&iosf_mbi_pmic_access_mutex); + } + /* + * We do not need to do anything to allow the PUNIT to safely access + * the PMIC, other then block in kernel accesses to the PMIC. 
+ */ + iosf_mbi_pmic_punit_access_count++; + mutex_unlock(&iosf_mbi_pmic_access_mutex); } EXPORT_SYMBOL(iosf_mbi_punit_acquire); void iosf_mbi_punit_release(void) { - mutex_unlock(&iosf_mbi_punit_mutex); + bool do_wakeup; + + mutex_lock(&iosf_mbi_pmic_access_mutex); + iosf_mbi_pmic_punit_access_count--; + do_wakeup = iosf_mbi_pmic_punit_access_count == 0; + mutex_unlock(&iosf_mbi_pmic_access_mutex); + + if (do_wakeup) + wake_up(&iosf_mbi_pmic_access_waitq); } EXPORT_SYMBOL(iosf_mbi_punit_release); @@ -256,34 +279,32 @@ static void iosf_mbi_reset_semaphore(void) * already blocked P-Unit accesses because it wants them blocked over multiple * i2c-transfers, for e.g. read-modify-write of an I2C client register. * - * The P-Unit accesses already being blocked is tracked through the - * iosf_mbi_block_punit_i2c_access_count variable which is protected by the - * iosf_mbi_block_punit_i2c_access_count_mutex this mutex is hold for the - * entire duration of the function. - * - * If access is not blocked yet, this function takes the following steps: + * To allow safe PMIC i2c bus accesses this function takes the following steps: * * 1) Some code sends request to the P-Unit which make it access the PMIC * I2C bus. Testing has shown that the P-Unit does not check its internal * PMIC bus semaphore for these requests. Callers of these requests call * iosf_mbi_punit_acquire()/_release() around their P-Unit accesses, these - * functions lock/unlock the iosf_mbi_punit_mutex. - * As the first step we lock the iosf_mbi_punit_mutex, to wait for any in - * flight requests to finish and to block any new requests. + * functions increase/decrease iosf_mbi_pmic_punit_access_count, so first + * we wait for iosf_mbi_pmic_punit_access_count to become 0. + * + * 2) Check iosf_mbi_pmic_i2c_access_count, if access has already + * been blocked by another caller, we only need to increment + * iosf_mbi_pmic_i2c_access_count and we can skip the other steps. * - * 2) Some code makes such P-Unit requests from atomic contexts where it + * 3) Some code makes such P-Unit requests from atomic contexts where it * cannot call iosf_mbi_punit_acquire() as that may sleep. * As the second step we call a notifier chain which allows any code * needing P-Unit resources from atomic context to acquire them before * we take control over the PMIC I2C bus. * - * 3) When CPU cores enter C6 or C7 the P-Unit needs to talk to the PMIC + * 4) When CPU cores enter C6 or C7 the P-Unit needs to talk to the PMIC * if this happens while the kernel itself is accessing the PMIC I2C bus * the SoC hangs. * As the third step we call pm_qos_update_request() to disallow the CPU * to enter C6 or C7. * - * 4) The P-Unit has a PMIC bus semaphore which we can request to stop + * 5) The P-Unit has a PMIC bus semaphore which we can request to stop * autonomous P-Unit tasks from accessing the PMIC I2C bus while we hold it. * As the fourth and final step we request this semaphore and wait for our * request to be acknowledged. 
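The iosf_mbi hunks above replace the single long-held iosf_mbi_punit_mutex with two usage counters (iosf_mbi_pmic_punit_access_count and iosf_mbi_pmic_i2c_access_count) protected by iosf_mbi_pmic_access_mutex, plus a wait queue that each side waits on until the other side's counter reaches zero. The following is a minimal userspace sketch of that counter-plus-waitqueue pattern using pthreads; it is an illustrative analogue only, and the names punit_acquire(), punit_release(), i2c_block_punit(), i2c_unblock_punit(), punit_users and i2c_users are invented for the example, not the kernel API.

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t access_lock  = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  access_waitq = PTHREAD_COND_INITIALIZER;
    static unsigned int punit_users;  /* analogue of iosf_mbi_pmic_punit_access_count */
    static unsigned int i2c_users;    /* analogue of iosf_mbi_pmic_i2c_access_count */

    /* "P-Unit side": wait until no i2c-side users remain, then count ourselves in. */
    static void punit_acquire(void)
    {
            pthread_mutex_lock(&access_lock);
            while (i2c_users != 0)
                    pthread_cond_wait(&access_waitq, &access_lock);
            punit_users++;
            pthread_mutex_unlock(&access_lock);
    }

    static void punit_release(void)
    {
            int do_wakeup;

            pthread_mutex_lock(&access_lock);
            punit_users--;
            do_wakeup = (punit_users == 0);
            pthread_mutex_unlock(&access_lock);

            /* Only the transition to zero can unblock the other side. */
            if (do_wakeup)
                    pthread_cond_broadcast(&access_waitq);
    }

    /* "i2c side" is symmetric: wait for the P-Unit-side count to drain. */
    static void i2c_block_punit(void)
    {
            pthread_mutex_lock(&access_lock);
            while (punit_users != 0)
                    pthread_cond_wait(&access_waitq, &access_lock);
            i2c_users++;
            pthread_mutex_unlock(&access_lock);
    }

    static void i2c_unblock_punit(void)
    {
            int do_wakeup;

            pthread_mutex_lock(&access_lock);
            i2c_users--;
            do_wakeup = (i2c_users == 0);
            pthread_mutex_unlock(&access_lock);

            if (do_wakeup)
                    pthread_cond_broadcast(&access_waitq);
    }

    int main(void)
    {
            punit_acquire();
            punit_release();
            i2c_block_punit();
            i2c_unblock_punit();
            printf("each side waits for the other side's count to reach zero\n");
            return 0;
    }

Waking only on the zero transition mirrors the do_wakeup handling added in iosf_mbi_punit_release() and iosf_mbi_unblock_punit_i2c_access(): an intermediate decrement cannot unblock the other side, so signalling the wait queue there would be wasted work.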
@@ -297,12 +318,18 @@ int iosf_mbi_block_punit_i2c_access(void) if (WARN_ON(!mbi_pdev || !iosf_mbi_sem_address)) return -ENXIO; - mutex_lock(&iosf_mbi_block_punit_i2c_access_count_mutex); + mutex_lock(&iosf_mbi_pmic_access_mutex); - if (iosf_mbi_block_punit_i2c_access_count > 0) + while (iosf_mbi_pmic_punit_access_count != 0) { + mutex_unlock(&iosf_mbi_pmic_access_mutex); + wait_event(iosf_mbi_pmic_access_waitq, + iosf_mbi_pmic_punit_access_count == 0); + mutex_lock(&iosf_mbi_pmic_access_mutex); + } + + if (iosf_mbi_pmic_i2c_access_count > 0) goto success; - mutex_lock(&iosf_mbi_punit_mutex); blocking_notifier_call_chain(&iosf_mbi_pmic_bus_access_notifier, MBI_PMIC_BUS_ACCESS_BEGIN, NULL); @@ -330,10 +357,6 @@ int iosf_mbi_block_punit_i2c_access(void) iosf_mbi_sem_acquired = jiffies; dev_dbg(&mbi_pdev->dev, "P-Unit semaphore acquired after %ums\n", jiffies_to_msecs(jiffies - start)); - /* - * Success, keep iosf_mbi_punit_mutex locked till - * iosf_mbi_unblock_punit_i2c_access() gets called. - */ goto success; } @@ -344,15 +367,13 @@ int iosf_mbi_block_punit_i2c_access(void) dev_err(&mbi_pdev->dev, "Error P-Unit semaphore timed out, resetting\n"); error: iosf_mbi_reset_semaphore(); - mutex_unlock(&iosf_mbi_punit_mutex); - if (!iosf_mbi_get_sem(&sem)) dev_err(&mbi_pdev->dev, "P-Unit semaphore: %d\n", sem); success: if (!WARN_ON(ret)) - iosf_mbi_block_punit_i2c_access_count++; + iosf_mbi_pmic_i2c_access_count++; - mutex_unlock(&iosf_mbi_block_punit_i2c_access_count_mutex); + mutex_unlock(&iosf_mbi_pmic_access_mutex); return ret; } @@ -360,17 +381,20 @@ EXPORT_SYMBOL(iosf_mbi_block_punit_i2c_access); void iosf_mbi_unblock_punit_i2c_access(void) { - mutex_lock(&iosf_mbi_block_punit_i2c_access_count_mutex); + bool do_wakeup = false; - iosf_mbi_block_punit_i2c_access_count--; - if (iosf_mbi_block_punit_i2c_access_count == 0) { + mutex_lock(&iosf_mbi_pmic_access_mutex); + iosf_mbi_pmic_i2c_access_count--; + if (iosf_mbi_pmic_i2c_access_count == 0) { iosf_mbi_reset_semaphore(); - mutex_unlock(&iosf_mbi_punit_mutex); dev_dbg(&mbi_pdev->dev, "punit semaphore held for %ums\n", jiffies_to_msecs(jiffies - iosf_mbi_sem_acquired)); + do_wakeup = true; } + mutex_unlock(&iosf_mbi_pmic_access_mutex); - mutex_unlock(&iosf_mbi_block_punit_i2c_access_count_mutex); + if (do_wakeup) + wake_up(&iosf_mbi_pmic_access_waitq); } EXPORT_SYMBOL(iosf_mbi_unblock_punit_i2c_access); @@ -379,10 +403,10 @@ int iosf_mbi_register_pmic_bus_access_notifier(struct notifier_block *nb) int ret; /* Wait for the bus to go inactive before registering */ - mutex_lock(&iosf_mbi_punit_mutex); + iosf_mbi_punit_acquire(); ret = blocking_notifier_chain_register( &iosf_mbi_pmic_bus_access_notifier, nb); - mutex_unlock(&iosf_mbi_punit_mutex); + iosf_mbi_punit_release(); return ret; } @@ -403,9 +427,9 @@ int iosf_mbi_unregister_pmic_bus_access_notifier(struct notifier_block *nb) int ret; /* Wait for the bus to go inactive before unregistering */ - mutex_lock(&iosf_mbi_punit_mutex); + iosf_mbi_punit_acquire(); ret = iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(nb); - mutex_unlock(&iosf_mbi_punit_mutex); + iosf_mbi_punit_release(); return ret; } @@ -413,7 +437,7 @@ EXPORT_SYMBOL(iosf_mbi_unregister_pmic_bus_access_notifier); void iosf_mbi_assert_punit_acquired(void) { - WARN_ON(!mutex_is_locked(&iosf_mbi_punit_mutex)); + WARN_ON(iosf_mbi_pmic_punit_access_count == 0); } EXPORT_SYMBOL(iosf_mbi_assert_punit_acquired); diff --git a/block/blk-flush.c b/block/blk-flush.c index aedd9320e605..1eec9cbe5a0a 100644 --- a/block/blk-flush.c +++ 
b/block/blk-flush.c @@ -214,6 +214,16 @@ static void flush_end_io(struct request *flush_rq, blk_status_t error) /* release the tag's ownership to the req cloned from */ spin_lock_irqsave(&fq->mq_flush_lock, flags); + + if (!refcount_dec_and_test(&flush_rq->ref)) { + fq->rq_status = error; + spin_unlock_irqrestore(&fq->mq_flush_lock, flags); + return; + } + + if (fq->rq_status != BLK_STS_OK) + error = fq->rq_status; + hctx = flush_rq->mq_hctx; if (!q->elevator) { blk_mq_tag_set_rq(hctx, flush_rq->tag, fq->orig_rq); diff --git a/block/blk-mq.c b/block/blk-mq.c index 0835f4d8d42e..a79b9ad1aba1 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -44,12 +44,12 @@ static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb); static int blk_mq_poll_stats_bkt(const struct request *rq) { - int ddir, bytes, bucket; + int ddir, sectors, bucket; ddir = rq_data_dir(rq); - bytes = blk_rq_bytes(rq); + sectors = blk_rq_stats_sectors(rq); - bucket = ddir + 2*(ilog2(bytes) - 9); + bucket = ddir + 2 * ilog2(sectors); if (bucket < 0) return -1; @@ -330,6 +330,7 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data, else rq->start_time_ns = 0; rq->io_start_time_ns = 0; + rq->stats_sectors = 0; rq->nr_phys_segments = 0; #if defined(CONFIG_BLK_DEV_INTEGRITY) rq->nr_integrity_segments = 0; @@ -673,9 +674,7 @@ void blk_mq_start_request(struct request *rq) if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) { rq->io_start_time_ns = ktime_get_ns(); -#ifdef CONFIG_BLK_DEV_THROTTLING_LOW - rq->throtl_size = blk_rq_sectors(rq); -#endif + rq->stats_sectors = blk_rq_sectors(rq); rq->rq_flags |= RQF_STATS; rq_qos_issue(q, rq); } @@ -905,7 +904,10 @@ static bool blk_mq_check_expired(struct blk_mq_hw_ctx *hctx, */ if (blk_mq_req_expired(rq, next)) blk_mq_rq_timed_out(rq, reserved); - if (refcount_dec_and_test(&rq->ref)) + + if (is_flush_rq(rq, hctx)) + rq->end_io(rq, 0); + else if (refcount_dec_and_test(&rq->ref)) __blk_mq_free_request(rq); return true; @@ -2841,6 +2843,8 @@ static unsigned int nr_hw_queues(struct blk_mq_tag_set *set) struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set, struct request_queue *q) { + int ret = -ENOMEM; + /* mark the queue as mq asap */ q->mq_ops = set->ops; @@ -2902,17 +2906,18 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set, blk_mq_map_swqueue(q); if (!(set->flags & BLK_MQ_F_NO_SCHED)) { - int ret; - ret = elevator_init_mq(q); if (ret) - return ERR_PTR(ret); + goto err_tag_set; } return q; +err_tag_set: + blk_mq_del_queue_tag_set(q); err_hctxs: kfree(q->queue_hw_ctx); + q->nr_hw_queues = 0; err_sys_init: blk_mq_sysfs_deinit(q); err_poll: diff --git a/block/blk-throttle.c b/block/blk-throttle.c index 8ab6c8153223..ee74bffe3504 100644 --- a/block/blk-throttle.c +++ b/block/blk-throttle.c @@ -2246,7 +2246,8 @@ void blk_throtl_stat_add(struct request *rq, u64 time_ns) struct request_queue *q = rq->q; struct throtl_data *td = q->td; - throtl_track_latency(td, rq->throtl_size, req_op(rq), time_ns >> 10); + throtl_track_latency(td, blk_rq_stats_sectors(rq), req_op(rq), + time_ns >> 10); } void blk_throtl_bio_endio(struct bio *bio) diff --git a/block/blk.h b/block/blk.h index de6b2e146d6e..d5edfd73d45e 100644 --- a/block/blk.h +++ b/block/blk.h @@ -19,6 +19,7 @@ struct blk_flush_queue { unsigned int flush_queue_delayed:1; unsigned int flush_pending_idx:1; unsigned int flush_running_idx:1; + blk_status_t rq_status; unsigned long flush_pending_since; struct list_head flush_queue[2]; struct list_head flush_data_in_flight; @@ 
-47,6 +48,12 @@ static inline void __blk_get_queue(struct request_queue *q) kobject_get(&q->kobj); } +static inline bool +is_flush_rq(struct request *req, struct blk_mq_hw_ctx *hctx) +{ + return hctx->fq->flush_rq == req; +} + struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q, int node, int cmd_size, gfp_t flags); void blk_free_flush_queue(struct blk_flush_queue *q); diff --git a/block/mq-deadline.c b/block/mq-deadline.c index 2a2a2e82832e..35e84bc0ec8c 100644 --- a/block/mq-deadline.c +++ b/block/mq-deadline.c @@ -377,13 +377,6 @@ static struct request *__dd_dispatch_request(struct deadline_data *dd) * hardware queue, but we may return a request that is for a * different hardware queue. This is because mq-deadline has shared * state for all hardware queues, in terms of sorting, FIFOs, etc. - * - * For a zoned block device, __dd_dispatch_request() may return NULL - * if all the queued write requests are directed at zones that are already - * locked due to on-going write requests. In this case, make sure to mark - * the queue as needing a restart to ensure that the queue is run again - * and the pending writes dispatched once the target zones for the ongoing - * write requests are unlocked in dd_finish_request(). */ static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx) { @@ -392,9 +385,6 @@ static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx) spin_lock(&dd->lock); rq = __dd_dispatch_request(dd); - if (!rq && blk_queue_is_zoned(hctx->queue) && - !list_empty(&dd->fifo_list[WRITE])) - blk_mq_sched_mark_restart_hctx(hctx); spin_unlock(&dd->lock); return rq; @@ -561,6 +551,13 @@ static void dd_prepare_request(struct request *rq, struct bio *bio) * spinlock so that the zone is never unlocked while deadline_fifo_request() * or deadline_next_request() are executing. This function is called for * all requests, whether or not these requests complete successfully. + * + * For a zoned block device, __dd_dispatch_request() may have stopped + * dispatching requests if all the queued requests are write requests directed + * at zones that are already locked due to on-going write requests. To ensure + * write request dispatch progress in this case, mark the queue as needing a + * restart to ensure that the queue is run again after completion of the + * request and zones being unlocked. 
*/ static void dd_finish_request(struct request *rq) { @@ -572,6 +569,8 @@ static void dd_finish_request(struct request *rq) spin_lock_irqsave(&dd->zone_lock, flags); blk_req_zone_write_unlock(rq); + if (!list_empty(&dd->fifo_list[WRITE])) + blk_mq_sched_mark_restart_hctx(rq->mq_hctx); spin_unlock_irqrestore(&dd->zone_lock, flags); } } diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c index d696f165a50e..60bbc5090abe 100644 --- a/drivers/acpi/acpi_lpss.c +++ b/drivers/acpi/acpi_lpss.c @@ -219,12 +219,13 @@ static void bsw_pwm_setup(struct lpss_private_data *pdata) } static const struct lpss_device_desc lpt_dev_desc = { - .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_LTR, + .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_LTR + | LPSS_SAVE_CTX, .prv_offset = 0x800, }; static const struct lpss_device_desc lpt_i2c_dev_desc = { - .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_LTR, + .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_LTR | LPSS_SAVE_CTX, .prv_offset = 0x800, }; @@ -236,7 +237,8 @@ static struct property_entry uart_properties[] = { }; static const struct lpss_device_desc lpt_uart_dev_desc = { - .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_LTR, + .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_LTR + | LPSS_SAVE_CTX, .clk_con_id = "baudclk", .prv_offset = 0x800, .setup = lpss_uart_setup, diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c index 24f065114d42..2c4dda0787e8 100644 --- a/drivers/acpi/acpi_processor.c +++ b/drivers/acpi/acpi_processor.c @@ -279,9 +279,13 @@ static int acpi_processor_get_info(struct acpi_device *device) } if (acpi_duplicate_processor_id(pr->acpi_id)) { - dev_err(&device->dev, - "Failed to get unique processor _UID (0x%x)\n", - pr->acpi_id); + if (pr->acpi_id == 0xff) + dev_info_once(&device->dev, + "Entry not well-defined, consider updating BIOS\n"); + else + dev_err(&device->dev, + "Failed to get unique processor _UID (0x%x)\n", + pr->acpi_id); return -ENODEV; } diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c index a66e00fe31fe..66205ec54555 100644 --- a/drivers/acpi/apei/ghes.c +++ b/drivers/acpi/apei/ghes.c @@ -153,6 +153,7 @@ static void ghes_unmap(void __iomem *vaddr, enum fixed_addresses fixmap_idx) int ghes_estatus_pool_init(int num_ghes) { unsigned long addr, len; + int rc; ghes_estatus_pool = gen_pool_create(GHES_ESTATUS_POOL_MIN_ALLOC_ORDER, -1); if (!ghes_estatus_pool) @@ -164,7 +165,7 @@ int ghes_estatus_pool_init(int num_ghes) ghes_estatus_pool_size_request = PAGE_ALIGN(len); addr = (unsigned long)vmalloc(PAGE_ALIGN(len)); if (!addr) - return -ENOMEM; + goto err_pool_alloc; /* * New allocation must be visible in all pgd before it can be found by @@ -172,7 +173,19 @@ int ghes_estatus_pool_init(int num_ghes) */ vmalloc_sync_all(); - return gen_pool_add(ghes_estatus_pool, addr, PAGE_ALIGN(len), -1); + rc = gen_pool_add(ghes_estatus_pool, addr, PAGE_ALIGN(len), -1); + if (rc) + goto err_pool_add; + + return 0; + +err_pool_add: + vfree((void *)addr); + +err_pool_alloc: + gen_pool_destroy(ghes_estatus_pool); + + return -ENOMEM; } static int map_gen_v2(struct ghes *ghes) diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c index 15f103d7532b..3b2525908dd8 100644 --- a/drivers/acpi/cppc_acpi.c +++ b/drivers/acpi/cppc_acpi.c @@ -365,8 +365,10 @@ static int acpi_get_psd(struct cpc_desc *cpc_ptr, acpi_handle handle) union acpi_object *psd = NULL; struct acpi_psd_package *pdomain; - status = acpi_evaluate_object_typed(handle, "_PSD", NULL, &buffer, - 
ACPI_TYPE_PACKAGE); + status = acpi_evaluate_object_typed(handle, "_PSD", NULL, + &buffer, ACPI_TYPE_PACKAGE); + if (status == AE_NOT_FOUND) /* _PSD is optional */ + return 0; if (ACPI_FAILURE(status)) return -ENODEV; diff --git a/drivers/acpi/custom_method.c b/drivers/acpi/custom_method.c index b2ef4c2ec955..fd66a736621c 100644 --- a/drivers/acpi/custom_method.c +++ b/drivers/acpi/custom_method.c @@ -49,8 +49,10 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf, if ((*ppos > max_size) || (*ppos + count > max_size) || (*ppos + count < count) || - (count > uncopied_bytes)) + (count > uncopied_bytes)) { + kfree(buf); return -EINVAL; + } if (copy_from_user(buf + (*ppos), user_buf, count)) { kfree(buf); @@ -70,6 +72,7 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf, add_taint(TAINT_OVERRIDDEN_ACPI_TABLE, LOCKDEP_NOW_UNRELIABLE); } + kfree(buf); return count; } diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c index d2549ae65e1b..dea8a60e18a4 100644 --- a/drivers/acpi/pci_irq.c +++ b/drivers/acpi/pci_irq.c @@ -449,8 +449,10 @@ int acpi_pci_irq_enable(struct pci_dev *dev) * No IRQ known to the ACPI subsystem - maybe the BIOS / * driver reported one, then use it. Exit in any case. */ - if (!acpi_pci_irq_valid(dev, pin)) + if (!acpi_pci_irq_valid(dev, pin)) { + kfree(entry); return 0; + } if (acpi_isa_register_gsi(dev)) dev_warn(&dev->dev, "PCI INT %c: no GSI\n", diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index f7652baa6337..3e63294304c7 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c @@ -65,6 +65,12 @@ enum board_ids { board_ahci_sb700, /* for SB700 and SB800 */ board_ahci_vt8251, + /* + * board IDs for Intel chipsets that support more than 6 ports + * *and* end up needing the PCS quirk. 
+ */ + board_ahci_pcs7, + /* aliases */ board_ahci_mcp_linux = board_ahci_mcp65, board_ahci_mcp67 = board_ahci_mcp65, @@ -220,6 +226,12 @@ static const struct ata_port_info ahci_port_info[] = { .udma_mask = ATA_UDMA6, .port_ops = &ahci_vt8251_ops, }, + [board_ahci_pcs7] = { + .flags = AHCI_FLAG_COMMON, + .pio_mask = ATA_PIO4, + .udma_mask = ATA_UDMA6, + .port_ops = &ahci_ops, + }, }; static const struct pci_device_id ahci_pci_tbl[] = { @@ -264,26 +276,26 @@ static const struct pci_device_id ahci_pci_tbl[] = { { PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */ { PCI_VDEVICE(INTEL, 0x3b2c), board_ahci_mobile }, /* PCH M RAID */ { PCI_VDEVICE(INTEL, 0x3b2f), board_ahci }, /* PCH AHCI */ - { PCI_VDEVICE(INTEL, 0x19b0), board_ahci }, /* DNV AHCI */ - { PCI_VDEVICE(INTEL, 0x19b1), board_ahci }, /* DNV AHCI */ - { PCI_VDEVICE(INTEL, 0x19b2), board_ahci }, /* DNV AHCI */ - { PCI_VDEVICE(INTEL, 0x19b3), board_ahci }, /* DNV AHCI */ - { PCI_VDEVICE(INTEL, 0x19b4), board_ahci }, /* DNV AHCI */ - { PCI_VDEVICE(INTEL, 0x19b5), board_ahci }, /* DNV AHCI */ - { PCI_VDEVICE(INTEL, 0x19b6), board_ahci }, /* DNV AHCI */ - { PCI_VDEVICE(INTEL, 0x19b7), board_ahci }, /* DNV AHCI */ - { PCI_VDEVICE(INTEL, 0x19bE), board_ahci }, /* DNV AHCI */ - { PCI_VDEVICE(INTEL, 0x19bF), board_ahci }, /* DNV AHCI */ - { PCI_VDEVICE(INTEL, 0x19c0), board_ahci }, /* DNV AHCI */ - { PCI_VDEVICE(INTEL, 0x19c1), board_ahci }, /* DNV AHCI */ - { PCI_VDEVICE(INTEL, 0x19c2), board_ahci }, /* DNV AHCI */ - { PCI_VDEVICE(INTEL, 0x19c3), board_ahci }, /* DNV AHCI */ - { PCI_VDEVICE(INTEL, 0x19c4), board_ahci }, /* DNV AHCI */ - { PCI_VDEVICE(INTEL, 0x19c5), board_ahci }, /* DNV AHCI */ - { PCI_VDEVICE(INTEL, 0x19c6), board_ahci }, /* DNV AHCI */ - { PCI_VDEVICE(INTEL, 0x19c7), board_ahci }, /* DNV AHCI */ - { PCI_VDEVICE(INTEL, 0x19cE), board_ahci }, /* DNV AHCI */ - { PCI_VDEVICE(INTEL, 0x19cF), board_ahci }, /* DNV AHCI */ + { PCI_VDEVICE(INTEL, 0x19b0), board_ahci_pcs7 }, /* DNV AHCI */ + { PCI_VDEVICE(INTEL, 0x19b1), board_ahci_pcs7 }, /* DNV AHCI */ + { PCI_VDEVICE(INTEL, 0x19b2), board_ahci_pcs7 }, /* DNV AHCI */ + { PCI_VDEVICE(INTEL, 0x19b3), board_ahci_pcs7 }, /* DNV AHCI */ + { PCI_VDEVICE(INTEL, 0x19b4), board_ahci_pcs7 }, /* DNV AHCI */ + { PCI_VDEVICE(INTEL, 0x19b5), board_ahci_pcs7 }, /* DNV AHCI */ + { PCI_VDEVICE(INTEL, 0x19b6), board_ahci_pcs7 }, /* DNV AHCI */ + { PCI_VDEVICE(INTEL, 0x19b7), board_ahci_pcs7 }, /* DNV AHCI */ + { PCI_VDEVICE(INTEL, 0x19bE), board_ahci_pcs7 }, /* DNV AHCI */ + { PCI_VDEVICE(INTEL, 0x19bF), board_ahci_pcs7 }, /* DNV AHCI */ + { PCI_VDEVICE(INTEL, 0x19c0), board_ahci_pcs7 }, /* DNV AHCI */ + { PCI_VDEVICE(INTEL, 0x19c1), board_ahci_pcs7 }, /* DNV AHCI */ + { PCI_VDEVICE(INTEL, 0x19c2), board_ahci_pcs7 }, /* DNV AHCI */ + { PCI_VDEVICE(INTEL, 0x19c3), board_ahci_pcs7 }, /* DNV AHCI */ + { PCI_VDEVICE(INTEL, 0x19c4), board_ahci_pcs7 }, /* DNV AHCI */ + { PCI_VDEVICE(INTEL, 0x19c5), board_ahci_pcs7 }, /* DNV AHCI */ + { PCI_VDEVICE(INTEL, 0x19c6), board_ahci_pcs7 }, /* DNV AHCI */ + { PCI_VDEVICE(INTEL, 0x19c7), board_ahci_pcs7 }, /* DNV AHCI */ + { PCI_VDEVICE(INTEL, 0x19cE), board_ahci_pcs7 }, /* DNV AHCI */ + { PCI_VDEVICE(INTEL, 0x19cF), board_ahci_pcs7 }, /* DNV AHCI */ { PCI_VDEVICE(INTEL, 0x1c02), board_ahci }, /* CPT AHCI */ { PCI_VDEVICE(INTEL, 0x1c03), board_ahci_mobile }, /* CPT M AHCI */ { PCI_VDEVICE(INTEL, 0x1c04), board_ahci }, /* CPT RAID */ @@ -623,30 +635,6 @@ static void ahci_pci_save_initial_config(struct pci_dev *pdev, ahci_save_initial_config(&pdev->dev, 
hpriv); } -static int ahci_pci_reset_controller(struct ata_host *host) -{ - struct pci_dev *pdev = to_pci_dev(host->dev); - int rc; - - rc = ahci_reset_controller(host); - if (rc) - return rc; - - if (pdev->vendor == PCI_VENDOR_ID_INTEL) { - struct ahci_host_priv *hpriv = host->private_data; - u16 tmp16; - - /* configure PCS */ - pci_read_config_word(pdev, 0x92, &tmp16); - if ((tmp16 & hpriv->port_map) != hpriv->port_map) { - tmp16 |= hpriv->port_map; - pci_write_config_word(pdev, 0x92, tmp16); - } - } - - return 0; -} - static void ahci_pci_init_controller(struct ata_host *host) { struct ahci_host_priv *hpriv = host->private_data; @@ -849,7 +837,7 @@ static int ahci_pci_device_runtime_resume(struct device *dev) struct ata_host *host = pci_get_drvdata(pdev); int rc; - rc = ahci_pci_reset_controller(host); + rc = ahci_reset_controller(host); if (rc) return rc; ahci_pci_init_controller(host); @@ -884,7 +872,7 @@ static int ahci_pci_device_resume(struct device *dev) ahci_mcp89_apple_enable(pdev); if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) { - rc = ahci_pci_reset_controller(host); + rc = ahci_reset_controller(host); if (rc) return rc; @@ -1619,6 +1607,34 @@ static void ahci_update_initial_lpm_policy(struct ata_port *ap, ap->target_lpm_policy = policy; } +static void ahci_intel_pcs_quirk(struct pci_dev *pdev, struct ahci_host_priv *hpriv) +{ + const struct pci_device_id *id = pci_match_id(ahci_pci_tbl, pdev); + u16 tmp16; + + /* + * Only apply the 6-port PCS quirk for known legacy platforms. + */ + if (!id || id->vendor != PCI_VENDOR_ID_INTEL) + return; + if (((enum board_ids) id->driver_data) < board_ahci_pcs7) + return; + + /* + * port_map is determined from PORTS_IMPL PCI register which is + * implemented as write or write-once register. If the register + * isn't programmed, ahci automatically generates it from number + * of ports, which is good enough for PCS programming. It is + * otherwise expected that platform firmware enables the ports + * before the OS boots. + */ + pci_read_config_word(pdev, PCS_6, &tmp16); + if ((tmp16 & hpriv->port_map) != hpriv->port_map) { + tmp16 |= hpriv->port_map; + pci_write_config_word(pdev, PCS_6, tmp16); + } +} + static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { unsigned int board_id = ent->driver_data; @@ -1731,6 +1747,12 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) /* save initial config */ ahci_pci_save_initial_config(pdev, hpriv); + /* + * If platform firmware failed to enable ports, try to enable + * them here. 
+ */ + ahci_intel_pcs_quirk(pdev, hpriv); + /* prepare host */ if (hpriv->cap & HOST_CAP_NCQ) { pi.flags |= ATA_FLAG_NCQ; @@ -1840,7 +1862,7 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (rc) return rc; - rc = ahci_pci_reset_controller(host); + rc = ahci_reset_controller(host); if (rc) return rc; diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h index 0570629d719d..3dbf398c92ea 100644 --- a/drivers/ata/ahci.h +++ b/drivers/ata/ahci.h @@ -247,6 +247,8 @@ enum { ATA_FLAG_ACPI_SATA | ATA_FLAG_AN, ICH_MAP = 0x90, /* ICH MAP register */ + PCS_6 = 0x92, /* 6 port PCS */ + PCS_7 = 0x94, /* 7+ port PCS (Denverton) */ /* em constants */ EM_MAX_SLOTS = 8, diff --git a/drivers/base/soc.c b/drivers/base/soc.c index 10b280f30217..7e91894a380b 100644 --- a/drivers/base/soc.c +++ b/drivers/base/soc.c @@ -157,6 +157,7 @@ struct soc_device *soc_device_register(struct soc_device_attribute *soc_dev_attr out1: return ERR_PTR(ret); } +EXPORT_SYMBOL_GPL(soc_device_register); /* Ensure soc_dev->attr is freed prior to calling soc_device_unregister. */ void soc_device_unregister(struct soc_device *soc_dev) @@ -166,6 +167,7 @@ void soc_device_unregister(struct soc_device *soc_dev) device_unregister(&soc_dev->dev); early_soc_dev_attr = NULL; } +EXPORT_SYMBOL_GPL(soc_device_unregister); static int __init soc_bus_register(void) { diff --git a/drivers/block/loop.c b/drivers/block/loop.c index ab7ca5989097..1410fa893653 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -1755,6 +1755,7 @@ static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode, case LOOP_SET_FD: case LOOP_CHANGE_FD: case LOOP_SET_BLOCK_SIZE: + case LOOP_SET_DIRECT_IO: err = lo_ioctl(bdev, mode, cmd, arg); break; default: diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index e21d2ded732b..a69a90ad9208 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -357,8 +357,10 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req, } config = nbd->config; - if (!mutex_trylock(&cmd->lock)) + if (!mutex_trylock(&cmd->lock)) { + nbd_config_put(nbd); return BLK_EH_RESET_TIMER; + } if (config->num_connections > 1) { dev_err_ratelimited(nbd_to_dev(nbd), diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c index 9044d31ab1a1..8d53b8ef545c 100644 --- a/drivers/char/hw_random/core.c +++ b/drivers/char/hw_random/core.c @@ -67,7 +67,7 @@ static void add_early_randomness(struct hwrng *rng) size_t size = min_t(size_t, 16, rng_buffer_size()); mutex_lock(&reading_mutex); - bytes_read = rng_get_data(rng, rng_buffer, size, 1); + bytes_read = rng_get_data(rng, rng_buffer, size, 0); mutex_unlock(&reading_mutex); if (bytes_read > 0) add_device_randomness(rng_buffer, bytes_read); diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c index 6707659cffd6..44bd3dda01c2 100644 --- a/drivers/char/ipmi/ipmi_msghandler.c +++ b/drivers/char/ipmi/ipmi_msghandler.c @@ -4215,7 +4215,53 @@ static int handle_one_recv_msg(struct ipmi_smi *intf, int chan; ipmi_debug_msg("Recv:", msg->rsp, msg->rsp_size); - if (msg->rsp_size < 2) { + + if ((msg->data_size >= 2) + && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2)) + && (msg->data[1] == IPMI_SEND_MSG_CMD) + && (msg->user_data == NULL)) { + + if (intf->in_shutdown) + goto free_msg; + + /* + * This is the local response to a command send, start + * the timer for these. The user_data will not be + * NULL if this is a response send, and we will let + * response sends just go through. 
+ */ + + /* + * Check for errors, if we get certain errors (ones + * that mean basically we can try again later), we + * ignore them and start the timer. Otherwise we + * report the error immediately. + */ + if ((msg->rsp_size >= 3) && (msg->rsp[2] != 0) + && (msg->rsp[2] != IPMI_NODE_BUSY_ERR) + && (msg->rsp[2] != IPMI_LOST_ARBITRATION_ERR) + && (msg->rsp[2] != IPMI_BUS_ERR) + && (msg->rsp[2] != IPMI_NAK_ON_WRITE_ERR)) { + int ch = msg->rsp[3] & 0xf; + struct ipmi_channel *chans; + + /* Got an error sending the message, handle it. */ + + chans = READ_ONCE(intf->channel_list)->c; + if ((chans[ch].medium == IPMI_CHANNEL_MEDIUM_8023LAN) + || (chans[ch].medium == IPMI_CHANNEL_MEDIUM_ASYNC)) + ipmi_inc_stat(intf, sent_lan_command_errs); + else + ipmi_inc_stat(intf, sent_ipmb_command_errs); + intf_err_seq(intf, msg->msgid, msg->rsp[2]); + } else + /* The message was sent, start the timer. */ + intf_start_seq_timer(intf, msg->msgid); +free_msg: + requeue = 0; + goto out; + + } else if (msg->rsp_size < 2) { /* Message is too small to be correct. */ dev_warn(intf->si_dev, "BMC returned too small a message for netfn %x cmd %x, got %d bytes\n", @@ -4472,62 +4518,16 @@ void ipmi_smi_msg_received(struct ipmi_smi *intf, unsigned long flags = 0; /* keep us warning-free. */ int run_to_completion = intf->run_to_completion; - if ((msg->data_size >= 2) - && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2)) - && (msg->data[1] == IPMI_SEND_MSG_CMD) - && (msg->user_data == NULL)) { - - if (intf->in_shutdown) - goto free_msg; - - /* - * This is the local response to a command send, start - * the timer for these. The user_data will not be - * NULL if this is a response send, and we will let - * response sends just go through. - */ - - /* - * Check for errors, if we get certain errors (ones - * that mean basically we can try again later), we - * ignore them and start the timer. Otherwise we - * report the error immediately. - */ - if ((msg->rsp_size >= 3) && (msg->rsp[2] != 0) - && (msg->rsp[2] != IPMI_NODE_BUSY_ERR) - && (msg->rsp[2] != IPMI_LOST_ARBITRATION_ERR) - && (msg->rsp[2] != IPMI_BUS_ERR) - && (msg->rsp[2] != IPMI_NAK_ON_WRITE_ERR)) { - int ch = msg->rsp[3] & 0xf; - struct ipmi_channel *chans; - - /* Got an error sending the message, handle it. */ - - chans = READ_ONCE(intf->channel_list)->c; - if ((chans[ch].medium == IPMI_CHANNEL_MEDIUM_8023LAN) - || (chans[ch].medium == IPMI_CHANNEL_MEDIUM_ASYNC)) - ipmi_inc_stat(intf, sent_lan_command_errs); - else - ipmi_inc_stat(intf, sent_ipmb_command_errs); - intf_err_seq(intf, msg->msgid, msg->rsp[2]); - } else - /* The message was sent, start the timer. */ - intf_start_seq_timer(intf, msg->msgid); - -free_msg: - ipmi_free_smi_msg(msg); - } else { - /* - * To preserve message order, we keep a queue and deliver from - * a tasklet. - */ - if (!run_to_completion) - spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags); - list_add_tail(&msg->link, &intf->waiting_rcv_msgs); - if (!run_to_completion) - spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock, - flags); - } + /* + * To preserve message order, we keep a queue and deliver from + * a tasklet. 
+ */ + if (!run_to_completion) + spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags); + list_add_tail(&msg->link, &intf->waiting_rcv_msgs); + if (!run_to_completion) + spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock, + flags); if (!run_to_completion) spin_lock_irqsave(&intf->xmit_msgs_lock, flags); diff --git a/drivers/char/mem.c b/drivers/char/mem.c index b08dc50f9f26..9eb564c002f6 100644 --- a/drivers/char/mem.c +++ b/drivers/char/mem.c @@ -97,6 +97,13 @@ void __weak unxlate_dev_mem_ptr(phys_addr_t phys, void *addr) } #endif +static inline bool should_stop_iteration(void) +{ + if (need_resched()) + cond_resched(); + return fatal_signal_pending(current); +} + /* * This funcion reads the *physical* memory. The f_pos points directly to the * memory location. @@ -175,6 +182,8 @@ static ssize_t read_mem(struct file *file, char __user *buf, p += sz; count -= sz; read += sz; + if (should_stop_iteration()) + break; } kfree(bounce); @@ -251,6 +260,8 @@ static ssize_t write_mem(struct file *file, const char __user *buf, p += sz; count -= sz; written += sz; + if (should_stop_iteration()) + break; } *ppos += written; @@ -468,6 +479,10 @@ static ssize_t read_kmem(struct file *file, char __user *buf, read += sz; low_count -= sz; count -= sz; + if (should_stop_iteration()) { + count = 0; + break; + } } } @@ -492,6 +507,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf, buf += sz; read += sz; p += sz; + if (should_stop_iteration()) + break; } free_page((unsigned long)kbuf); } @@ -544,6 +561,8 @@ static ssize_t do_write_kmem(unsigned long p, const char __user *buf, p += sz; count -= sz; written += sz; + if (should_stop_iteration()) + break; } *ppos += written; @@ -595,6 +614,8 @@ static ssize_t write_kmem(struct file *file, const char __user *buf, buf += sz; virtr += sz; p += sz; + if (should_stop_iteration()) + break; } free_page((unsigned long)kbuf); } diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c index 1b4f95c13e00..d7a3888ad80f 100644 --- a/drivers/char/tpm/tpm-interface.c +++ b/drivers/char/tpm/tpm-interface.c @@ -320,18 +320,22 @@ int tpm_pcr_extend(struct tpm_chip *chip, u32 pcr_idx, if (!chip) return -ENODEV; - for (i = 0; i < chip->nr_allocated_banks; i++) - if (digests[i].alg_id != chip->allocated_banks[i].alg_id) - return -EINVAL; + for (i = 0; i < chip->nr_allocated_banks; i++) { + if (digests[i].alg_id != chip->allocated_banks[i].alg_id) { + rc = EINVAL; + goto out; + } + } if (chip->flags & TPM_CHIP_FLAG_TPM2) { rc = tpm2_pcr_extend(chip, pcr_idx, digests); - tpm_put_ops(chip); - return rc; + goto out; } rc = tpm1_pcr_extend(chip, pcr_idx, digests[0].digest, "attempting extend a PCR value"); + +out: tpm_put_ops(chip); return rc; } @@ -354,14 +358,9 @@ int tpm_send(struct tpm_chip *chip, void *cmd, size_t buflen) if (!chip) return -ENODEV; - rc = tpm_buf_init(&buf, 0, 0); - if (rc) - goto out; - - memcpy(buf.data, cmd, buflen); + buf.data = cmd; rc = tpm_transmit_cmd(chip, &buf, 0, "attempting to a send a command"); - tpm_buf_destroy(&buf); -out: + tpm_put_ops(chip); return rc; } diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c index c3181ea9f271..270f43acbb77 100644 --- a/drivers/char/tpm/tpm_tis_core.c +++ b/drivers/char/tpm/tpm_tis_core.c @@ -980,6 +980,8 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq, goto out_err; } + tpm_chip_start(chip); + chip->flags |= TPM_CHIP_FLAG_IRQ; if (irq) { tpm_tis_probe_irq_single(chip, intmask, IRQF_SHARED, irq); @@ -989,6 +991,7 @@ int 
tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq, } else { tpm_tis_probe_irq(chip, intmask); } + tpm_chip_stop(chip); } rc = tpm_chip_register(chip); diff --git a/drivers/cpufreq/armada-8k-cpufreq.c b/drivers/cpufreq/armada-8k-cpufreq.c index 988ebc326bdb..39e34f5066d3 100644 --- a/drivers/cpufreq/armada-8k-cpufreq.c +++ b/drivers/cpufreq/armada-8k-cpufreq.c @@ -136,6 +136,8 @@ static int __init armada_8k_cpufreq_init(void) nb_cpus = num_possible_cpus(); freq_tables = kcalloc(nb_cpus, sizeof(*freq_tables), GFP_KERNEL); + if (!freq_tables) + return -ENOMEM; cpumask_copy(&cpus, cpu_possible_mask); /* diff --git a/drivers/cpufreq/imx-cpufreq-dt.c b/drivers/cpufreq/imx-cpufreq-dt.c index 4f85f3112784..35db14cf3102 100644 --- a/drivers/cpufreq/imx-cpufreq-dt.c +++ b/drivers/cpufreq/imx-cpufreq-dt.c @@ -16,6 +16,7 @@ #define OCOTP_CFG3_SPEED_GRADE_SHIFT 8 #define OCOTP_CFG3_SPEED_GRADE_MASK (0x3 << 8) +#define IMX8MN_OCOTP_CFG3_SPEED_GRADE_MASK (0xf << 8) #define OCOTP_CFG3_MKT_SEGMENT_SHIFT 6 #define OCOTP_CFG3_MKT_SEGMENT_MASK (0x3 << 6) @@ -34,7 +35,12 @@ static int imx_cpufreq_dt_probe(struct platform_device *pdev) if (ret) return ret; - speed_grade = (cell_value & OCOTP_CFG3_SPEED_GRADE_MASK) >> OCOTP_CFG3_SPEED_GRADE_SHIFT; + if (of_machine_is_compatible("fsl,imx8mn")) + speed_grade = (cell_value & IMX8MN_OCOTP_CFG3_SPEED_GRADE_MASK) + >> OCOTP_CFG3_SPEED_GRADE_SHIFT; + else + speed_grade = (cell_value & OCOTP_CFG3_SPEED_GRADE_MASK) + >> OCOTP_CFG3_SPEED_GRADE_SHIFT; mkt_segment = (cell_value & OCOTP_CFG3_MKT_SEGMENT_MASK) >> OCOTP_CFG3_MKT_SEGMENT_SHIFT; /* diff --git a/drivers/cpuidle/governors/teo.c b/drivers/cpuidle/governors/teo.c index 7d05efdbd3c6..12d9e6cecf1d 100644 --- a/drivers/cpuidle/governors/teo.c +++ b/drivers/cpuidle/governors/teo.c @@ -242,7 +242,7 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev, struct teo_cpu *cpu_data = per_cpu_ptr(&teo_cpus, dev->cpu); int latency_req = cpuidle_governor_latency_req(dev->cpu); unsigned int duration_us, count; - int max_early_idx, idx, i; + int max_early_idx, constraint_idx, idx, i; ktime_t delta_tick; if (cpu_data->last_state >= 0) { @@ -257,6 +257,7 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev, count = 0; max_early_idx = -1; + constraint_idx = drv->state_count; idx = -1; for (i = 0; i < drv->state_count; i++) { @@ -286,16 +287,8 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev, if (s->target_residency > duration_us) break; - if (s->exit_latency > latency_req) { - /* - * If we break out of the loop for latency reasons, use - * the target residency of the selected state as the - * expected idle duration to avoid stopping the tick - * as long as that target residency is low enough. - */ - duration_us = drv->states[idx].target_residency; - goto refine; - } + if (s->exit_latency > latency_req && constraint_idx > i) + constraint_idx = i; idx = i; @@ -321,7 +314,13 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev, duration_us = drv->states[idx].target_residency; } -refine: + /* + * If there is a latency constraint, it may be necessary to use a + * shallower idle state than the one selected so far. + */ + if (constraint_idx < idx) + idx = constraint_idx; + if (idx < 0) { idx = 0; /* No states enabled. Must use 0. 
*/ } else if (idx > 0) { @@ -331,13 +330,12 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev, /* * Count and sum the most recent idle duration values less than - * the target residency of the state selected so far, find the - * max. + * the current expected idle duration value. */ for (i = 0; i < INTERVALS; i++) { unsigned int val = cpu_data->intervals[i]; - if (val >= drv->states[idx].target_residency) + if (val >= duration_us) continue; count++; @@ -356,8 +354,10 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev, * would be too shallow. */ if (!(tick_nohz_tick_stopped() && avg_us < TICK_USEC)) { - idx = teo_find_shallower_state(drv, dev, idx, avg_us); duration_us = avg_us; + if (drv->states[idx].target_residency > avg_us) + idx = teo_find_shallower_state(drv, dev, + idx, avg_us); } } } diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c index ab22bf8a12d6..a0e19802149f 100644 --- a/drivers/devfreq/devfreq.c +++ b/drivers/devfreq/devfreq.c @@ -254,7 +254,7 @@ static struct devfreq_governor *try_then_request_governor(const char *name) /* Restore previous state before return */ mutex_lock(&devfreq_list_lock); if (err) - return ERR_PTR(err); + return (err < 0) ? ERR_PTR(err) : ERR_PTR(-EINVAL); governor = find_devfreq_governor(name); } diff --git a/drivers/devfreq/exynos-bus.c b/drivers/devfreq/exynos-bus.c index d9f377912c10..7c06df8bd74f 100644 --- a/drivers/devfreq/exynos-bus.c +++ b/drivers/devfreq/exynos-bus.c @@ -191,11 +191,10 @@ static void exynos_bus_exit(struct device *dev) if (ret < 0) dev_warn(dev, "failed to disable the devfreq-event devices\n"); - if (bus->regulator) - regulator_disable(bus->regulator); - dev_pm_opp_of_remove_table(dev); clk_disable_unprepare(bus->clk); + if (bus->regulator) + regulator_disable(bus->regulator); } /* @@ -383,6 +382,7 @@ static int exynos_bus_probe(struct platform_device *pdev) struct exynos_bus *bus; int ret, max_state; unsigned long min_freq, max_freq; + bool passive = false; if (!np) { dev_err(dev, "failed to find devicetree node\n"); @@ -396,27 +396,27 @@ static int exynos_bus_probe(struct platform_device *pdev) bus->dev = &pdev->dev; platform_set_drvdata(pdev, bus); - /* Parse the device-tree to get the resource information */ - ret = exynos_bus_parse_of(np, bus); - if (ret < 0) - return ret; - profile = devm_kzalloc(dev, sizeof(*profile), GFP_KERNEL); - if (!profile) { - ret = -ENOMEM; - goto err; - } + if (!profile) + return -ENOMEM; node = of_parse_phandle(dev->of_node, "devfreq", 0); if (node) { of_node_put(node); - goto passive; + passive = true; } else { ret = exynos_bus_parent_parse_of(np, bus); + if (ret < 0) + return ret; } + /* Parse the device-tree to get the resource information */ + ret = exynos_bus_parse_of(np, bus); if (ret < 0) - goto err; + goto err_reg; + + if (passive) + goto passive; /* Initialize the struct profile and governor data for parent device */ profile->polling_ms = 50; @@ -507,6 +507,9 @@ static int exynos_bus_probe(struct platform_device *pdev) err: dev_pm_opp_of_remove_table(dev); clk_disable_unprepare(bus->clk); +err_reg: + if (!passive) + regulator_disable(bus->regulator); return ret; } diff --git a/drivers/devfreq/governor_passive.c b/drivers/devfreq/governor_passive.c index 58308948b863..be6eeab9c814 100644 --- a/drivers/devfreq/governor_passive.c +++ b/drivers/devfreq/governor_passive.c @@ -149,7 +149,6 @@ static int devfreq_passive_notifier_call(struct notifier_block *nb, static int devfreq_passive_event_handler(struct devfreq 
*devfreq, unsigned int event, void *data) { - struct device *dev = devfreq->dev.parent; struct devfreq_passive_data *p_data = (struct devfreq_passive_data *)devfreq->data; struct devfreq *parent = (struct devfreq *)p_data->parent; @@ -165,12 +164,12 @@ static int devfreq_passive_event_handler(struct devfreq *devfreq, p_data->this = devfreq; nb->notifier_call = devfreq_passive_notifier_call; - ret = devm_devfreq_register_notifier(dev, parent, nb, + ret = devfreq_register_notifier(parent, nb, DEVFREQ_TRANSITION_NOTIFIER); break; case DEVFREQ_GOV_STOP: - devm_devfreq_unregister_notifier(dev, parent, nb, - DEVFREQ_TRANSITION_NOTIFIER); + WARN_ON(devfreq_unregister_notifier(parent, nb, + DEVFREQ_TRANSITION_NOTIFIER)); break; default: break; diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c index 8101ff2f05c1..970f654611bd 100644 --- a/drivers/dma/bcm2835-dma.c +++ b/drivers/dma/bcm2835-dma.c @@ -871,8 +871,10 @@ static int bcm2835_dma_probe(struct platform_device *pdev) pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask; rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); - if (rc) + if (rc) { + dev_err(&pdev->dev, "Unable to set DMA mask\n"); return rc; + } od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL); if (!od) diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c index c6c0143670d9..a776857d89c8 100644 --- a/drivers/dma/iop-adma.c +++ b/drivers/dma/iop-adma.c @@ -116,9 +116,9 @@ static void __iop_adma_slot_cleanup(struct iop_adma_chan *iop_chan) list_for_each_entry_safe(iter, _iter, &iop_chan->chain, chain_node) { pr_debug("\tcookie: %d slot: %d busy: %d " - "this_desc: %#x next_desc: %#x ack: %d\n", + "this_desc: %#x next_desc: %#llx ack: %d\n", iter->async_tx.cookie, iter->idx, busy, - iter->async_tx.phys, iop_desc_get_next_desc(iter), + iter->async_tx.phys, (u64)iop_desc_get_next_desc(iter), async_tx_test_ack(&iter->async_tx)); prefetch(_iter); prefetch(&_iter->async_tx); @@ -306,9 +306,9 @@ iop_adma_alloc_slots(struct iop_adma_chan *iop_chan, int num_slots, int i; dev_dbg(iop_chan->device->common.dev, "allocated slot: %d " - "(desc %p phys: %#x) slots_per_op %d\n", + "(desc %p phys: %#llx) slots_per_op %d\n", iter->idx, iter->hw_desc, - iter->async_tx.phys, slots_per_op); + (u64)iter->async_tx.phys, slots_per_op); /* pre-ack all but the last descriptor */ if (num_slots != slots_per_op) @@ -516,7 +516,7 @@ iop_adma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest, return NULL; BUG_ON(len > IOP_ADMA_MAX_BYTE_COUNT); - dev_dbg(iop_chan->device->common.dev, "%s len: %u\n", + dev_dbg(iop_chan->device->common.dev, "%s len: %zu\n", __func__, len); spin_lock_bh(&iop_chan->lock); @@ -549,7 +549,7 @@ iop_adma_prep_dma_xor(struct dma_chan *chan, dma_addr_t dma_dest, BUG_ON(len > IOP_ADMA_XOR_MAX_BYTE_COUNT); dev_dbg(iop_chan->device->common.dev, - "%s src_cnt: %d len: %u flags: %lx\n", + "%s src_cnt: %d len: %zu flags: %lx\n", __func__, src_cnt, len, flags); spin_lock_bh(&iop_chan->lock); @@ -582,7 +582,7 @@ iop_adma_prep_dma_xor_val(struct dma_chan *chan, dma_addr_t *dma_src, if (unlikely(!len)) return NULL; - dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %u\n", + dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %zu\n", __func__, src_cnt, len); spin_lock_bh(&iop_chan->lock); @@ -620,7 +620,7 @@ iop_adma_prep_dma_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src, BUG_ON(len > IOP_ADMA_XOR_MAX_BYTE_COUNT); dev_dbg(iop_chan->device->common.dev, - "%s src_cnt: %d len: %u flags: %lx\n", + "%s src_cnt: %d len: %zu flags: 
%lx\n", __func__, src_cnt, len, flags); if (dmaf_p_disabled_continue(flags)) @@ -683,7 +683,7 @@ iop_adma_prep_dma_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src, return NULL; BUG_ON(len > IOP_ADMA_XOR_MAX_BYTE_COUNT); - dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %u\n", + dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %zu\n", __func__, src_cnt, len); spin_lock_bh(&iop_chan->lock); diff --git a/drivers/dma/ti/edma.c b/drivers/dma/ti/edma.c index ceabdea40ae0..982631d4e1f8 100644 --- a/drivers/dma/ti/edma.c +++ b/drivers/dma/ti/edma.c @@ -2273,9 +2273,6 @@ static int edma_probe(struct platform_device *pdev) ecc->default_queue = info->default_queue; - for (i = 0; i < ecc->num_slots; i++) - edma_write_slot(ecc, i, &dummy_paramset); - if (info->rsv) { /* Set the reserved slots in inuse list */ rsv_slots = info->rsv->rsv_slots; @@ -2288,6 +2285,12 @@ static int edma_probe(struct platform_device *pdev) } } + for (i = 0; i < ecc->num_slots; i++) { + /* Reset only unused - not reserved - paRAM slots */ + if (!test_bit(i, ecc->slot_inuse)) + edma_write_slot(ecc, i, &dummy_paramset); + } + /* Clear the xbar mapped channels in unused list */ xbar_chans = info->xbar_chans; if (xbar_chans) { diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c index c2e693e34d43..bf024ec0116c 100644 --- a/drivers/edac/altera_edac.c +++ b/drivers/edac/altera_edac.c @@ -1866,6 +1866,7 @@ static void altr_edac_a10_irq_handler(struct irq_desc *desc) struct altr_arria10_edac *edac = irq_desc_get_handler_data(desc); struct irq_chip *chip = irq_desc_get_chip(desc); int irq = irq_desc_get_irq(desc); + unsigned long bits; dberr = (irq == edac->db_irq) ? 1 : 0; sm_offset = dberr ? A10_SYSMGR_ECC_INTSTAT_DERR_OFST : @@ -1875,7 +1876,8 @@ static void altr_edac_a10_irq_handler(struct irq_desc *desc) regmap_read(edac->ecc_mgr_map, sm_offset, &irq_status); - for_each_set_bit(bit, (unsigned long *)&irq_status, 32) { + bits = irq_status; + for_each_set_bit(bit, &bits, 32) { irq = irq_linear_revmap(edac->domain, dberr * 32 + bit); if (irq) generic_handle_irq(irq); diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c index 873437be86d9..608fdab566b3 100644 --- a/drivers/edac/amd64_edac.c +++ b/drivers/edac/amd64_edac.c @@ -810,7 +810,7 @@ static void debug_display_dimm_sizes_df(struct amd64_pvt *pvt, u8 ctrl) edac_printk(KERN_DEBUG, EDAC_MC, "UMC%d chip selects:\n", ctrl); - for (dimm = 0; dimm < 4; dimm++) { + for (dimm = 0; dimm < 2; dimm++) { size0 = 0; cs0 = dimm * 2; @@ -942,89 +942,102 @@ static void prep_chip_selects(struct amd64_pvt *pvt) } else if (pvt->fam == 0x15 && pvt->model == 0x30) { pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 4; pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 2; + } else if (pvt->fam >= 0x17) { + int umc; + + for_each_umc(umc) { + pvt->csels[umc].b_cnt = 4; + pvt->csels[umc].m_cnt = 2; + } + } else { pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8; pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 4; } } +static void read_umc_base_mask(struct amd64_pvt *pvt) +{ + u32 umc_base_reg, umc_mask_reg; + u32 base_reg, mask_reg; + u32 *base, *mask; + int cs, umc; + + for_each_umc(umc) { + umc_base_reg = get_umc_base(umc) + UMCCH_BASE_ADDR; + + for_each_chip_select(cs, umc, pvt) { + base = &pvt->csels[umc].csbases[cs]; + + base_reg = umc_base_reg + (cs * 4); + + if (!amd_smn_read(pvt->mc_node_id, base_reg, base)) + edac_dbg(0, " DCSB%d[%d]=0x%08x reg: 0x%x\n", + umc, cs, *base, base_reg); + } + + umc_mask_reg = get_umc_base(umc) + UMCCH_ADDR_MASK; + + 
for_each_chip_select_mask(cs, umc, pvt) { + mask = &pvt->csels[umc].csmasks[cs]; + + mask_reg = umc_mask_reg + (cs * 4); + + if (!amd_smn_read(pvt->mc_node_id, mask_reg, mask)) + edac_dbg(0, " DCSM%d[%d]=0x%08x reg: 0x%x\n", + umc, cs, *mask, mask_reg); + } + } +} + /* * Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask registers */ static void read_dct_base_mask(struct amd64_pvt *pvt) { - int base_reg0, base_reg1, mask_reg0, mask_reg1, cs; + int cs; prep_chip_selects(pvt); - if (pvt->umc) { - base_reg0 = get_umc_base(0) + UMCCH_BASE_ADDR; - base_reg1 = get_umc_base(1) + UMCCH_BASE_ADDR; - mask_reg0 = get_umc_base(0) + UMCCH_ADDR_MASK; - mask_reg1 = get_umc_base(1) + UMCCH_ADDR_MASK; - } else { - base_reg0 = DCSB0; - base_reg1 = DCSB1; - mask_reg0 = DCSM0; - mask_reg1 = DCSM1; - } + if (pvt->umc) + return read_umc_base_mask(pvt); for_each_chip_select(cs, 0, pvt) { - int reg0 = base_reg0 + (cs * 4); - int reg1 = base_reg1 + (cs * 4); + int reg0 = DCSB0 + (cs * 4); + int reg1 = DCSB1 + (cs * 4); u32 *base0 = &pvt->csels[0].csbases[cs]; u32 *base1 = &pvt->csels[1].csbases[cs]; - if (pvt->umc) { - if (!amd_smn_read(pvt->mc_node_id, reg0, base0)) - edac_dbg(0, " DCSB0[%d]=0x%08x reg: 0x%x\n", - cs, *base0, reg0); - - if (!amd_smn_read(pvt->mc_node_id, reg1, base1)) - edac_dbg(0, " DCSB1[%d]=0x%08x reg: 0x%x\n", - cs, *base1, reg1); - } else { - if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, base0)) - edac_dbg(0, " DCSB0[%d]=0x%08x reg: F2x%x\n", - cs, *base0, reg0); + if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, base0)) + edac_dbg(0, " DCSB0[%d]=0x%08x reg: F2x%x\n", + cs, *base0, reg0); - if (pvt->fam == 0xf) - continue; + if (pvt->fam == 0xf) + continue; - if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, base1)) - edac_dbg(0, " DCSB1[%d]=0x%08x reg: F2x%x\n", - cs, *base1, (pvt->fam == 0x10) ? reg1 - : reg0); - } + if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, base1)) + edac_dbg(0, " DCSB1[%d]=0x%08x reg: F2x%x\n", + cs, *base1, (pvt->fam == 0x10) ? reg1 + : reg0); } for_each_chip_select_mask(cs, 0, pvt) { - int reg0 = mask_reg0 + (cs * 4); - int reg1 = mask_reg1 + (cs * 4); + int reg0 = DCSM0 + (cs * 4); + int reg1 = DCSM1 + (cs * 4); u32 *mask0 = &pvt->csels[0].csmasks[cs]; u32 *mask1 = &pvt->csels[1].csmasks[cs]; - if (pvt->umc) { - if (!amd_smn_read(pvt->mc_node_id, reg0, mask0)) - edac_dbg(0, " DCSM0[%d]=0x%08x reg: 0x%x\n", - cs, *mask0, reg0); + if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, mask0)) + edac_dbg(0, " DCSM0[%d]=0x%08x reg: F2x%x\n", + cs, *mask0, reg0); - if (!amd_smn_read(pvt->mc_node_id, reg1, mask1)) - edac_dbg(0, " DCSM1[%d]=0x%08x reg: 0x%x\n", - cs, *mask1, reg1); - } else { - if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, mask0)) - edac_dbg(0, " DCSM0[%d]=0x%08x reg: F2x%x\n", - cs, *mask0, reg0); - - if (pvt->fam == 0xf) - continue; + if (pvt->fam == 0xf) + continue; - if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, mask1)) - edac_dbg(0, " DCSM1[%d]=0x%08x reg: F2x%x\n", - cs, *mask1, (pvt->fam == 0x10) ? reg1 - : reg0); - } + if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, mask1)) + edac_dbg(0, " DCSM1[%d]=0x%08x reg: F2x%x\n", + cs, *mask1, (pvt->fam == 0x10) ? 
reg1 + : reg0); } } @@ -2537,13 +2550,6 @@ static void decode_umc_error(int node_id, struct mce *m) err.channel = find_umc_channel(m); - if (umc_normaddr_to_sysaddr(m->addr, pvt->mc_node_id, err.channel, &sys_addr)) { - err.err_code = ERR_NORM_ADDR; - goto log_error; - } - - error_address_to_page_and_offset(sys_addr, &err); - if (!(m->status & MCI_STATUS_SYNDV)) { err.err_code = ERR_SYND; goto log_error; @@ -2560,6 +2566,13 @@ static void decode_umc_error(int node_id, struct mce *m) err.csrow = m->synd & 0x7; + if (umc_normaddr_to_sysaddr(m->addr, pvt->mc_node_id, err.channel, &sys_addr)) { + err.err_code = ERR_NORM_ADDR; + goto log_error; + } + + error_address_to_page_and_offset(sys_addr, &err); + log_error: __log_ecc_error(mci, &err, ecc_type); } @@ -3137,12 +3150,15 @@ static bool ecc_enabled(struct pci_dev *F3, u16 nid) static inline void f17h_determine_edac_ctl_cap(struct mem_ctl_info *mci, struct amd64_pvt *pvt) { - u8 i, ecc_en = 1, cpk_en = 1; + u8 i, ecc_en = 1, cpk_en = 1, dev_x4 = 1, dev_x16 = 1; for_each_umc(i) { if (pvt->umc[i].sdp_ctrl & UMC_SDP_INIT) { ecc_en &= !!(pvt->umc[i].umc_cap_hi & UMC_ECC_ENABLED); cpk_en &= !!(pvt->umc[i].umc_cap_hi & UMC_ECC_CHIPKILL_CAP); + + dev_x4 &= !!(pvt->umc[i].dimm_cfg & BIT(6)); + dev_x16 &= !!(pvt->umc[i].dimm_cfg & BIT(7)); } } @@ -3150,8 +3166,15 @@ f17h_determine_edac_ctl_cap(struct mem_ctl_info *mci, struct amd64_pvt *pvt) if (ecc_en) { mci->edac_ctl_cap |= EDAC_FLAG_SECDED; - if (cpk_en) + if (!cpk_en) + return; + + if (dev_x4) mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED; + else if (dev_x16) + mci->edac_ctl_cap |= EDAC_FLAG_S16ECD16ED; + else + mci->edac_ctl_cap |= EDAC_FLAG_S8ECD8ED; } } diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h index 8f66472f7adc..4dce6a2ac75f 100644 --- a/drivers/edac/amd64_edac.h +++ b/drivers/edac/amd64_edac.h @@ -96,6 +96,7 @@ /* Hardware limit on ChipSelect rows per MC and processors per system */ #define NUM_CHIPSELECTS 8 #define DRAM_RANGES 8 +#define NUM_CONTROLLERS 8 #define ON true #define OFF false @@ -351,8 +352,8 @@ struct amd64_pvt { u32 dbam0; /* DRAM Base Address Mapping reg for DCT0 */ u32 dbam1; /* DRAM Base Address Mapping reg for DCT1 */ - /* one for each DCT */ - struct chip_select csels[2]; + /* one for each DCT/UMC */ + struct chip_select csels[NUM_CONTROLLERS]; /* DRAM base and limit pairs F1x[78,70,68,60,58,50,48,40] */ struct dram_range ranges[DRAM_RANGES]; diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c index 64922c8fa7e3..d899d86897d0 100644 --- a/drivers/edac/edac_mc.c +++ b/drivers/edac/edac_mc.c @@ -1235,9 +1235,13 @@ void edac_mc_handle_error(const enum hw_event_mc_err_type type, if (p > e->location) *(p - 1) = '\0'; - /* Report the error via the trace interface */ - grain_bits = fls_long(e->grain) + 1; + /* Sanity-check driver-supplied grain value. 
*/ + if (WARN_ON_ONCE(!e->grain)) + e->grain = 1; + + grain_bits = fls_long(e->grain - 1); + /* Report the error via the trace interface */ if (IS_ENABLED(CONFIG_RAS)) trace_mc_event(type, e->msg, e->label, e->error_count, mci->mc_idx, e->top_layer, e->mid_layer, diff --git a/drivers/edac/pnd2_edac.c b/drivers/edac/pnd2_edac.c index ca25f8fe57ef..1ad538baaa4a 100644 --- a/drivers/edac/pnd2_edac.c +++ b/drivers/edac/pnd2_edac.c @@ -260,11 +260,14 @@ static u64 get_sideband_reg_base_addr(void) } } +#define DNV_MCHBAR_SIZE 0x8000 +#define DNV_SB_PORT_SIZE 0x10000 static int dnv_rd_reg(int port, int off, int op, void *data, size_t sz, char *name) { struct pci_dev *pdev; char *base; u64 addr; + unsigned long size; if (op == 4) { pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x1980, NULL); @@ -279,15 +282,17 @@ static int dnv_rd_reg(int port, int off, int op, void *data, size_t sz, char *na addr = get_mem_ctrl_hub_base_addr(); if (!addr) return -ENODEV; + size = DNV_MCHBAR_SIZE; } else { /* MMIO via sideband register base address */ addr = get_sideband_reg_base_addr(); if (!addr) return -ENODEV; addr += (port << 16); + size = DNV_SB_PORT_SIZE; } - base = ioremap((resource_size_t)addr, 0x10000); + base = ioremap((resource_size_t)addr, size); if (!base) return -ENODEV; diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c index b5bc4c7a8fab..b49c9e6f4bf1 100644 --- a/drivers/firmware/arm_scmi/driver.c +++ b/drivers/firmware/arm_scmi/driver.c @@ -271,6 +271,14 @@ static void scmi_tx_prepare(struct mbox_client *cl, void *m) struct scmi_chan_info *cinfo = client_to_scmi_chan_info(cl); struct scmi_shared_mem __iomem *mem = cinfo->payload; + /* + * Ideally channel must be free by now unless OS timeout last + * request and platform continued to process the same, wait + * until it releases the shared memory, otherwise we may endup + * overwriting its response with new message payload or vice-versa + */ + spin_until_cond(ioread32(&mem->channel_status) & + SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE); /* Mark channel busy + clear error */ iowrite32(0x0, &mem->channel_status); iowrite32(t->hdr.poll_completion ? 
0 : SCMI_SHMEM_FLAG_INTR_ENABLED, diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c index 8fa977c7861f..addf0749dd8b 100644 --- a/drivers/firmware/efi/cper.c +++ b/drivers/firmware/efi/cper.c @@ -390,6 +390,21 @@ static void cper_print_pcie(const char *pfx, const struct cper_sec_pcie *pcie, printk( "%s""bridge: secondary_status: 0x%04x, control: 0x%04x\n", pfx, pcie->bridge.secondary_status, pcie->bridge.control); + + /* Fatal errors call __ghes_panic() before AER handler prints this */ + if ((pcie->validation_bits & CPER_PCIE_VALID_AER_INFO) && + (gdata->error_severity & CPER_SEV_FATAL)) { + struct aer_capability_regs *aer; + + aer = (struct aer_capability_regs *)pcie->aer_info; + printk("%saer_uncor_status: 0x%08x, aer_uncor_mask: 0x%08x\n", + pfx, aer->uncor_status, aer->uncor_mask); + printk("%saer_uncor_severity: 0x%08x\n", + pfx, aer->uncor_severity); + printk("%sTLP Header: %08x %08x %08x %08x\n", pfx, + aer->header_log.dw0, aer->header_log.dw1, + aer->header_log.dw2, aer->header_log.dw3); + } } static void cper_print_tstamp(const char *pfx, diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c index 2ddc118dba1b..74b84244a0db 100644 --- a/drivers/firmware/qcom_scm.c +++ b/drivers/firmware/qcom_scm.c @@ -9,6 +9,7 @@ #include <linux/init.h> #include <linux/cpumask.h> #include <linux/export.h> +#include <linux/dma-direct.h> #include <linux/dma-mapping.h> #include <linux/module.h> #include <linux/types.h> @@ -440,6 +441,7 @@ int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz, phys_addr_t mem_to_map_phys; phys_addr_t dest_phys; phys_addr_t ptr_phys; + dma_addr_t ptr_dma; size_t mem_to_map_sz; size_t dest_sz; size_t src_sz; @@ -457,9 +459,10 @@ int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz, ptr_sz = ALIGN(src_sz, SZ_64) + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(dest_sz, SZ_64); - ptr = dma_alloc_coherent(__scm->dev, ptr_sz, &ptr_phys, GFP_KERNEL); + ptr = dma_alloc_coherent(__scm->dev, ptr_sz, &ptr_dma, GFP_KERNEL); if (!ptr) return -ENOMEM; + ptr_phys = dma_to_phys(__scm->dev, ptr_dma); /* Fill source vmid detail */ src = ptr; @@ -489,7 +492,7 @@ int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz, ret = __qcom_scm_assign_mem(__scm->dev, mem_to_map_phys, mem_to_map_sz, ptr_phys, src_sz, dest_phys, dest_sz); - dma_free_coherent(__scm->dev, ALIGN(ptr_sz, SZ_64), ptr, ptr_phys); + dma_free_coherent(__scm->dev, ptr_sz, ptr, ptr_dma); if (ret) { dev_err(__scm->dev, "Assign memory protection call failed %d.\n", ret); diff --git a/drivers/gpio/gpio-madera.c b/drivers/gpio/gpio-madera.c index 4dbc837d1215..be963113f672 100644 --- a/drivers/gpio/gpio-madera.c +++ b/drivers/gpio/gpio-madera.c @@ -136,6 +136,9 @@ static int madera_gpio_probe(struct platform_device *pdev) madera_gpio->gpio_chip.parent = pdev->dev.parent; switch (madera->type) { + case CS47L15: + madera_gpio->gpio_chip.ngpio = CS47L15_NUM_GPIOS; + break; case CS47L35: madera_gpio->gpio_chip.ngpio = CS47L35_NUM_GPIOS; break; @@ -147,6 +150,11 @@ static int madera_gpio_probe(struct platform_device *pdev) case CS47L91: madera_gpio->gpio_chip.ngpio = CS47L90_NUM_GPIOS; break; + case CS42L92: + case CS47L92: + case CS47L93: + madera_gpio->gpio_chip.ngpio = CS47L92_NUM_GPIOS; + break; default: dev_err(&pdev->dev, "Unknown chip variant %d\n", madera->type); return -EINVAL; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index beb2d268d1ef..421ca93a8ab8 100644 --- 
a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -2107,6 +2107,7 @@ static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd) } static const struct backlight_ops amdgpu_dm_backlight_ops = { + .options = BL_CORE_SUSPENDRESUME, .get_brightness = amdgpu_dm_backlight_get_brightness, .update_status = amdgpu_dm_backlight_update_status, }; diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c index 5cc3acccda2a..b1e657e137a9 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c @@ -98,11 +98,14 @@ uint32_t dce110_get_min_vblank_time_us(const struct dc_state *context) struct dc_stream_state *stream = context->streams[j]; uint32_t vertical_blank_in_pixels = 0; uint32_t vertical_blank_time = 0; + uint32_t vertical_total_min = stream->timing.v_total; + struct dc_crtc_timing_adjust adjust = stream->adjust; + if (adjust.v_total_max != adjust.v_total_min) + vertical_total_min = adjust.v_total_min; vertical_blank_in_pixels = stream->timing.h_total * - (stream->timing.v_total + (vertical_total_min - stream->timing.v_addressable); - vertical_blank_time = vertical_blank_in_pixels * 10000 / stream->timing.pix_clk_100hz; @@ -171,6 +174,10 @@ void dce11_pplib_apply_display_requirements( struct dc_state *context) { struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg; + int memory_type_multiplier = MEMORY_TYPE_MULTIPLIER_CZ; + + if (dc->bw_vbios && dc->bw_vbios->memory_type == bw_def_hbm) + memory_type_multiplier = MEMORY_TYPE_HBM; pp_display_cfg->all_displays_in_sync = context->bw_ctx.bw.dce.all_displays_in_sync; @@ -183,8 +190,20 @@ void dce11_pplib_apply_display_requirements( pp_display_cfg->cpu_pstate_separation_time = context->bw_ctx.bw.dce.blackout_recovery_time_us; - pp_display_cfg->min_memory_clock_khz = context->bw_ctx.bw.dce.yclk_khz - / MEMORY_TYPE_MULTIPLIER_CZ; + /* + * TODO: determine whether the bandwidth has reached memory's limitation + * , then change minimum memory clock based on real-time bandwidth + * limitation. 
+ */ + if (ASICREV_IS_VEGA20_P(dc->ctx->asic_id.hw_internal_rev) && (context->stream_count >= 2)) { + pp_display_cfg->min_memory_clock_khz = max(pp_display_cfg->min_memory_clock_khz, + (uint32_t) div64_s64( + div64_s64(dc->bw_vbios->high_yclk.value, + memory_type_multiplier), 10000)); + } else { + pp_display_cfg->min_memory_clock_khz = context->bw_ctx.bw.dce.yclk_khz + / memory_type_multiplier; + } pp_display_cfg->min_engine_clock_khz = determine_sclk_from_bounding_box( dc, diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c index a24a2bda8656..1596ddcb26e6 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c @@ -148,7 +148,7 @@ static void dce_mi_program_pte_vm( pte->min_pte_before_flip_horiz_scan; REG_UPDATE(GRPH_PIPE_OUTSTANDING_REQUEST_LIMIT, - GRPH_PIPE_OUTSTANDING_REQUEST_LIMIT, 0xff); + GRPH_PIPE_OUTSTANDING_REQUEST_LIMIT, 0x7f); REG_UPDATE_3(DVMM_PTE_CONTROL, DVMM_PAGE_WIDTH, page_width, @@ -157,7 +157,7 @@ static void dce_mi_program_pte_vm( REG_UPDATE_2(DVMM_PTE_ARB_CONTROL, DVMM_PTE_REQ_PER_CHUNK, pte->pte_req_per_chunk, - DVMM_MAX_PTE_REQ_OUTSTANDING, 0xff); + DVMM_MAX_PTE_REQ_OUTSTANDING, 0x7f); } static void program_urgency_watermark( diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c index c6136e0ed1a4..7a04be74c9cf 100644 --- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c @@ -987,6 +987,10 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc) struct dm_pp_clock_levels_with_latency mem_clks = {0}; struct dm_pp_wm_sets_with_clock_ranges clk_ranges = {0}; struct dm_pp_clock_levels clks = {0}; + int memory_type_multiplier = MEMORY_TYPE_MULTIPLIER_CZ; + + if (dc->bw_vbios && dc->bw_vbios->memory_type == bw_def_hbm) + memory_type_multiplier = MEMORY_TYPE_HBM; /*do system clock TODO PPLIB: after PPLIB implement, * then remove old way @@ -1026,12 +1030,12 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc) &clks); dc->bw_vbios->low_yclk = bw_frc_to_fixed( - clks.clocks_in_khz[0] * MEMORY_TYPE_MULTIPLIER_CZ, 1000); + clks.clocks_in_khz[0] * memory_type_multiplier, 1000); dc->bw_vbios->mid_yclk = bw_frc_to_fixed( - clks.clocks_in_khz[clks.num_levels>>1] * MEMORY_TYPE_MULTIPLIER_CZ, + clks.clocks_in_khz[clks.num_levels>>1] * memory_type_multiplier, 1000); dc->bw_vbios->high_yclk = bw_frc_to_fixed( - clks.clocks_in_khz[clks.num_levels-1] * MEMORY_TYPE_MULTIPLIER_CZ, + clks.clocks_in_khz[clks.num_levels-1] * memory_type_multiplier, 1000); return; @@ -1067,12 +1071,12 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc) * YCLK = UMACLK*m_memoryTypeMultiplier */ dc->bw_vbios->low_yclk = bw_frc_to_fixed( - mem_clks.data[0].clocks_in_khz * MEMORY_TYPE_MULTIPLIER_CZ, 1000); + mem_clks.data[0].clocks_in_khz * memory_type_multiplier, 1000); dc->bw_vbios->mid_yclk = bw_frc_to_fixed( - mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz * MEMORY_TYPE_MULTIPLIER_CZ, + mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz * memory_type_multiplier, 1000); dc->bw_vbios->high_yclk = bw_frc_to_fixed( - mem_clks.data[mem_clks.num_levels-1].clocks_in_khz * MEMORY_TYPE_MULTIPLIER_CZ, + mem_clks.data[mem_clks.num_levels-1].clocks_in_khz * memory_type_multiplier, 1000); /* Now notify PPLib/SMU about which Watermarks sets they should select diff --git 
a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c index 4a6ba3173a5a..ae38c9c7277c 100644 --- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c @@ -847,6 +847,8 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc) int i; unsigned int clk; unsigned int latency; + /*original logic in dal3*/ + int memory_type_multiplier = MEMORY_TYPE_MULTIPLIER_CZ; /*do system clock*/ if (!dm_pp_get_clock_levels_by_type_with_latency( @@ -905,13 +907,16 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc) * ALSO always convert UMA clock (from PPLIB) to YCLK (HW formula): * YCLK = UMACLK*m_memoryTypeMultiplier */ + if (dc->bw_vbios->memory_type == bw_def_hbm) + memory_type_multiplier = MEMORY_TYPE_HBM; + dc->bw_vbios->low_yclk = bw_frc_to_fixed( - mem_clks.data[0].clocks_in_khz * MEMORY_TYPE_MULTIPLIER_CZ, 1000); + mem_clks.data[0].clocks_in_khz * memory_type_multiplier, 1000); dc->bw_vbios->mid_yclk = bw_frc_to_fixed( - mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz * MEMORY_TYPE_MULTIPLIER_CZ, + mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz * memory_type_multiplier, 1000); dc->bw_vbios->high_yclk = bw_frc_to_fixed( - mem_clks.data[mem_clks.num_levels-1].clocks_in_khz * MEMORY_TYPE_MULTIPLIER_CZ, + mem_clks.data[mem_clks.num_levels-1].clocks_in_khz * memory_type_multiplier, 1000); /* Now notify PPLib/SMU about which Watermarks sets they should select diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h index 47f81072d7e9..c0424b4035a5 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/resource.h +++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h @@ -31,6 +31,8 @@ #include "dm_pp_smu.h" #define MEMORY_TYPE_MULTIPLIER_CZ 4 +#define MEMORY_TYPE_HBM 2 + enum dce_version resource_parse_asic_id( struct hw_asic_id asic_id); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index 487aeee1cf8a..3c1084de5d59 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c @@ -4068,6 +4068,11 @@ static int smu7_program_display_gap(struct pp_hwmgr *hwmgr) data->frame_time_x2 = frame_time_in_us * 2 / 100; + if (data->frame_time_x2 < 280) { + pr_debug("%s: enforce minimal VBITimeout: %d -> 280\n", __func__, data->frame_time_x2); + data->frame_time_x2 = 280; + } + display_gap2 = pre_vbi_time_in_us * (ref_clock / 100); cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL2, display_gap2); diff --git a/drivers/gpu/drm/drm_kms_helper_common.c b/drivers/gpu/drm/drm_kms_helper_common.c index d9a5ac81949e..221a8528c993 100644 --- a/drivers/gpu/drm/drm_kms_helper_common.c +++ b/drivers/gpu/drm/drm_kms_helper_common.c @@ -40,7 +40,7 @@ MODULE_LICENSE("GPL and additional rights"); /* Backward compatibility for drm_kms_helper.edid_firmware */ static int edid_firmware_set(const char *val, const struct kernel_param *kp) { - DRM_NOTE("drm_kms_firmware.edid_firmware is deprecated, please use drm.edid_firmware instead.\n"); + DRM_NOTE("drm_kms_helper.edid_firmware is deprecated, please use drm.edid_firmware instead.\n"); return __drm_set_edid_firmware_path(val); } diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c index 6ba1a08253f0..4cf25458f0b9 100644 --- a/drivers/hwmon/acpi_power_meter.c +++ b/drivers/hwmon/acpi_power_meter.c @@ -681,8 +681,8 @@ static int 
setup_attrs(struct acpi_power_meter_resource *resource) if (resource->caps.flags & POWER_METER_CAN_CAP) { if (!can_cap_in_hardware()) { - dev_err(&resource->acpi_dev->dev, - "Ignoring unsafe software power cap!\n"); + dev_warn(&resource->acpi_dev->dev, + "Ignoring unsafe software power cap!\n"); goto skip_unsafe_cap; } diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c index c77e89239dcd..5c1dddde193c 100644 --- a/drivers/hwmon/k10temp.c +++ b/drivers/hwmon/k10temp.c @@ -349,6 +349,7 @@ static const struct pci_device_id k10temp_id_table[] = { { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) }, { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F3) }, { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F3) }, + { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F3) }, { PCI_VDEVICE(HYGON, PCI_DEVICE_ID_AMD_17H_DF_F3) }, {} }; diff --git a/drivers/i2c/busses/i2c-riic.c b/drivers/i2c/busses/i2c-riic.c index f31413fd9521..800414886f6b 100644 --- a/drivers/i2c/busses/i2c-riic.c +++ b/drivers/i2c/busses/i2c-riic.c @@ -202,6 +202,7 @@ static irqreturn_t riic_tend_isr(int irq, void *data) if (readb(riic->base + RIIC_ICSR2) & ICSR2_NACKF) { /* We got a NACKIE */ readb(riic->base + RIIC_ICDRR); /* dummy read */ + riic_clear_set_bit(riic, ICSR2_NACKF, 0, RIIC_ICSR2); riic->err = -ENXIO; } else if (riic->bytes_left) { return IRQ_NONE; diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c index 9b76a8fcdd24..bf539c34ccd3 100644 --- a/drivers/infiniband/core/addr.c +++ b/drivers/infiniband/core/addr.c @@ -352,7 +352,7 @@ static bool has_gateway(const struct dst_entry *dst, sa_family_t family) if (family == AF_INET) { rt = container_of(dst, struct rtable, dst); - return rt->rt_gw_family == AF_INET; + return rt->rt_uses_gateway; } rt6 = container_of(dst, struct rt6_info, dst); diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index 7ddd0e5bc6b3..bb8b71cc3821 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c @@ -3484,7 +3484,8 @@ static int __uverbs_create_xsrq(struct uverbs_attr_bundle *attrs, err_copy: ib_destroy_srq_user(srq, uverbs_get_cleared_udata(attrs)); - + /* It was released in ib_destroy_srq_user */ + srq = NULL; err_free: kfree(srq); err_put: diff --git a/drivers/infiniband/hw/hfi1/mad.c b/drivers/infiniband/hw/hfi1/mad.c index 184dba3c2828..d8ff063a5419 100644 --- a/drivers/infiniband/hw/hfi1/mad.c +++ b/drivers/infiniband/hw/hfi1/mad.c @@ -2326,7 +2326,7 @@ struct opa_port_status_req { __be32 vl_select_mask; }; -#define VL_MASK_ALL 0x000080ff +#define VL_MASK_ALL 0x00000000000080ffUL struct opa_port_status_rsp { __u8 port_num; @@ -2625,15 +2625,14 @@ static int pma_get_opa_classportinfo(struct opa_pma_mad *pmp, } static void a0_portstatus(struct hfi1_pportdata *ppd, - struct opa_port_status_rsp *rsp, u32 vl_select_mask) + struct opa_port_status_rsp *rsp) { if (!is_bx(ppd->dd)) { unsigned long vl; u64 sum_vl_xmit_wait = 0; - u32 vl_all_mask = VL_MASK_ALL; + unsigned long vl_all_mask = VL_MASK_ALL; - for_each_set_bit(vl, (unsigned long *)&(vl_all_mask), - 8 * sizeof(vl_all_mask)) { + for_each_set_bit(vl, &vl_all_mask, BITS_PER_LONG) { u64 tmp = sum_vl_xmit_wait + read_port_cntr(ppd, C_TX_WAIT_VL, idx_from_vl(vl)); @@ -2730,12 +2729,12 @@ static int pma_get_opa_portstatus(struct opa_pma_mad *pmp, (struct opa_port_status_req *)pmp->data; struct hfi1_devdata *dd = dd_from_ibdev(ibdev); struct opa_port_status_rsp *rsp; - u32 vl_select_mask = be32_to_cpu(req->vl_select_mask); + unsigned long 
vl_select_mask = be32_to_cpu(req->vl_select_mask); unsigned long vl; size_t response_data_size; u32 nports = be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24; u8 port_num = req->port_num; - u8 num_vls = hweight32(vl_select_mask); + u8 num_vls = hweight64(vl_select_mask); struct _vls_pctrs *vlinfo; struct hfi1_ibport *ibp = to_iport(ibdev, port); struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); @@ -2770,7 +2769,7 @@ static int pma_get_opa_portstatus(struct opa_pma_mad *pmp, hfi1_read_link_quality(dd, &rsp->link_quality_indicator); - rsp->vl_select_mask = cpu_to_be32(vl_select_mask); + rsp->vl_select_mask = cpu_to_be32((u32)vl_select_mask); rsp->port_xmit_data = cpu_to_be64(read_dev_cntr(dd, C_DC_XMIT_FLITS, CNTR_INVALID_VL)); rsp->port_rcv_data = cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FLITS, @@ -2841,8 +2840,7 @@ static int pma_get_opa_portstatus(struct opa_pma_mad *pmp, * So in the for_each_set_bit() loop below, we don't need * any additional checks for vl. */ - for_each_set_bit(vl, (unsigned long *)&(vl_select_mask), - 8 * sizeof(vl_select_mask)) { + for_each_set_bit(vl, &vl_select_mask, BITS_PER_LONG) { memset(vlinfo, 0, sizeof(*vlinfo)); tmp = read_dev_cntr(dd, C_DC_RX_FLIT_VL, idx_from_vl(vl)); @@ -2883,7 +2881,7 @@ static int pma_get_opa_portstatus(struct opa_pma_mad *pmp, vfi++; } - a0_portstatus(ppd, rsp, vl_select_mask); + a0_portstatus(ppd, rsp); if (resp_len) *resp_len += response_data_size; @@ -2930,16 +2928,14 @@ static u64 get_error_counter_summary(struct ib_device *ibdev, u8 port, return error_counter_summary; } -static void a0_datacounters(struct hfi1_pportdata *ppd, struct _port_dctrs *rsp, - u32 vl_select_mask) +static void a0_datacounters(struct hfi1_pportdata *ppd, struct _port_dctrs *rsp) { if (!is_bx(ppd->dd)) { unsigned long vl; u64 sum_vl_xmit_wait = 0; - u32 vl_all_mask = VL_MASK_ALL; + unsigned long vl_all_mask = VL_MASK_ALL; - for_each_set_bit(vl, (unsigned long *)&(vl_all_mask), - 8 * sizeof(vl_all_mask)) { + for_each_set_bit(vl, &vl_all_mask, BITS_PER_LONG) { u64 tmp = sum_vl_xmit_wait + read_port_cntr(ppd, C_TX_WAIT_VL, idx_from_vl(vl)); @@ -2994,7 +2990,7 @@ static int pma_get_opa_datacounters(struct opa_pma_mad *pmp, u64 port_mask; u8 port_num; unsigned long vl; - u32 vl_select_mask; + unsigned long vl_select_mask; int vfi; u16 link_width; u16 link_speed; @@ -3071,8 +3067,7 @@ static int pma_get_opa_datacounters(struct opa_pma_mad *pmp, * So in the for_each_set_bit() loop below, we don't need * any additional checks for vl. 
*/ - for_each_set_bit(vl, (unsigned long *)&(vl_select_mask), - 8 * sizeof(req->vl_select_mask)) { + for_each_set_bit(vl, &vl_select_mask, BITS_PER_LONG) { memset(vlinfo, 0, sizeof(*vlinfo)); rsp->vls[vfi].port_vl_xmit_data = @@ -3120,7 +3115,7 @@ static int pma_get_opa_datacounters(struct opa_pma_mad *pmp, vfi++; } - a0_datacounters(ppd, rsp, vl_select_mask); + a0_datacounters(ppd, rsp); if (resp_len) *resp_len += response_data_size; @@ -3215,7 +3210,7 @@ static int pma_get_opa_porterrors(struct opa_pma_mad *pmp, struct _vls_ectrs *vlinfo; unsigned long vl; u64 port_mask, tmp; - u32 vl_select_mask; + unsigned long vl_select_mask; int vfi; req = (struct opa_port_error_counters64_msg *)pmp->data; @@ -3273,8 +3268,7 @@ static int pma_get_opa_porterrors(struct opa_pma_mad *pmp, vlinfo = &rsp->vls[0]; vfi = 0; vl_select_mask = be32_to_cpu(req->vl_select_mask); - for_each_set_bit(vl, (unsigned long *)&(vl_select_mask), - 8 * sizeof(req->vl_select_mask)) { + for_each_set_bit(vl, &vl_select_mask, BITS_PER_LONG) { memset(vlinfo, 0, sizeof(*vlinfo)); rsp->vls[vfi].port_vl_xmit_discards = cpu_to_be64(read_port_cntr(ppd, C_SW_XMIT_DSCD_VL, @@ -3485,7 +3479,7 @@ static int pma_set_opa_portstatus(struct opa_pma_mad *pmp, u32 nports = be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24; u64 portn = be64_to_cpu(req->port_select_mask[3]); u32 counter_select = be32_to_cpu(req->counter_select_mask); - u32 vl_select_mask = VL_MASK_ALL; /* clear all per-vl cnts */ + unsigned long vl_select_mask = VL_MASK_ALL; /* clear all per-vl cnts */ unsigned long vl; if ((nports != 1) || (portn != 1 << port)) { @@ -3579,8 +3573,7 @@ static int pma_set_opa_portstatus(struct opa_pma_mad *pmp, if (counter_select & CS_UNCORRECTABLE_ERRORS) write_dev_cntr(dd, C_DC_UNC_ERR, CNTR_INVALID_VL, 0); - for_each_set_bit(vl, (unsigned long *)&(vl_select_mask), - 8 * sizeof(vl_select_mask)) { + for_each_set_bit(vl, &vl_select_mask, BITS_PER_LONG) { if (counter_select & CS_PORT_XMIT_DATA) write_port_cntr(ppd, C_TX_FLIT_VL, idx_from_vl(vl), 0); diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c index 646f61545ed6..9f53f63b1453 100644 --- a/drivers/infiniband/hw/hfi1/verbs.c +++ b/drivers/infiniband/hw/hfi1/verbs.c @@ -874,16 +874,17 @@ int hfi1_verbs_send_dma(struct rvt_qp *qp, struct hfi1_pkt_state *ps, else pbc |= (ib_is_sc5(sc5) << PBC_DC_INFO_SHIFT); - if (unlikely(hfi1_dbg_should_fault_tx(qp, ps->opcode))) - pbc = hfi1_fault_tx(qp, ps->opcode, pbc); pbc = create_pbc(ppd, pbc, qp->srate_mbps, vl, plen); - /* Update HCRC based on packet opcode */ - pbc = update_hcrc(ps->opcode, pbc); + if (unlikely(hfi1_dbg_should_fault_tx(qp, ps->opcode))) + pbc = hfi1_fault_tx(qp, ps->opcode, pbc); + else + /* Update HCRC based on packet opcode */ + pbc = update_hcrc(ps->opcode, pbc); } tx->wqe = qp->s_wqe; ret = build_verbs_tx_desc(tx->sde, len, tx, ahg_info, pbc); @@ -1030,12 +1031,12 @@ int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps, else pbc |= (ib_is_sc5(sc5) << PBC_DC_INFO_SHIFT); + pbc = create_pbc(ppd, pbc, qp->srate_mbps, vl, plen); if (unlikely(hfi1_dbg_should_fault_tx(qp, ps->opcode))) pbc = hfi1_fault_tx(qp, ps->opcode, pbc); - pbc = create_pbc(ppd, pbc, qp->srate_mbps, vl, plen); - - /* Update HCRC based on packet opcode */ - pbc = update_hcrc(ps->opcode, pbc); + else + /* Update HCRC based on packet opcode */ + pbc = update_hcrc(ps->opcode, pbc); } if (cb) iowait_pio_inc(&priv->s_iowait); diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 
0569bcab02d4..14807ea8dc3f 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -6959,6 +6959,7 @@ static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context) mlx5_ib_unbind_slave_port(mpi->ibdev, mpi); list_del(&mpi->list); mutex_unlock(&mlx5_ib_multiport_mutex); + kfree(mpi); return; } diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile index f13f36ae1af6..c6a277e69848 100644 --- a/drivers/iommu/Makefile +++ b/drivers/iommu/Makefile @@ -10,7 +10,7 @@ obj-$(CONFIG_IOMMU_IO_PGTABLE_LPAE) += io-pgtable-arm.o obj-$(CONFIG_IOMMU_IOVA) += iova.o obj-$(CONFIG_OF_IOMMU) += of_iommu.o obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o -obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o +obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o amd_iommu_quirks.o obj-$(CONFIG_AMD_IOMMU_DEBUGFS) += amd_iommu_debugfs.o obj-$(CONFIG_AMD_IOMMU_V2) += amd_iommu_v2.o obj-$(CONFIG_ARM_SMMU) += arm-smmu.o diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index 61de81965c44..e1259429ded2 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -2577,7 +2577,9 @@ static int map_sg(struct device *dev, struct scatterlist *sglist, bus_addr = address + s->dma_address + (j << PAGE_SHIFT); phys_addr = (sg_phys(s) & PAGE_MASK) + (j << PAGE_SHIFT); - ret = iommu_map_page(domain, bus_addr, phys_addr, PAGE_SIZE, prot, GFP_ATOMIC); + ret = iommu_map_page(domain, bus_addr, phys_addr, + PAGE_SIZE, prot, + GFP_ATOMIC | __GFP_NOWARN); if (ret) goto out_unmap; diff --git a/drivers/iommu/amd_iommu.h b/drivers/iommu/amd_iommu.h new file mode 100644 index 000000000000..12d540d9b59b --- /dev/null +++ b/drivers/iommu/amd_iommu.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +#ifndef AMD_IOMMU_H +#define AMD_IOMMU_H + +int __init add_special_device(u8 type, u8 id, u16 *devid, bool cmd_line); + +#ifdef CONFIG_DMI +void amd_iommu_apply_ivrs_quirks(void); +#else +static void amd_iommu_apply_ivrs_quirks(void) { } +#endif + +#endif diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c index 4413aa67000e..568c52317757 100644 --- a/drivers/iommu/amd_iommu_init.c +++ b/drivers/iommu/amd_iommu_init.c @@ -32,6 +32,7 @@ #include <asm/irq_remapping.h> #include <linux/crash_dump.h> +#include "amd_iommu.h" #include "amd_iommu_proto.h" #include "amd_iommu_types.h" #include "irq_remapping.h" @@ -1002,7 +1003,7 @@ static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu, set_iommu_for_device(iommu, devid); } -static int __init add_special_device(u8 type, u8 id, u16 *devid, bool cmd_line) +int __init add_special_device(u8 type, u8 id, u16 *devid, bool cmd_line) { struct devid_map *entry; struct list_head *list; @@ -1153,6 +1154,8 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu, if (ret) return ret; + amd_iommu_apply_ivrs_quirks(); + /* * First save the recommended feature enable bits from ACPI */ diff --git a/drivers/iommu/amd_iommu_quirks.c b/drivers/iommu/amd_iommu_quirks.c new file mode 100644 index 000000000000..c235f79b7a20 --- /dev/null +++ b/drivers/iommu/amd_iommu_quirks.c @@ -0,0 +1,92 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +/* + * Quirks for AMD IOMMU + * + * Copyright (C) 2019 Kai-Heng Feng <kai.heng.feng@xxxxxxxxxxxxx> + */ + +#ifdef CONFIG_DMI +#include <linux/dmi.h> + +#include "amd_iommu.h" + +#define IVHD_SPECIAL_IOAPIC 1 + +struct ivrs_quirk_entry { + u8 id; + u16 devid; +}; + +enum { + DELL_INSPIRON_7375 = 0, + DELL_LATITUDE_5495, + LENOVO_IDEAPAD_330S_15ARR, +}; + 
+static const struct ivrs_quirk_entry ivrs_ioapic_quirks[][3] __initconst = { + /* ivrs_ioapic[4]=00:14.0 ivrs_ioapic[5]=00:00.2 */ + [DELL_INSPIRON_7375] = { + { .id = 4, .devid = 0xa0 }, + { .id = 5, .devid = 0x2 }, + {} + }, + /* ivrs_ioapic[4]=00:14.0 */ + [DELL_LATITUDE_5495] = { + { .id = 4, .devid = 0xa0 }, + {} + }, + /* ivrs_ioapic[32]=00:14.0 */ + [LENOVO_IDEAPAD_330S_15ARR] = { + { .id = 32, .devid = 0xa0 }, + {} + }, + {} +}; + +static int __init ivrs_ioapic_quirk_cb(const struct dmi_system_id *d) +{ + const struct ivrs_quirk_entry *i; + + for (i = d->driver_data; i->id != 0 && i->devid != 0; i++) + add_special_device(IVHD_SPECIAL_IOAPIC, i->id, (u16 *)&i->devid, 0); + + return 0; +} + +static const struct dmi_system_id ivrs_quirks[] __initconst = { + { + .callback = ivrs_ioapic_quirk_cb, + .ident = "Dell Inspiron 7375", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7375"), + }, + .driver_data = (void *)&ivrs_ioapic_quirks[DELL_INSPIRON_7375], + }, + { + .callback = ivrs_ioapic_quirk_cb, + .ident = "Dell Latitude 5495", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Latitude 5495"), + }, + .driver_data = (void *)&ivrs_ioapic_quirks[DELL_LATITUDE_5495], + }, + { + .callback = ivrs_ioapic_quirk_cb, + .ident = "Lenovo ideapad 330S-15ARR", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "81FB"), + }, + .driver_data = (void *)&ivrs_ioapic_quirks[LENOVO_IDEAPAD_330S_15ARR], + }, + {} +}; + +void __init amd_iommu_apply_ivrs_quirks(void) +{ + dmi_check_system(ivrs_quirks); +} +#endif diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index c5c93e48b4db..d1ebe5ce3e47 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c @@ -2843,11 +2843,13 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu) } /* Boolean feature flags */ +#if 0 /* ATS invalidation is slow and broken */ if (IS_ENABLED(CONFIG_PCI_PRI) && reg & IDR0_PRI) smmu->features |= ARM_SMMU_FEAT_PRI; if (IS_ENABLED(CONFIG_PCI_ATS) && reg & IDR0_ATS) smmu->features |= ARM_SMMU_FEAT_ATS; +#endif if (reg & IDR0_SEV) smmu->features |= ARM_SMMU_FEAT_SEV; diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c index 4786ca061e31..81e43c1df7ec 100644 --- a/drivers/iommu/intel_irq_remapping.c +++ b/drivers/iommu/intel_irq_remapping.c @@ -376,13 +376,13 @@ static int set_msi_sid_cb(struct pci_dev *pdev, u16 alias, void *opaque) { struct set_msi_sid_data *data = opaque; + if (data->count == 0 || PCI_BUS_NUM(alias) == PCI_BUS_NUM(data->alias)) + data->busmatch_count++; + data->pdev = pdev; data->alias = alias; data->count++; - if (PCI_BUS_NUM(alias) == pdev->bus->number) - data->busmatch_count++; - return 0; } diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c index 3e1a8a675572..41c605b0058f 100644 --- a/drivers/iommu/iova.c +++ b/drivers/iommu/iova.c @@ -577,7 +577,9 @@ void queue_iova(struct iova_domain *iovad, spin_unlock_irqrestore(&fq->lock, flags); - if (atomic_cmpxchg(&iovad->fq_timer_on, 0, 1) == 0) + /* Avoid false sharing as much as possible. 
*/ + if (!atomic_read(&iovad->fq_timer_on) && + !atomic_cmpxchg(&iovad->fq_timer_on, 0, 1)) mod_timer(&iovad->fq_timer, jiffies + msecs_to_jiffies(IOVA_FQ_TIMEOUT)); } diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index 1b5c3672aea2..c3a8d732805f 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -2641,14 +2641,13 @@ static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq, struct its_node *its = its_dev->its; int i; + bitmap_release_region(its_dev->event_map.lpi_map, + its_get_event_id(irq_domain_get_irq_data(domain, virq)), + get_count_order(nr_irqs)); + for (i = 0; i < nr_irqs; i++) { struct irq_data *data = irq_domain_get_irq_data(domain, virq + i); - u32 event = its_get_event_id(data); - - /* Mark interrupt index as unused */ - clear_bit(event, its_dev->event_map.lpi_map); - /* Nuke the entry in the domain */ irq_domain_reset_irq_data(data); } diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c index cf755964f2f8..c72c036aea76 100644 --- a/drivers/irqchip/irq-sifive-plic.c +++ b/drivers/irqchip/irq-sifive-plic.c @@ -244,6 +244,7 @@ static int __init plic_init(struct device_node *node, struct plic_handler *handler; irq_hw_number_t hwirq; int cpu, hartid; + u32 threshold = 0; if (of_irq_parse_one(node, i, &parent)) { pr_err("failed to parse parent for context %d.\n", i); @@ -266,10 +267,16 @@ static int __init plic_init(struct device_node *node, continue; } + /* + * When running in M-mode we need to ignore the S-mode handler. + * Here we assume it always comes later, but that might be a + * little fragile. + */ handler = per_cpu_ptr(&plic_handlers, cpu); if (handler->present) { pr_warn("handler already present for context %d.\n", i); - continue; + threshold = 0xffffffff; + goto done; } handler->present = true; @@ -279,8 +286,9 @@ static int __init plic_init(struct device_node *node, handler->enable_base = plic_regs + ENABLE_BASE + i * ENABLE_PER_HART; +done: /* priority must be > threshold to trigger an interrupt */ - writel(0, handler->hart_base + CONTEXT_THRESHOLD); + writel(threshold, handler->hart_base + CONTEXT_THRESHOLD); for (hwirq = 1; hwirq <= nr_irqs; hwirq++) plic_toggle(handler, hwirq, 0); nr_handlers++; diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c index c6ba37df4b9d..dff4132b3702 100644 --- a/drivers/isdn/mISDN/socket.c +++ b/drivers/isdn/mISDN/socket.c @@ -754,6 +754,8 @@ base_sock_create(struct net *net, struct socket *sock, int protocol, int kern) if (sock->type != SOCK_RAW) return -ESOCKTNOSUPPORT; + if (!capable(CAP_NET_RAW)) + return -EPERM; sk = sk_alloc(net, PF_ISDN, GFP_KERNEL, &mISDN_proto, kern); if (!sk) diff --git a/drivers/leds/led-triggers.c b/drivers/leds/led-triggers.c index 8d11a5e23227..eff1bda8b520 100644 --- a/drivers/leds/led-triggers.c +++ b/drivers/leds/led-triggers.c @@ -173,6 +173,7 @@ int led_trigger_set(struct led_classdev *led_cdev, struct led_trigger *trig) list_del(&led_cdev->trig_list); write_unlock_irqrestore(&led_cdev->trigger->leddev_list_lock, flags); led_set_brightness(led_cdev, LED_OFF); + kfree(event); return ret; } diff --git a/drivers/leds/leds-lm3532.c b/drivers/leds/leds-lm3532.c index 180895b83b88..e55a64847fe2 100644 --- a/drivers/leds/leds-lm3532.c +++ b/drivers/leds/leds-lm3532.c @@ -40,7 +40,7 @@ #define LM3532_REG_ZN_3_LO 0x67 #define LM3532_REG_MAX 0x7e -/* Contorl Enable */ +/* Control Enable */ #define LM3532_CTRL_A_ENABLE BIT(0) #define LM3532_CTRL_B_ENABLE BIT(1) #define 
LM3532_CTRL_C_ENABLE BIT(2) @@ -302,7 +302,7 @@ static int lm3532_led_disable(struct lm3532_led *led_data) int ret; ret = regmap_update_bits(led_data->priv->regmap, LM3532_REG_ENABLE, - ctrl_en_val, ~ctrl_en_val); + ctrl_en_val, 0); if (ret) { dev_err(led_data->priv->dev, "Failed to set ctrl:%d\n", ret); return ret; @@ -321,7 +321,7 @@ static int lm3532_brightness_set(struct led_classdev *led_cdev, mutex_lock(&led->priv->lock); - if (led->mode == LM3532_BL_MODE_ALS) { + if (led->mode == LM3532_ALS_CTRL) { if (brt_val > LED_OFF) ret = lm3532_led_enable(led); else @@ -542,11 +542,14 @@ static int lm3532_parse_node(struct lm3532_data *priv) } if (led->mode == LM3532_BL_MODE_ALS) { + led->mode = LM3532_ALS_CTRL; ret = lm3532_parse_als(priv); if (ret) dev_err(&priv->client->dev, "Failed to parse als\n"); else lm3532_als_configure(priv, led); + } else { + led->mode = LM3532_I2C_CTRL; } led->num_leds = fwnode_property_read_u32_array(child, @@ -590,7 +593,13 @@ static int lm3532_parse_node(struct lm3532_data *priv) goto child_out; } - lm3532_init_registers(led); + ret = lm3532_init_registers(led); + if (ret) { + dev_err(&priv->client->dev, "register init err: %d\n", + ret); + fwnode_handle_put(child); + goto child_out; + } i++; } diff --git a/drivers/leds/leds-lp5562.c b/drivers/leds/leds-lp5562.c index 37632fc63741..edb57c42e8b1 100644 --- a/drivers/leds/leds-lp5562.c +++ b/drivers/leds/leds-lp5562.c @@ -260,7 +260,11 @@ static void lp5562_firmware_loaded(struct lp55xx_chip *chip) { const struct firmware *fw = chip->fw; - if (fw->size > LP5562_PROGRAM_LENGTH) { + /* + * the firmware is encoded in ascii hex character, with 2 chars + * per byte + */ + if (fw->size > (LP5562_PROGRAM_LENGTH * 2)) { dev_err(&chip->cl->dev, "firmware data size overflow: %zu\n", fw->size); return; diff --git a/drivers/md/bcache/closure.c b/drivers/md/bcache/closure.c index 73f5319295bc..c12cd809ab19 100644 --- a/drivers/md/bcache/closure.c +++ b/drivers/md/bcache/closure.c @@ -105,8 +105,14 @@ struct closure_syncer { static void closure_sync_fn(struct closure *cl) { - cl->s->done = 1; - wake_up_process(cl->s->task); + struct closure_syncer *s = cl->s; + struct task_struct *p; + + rcu_read_lock(); + p = READ_ONCE(s->task); + s->done = 1; + wake_up_process(p); + rcu_read_unlock(); } void __sched __closure_sync(struct closure *cl) diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c index c9e44ac1f9a6..21d5c1784d0c 100644 --- a/drivers/md/dm-rq.c +++ b/drivers/md/dm-rq.c @@ -408,6 +408,7 @@ static int map_request(struct dm_rq_target_io *tio) ret = dm_dispatch_clone_request(clone, rq); if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE) { blk_rq_unprep_clone(clone); + blk_mq_cleanup_rq(clone); tio->ti->type->release_clone_rq(clone, &tio->info); tio->clone = NULL; return DM_MAPIO_REQUEUE; diff --git a/drivers/md/md.c b/drivers/md/md.c index 24638ccedce4..3100dd53c64c 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -1826,8 +1826,15 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev) if (!(le32_to_cpu(sb->feature_map) & MD_FEATURE_RECOVERY_BITMAP)) rdev->saved_raid_disk = -1; - } else - set_bit(In_sync, &rdev->flags); + } else { + /* + * If the array is FROZEN, then the device can't + * be in_sync with rest of array. 
+ */ + if (!test_bit(MD_RECOVERY_FROZEN, + &mddev->recovery)) + set_bit(In_sync, &rdev->flags); + } rdev->raid_disk = role; break; } @@ -4176,7 +4183,7 @@ array_state_show(struct mddev *mddev, char *page) { enum array_state st = inactive; - if (mddev->pers) + if (mddev->pers && !test_bit(MD_NOT_READY, &mddev->flags)) switch(mddev->ro) { case 1: st = readonly; @@ -5744,9 +5751,6 @@ int md_run(struct mddev *mddev) md_update_sb(mddev, 0); md_new_event(mddev); - sysfs_notify_dirent_safe(mddev->sysfs_state); - sysfs_notify_dirent_safe(mddev->sysfs_action); - sysfs_notify(&mddev->kobj, NULL, "degraded"); return 0; bitmap_abort: @@ -5767,6 +5771,7 @@ static int do_md_run(struct mddev *mddev) { int err; + set_bit(MD_NOT_READY, &mddev->flags); err = md_run(mddev); if (err) goto out; @@ -5787,9 +5792,14 @@ static int do_md_run(struct mddev *mddev) set_capacity(mddev->gendisk, mddev->array_sectors); revalidate_disk(mddev->gendisk); + clear_bit(MD_NOT_READY, &mddev->flags); mddev->changed = 1; kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE); + sysfs_notify_dirent_safe(mddev->sysfs_state); + sysfs_notify_dirent_safe(mddev->sysfs_action); + sysfs_notify(&mddev->kobj, NULL, "degraded"); out: + clear_bit(MD_NOT_READY, &mddev->flags); return err; } @@ -8900,6 +8910,7 @@ void md_check_recovery(struct mddev *mddev) if (mddev_trylock(mddev)) { int spares = 0; + bool try_set_sync = mddev->safemode != 0; if (!mddev->external && mddev->safemode == 1) mddev->safemode = 0; @@ -8945,7 +8956,7 @@ void md_check_recovery(struct mddev *mddev) } } - if (!mddev->external && !mddev->in_sync) { + if (try_set_sync && !mddev->external && !mddev->in_sync) { spin_lock(&mddev->lock); set_in_sync(mddev); spin_unlock(&mddev->lock); @@ -9043,7 +9054,8 @@ void md_reap_sync_thread(struct mddev *mddev) /* resync has finished, collect result */ md_unregister_thread(&mddev->sync_thread); if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) && - !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { + !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) && + mddev->degraded != mddev->raid_disks) { /* success...*/ /* activate any spares */ if (mddev->pers->spare_active(mddev)) { diff --git a/drivers/md/md.h b/drivers/md/md.h index 10f98200e2f8..08f2aee383e8 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h @@ -248,6 +248,9 @@ enum mddev_flags { MD_UPDATING_SB, /* md_check_recovery is updating the metadata * without explicitly holding reconfig_mutex. 
*/ + MD_NOT_READY, /* do_md_run() is active, so 'array_state' + * must not report that array is ready yet + */ }; enum mddev_sb_flags { diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c index bf5cf184a260..297bbc0f41f0 100644 --- a/drivers/md/raid0.c +++ b/drivers/md/raid0.c @@ -19,6 +19,9 @@ #include "raid0.h" #include "raid5.h" +static int default_layout = 0; +module_param(default_layout, int, 0644); + #define UNSUPPORTED_MDDEV_FLAGS \ ((1L << MD_HAS_JOURNAL) | \ (1L << MD_JOURNAL_CLEAN) | \ @@ -139,6 +142,19 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf) } pr_debug("md/raid0:%s: FINAL %d zones\n", mdname(mddev), conf->nr_strip_zones); + + if (conf->nr_strip_zones == 1) { + conf->layout = RAID0_ORIG_LAYOUT; + } else if (default_layout == RAID0_ORIG_LAYOUT || + default_layout == RAID0_ALT_MULTIZONE_LAYOUT) { + conf->layout = default_layout; + } else { + pr_err("md/raid0:%s: cannot assemble multi-zone RAID0 with default_layout setting\n", + mdname(mddev)); + pr_err("md/raid0: please set raid.default_layout to 1 or 2\n"); + err = -ENOTSUPP; + goto abort; + } /* * now since we have the hard sector sizes, we can make sure * chunk size is a multiple of that sector size @@ -547,10 +563,12 @@ static void raid0_handle_discard(struct mddev *mddev, struct bio *bio) static bool raid0_make_request(struct mddev *mddev, struct bio *bio) { + struct r0conf *conf = mddev->private; struct strip_zone *zone; struct md_rdev *tmp_dev; sector_t bio_sector; sector_t sector; + sector_t orig_sector; unsigned chunk_sects; unsigned sectors; @@ -584,8 +602,21 @@ static bool raid0_make_request(struct mddev *mddev, struct bio *bio) bio = split; } + orig_sector = sector; zone = find_zone(mddev->private, &sector); - tmp_dev = map_sector(mddev, zone, sector, &sector); + switch (conf->layout) { + case RAID0_ORIG_LAYOUT: + tmp_dev = map_sector(mddev, zone, orig_sector, &sector); + break; + case RAID0_ALT_MULTIZONE_LAYOUT: + tmp_dev = map_sector(mddev, zone, sector, &sector); + break; + default: + WARN("md/raid0:%s: Invalid layout\n", mdname(mddev)); + bio_io_error(bio); + return true; + } + bio_set_dev(bio, tmp_dev->bdev); bio->bi_iter.bi_sector = sector + zone->dev_start + tmp_dev->data_offset; diff --git a/drivers/md/raid0.h b/drivers/md/raid0.h index 540e65d92642..3816e5477db1 100644 --- a/drivers/md/raid0.h +++ b/drivers/md/raid0.h @@ -8,11 +8,25 @@ struct strip_zone { int nb_dev; /* # of devices attached to the zone */ }; +/* Linux 3.14 (20d0189b101) made an unintended change to + * the RAID0 layout for multi-zone arrays (where devices aren't all + * the same size. + * RAID0_ORIG_LAYOUT restores the original layout + * RAID0_ALT_MULTIZONE_LAYOUT uses the altered layout + * The layouts are identical when there is only one zone (all + * devices the same size). 
+ */ + +enum r0layout { + RAID0_ORIG_LAYOUT = 1, + RAID0_ALT_MULTIZONE_LAYOUT = 2, +}; struct r0conf { struct strip_zone *strip_zone; struct md_rdev **devlist; /* lists of rdevs, pointed to * by strip_zone->dev */ int nr_strip_zones; + enum r0layout layout; }; #endif diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 34e26834ad28..5afbb7df06e7 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -447,19 +447,21 @@ static void raid1_end_write_request(struct bio *bio) /* We never try FailFast to WriteMostly devices */ !test_bit(WriteMostly, &rdev->flags)) { md_error(r1_bio->mddev, rdev); - if (!test_bit(Faulty, &rdev->flags)) - /* This is the only remaining device, - * We need to retry the write without - * FailFast - */ - set_bit(R1BIO_WriteError, &r1_bio->state); - else { - /* Finished with this branch */ - r1_bio->bios[mirror] = NULL; - to_put = bio; - } - } else + } + + /* + * When the device is faulty, it is not necessary to + * handle write error. + * For failfast, this is the only remaining device, + * We need to retry the write without FailFast. + */ + if (!test_bit(Faulty, &rdev->flags)) set_bit(R1BIO_WriteError, &r1_bio->state); + else { + /* Finished with this branch */ + r1_bio->bios[mirror] = NULL; + to_put = bio; + } } else { /* * Set R1BIO_Uptodate in our master bio, so that we @@ -3127,6 +3129,13 @@ static int raid1_run(struct mddev *mddev) !test_bit(In_sync, &conf->mirrors[i].rdev->flags) || test_bit(Faulty, &conf->mirrors[i].rdev->flags)) mddev->degraded++; + /* + * RAID1 needs at least one disk in active + */ + if (conf->raid_disks - mddev->degraded < 1) { + ret = -EINVAL; + goto abort; + } if (conf->raid_disks - mddev->degraded == 1) mddev->recovery_cp = MaxSector; @@ -3160,8 +3169,12 @@ static int raid1_run(struct mddev *mddev) ret = md_integrity_register(mddev); if (ret) { md_unregister_thread(&mddev->thread); - raid1_free(mddev, conf); + goto abort; } + return 0; + +abort: + raid1_free(mddev, conf); return ret; } diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 3de4e13bde98..39f8ef6ee59c 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -2526,7 +2526,8 @@ static void raid5_end_read_request(struct bio * bi) int set_bad = 0; clear_bit(R5_UPTODATE, &sh->dev[i].flags); - atomic_inc(&rdev->read_errors); + if (!(bi->bi_status == BLK_STS_PROTECTION)) + atomic_inc(&rdev->read_errors); if (test_bit(R5_ReadRepl, &sh->dev[i].flags)) pr_warn_ratelimited( "md/raid:%s: read error on replacement device (sector %llu on %s).\n", @@ -2558,7 +2559,9 @@ static void raid5_end_read_request(struct bio * bi) && !test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) retry = 1; if (retry) - if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) { + if (sh->qd_idx >= 0 && sh->pd_idx == i) + set_bit(R5_ReadError, &sh->dev[i].flags); + else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) { set_bit(R5_ReadError, &sh->dev[i].flags); clear_bit(R5_ReadNoMerge, &sh->dev[i].flags); } else @@ -5718,7 +5721,8 @@ static bool raid5_make_request(struct mddev *mddev, struct bio * bi) do_flush = false; } - set_bit(STRIPE_HANDLE, &sh->state); + if (!sh->batch_head) + set_bit(STRIPE_HANDLE, &sh->state); clear_bit(STRIPE_DELAYED, &sh->state); if ((!sh->batch_head || sh == sh->batch_head) && (bi->bi_opf & REQ_SYNC) && diff --git a/drivers/media/cec/cec-notifier.c b/drivers/media/cec/cec-notifier.c index 52a867bde15f..4d82a5522072 100644 --- a/drivers/media/cec/cec-notifier.c +++ b/drivers/media/cec/cec-notifier.c @@ -218,6 +218,8 @@ void cec_notifier_unregister(struct cec_notifier *n) 
mutex_lock(&n->lock); n->callback = NULL; + n->cec_adap->notifier = NULL; + n->cec_adap = NULL; mutex_unlock(&n->lock); cec_notifier_put(n); } diff --git a/drivers/media/common/videobuf2/videobuf2-v4l2.c b/drivers/media/common/videobuf2/videobuf2-v4l2.c index 40d76eb4c2fe..5a9ba3846f0a 100644 --- a/drivers/media/common/videobuf2/videobuf2-v4l2.c +++ b/drivers/media/common/videobuf2/videobuf2-v4l2.c @@ -872,17 +872,19 @@ EXPORT_SYMBOL_GPL(vb2_queue_release); __poll_t vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait) { struct video_device *vfd = video_devdata(file); - __poll_t res = 0; + __poll_t res; + + res = vb2_core_poll(q, file, wait); if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) { struct v4l2_fh *fh = file->private_data; poll_wait(file, &fh->wait, wait); if (v4l2_event_pending(fh)) - res = EPOLLPRI; + res |= EPOLLPRI; } - return res | vb2_core_poll(q, file, wait); + return res; } EXPORT_SYMBOL_GPL(vb2_poll); diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c index 209186c5cd9b..06ea30a689d7 100644 --- a/drivers/media/dvb-core/dvb_frontend.c +++ b/drivers/media/dvb-core/dvb_frontend.c @@ -152,6 +152,9 @@ static void dvb_frontend_free(struct kref *ref) static void dvb_frontend_put(struct dvb_frontend *fe) { + /* call detach before dropping the reference count */ + if (fe->ops.detach) + fe->ops.detach(fe); /* * Check if the frontend was registered, as otherwise * kref was not initialized yet. @@ -3040,7 +3043,6 @@ void dvb_frontend_detach(struct dvb_frontend *fe) dvb_frontend_invoke_release(fe, fe->ops.release_sec); dvb_frontend_invoke_release(fe, fe->ops.tuner_ops.release); dvb_frontend_invoke_release(fe, fe->ops.analog_ops.release); - dvb_frontend_invoke_release(fe, fe->ops.detach); dvb_frontend_put(fe); } EXPORT_SYMBOL(dvb_frontend_detach); diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c index a3393cd4e584..7557fbf9d306 100644 --- a/drivers/media/dvb-core/dvbdev.c +++ b/drivers/media/dvb-core/dvbdev.c @@ -339,8 +339,10 @@ static int dvb_create_media_entity(struct dvb_device *dvbdev, if (npads) { dvbdev->pads = kcalloc(npads, sizeof(*dvbdev->pads), GFP_KERNEL); - if (!dvbdev->pads) + if (!dvbdev->pads) { + kfree(dvbdev->entity); return -ENOMEM; + } } switch (type) { diff --git a/drivers/media/dvb-frontends/dvb-pll.c b/drivers/media/dvb-frontends/dvb-pll.c index ba0c49107bd2..d45b4ddc8f91 100644 --- a/drivers/media/dvb-frontends/dvb-pll.c +++ b/drivers/media/dvb-frontends/dvb-pll.c @@ -9,6 +9,7 @@ #include <linux/slab.h> #include <linux/module.h> +#include <linux/idr.h> #include <linux/dvb/frontend.h> #include <asm/types.h> @@ -34,8 +35,7 @@ struct dvb_pll_priv { }; #define DVB_PLL_MAX 64 - -static unsigned int dvb_pll_devcount; +static DEFINE_IDA(pll_ida); static int debug; module_param(debug, int, 0644); @@ -787,6 +787,7 @@ struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe, int pll_addr, struct dvb_pll_priv *priv = NULL; int ret; const struct dvb_pll_desc *desc; + int nr; b1 = kmalloc(1, GFP_KERNEL); if (!b1) @@ -795,9 +796,14 @@ struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe, int pll_addr, b1[0] = 0; msg.buf = b1; - if ((id[dvb_pll_devcount] > DVB_PLL_UNDEFINED) && - (id[dvb_pll_devcount] < ARRAY_SIZE(pll_list))) - pll_desc_id = id[dvb_pll_devcount]; + nr = ida_simple_get(&pll_ida, 0, DVB_PLL_MAX, GFP_KERNEL); + if (nr < 0) { + kfree(b1); + return NULL; + } + + if (id[nr] > DVB_PLL_UNDEFINED && id[nr] < ARRAY_SIZE(pll_list)) + pll_desc_id = id[nr]; BUG_ON(pll_desc_id < 1 
|| pll_desc_id >= ARRAY_SIZE(pll_list)); @@ -808,24 +814,20 @@ struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe, int pll_addr, fe->ops.i2c_gate_ctrl(fe, 1); ret = i2c_transfer (i2c, &msg, 1); - if (ret != 1) { - kfree(b1); - return NULL; - } + if (ret != 1) + goto out; if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0); } priv = kzalloc(sizeof(struct dvb_pll_priv), GFP_KERNEL); - if (!priv) { - kfree(b1); - return NULL; - } + if (!priv) + goto out; priv->pll_i2c_address = pll_addr; priv->i2c = i2c; priv->pll_desc = desc; - priv->nr = dvb_pll_devcount++; + priv->nr = nr; memcpy(&fe->ops.tuner_ops, &dvb_pll_tuner_ops, sizeof(struct dvb_tuner_ops)); @@ -858,6 +860,11 @@ struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe, int pll_addr, kfree(b1); return fe; +out: + kfree(b1); + ida_simple_remove(&pll_ida, nr); + + return NULL; } EXPORT_SYMBOL(dvb_pll_attach); @@ -894,9 +901,10 @@ dvb_pll_probe(struct i2c_client *client, const struct i2c_device_id *id) static int dvb_pll_remove(struct i2c_client *client) { - struct dvb_frontend *fe; + struct dvb_frontend *fe = i2c_get_clientdata(client); + struct dvb_pll_priv *priv = fe->tuner_priv; - fe = i2c_get_clientdata(client); + ida_simple_remove(&pll_ida, priv->nr); dvb_pll_release(fe); return 0; } diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c index 759d60c6d630..afe7920557a8 100644 --- a/drivers/media/i2c/ov5640.c +++ b/drivers/media/i2c/ov5640.c @@ -3022,9 +3022,14 @@ static int ov5640_probe(struct i2c_client *client, /* request optional power down pin */ sensor->pwdn_gpio = devm_gpiod_get_optional(dev, "powerdown", GPIOD_OUT_HIGH); + if (IS_ERR(sensor->pwdn_gpio)) + return PTR_ERR(sensor->pwdn_gpio); + /* request optional reset pin */ sensor->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH); + if (IS_ERR(sensor->reset_gpio)) + return PTR_ERR(sensor->reset_gpio); v4l2_i2c_subdev_init(&sensor->sd, client, &ov5640_subdev_ops); diff --git a/drivers/media/i2c/ov5645.c b/drivers/media/i2c/ov5645.c index 124c8df04633..58972c884705 100644 --- a/drivers/media/i2c/ov5645.c +++ b/drivers/media/i2c/ov5645.c @@ -45,6 +45,8 @@ #define OV5645_CHIP_ID_HIGH_BYTE 0x56 #define OV5645_CHIP_ID_LOW 0x300b #define OV5645_CHIP_ID_LOW_BYTE 0x45 +#define OV5645_IO_MIPI_CTRL00 0x300e +#define OV5645_PAD_OUTPUT00 0x3019 #define OV5645_AWB_MANUAL_CONTROL 0x3406 #define OV5645_AWB_MANUAL_ENABLE BIT(0) #define OV5645_AEC_PK_MANUAL 0x3503 @@ -55,6 +57,7 @@ #define OV5645_ISP_VFLIP BIT(2) #define OV5645_TIMING_TC_REG21 0x3821 #define OV5645_SENSOR_MIRROR BIT(1) +#define OV5645_MIPI_CTRL00 0x4800 #define OV5645_PRE_ISP_TEST_SETTING_1 0x503d #define OV5645_TEST_PATTERN_MASK 0x3 #define OV5645_SET_TEST_PATTERN(x) ((x) & OV5645_TEST_PATTERN_MASK) @@ -121,7 +124,6 @@ static const struct reg_value ov5645_global_init_setting[] = { { 0x3503, 0x07 }, { 0x3002, 0x1c }, { 0x3006, 0xc3 }, - { 0x300e, 0x45 }, { 0x3017, 0x00 }, { 0x3018, 0x00 }, { 0x302e, 0x0b }, @@ -350,7 +352,10 @@ static const struct reg_value ov5645_global_init_setting[] = { { 0x3a1f, 0x14 }, { 0x0601, 0x02 }, { 0x3008, 0x42 }, - { 0x3008, 0x02 } + { 0x3008, 0x02 }, + { OV5645_IO_MIPI_CTRL00, 0x40 }, + { OV5645_MIPI_CTRL00, 0x24 }, + { OV5645_PAD_OUTPUT00, 0x70 } }; static const struct reg_value ov5645_setting_sxga[] = { @@ -737,13 +742,9 @@ static int ov5645_s_power(struct v4l2_subdev *sd, int on) goto exit; } - ret = ov5645_write_reg(ov5645, OV5645_SYSTEM_CTRL0, - OV5645_SYSTEM_CTRL0_STOP); - if (ret < 0) { - ov5645_set_power_off(ov5645); - goto exit; - } + 
usleep_range(500, 1000); } else { + ov5645_write_reg(ov5645, OV5645_IO_MIPI_CTRL00, 0x58); ov5645_set_power_off(ov5645); } } @@ -1049,11 +1050,20 @@ static int ov5645_s_stream(struct v4l2_subdev *subdev, int enable) dev_err(ov5645->dev, "could not sync v4l2 controls\n"); return ret; } + + ret = ov5645_write_reg(ov5645, OV5645_IO_MIPI_CTRL00, 0x45); + if (ret < 0) + return ret; + ret = ov5645_write_reg(ov5645, OV5645_SYSTEM_CTRL0, OV5645_SYSTEM_CTRL0_START); if (ret < 0) return ret; } else { + ret = ov5645_write_reg(ov5645, OV5645_IO_MIPI_CTRL00, 0x40); + if (ret < 0) + return ret; + ret = ov5645_write_reg(ov5645, OV5645_SYSTEM_CTRL0, OV5645_SYSTEM_CTRL0_STOP); if (ret < 0) diff --git a/drivers/media/i2c/ov9650.c b/drivers/media/i2c/ov9650.c index 30ab2225fbd0..b350f5c1a989 100644 --- a/drivers/media/i2c/ov9650.c +++ b/drivers/media/i2c/ov9650.c @@ -703,6 +703,11 @@ static int ov965x_set_gain(struct ov965x *ov965x, int auto_gain) for (m = 6; m >= 0; m--) if (gain >= (1 << m) * 16) break; + + /* Sanity check: don't adjust the gain with a negative value */ + if (m < 0) + return -EINVAL; + rgain = (gain - ((1 << m) * 16)) / (1 << m); rgain |= (((1 << m) - 1) << 4); diff --git a/drivers/media/i2c/tda1997x.c b/drivers/media/i2c/tda1997x.c index a62ede096636..5e68182001ec 100644 --- a/drivers/media/i2c/tda1997x.c +++ b/drivers/media/i2c/tda1997x.c @@ -2691,7 +2691,13 @@ static int tda1997x_probe(struct i2c_client *client, } ret = 0x34 + ((io_read(sd, REG_SLAVE_ADDR)>>4) & 0x03); - state->client_cec = i2c_new_dummy(client->adapter, ret); + state->client_cec = devm_i2c_new_dummy_device(&client->dev, + client->adapter, ret); + if (IS_ERR(state->client_cec)) { + ret = PTR_ERR(state->client_cec); + goto err_free_mutex; + } + v4l_info(client, "CEC slave address 0x%02x\n", ret); ret = tda1997x_core_init(sd); @@ -2798,7 +2804,6 @@ static int tda1997x_remove(struct i2c_client *client) media_entity_cleanup(&sd->entity); v4l2_ctrl_handler_free(&state->hdl); regulator_bulk_disable(TDA1997X_NUM_SUPPLIES, state->supplies); - i2c_unregister_device(state->client_cec); cancel_delayed_work(&state->delayed_work_enable_hpd); mutex_destroy(&state->page_lock); mutex_destroy(&state->lock); diff --git a/drivers/media/pci/saa7134/saa7134-i2c.c b/drivers/media/pci/saa7134/saa7134-i2c.c index 493b1858815f..04e85765373e 100644 --- a/drivers/media/pci/saa7134/saa7134-i2c.c +++ b/drivers/media/pci/saa7134/saa7134-i2c.c @@ -342,7 +342,11 @@ static const struct i2c_client saa7134_client_template = { /* ----------------------------------------------------------- */ -/* On Medion 7134 reading EEPROM needs DVB-T demod i2c gate open */ +/* + * On Medion 7134 reading the SAA7134 chip config EEPROM needs DVB-T + * demod i2c gate closed due to an address clash between this EEPROM + * and the demod one. 
+ */ static void saa7134_i2c_eeprom_md7134_gate(struct saa7134_dev *dev) { u8 subaddr = 0x7, dmdregval; @@ -359,14 +363,14 @@ static void saa7134_i2c_eeprom_md7134_gate(struct saa7134_dev *dev) ret = i2c_transfer(&dev->i2c_adap, i2cgatemsg_r, 2); if ((ret == 2) && (dmdregval & 0x2)) { - pr_debug("%s: DVB-T demod i2c gate was left closed\n", + pr_debug("%s: DVB-T demod i2c gate was left open\n", dev->name); data[0] = subaddr; data[1] = (dmdregval & ~0x2); if (i2c_transfer(&dev->i2c_adap, i2cgatemsg_w, 1) != 1) - pr_err("%s: EEPROM i2c gate open failure\n", - dev->name); + pr_err("%s: EEPROM i2c gate close failure\n", + dev->name); } } diff --git a/drivers/media/pci/saa7146/hexium_gemini.c b/drivers/media/pci/saa7146/hexium_gemini.c index dca20a3d98e2..f96226930670 100644 --- a/drivers/media/pci/saa7146/hexium_gemini.c +++ b/drivers/media/pci/saa7146/hexium_gemini.c @@ -292,6 +292,9 @@ static int hexium_attach(struct saa7146_dev *dev, struct saa7146_pci_extension_d ret = saa7146_register_device(&hexium->video_dev, dev, "hexium gemini", VFL_TYPE_GRABBER); if (ret < 0) { pr_err("cannot register capture v4l2 device. skipping.\n"); + saa7146_vv_release(dev); + i2c_del_adapter(&hexium->i2c_adapter); + kfree(hexium); return ret; } diff --git a/drivers/media/platform/aspeed-video.c b/drivers/media/platform/aspeed-video.c index f899ac3b4a61..4ef37cfc8446 100644 --- a/drivers/media/platform/aspeed-video.c +++ b/drivers/media/platform/aspeed-video.c @@ -630,7 +630,7 @@ static void aspeed_video_check_and_set_polarity(struct aspeed_video *video) } if (hsync_counter < 0 || vsync_counter < 0) { - u32 ctrl; + u32 ctrl = 0; if (hsync_counter < 0) { ctrl = VE_CTRL_HSYNC_POL; @@ -650,7 +650,8 @@ static void aspeed_video_check_and_set_polarity(struct aspeed_video *video) V4L2_DV_VSYNC_POS_POL; } - aspeed_video_update(video, VE_CTRL, 0, ctrl); + if (ctrl) + aspeed_video_update(video, VE_CTRL, 0, ctrl); } } diff --git a/drivers/media/platform/exynos4-is/fimc-is.c b/drivers/media/platform/exynos4-is/fimc-is.c index e043d55133a3..b7cc8e651e32 100644 --- a/drivers/media/platform/exynos4-is/fimc-is.c +++ b/drivers/media/platform/exynos4-is/fimc-is.c @@ -806,6 +806,7 @@ static int fimc_is_probe(struct platform_device *pdev) return -ENODEV; is->pmu_regs = of_iomap(node, 0); + of_node_put(node); if (!is->pmu_regs) return -ENOMEM; diff --git a/drivers/media/platform/exynos4-is/media-dev.c b/drivers/media/platform/exynos4-is/media-dev.c index d53427a8db11..a838189d4490 100644 --- a/drivers/media/platform/exynos4-is/media-dev.c +++ b/drivers/media/platform/exynos4-is/media-dev.c @@ -501,6 +501,7 @@ static int fimc_md_register_sensor_entities(struct fimc_md *fmd) continue; ret = fimc_md_parse_port_node(fmd, port, index); + of_node_put(port); if (ret < 0) { of_node_put(node); goto cleanup; @@ -542,6 +543,7 @@ static int __of_get_csis_id(struct device_node *np) if (!np) return -EINVAL; of_property_read_u32(np, "reg", ®); + of_node_put(np); return reg - FIMC_INPUT_MIPI_CSI2_0; } diff --git a/drivers/media/platform/fsl-viu.c b/drivers/media/platform/fsl-viu.c index 691be788e38b..b74e4f50d7d9 100644 --- a/drivers/media/platform/fsl-viu.c +++ b/drivers/media/platform/fsl-viu.c @@ -32,7 +32,7 @@ #define VIU_VERSION "0.5.1" /* Allow building this driver with COMPILE_TEST */ -#ifndef CONFIG_PPC +#if !defined(CONFIG_PPC) && !defined(CONFIG_MICROBLAZE) #define out_be32(v, a) iowrite32be(a, (void __iomem *)v) #define in_be32(a) ioread32be((void __iomem *)a) #endif diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_core.c 
b/drivers/media/platform/mtk-mdp/mtk_mdp_core.c index fc9faec85edb..5d44f2e92dd5 100644 --- a/drivers/media/platform/mtk-mdp/mtk_mdp_core.c +++ b/drivers/media/platform/mtk-mdp/mtk_mdp_core.c @@ -110,7 +110,9 @@ static int mtk_mdp_probe(struct platform_device *pdev) mutex_init(&mdp->vpulock); /* Old dts had the components as child nodes */ - if (of_get_next_child(dev->of_node, NULL)) { + node = of_get_next_child(dev->of_node, NULL); + if (node) { + of_node_put(node); parent = dev->of_node; dev_warn(dev, "device tree is out of date\n"); } else { diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c index 83216fc7156b..9cdb43859ae0 100644 --- a/drivers/media/platform/omap3isp/isp.c +++ b/drivers/media/platform/omap3isp/isp.c @@ -719,6 +719,10 @@ static int isp_pipeline_enable(struct isp_pipeline *pipe, s_stream, mode); pipe->do_propagation = true; } + + /* Stop at the first external sub-device. */ + if (subdev->dev != isp->dev) + break; } return 0; @@ -833,6 +837,10 @@ static int isp_pipeline_disable(struct isp_pipeline *pipe) &subdev->entity); failure = -ETIMEDOUT; } + + /* Stop at the first external sub-device. */ + if (subdev->dev != isp->dev) + break; } return failure; diff --git a/drivers/media/platform/omap3isp/ispccdc.c b/drivers/media/platform/omap3isp/ispccdc.c index 1ba8a5ba343f..e2f336c715a4 100644 --- a/drivers/media/platform/omap3isp/ispccdc.c +++ b/drivers/media/platform/omap3isp/ispccdc.c @@ -2602,6 +2602,7 @@ int omap3isp_ccdc_register_entities(struct isp_ccdc_device *ccdc, int ret; /* Register the subdev and video node. */ + ccdc->subdev.dev = vdev->mdev->dev; ret = v4l2_device_register_subdev(vdev, &ccdc->subdev); if (ret < 0) goto error; diff --git a/drivers/media/platform/omap3isp/ispccp2.c b/drivers/media/platform/omap3isp/ispccp2.c index efca45bb02c8..d0a49cdfd22d 100644 --- a/drivers/media/platform/omap3isp/ispccp2.c +++ b/drivers/media/platform/omap3isp/ispccp2.c @@ -1031,6 +1031,7 @@ int omap3isp_ccp2_register_entities(struct isp_ccp2_device *ccp2, int ret; /* Register the subdev and video nodes. */ + ccp2->subdev.dev = vdev->mdev->dev; ret = v4l2_device_register_subdev(vdev, &ccp2->subdev); if (ret < 0) goto error; diff --git a/drivers/media/platform/omap3isp/ispcsi2.c b/drivers/media/platform/omap3isp/ispcsi2.c index e85917f4a50c..fd493c5e4e24 100644 --- a/drivers/media/platform/omap3isp/ispcsi2.c +++ b/drivers/media/platform/omap3isp/ispcsi2.c @@ -1198,6 +1198,7 @@ int omap3isp_csi2_register_entities(struct isp_csi2_device *csi2, int ret; /* Register the subdev and video nodes. */ + csi2->subdev.dev = vdev->mdev->dev; ret = v4l2_device_register_subdev(vdev, &csi2->subdev); if (ret < 0) goto error; diff --git a/drivers/media/platform/omap3isp/isppreview.c b/drivers/media/platform/omap3isp/isppreview.c index 40e22400cf5e..97d660606d98 100644 --- a/drivers/media/platform/omap3isp/isppreview.c +++ b/drivers/media/platform/omap3isp/isppreview.c @@ -2225,6 +2225,7 @@ int omap3isp_preview_register_entities(struct isp_prev_device *prev, int ret; /* Register the subdev and video nodes. 
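A minimal sketch of the OF refcount rule behind the of_node_put() additions above (fimc-is, media-dev, mtk-mdp): helpers such as of_get_next_child() return the node with its reference count raised, so the caller must drop it once done, even when the node was only used as an existence check. The helper below is illustrative, not driver code.

#include <linux/of.h>

static bool example_has_child(struct device_node *parent)
{
	struct device_node *child = of_get_next_child(parent, NULL);

	if (!child)
		return false;

	of_node_put(child);	/* balance the implicit get */
	return true;
}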
*/ + prev->subdev.dev = vdev->mdev->dev; ret = v4l2_device_register_subdev(vdev, &prev->subdev); if (ret < 0) goto error; diff --git a/drivers/media/platform/omap3isp/ispresizer.c b/drivers/media/platform/omap3isp/ispresizer.c index 21ca6954df72..78d9dd7ea2da 100644 --- a/drivers/media/platform/omap3isp/ispresizer.c +++ b/drivers/media/platform/omap3isp/ispresizer.c @@ -1681,6 +1681,7 @@ int omap3isp_resizer_register_entities(struct isp_res_device *res, int ret; /* Register the subdev and video nodes. */ + res->subdev.dev = vdev->mdev->dev; ret = v4l2_device_register_subdev(vdev, &res->subdev); if (ret < 0) goto error; diff --git a/drivers/media/platform/omap3isp/ispstat.c b/drivers/media/platform/omap3isp/ispstat.c index 62b2eacb96fd..5b9b57f4d9bf 100644 --- a/drivers/media/platform/omap3isp/ispstat.c +++ b/drivers/media/platform/omap3isp/ispstat.c @@ -1026,6 +1026,8 @@ void omap3isp_stat_unregister_entities(struct ispstat *stat) int omap3isp_stat_register_entities(struct ispstat *stat, struct v4l2_device *vdev) { + stat->subdev.dev = vdev->mdev->dev; + return v4l2_device_register_subdev(vdev, &stat->subdev); } diff --git a/drivers/media/platform/rcar_fdp1.c b/drivers/media/platform/rcar_fdp1.c index 43aae9b6bb20..c23ec127c277 100644 --- a/drivers/media/platform/rcar_fdp1.c +++ b/drivers/media/platform/rcar_fdp1.c @@ -2306,7 +2306,7 @@ static int fdp1_probe(struct platform_device *pdev) fdp1->fcp = rcar_fcp_get(fcp_node); of_node_put(fcp_node); if (IS_ERR(fdp1->fcp)) { - dev_err(&pdev->dev, "FCP not found (%ld)\n", + dev_dbg(&pdev->dev, "FCP not found (%ld)\n", PTR_ERR(fdp1->fcp)); return PTR_ERR(fdp1->fcp); } diff --git a/drivers/media/platform/vivid/vivid-ctrls.c b/drivers/media/platform/vivid/vivid-ctrls.c index 3e916c8befb7..7a52f585cab7 100644 --- a/drivers/media/platform/vivid/vivid-ctrls.c +++ b/drivers/media/platform/vivid/vivid-ctrls.c @@ -1473,7 +1473,7 @@ int vivid_create_controls(struct vivid_dev *dev, bool show_ccs_cap, v4l2_ctrl_handler_init(hdl_vid_cap, 55); v4l2_ctrl_new_custom(hdl_vid_cap, &vivid_ctrl_class, NULL); v4l2_ctrl_handler_init(hdl_vid_out, 26); - if (!no_error_inj || dev->has_fb) + if (!no_error_inj || dev->has_fb || dev->num_hdmi_outputs) v4l2_ctrl_new_custom(hdl_vid_out, &vivid_ctrl_class, NULL); v4l2_ctrl_handler_init(hdl_vbi_cap, 21); v4l2_ctrl_new_custom(hdl_vbi_cap, &vivid_ctrl_class, NULL); diff --git a/drivers/media/platform/vivid/vivid-kthread-cap.c b/drivers/media/platform/vivid/vivid-kthread-cap.c index 6cf495a7d5cc..003319d7816d 100644 --- a/drivers/media/platform/vivid/vivid-kthread-cap.c +++ b/drivers/media/platform/vivid/vivid-kthread-cap.c @@ -232,8 +232,8 @@ static void *plane_vaddr(struct tpg_data *tpg, struct vivid_buffer *buf, return vbuf; } -static int vivid_copy_buffer(struct vivid_dev *dev, unsigned p, u8 *vcapbuf, - struct vivid_buffer *vid_cap_buf) +static noinline_for_stack int vivid_copy_buffer(struct vivid_dev *dev, unsigned p, + u8 *vcapbuf, struct vivid_buffer *vid_cap_buf) { bool blank = dev->must_blank[vid_cap_buf->vb.vb2_buf.index]; struct tpg_data *tpg = &dev->tpg; @@ -658,6 +658,8 @@ static void vivid_cap_update_frame_period(struct vivid_dev *dev) u64 f_period; f_period = (u64)dev->timeperframe_vid_cap.numerator * 1000000000; + if (WARN_ON(dev->timeperframe_vid_cap.denominator == 0)) + dev->timeperframe_vid_cap.denominator = 1; do_div(f_period, dev->timeperframe_vid_cap.denominator); if (dev->field_cap == V4L2_FIELD_ALTERNATE) f_period >>= 1; @@ -670,7 +672,8 @@ static void vivid_cap_update_frame_period(struct vivid_dev *dev) 
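The vivid change just above adds a guard in front of the frame-period division: do_div() divides a 64-bit value in place and must never see a zero divisor, so a zero denominator is reported once with WARN_ON() and clamped to 1 rather than crashing the capture thread. The same guard in isolation, with illustrative names:

#include <linux/bug.h>
#include <linux/math64.h>

static u64 example_frame_period_ns(u32 numerator, u32 denominator)
{
	u64 period = (u64)numerator * 1000000000ULL;

	if (WARN_ON(denominator == 0))
		denominator = 1;	/* keep a sane period */

	do_div(period, denominator);	/* quotient lands back in period */
	return period;
}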
dev->cap_frame_period = f_period; } -static void vivid_thread_vid_cap_tick(struct vivid_dev *dev, int dropped_bufs) +static noinline_for_stack void vivid_thread_vid_cap_tick(struct vivid_dev *dev, + int dropped_bufs) { struct vivid_buffer *vid_cap_buf = NULL; struct vivid_buffer *vbi_cap_buf = NULL; diff --git a/drivers/media/platform/vsp1/vsp1_dl.c b/drivers/media/platform/vsp1/vsp1_dl.c index 104b6f514536..d7b43037e500 100644 --- a/drivers/media/platform/vsp1/vsp1_dl.c +++ b/drivers/media/platform/vsp1/vsp1_dl.c @@ -557,8 +557,10 @@ static struct vsp1_dl_list *vsp1_dl_list_alloc(struct vsp1_dl_manager *dlm) /* Get a default body for our list. */ dl->body0 = vsp1_dl_body_get(dlm->pool); - if (!dl->body0) + if (!dl->body0) { + kfree(dl); return NULL; + } header_offset = dl->body0->max_entries * sizeof(*dl->body0->entries); diff --git a/drivers/media/radio/si470x/radio-si470x-usb.c b/drivers/media/radio/si470x/radio-si470x-usb.c index 49073747b1e7..fedff68d8c49 100644 --- a/drivers/media/radio/si470x/radio-si470x-usb.c +++ b/drivers/media/radio/si470x/radio-si470x-usb.c @@ -734,7 +734,7 @@ static int si470x_usb_driver_probe(struct usb_interface *intf, /* start radio */ retval = si470x_start_usb(radio); if (retval < 0) - goto err_all; + goto err_buf; /* set initial frequency */ si470x_set_freq(radio, 87.5 * FREQ_MUL); /* available in all regions */ @@ -749,6 +749,8 @@ static int si470x_usb_driver_probe(struct usb_interface *intf, return 0; err_all: + usb_kill_urb(radio->int_in_urb); +err_buf: kfree(radio->buffer); err_ctrl: v4l2_ctrl_handler_free(&radio->hdl); @@ -822,6 +824,7 @@ static void si470x_usb_driver_disconnect(struct usb_interface *intf) mutex_lock(&radio->lock); v4l2_device_disconnect(&radio->v4l2_dev); video_unregister_device(&radio->videodev); + usb_kill_urb(radio->int_in_urb); usb_set_intfdata(intf, NULL); mutex_unlock(&radio->lock); v4l2_device_put(&radio->v4l2_dev); diff --git a/drivers/media/rc/iguanair.c b/drivers/media/rc/iguanair.c index ea05e125016a..872d6441e512 100644 --- a/drivers/media/rc/iguanair.c +++ b/drivers/media/rc/iguanair.c @@ -413,6 +413,10 @@ static int iguanair_probe(struct usb_interface *intf, int ret, pipein, pipeout; struct usb_host_interface *idesc; + idesc = intf->altsetting; + if (idesc->desc.bNumEndpoints < 2) + return -ENODEV; + ir = kzalloc(sizeof(*ir), GFP_KERNEL); rc = rc_allocate_device(RC_DRIVER_IR_RAW); if (!ir || !rc) { @@ -427,18 +431,13 @@ static int iguanair_probe(struct usb_interface *intf, ir->urb_in = usb_alloc_urb(0, GFP_KERNEL); ir->urb_out = usb_alloc_urb(0, GFP_KERNEL); - if (!ir->buf_in || !ir->packet || !ir->urb_in || !ir->urb_out) { + if (!ir->buf_in || !ir->packet || !ir->urb_in || !ir->urb_out || + !usb_endpoint_is_int_in(&idesc->endpoint[0].desc) || + !usb_endpoint_is_int_out(&idesc->endpoint[1].desc)) { ret = -ENOMEM; goto out; } - idesc = intf->altsetting; - - if (idesc->desc.bNumEndpoints < 2) { - ret = -ENODEV; - goto out; - } - ir->rc = rc; ir->dev = &intf->dev; ir->udev = udev; diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c index 7bee72108b0e..37a850421fbb 100644 --- a/drivers/media/rc/imon.c +++ b/drivers/media/rc/imon.c @@ -1826,12 +1826,17 @@ static void imon_get_ffdc_type(struct imon_context *ictx) break; /* iMON VFD, MCE IR */ case 0x46: - case 0x7e: case 0x9e: dev_info(ictx->dev, "0xffdc iMON VFD, MCE IR"); detected_display_type = IMON_DISPLAY_TYPE_VFD; allowed_protos = RC_PROTO_BIT_RC6_MCE; break; + /* iMON VFD, iMON or MCE IR */ + case 0x7e: + dev_info(ictx->dev, "0xffdc iMON VFD, iMON or MCE 
IR"); + detected_display_type = IMON_DISPLAY_TYPE_VFD; + allowed_protos |= RC_PROTO_BIT_RC6_MCE; + break; /* iMON LCD, MCE IR */ case 0x9f: dev_info(ictx->dev, "0xffdc iMON LCD, MCE IR"); diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c index 4d5351ebb940..9929fcdec74d 100644 --- a/drivers/media/rc/mceusb.c +++ b/drivers/media/rc/mceusb.c @@ -31,21 +31,22 @@ #include <linux/pm_wakeup.h> #include <media/rc-core.h> -#define DRIVER_VERSION "1.94" +#define DRIVER_VERSION "1.95" #define DRIVER_AUTHOR "Jarod Wilson <jarod@xxxxxxxxxx>" #define DRIVER_DESC "Windows Media Center Ed. eHome Infrared Transceiver " \ "device driver" #define DRIVER_NAME "mceusb" +#define USB_TX_TIMEOUT 1000 /* in milliseconds */ #define USB_CTRL_MSG_SZ 2 /* Size of usb ctrl msg on gen1 hw */ #define MCE_G1_INIT_MSGS 40 /* Init messages on gen1 hw to throw out */ /* MCE constants */ -#define MCE_CMDBUF_SIZE 384 /* MCE Command buffer length */ +#define MCE_IRBUF_SIZE 128 /* TX IR buffer length */ #define MCE_TIME_UNIT 50 /* Approx 50us resolution */ -#define MCE_CODE_LENGTH 5 /* Normal length of packet (with header) */ -#define MCE_PACKET_SIZE 4 /* Normal length of packet (without header) */ -#define MCE_IRDATA_HEADER 0x84 /* Actual header format is 0x80 + num_bytes */ +#define MCE_PACKET_SIZE 31 /* Max length of packet (with header) */ +#define MCE_IRDATA_HEADER (0x80 + MCE_PACKET_SIZE - 1) + /* Actual format is 0x80 + num_bytes */ #define MCE_IRDATA_TRAILER 0x80 /* End of IR data */ #define MCE_MAX_CHANNELS 2 /* Two transmitters, hardware dependent? */ #define MCE_DEFAULT_TX_MASK 0x03 /* Vals: TX1=0x01, TX2=0x02, ALL=0x03 */ @@ -607,9 +608,9 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, u8 *buf, int buf_len, if (len <= skip) return; - dev_dbg(dev, "%cx data: %*ph (length=%d)", - (out ? 't' : 'r'), - min(len, buf_len - offset), buf + offset, len); + dev_dbg(dev, "%cx data[%d]: %*ph (len=%d sz=%d)", + (out ? 't' : 'r'), offset, + min(len, buf_len - offset), buf + offset, len, buf_len); inout = out ? "Request" : "Got"; @@ -731,6 +732,9 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, u8 *buf, int buf_len, case MCE_RSP_CMD_ILLEGAL: dev_dbg(dev, "Illegal PORT_IR command"); break; + case MCE_RSP_TX_TIMEOUT: + dev_dbg(dev, "IR TX timeout (TX buffer underrun)"); + break; default: dev_dbg(dev, "Unknown command 0x%02x 0x%02x", cmd, subcmd); @@ -745,13 +749,14 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, u8 *buf, int buf_len, dev_dbg(dev, "End of raw IR data"); else if ((cmd != MCE_CMD_PORT_IR) && ((cmd & MCE_PORT_MASK) == MCE_COMMAND_IRDATA)) - dev_dbg(dev, "Raw IR data, %d pulse/space samples", ir->rem); + dev_dbg(dev, "Raw IR data, %d pulse/space samples", + cmd & MCE_PACKET_LENGTH_MASK); #endif } /* * Schedule work that can't be done in interrupt handlers - * (mceusb_dev_recv() and mce_async_callback()) nor tasklets. + * (mceusb_dev_recv() and mce_write_callback()) nor tasklets. * Invokes mceusb_deferred_kevent() for recovering from * error events specified by the kevent bit field. */ @@ -764,23 +769,80 @@ static void mceusb_defer_kevent(struct mceusb_dev *ir, int kevent) dev_dbg(ir->dev, "kevent %d scheduled", kevent); } -static void mce_async_callback(struct urb *urb) +static void mce_write_callback(struct urb *urb) { - struct mceusb_dev *ir; - int len; - if (!urb) return; - ir = urb->context; + complete(urb->context); +} + +/* + * Write (TX/send) data to MCE device USB endpoint out. + * Used for IR blaster TX and MCE device commands. 
+ * + * Return: The number of bytes written (> 0) or errno (< 0). + */ +static int mce_write(struct mceusb_dev *ir, u8 *data, int size) +{ + int ret; + struct urb *urb; + struct device *dev = ir->dev; + unsigned char *buf_out; + struct completion tx_done; + unsigned long expire; + unsigned long ret_wait; + + mceusb_dev_printdata(ir, data, size, 0, size, true); + + urb = usb_alloc_urb(0, GFP_KERNEL); + if (unlikely(!urb)) { + dev_err(dev, "Error: mce write couldn't allocate urb"); + return -ENOMEM; + } + + buf_out = kmalloc(size, GFP_KERNEL); + if (!buf_out) { + usb_free_urb(urb); + return -ENOMEM; + } + + init_completion(&tx_done); + + /* outbound data */ + if (usb_endpoint_xfer_int(ir->usb_ep_out)) + usb_fill_int_urb(urb, ir->usbdev, ir->pipe_out, + buf_out, size, mce_write_callback, &tx_done, + ir->usb_ep_out->bInterval); + else + usb_fill_bulk_urb(urb, ir->usbdev, ir->pipe_out, + buf_out, size, mce_write_callback, &tx_done); + memcpy(buf_out, data, size); + + ret = usb_submit_urb(urb, GFP_KERNEL); + if (ret) { + dev_err(dev, "Error: mce write submit urb error = %d", ret); + kfree(buf_out); + usb_free_urb(urb); + return ret; + } + + expire = msecs_to_jiffies(USB_TX_TIMEOUT); + ret_wait = wait_for_completion_timeout(&tx_done, expire); + if (!ret_wait) { + dev_err(dev, "Error: mce write timed out (expire = %lu (%dms))", + expire, USB_TX_TIMEOUT); + usb_kill_urb(urb); + ret = (urb->status == -ENOENT ? -ETIMEDOUT : urb->status); + } else { + ret = urb->status; + } + if (ret >= 0) + ret = urb->actual_length; /* bytes written */ switch (urb->status) { /* success */ case 0: - len = urb->actual_length; - - mceusb_dev_printdata(ir, urb->transfer_buffer, len, - 0, len, true); break; case -ECONNRESET: @@ -790,140 +852,135 @@ static void mce_async_callback(struct urb *urb) break; case -EPIPE: - dev_err(ir->dev, "Error: request urb status = %d (TX HALT)", + dev_err(ir->dev, "Error: mce write urb status = %d (TX HALT)", urb->status); mceusb_defer_kevent(ir, EVENT_TX_HALT); break; default: - dev_err(ir->dev, "Error: request urb status = %d", urb->status); + dev_err(ir->dev, "Error: mce write urb status = %d", + urb->status); break; } - /* the transfer buffer and urb were allocated in mce_request_packet */ - kfree(urb->transfer_buffer); - usb_free_urb(urb); -} - -/* request outgoing (send) usb packet - used to initialize remote */ -static void mce_request_packet(struct mceusb_dev *ir, unsigned char *data, - int size) -{ - int res; - struct urb *async_urb; - struct device *dev = ir->dev; - unsigned char *async_buf; + dev_dbg(dev, "tx done status = %d (wait = %lu, expire = %lu (%dms), urb->actual_length = %d, urb->status = %d)", + ret, ret_wait, expire, USB_TX_TIMEOUT, + urb->actual_length, urb->status); - async_urb = usb_alloc_urb(0, GFP_KERNEL); - if (unlikely(!async_urb)) { - dev_err(dev, "Error, couldn't allocate urb!"); - return; - } - - async_buf = kmalloc(size, GFP_KERNEL); - if (!async_buf) { - usb_free_urb(async_urb); - return; - } - - /* outbound data */ - if (usb_endpoint_xfer_int(ir->usb_ep_out)) - usb_fill_int_urb(async_urb, ir->usbdev, ir->pipe_out, - async_buf, size, mce_async_callback, ir, - ir->usb_ep_out->bInterval); - else - usb_fill_bulk_urb(async_urb, ir->usbdev, ir->pipe_out, - async_buf, size, mce_async_callback, ir); - - memcpy(async_buf, data, size); - - dev_dbg(dev, "send request called (size=%#x)", size); + kfree(buf_out); + usb_free_urb(urb); - res = usb_submit_urb(async_urb, GFP_ATOMIC); - if (res) { - dev_err(dev, "send request FAILED! 
(res=%d)", res); - kfree(async_buf); - usb_free_urb(async_urb); - return; - } - dev_dbg(dev, "send request complete (res=%d)", res); + return ret; } -static void mce_async_out(struct mceusb_dev *ir, unsigned char *data, int size) +static void mce_command_out(struct mceusb_dev *ir, u8 *data, int size) { int rsize = sizeof(DEVICE_RESUME); if (ir->need_reset) { ir->need_reset = false; - mce_request_packet(ir, DEVICE_RESUME, rsize); + mce_write(ir, DEVICE_RESUME, rsize); msleep(10); } - mce_request_packet(ir, data, size); + mce_write(ir, data, size); msleep(10); } -/* Send data out the IR blaster port(s) */ +/* + * Transmit IR out the MCE device IR blaster port(s). + * + * Convert IR pulse/space sequence from LIRC to MCE format. + * Break up a long IR sequence into multiple parts (MCE IR data packets). + * + * u32 txbuf[] consists of IR pulse, space, ..., and pulse times in usec. + * Pulses and spaces are implicit by their position. + * The first IR sample, txbuf[0], is always a pulse. + * + * u8 irbuf[] consists of multiple IR data packets for the MCE device. + * A packet is 1 u8 MCE_IRDATA_HEADER and up to 30 u8 IR samples. + * An IR sample is 1-bit pulse/space flag with 7-bit time + * in MCE time units (50usec). + * + * Return: The number of IR samples sent (> 0) or errno (< 0). + */ static int mceusb_tx_ir(struct rc_dev *dev, unsigned *txbuf, unsigned count) { struct mceusb_dev *ir = dev->priv; - int i, length, ret = 0; - int cmdcount = 0; - unsigned char cmdbuf[MCE_CMDBUF_SIZE]; - - /* MCE tx init header */ - cmdbuf[cmdcount++] = MCE_CMD_PORT_IR; - cmdbuf[cmdcount++] = MCE_CMD_SETIRTXPORTS; - cmdbuf[cmdcount++] = ir->tx_mask; + u8 cmdbuf[3] = { MCE_CMD_PORT_IR, MCE_CMD_SETIRTXPORTS, 0x00 }; + u8 irbuf[MCE_IRBUF_SIZE]; + int ircount = 0; + unsigned int irsample; + int i, length, ret; /* Send the set TX ports command */ - mce_async_out(ir, cmdbuf, cmdcount); - cmdcount = 0; - - /* Generate mce packet data */ - for (i = 0; (i < count) && (cmdcount < MCE_CMDBUF_SIZE); i++) { - txbuf[i] = txbuf[i] / MCE_TIME_UNIT; - - do { /* loop to support long pulses/spaces > 127*50us=6.35ms */ - - /* Insert mce packet header every 4th entry */ - if ((cmdcount < MCE_CMDBUF_SIZE) && - (cmdcount % MCE_CODE_LENGTH) == 0) - cmdbuf[cmdcount++] = MCE_IRDATA_HEADER; - - /* Insert mce packet data */ - if (cmdcount < MCE_CMDBUF_SIZE) - cmdbuf[cmdcount++] = - (txbuf[i] < MCE_PULSE_BIT ? - txbuf[i] : MCE_MAX_PULSE_LENGTH) | - (i & 1 ? 0x00 : MCE_PULSE_BIT); - else { - ret = -EINVAL; - goto out; + cmdbuf[2] = ir->tx_mask; + mce_command_out(ir, cmdbuf, sizeof(cmdbuf)); + + /* Generate mce IR data packet */ + for (i = 0; i < count; i++) { + irsample = txbuf[i] / MCE_TIME_UNIT; + + /* loop to support long pulses/spaces > 6350us (127*50us) */ + while (irsample > 0) { + /* Insert IR header every 30th entry */ + if (ircount % MCE_PACKET_SIZE == 0) { + /* Room for IR header and one IR sample? 
*/ + if (ircount >= MCE_IRBUF_SIZE - 1) { + /* Send near full buffer */ + ret = mce_write(ir, irbuf, ircount); + if (ret < 0) + return ret; + ircount = 0; + } + irbuf[ircount++] = MCE_IRDATA_HEADER; } - } while ((txbuf[i] > MCE_MAX_PULSE_LENGTH) && - (txbuf[i] -= MCE_MAX_PULSE_LENGTH)); - } - - /* Check if we have room for the empty packet at the end */ - if (cmdcount >= MCE_CMDBUF_SIZE) { - ret = -EINVAL; - goto out; - } + /* Insert IR sample */ + if (irsample <= MCE_MAX_PULSE_LENGTH) { + irbuf[ircount] = irsample; + irsample = 0; + } else { + irbuf[ircount] = MCE_MAX_PULSE_LENGTH; + irsample -= MCE_MAX_PULSE_LENGTH; + } + /* + * Even i = IR pulse + * Odd i = IR space + */ + irbuf[ircount] |= (i & 1 ? 0 : MCE_PULSE_BIT); + ircount++; + + /* IR buffer full? */ + if (ircount >= MCE_IRBUF_SIZE) { + /* Fix packet length in last header */ + length = ircount % MCE_PACKET_SIZE; + if (length > 0) + irbuf[ircount - length] -= + MCE_PACKET_SIZE - length; + /* Send full buffer */ + ret = mce_write(ir, irbuf, ircount); + if (ret < 0) + return ret; + ircount = 0; + } + } + } /* after for loop, 0 <= ircount < MCE_IRBUF_SIZE */ /* Fix packet length in last header */ - length = cmdcount % MCE_CODE_LENGTH; - cmdbuf[cmdcount - length] -= MCE_CODE_LENGTH - length; + length = ircount % MCE_PACKET_SIZE; + if (length > 0) + irbuf[ircount - length] -= MCE_PACKET_SIZE - length; - /* All mce commands end with an empty packet (0x80) */ - cmdbuf[cmdcount++] = MCE_IRDATA_TRAILER; + /* Append IR trailer (0x80) to final partial (or empty) IR buffer */ + irbuf[ircount++] = MCE_IRDATA_TRAILER; - /* Transmit the command to the mce device */ - mce_async_out(ir, cmdbuf, cmdcount); + /* Send final buffer */ + ret = mce_write(ir, irbuf, ircount); + if (ret < 0) + return ret; -out: - return ret ? 
ret : count; + return count; } /* Sets active IR outputs -- mce devices typically have two */ @@ -963,7 +1020,7 @@ static int mceusb_set_tx_carrier(struct rc_dev *dev, u32 carrier) cmdbuf[2] = MCE_CMD_SIG_END; cmdbuf[3] = MCE_IRDATA_TRAILER; dev_dbg(ir->dev, "disabling carrier modulation"); - mce_async_out(ir, cmdbuf, sizeof(cmdbuf)); + mce_command_out(ir, cmdbuf, sizeof(cmdbuf)); return 0; } @@ -977,7 +1034,7 @@ static int mceusb_set_tx_carrier(struct rc_dev *dev, u32 carrier) carrier); /* Transmit new carrier to mce device */ - mce_async_out(ir, cmdbuf, sizeof(cmdbuf)); + mce_command_out(ir, cmdbuf, sizeof(cmdbuf)); return 0; } } @@ -1000,10 +1057,10 @@ static int mceusb_set_timeout(struct rc_dev *dev, unsigned int timeout) cmdbuf[2] = units >> 8; cmdbuf[3] = units; - mce_async_out(ir, cmdbuf, sizeof(cmdbuf)); + mce_command_out(ir, cmdbuf, sizeof(cmdbuf)); /* get receiver timeout value */ - mce_async_out(ir, GET_RX_TIMEOUT, sizeof(GET_RX_TIMEOUT)); + mce_command_out(ir, GET_RX_TIMEOUT, sizeof(GET_RX_TIMEOUT)); return 0; } @@ -1028,7 +1085,7 @@ static int mceusb_set_rx_wideband(struct rc_dev *dev, int enable) ir->wideband_rx_enabled = false; cmdbuf[2] = 1; /* port 1 is long range receiver */ } - mce_async_out(ir, cmdbuf, sizeof(cmdbuf)); + mce_command_out(ir, cmdbuf, sizeof(cmdbuf)); /* response from device sets ir->learning_active */ return 0; @@ -1051,7 +1108,7 @@ static int mceusb_set_rx_carrier_report(struct rc_dev *dev, int enable) ir->carrier_report_enabled = true; if (!ir->learning_active) { cmdbuf[2] = 2; /* port 2 is short range receiver */ - mce_async_out(ir, cmdbuf, sizeof(cmdbuf)); + mce_command_out(ir, cmdbuf, sizeof(cmdbuf)); } } else { ir->carrier_report_enabled = false; @@ -1062,7 +1119,7 @@ static int mceusb_set_rx_carrier_report(struct rc_dev *dev, int enable) */ if (ir->learning_active && !ir->wideband_rx_enabled) { cmdbuf[2] = 1; /* port 1 is long range receiver */ - mce_async_out(ir, cmdbuf, sizeof(cmdbuf)); + mce_command_out(ir, cmdbuf, sizeof(cmdbuf)); } } @@ -1141,6 +1198,7 @@ static void mceusb_handle_command(struct mceusb_dev *ir, int index) } break; case MCE_RSP_CMD_ILLEGAL: + case MCE_RSP_TX_TIMEOUT: ir->need_reset = true; break; default: @@ -1279,7 +1337,7 @@ static void mceusb_get_emulator_version(struct mceusb_dev *ir) { /* If we get no reply or an illegal command reply, its ver 1, says MS */ ir->emver = 1; - mce_async_out(ir, GET_EMVER, sizeof(GET_EMVER)); + mce_command_out(ir, GET_EMVER, sizeof(GET_EMVER)); } static void mceusb_gen1_init(struct mceusb_dev *ir) @@ -1325,10 +1383,10 @@ static void mceusb_gen1_init(struct mceusb_dev *ir) dev_dbg(dev, "set handshake - retC = %d", ret); /* device resume */ - mce_async_out(ir, DEVICE_RESUME, sizeof(DEVICE_RESUME)); + mce_command_out(ir, DEVICE_RESUME, sizeof(DEVICE_RESUME)); /* get hw/sw revision? */ - mce_async_out(ir, GET_REVISION, sizeof(GET_REVISION)); + mce_command_out(ir, GET_REVISION, sizeof(GET_REVISION)); kfree(data); } @@ -1336,13 +1394,13 @@ static void mceusb_gen1_init(struct mceusb_dev *ir) static void mceusb_gen2_init(struct mceusb_dev *ir) { /* device resume */ - mce_async_out(ir, DEVICE_RESUME, sizeof(DEVICE_RESUME)); + mce_command_out(ir, DEVICE_RESUME, sizeof(DEVICE_RESUME)); /* get wake version (protocol, key, address) */ - mce_async_out(ir, GET_WAKEVERSION, sizeof(GET_WAKEVERSION)); + mce_command_out(ir, GET_WAKEVERSION, sizeof(GET_WAKEVERSION)); /* unknown what this one actually returns... 
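To recap the wire format handled by the rewritten mceusb_tx_ir() above: each IR sample byte carries a 7-bit duration in 50 us units, with bit 7 set for a pulse and clear for a space, so any duration longer than 127 * 50 us must be split across several samples; packets of up to 30 such samples are prefixed with a 0x80 + length header. Packing a single measurement, in isolation (illustrative helper, not driver code):

#include <linux/types.h>

#define EX_TIME_UNIT	50	/* us per MCE time unit */
#define EX_MAX_UNITS	0x7f	/* 7-bit duration field */
#define EX_PULSE_BIT	0x80

static int example_pack_sample(u8 *buf, int pos, unsigned int usecs,
			       bool pulse)
{
	unsigned int units = usecs / EX_TIME_UNIT;

	while (units > 0) {
		u8 chunk = units > EX_MAX_UNITS ? EX_MAX_UNITS : units;

		buf[pos++] = chunk | (pulse ? EX_PULSE_BIT : 0);
		units -= chunk;
	}

	return pos;	/* next free slot in the buffer */
}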
*/ - mce_async_out(ir, GET_UNKNOWN2, sizeof(GET_UNKNOWN2)); + mce_command_out(ir, GET_UNKNOWN2, sizeof(GET_UNKNOWN2)); } static void mceusb_get_parameters(struct mceusb_dev *ir) @@ -1356,24 +1414,24 @@ static void mceusb_get_parameters(struct mceusb_dev *ir) ir->num_rxports = 2; /* get number of tx and rx ports */ - mce_async_out(ir, GET_NUM_PORTS, sizeof(GET_NUM_PORTS)); + mce_command_out(ir, GET_NUM_PORTS, sizeof(GET_NUM_PORTS)); /* get the carrier and frequency */ - mce_async_out(ir, GET_CARRIER_FREQ, sizeof(GET_CARRIER_FREQ)); + mce_command_out(ir, GET_CARRIER_FREQ, sizeof(GET_CARRIER_FREQ)); if (ir->num_txports && !ir->flags.no_tx) /* get the transmitter bitmask */ - mce_async_out(ir, GET_TX_BITMASK, sizeof(GET_TX_BITMASK)); + mce_command_out(ir, GET_TX_BITMASK, sizeof(GET_TX_BITMASK)); /* get receiver timeout value */ - mce_async_out(ir, GET_RX_TIMEOUT, sizeof(GET_RX_TIMEOUT)); + mce_command_out(ir, GET_RX_TIMEOUT, sizeof(GET_RX_TIMEOUT)); /* get receiver sensor setting */ - mce_async_out(ir, GET_RX_SENSOR, sizeof(GET_RX_SENSOR)); + mce_command_out(ir, GET_RX_SENSOR, sizeof(GET_RX_SENSOR)); for (i = 0; i < ir->num_txports; i++) { cmdbuf[2] = i; - mce_async_out(ir, cmdbuf, sizeof(cmdbuf)); + mce_command_out(ir, cmdbuf, sizeof(cmdbuf)); } } @@ -1382,7 +1440,7 @@ static void mceusb_flash_led(struct mceusb_dev *ir) if (ir->emver < 2) return; - mce_async_out(ir, FLASH_LED, sizeof(FLASH_LED)); + mce_command_out(ir, FLASH_LED, sizeof(FLASH_LED)); } /* diff --git a/drivers/media/rc/mtk-cir.c b/drivers/media/rc/mtk-cir.c index 50fb0aebb8d4..f2259082e3d8 100644 --- a/drivers/media/rc/mtk-cir.c +++ b/drivers/media/rc/mtk-cir.c @@ -35,6 +35,11 @@ /* Fields containing pulse width data */ #define MTK_WIDTH_MASK (GENMASK(7, 0)) +/* IR threshold */ +#define MTK_IRTHD 0x14 +#define MTK_DG_CNT_MASK (GENMASK(12, 8)) +#define MTK_DG_CNT(x) ((x) << 8) + /* Bit to enable interrupt */ #define MTK_IRINT_EN BIT(0) @@ -398,6 +403,9 @@ static int mtk_ir_probe(struct platform_device *pdev) mtk_w32_mask(ir, val, ir->data->fields[MTK_HW_PERIOD].mask, ir->data->fields[MTK_HW_PERIOD].reg); + /* Set de-glitch counter */ + mtk_w32_mask(ir, MTK_DG_CNT(1), MTK_DG_CNT_MASK, MTK_IRTHD); + /* Enable IR and PWM */ val = mtk_r32(ir, MTK_CONFIG_HIGH_REG); val |= MTK_OK_COUNT(ir->data->ok_count) | MTK_PWM_EN | MTK_IR_EN; diff --git a/drivers/media/usb/cpia2/cpia2_usb.c b/drivers/media/usb/cpia2/cpia2_usb.c index 17468f7d78ed..3ab80a7b4498 100644 --- a/drivers/media/usb/cpia2/cpia2_usb.c +++ b/drivers/media/usb/cpia2/cpia2_usb.c @@ -676,6 +676,10 @@ static int submit_urbs(struct camera_data *cam) if (!urb) { for (j = 0; j < i; j++) usb_free_urb(cam->sbuf[j].urb); + for (j = 0; j < NUM_SBUF; j++) { + kfree(cam->sbuf[j].data); + cam->sbuf[j].data = NULL; + } return -ENOMEM; } diff --git a/drivers/media/usb/dvb-usb/dib0700_devices.c b/drivers/media/usb/dvb-usb/dib0700_devices.c index 66d685065e06..ab7a100ec84f 100644 --- a/drivers/media/usb/dvb-usb/dib0700_devices.c +++ b/drivers/media/usb/dvb-usb/dib0700_devices.c @@ -2439,9 +2439,13 @@ static int dib9090_tuner_attach(struct dvb_usb_adapter *adap) 8, 0x0486, }; + if (!IS_ENABLED(CONFIG_DVB_DIB9000)) + return -ENODEV; if (dvb_attach(dib0090_fw_register, adap->fe_adap[0].fe, i2c, &dib9090_dib0090_config) == NULL) return -ENODEV; i2c = dib9000_get_i2c_master(adap->fe_adap[0].fe, DIBX000_I2C_INTERFACE_GPIO_1_2, 0); + if (!i2c) + return -ENODEV; if (dib01x0_pmu_update(i2c, data_dib190, 10) != 0) return -ENODEV; dib0700_set_i2c_speed(adap->dev, 1500); @@ -2517,10 +2521,14 @@ static 
int nim9090md_tuner_attach(struct dvb_usb_adapter *adap) 0, 0x00ef, 8, 0x0406, }; + if (!IS_ENABLED(CONFIG_DVB_DIB9000)) + return -ENODEV; i2c = dib9000_get_tuner_interface(adap->fe_adap[0].fe); if (dvb_attach(dib0090_fw_register, adap->fe_adap[0].fe, i2c, &nim9090md_dib0090_config[0]) == NULL) return -ENODEV; i2c = dib9000_get_i2c_master(adap->fe_adap[0].fe, DIBX000_I2C_INTERFACE_GPIO_1_2, 0); + if (!i2c) + return -ENODEV; if (dib01x0_pmu_update(i2c, data_dib190, 10) < 0) return -ENODEV; diff --git a/drivers/media/usb/dvb-usb/pctv452e.c b/drivers/media/usb/dvb-usb/pctv452e.c index d6b36e4f33d2..441d878fc22c 100644 --- a/drivers/media/usb/dvb-usb/pctv452e.c +++ b/drivers/media/usb/dvb-usb/pctv452e.c @@ -909,14 +909,6 @@ static int pctv452e_frontend_attach(struct dvb_usb_adapter *a) &a->dev->i2c_adap); if (!a->fe_adap[0].fe) return -ENODEV; - - /* - * dvb_frontend will call dvb_detach for both stb0899_detach - * and stb0899_release but we only do dvb_attach(stb0899_attach). - * Increment the module refcount instead. - */ - symbol_get(stb0899_attach); - if ((dvb_attach(lnbp22_attach, a->fe_adap[0].fe, &a->dev->i2c_adap)) == NULL) err("Cannot attach lnbp22\n"); diff --git a/drivers/media/usb/em28xx/em28xx-cards.c b/drivers/media/usb/em28xx/em28xx-cards.c index 1283c7ca9ad5..1de835a591a0 100644 --- a/drivers/media/usb/em28xx/em28xx-cards.c +++ b/drivers/media/usb/em28xx/em28xx-cards.c @@ -4020,7 +4020,6 @@ static void em28xx_usb_disconnect(struct usb_interface *intf) dev->dev_next->disconnected = 1; dev_info(&dev->intf->dev, "Disconnecting %s\n", dev->dev_next->name); - flush_request_modules(dev->dev_next); } dev->disconnected = 1; diff --git a/drivers/media/usb/gspca/konica.c b/drivers/media/usb/gspca/konica.c index d8e40137a204..53db9a2895ea 100644 --- a/drivers/media/usb/gspca/konica.c +++ b/drivers/media/usb/gspca/konica.c @@ -114,6 +114,11 @@ static void reg_r(struct gspca_dev *gspca_dev, u16 value, u16 index) if (ret < 0) { pr_err("reg_r err %d\n", ret); gspca_dev->usb_err = ret; + /* + * Make sure the buffer is zeroed to avoid uninitialized + * values. + */ + memset(gspca_dev->usb_buf, 0, 2); } } diff --git a/drivers/media/usb/gspca/nw80x.c b/drivers/media/usb/gspca/nw80x.c index 59649704beba..880f569bda30 100644 --- a/drivers/media/usb/gspca/nw80x.c +++ b/drivers/media/usb/gspca/nw80x.c @@ -1572,6 +1572,11 @@ static void reg_r(struct gspca_dev *gspca_dev, if (ret < 0) { pr_err("reg_r err %d\n", ret); gspca_dev->usb_err = ret; + /* + * Make sure the buffer is zeroed to avoid uninitialized + * values. + */ + memset(gspca_dev->usb_buf, 0, USB_BUF_SZ); return; } if (len == 1) diff --git a/drivers/media/usb/gspca/ov519.c b/drivers/media/usb/gspca/ov519.c index cfb1f53bc17e..f417dfc0b872 100644 --- a/drivers/media/usb/gspca/ov519.c +++ b/drivers/media/usb/gspca/ov519.c @@ -2073,6 +2073,11 @@ static int reg_r(struct sd *sd, u16 index) } else { gspca_err(gspca_dev, "reg_r %02x failed %d\n", index, ret); sd->gspca_dev.usb_err = ret; + /* + * Make sure the result is zeroed to avoid uninitialized + * values. + */ + gspca_dev->usb_buf[0] = 0; } return ret; @@ -2101,6 +2106,11 @@ static int reg_r8(struct sd *sd, } else { gspca_err(gspca_dev, "reg_r8 %02x failed %d\n", index, ret); sd->gspca_dev.usb_err = ret; + /* + * Make sure the buffer is zeroed to avoid uninitialized + * values. 
+ */ + memset(gspca_dev->usb_buf, 0, 8); } return ret; diff --git a/drivers/media/usb/gspca/ov534.c b/drivers/media/usb/gspca/ov534.c index 56521c991db4..185c1f10fb30 100644 --- a/drivers/media/usb/gspca/ov534.c +++ b/drivers/media/usb/gspca/ov534.c @@ -693,6 +693,11 @@ static u8 ov534_reg_read(struct gspca_dev *gspca_dev, u16 reg) if (ret < 0) { pr_err("read failed %d\n", ret); gspca_dev->usb_err = ret; + /* + * Make sure the result is zeroed to avoid uninitialized + * values. + */ + gspca_dev->usb_buf[0] = 0; } return gspca_dev->usb_buf[0]; } diff --git a/drivers/media/usb/gspca/ov534_9.c b/drivers/media/usb/gspca/ov534_9.c index 867f860a9650..91efc650cf76 100644 --- a/drivers/media/usb/gspca/ov534_9.c +++ b/drivers/media/usb/gspca/ov534_9.c @@ -1145,6 +1145,7 @@ static u8 reg_r(struct gspca_dev *gspca_dev, u16 reg) if (ret < 0) { pr_err("reg_r err %d\n", ret); gspca_dev->usb_err = ret; + return 0; } return gspca_dev->usb_buf[0]; } diff --git a/drivers/media/usb/gspca/se401.c b/drivers/media/usb/gspca/se401.c index 061deee138c3..e087cfb5980b 100644 --- a/drivers/media/usb/gspca/se401.c +++ b/drivers/media/usb/gspca/se401.c @@ -101,6 +101,11 @@ static void se401_read_req(struct gspca_dev *gspca_dev, u16 req, int silent) pr_err("read req failed req %#04x error %d\n", req, err); gspca_dev->usb_err = err; + /* + * Make sure the buffer is zeroed to avoid uninitialized + * values. + */ + memset(gspca_dev->usb_buf, 0, READ_REQ_SIZE); } } diff --git a/drivers/media/usb/gspca/sn9c20x.c b/drivers/media/usb/gspca/sn9c20x.c index b43f89fee6c1..2a6d0a1265a7 100644 --- a/drivers/media/usb/gspca/sn9c20x.c +++ b/drivers/media/usb/gspca/sn9c20x.c @@ -123,6 +123,13 @@ static const struct dmi_system_id flip_dmi_table[] = { DMI_MATCH(DMI_PRODUCT_VERSION, "0341") } }, + { + .ident = "MSI MS-1039", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "MICRO-STAR INT'L CO.,LTD."), + DMI_MATCH(DMI_PRODUCT_NAME, "MS-1039"), + } + }, { .ident = "MSI MS-1632", .matches = { @@ -909,6 +916,11 @@ static void reg_r(struct gspca_dev *gspca_dev, u16 reg, u16 length) if (unlikely(result < 0 || result != length)) { pr_err("Read register %02x failed %d\n", reg, result); gspca_dev->usb_err = result; + /* + * Make sure the buffer is zeroed to avoid uninitialized + * values. + */ + memset(gspca_dev->usb_buf, 0, USB_BUF_SZ); } } diff --git a/drivers/media/usb/gspca/sonixb.c b/drivers/media/usb/gspca/sonixb.c index 046fc2c2a135..4d655e2da9cb 100644 --- a/drivers/media/usb/gspca/sonixb.c +++ b/drivers/media/usb/gspca/sonixb.c @@ -453,6 +453,11 @@ static void reg_r(struct gspca_dev *gspca_dev, dev_err(gspca_dev->v4l2_dev.dev, "Error reading register %02x: %d\n", value, res); gspca_dev->usb_err = res; + /* + * Make sure the result is zeroed to avoid uninitialized + * values. + */ + gspca_dev->usb_buf[0] = 0; } } diff --git a/drivers/media/usb/gspca/sonixj.c b/drivers/media/usb/gspca/sonixj.c index 50a6c8425827..2e1bd2df8304 100644 --- a/drivers/media/usb/gspca/sonixj.c +++ b/drivers/media/usb/gspca/sonixj.c @@ -1162,6 +1162,11 @@ static void reg_r(struct gspca_dev *gspca_dev, if (ret < 0) { pr_err("reg_r err %d\n", ret); gspca_dev->usb_err = ret; + /* + * Make sure the buffer is zeroed to avoid uninitialized + * values. 
+ */ + memset(gspca_dev->usb_buf, 0, USB_BUF_SZ); } } diff --git a/drivers/media/usb/gspca/spca1528.c b/drivers/media/usb/gspca/spca1528.c index 2ae03b60163f..ccc477944ef8 100644 --- a/drivers/media/usb/gspca/spca1528.c +++ b/drivers/media/usb/gspca/spca1528.c @@ -71,6 +71,11 @@ static void reg_r(struct gspca_dev *gspca_dev, if (ret < 0) { pr_err("reg_r err %d\n", ret); gspca_dev->usb_err = ret; + /* + * Make sure the buffer is zeroed to avoid uninitialized + * values. + */ + memset(gspca_dev->usb_buf, 0, USB_BUF_SZ); } } diff --git a/drivers/media/usb/gspca/sq930x.c b/drivers/media/usb/gspca/sq930x.c index d1ba0888d798..c3610247a90e 100644 --- a/drivers/media/usb/gspca/sq930x.c +++ b/drivers/media/usb/gspca/sq930x.c @@ -425,6 +425,11 @@ static void reg_r(struct gspca_dev *gspca_dev, if (ret < 0) { pr_err("reg_r %04x failed %d\n", value, ret); gspca_dev->usb_err = ret; + /* + * Make sure the buffer is zeroed to avoid uninitialized + * values. + */ + memset(gspca_dev->usb_buf, 0, USB_BUF_SZ); } } diff --git a/drivers/media/usb/gspca/sunplus.c b/drivers/media/usb/gspca/sunplus.c index d0ddfa957ca9..f4a4222f0d2e 100644 --- a/drivers/media/usb/gspca/sunplus.c +++ b/drivers/media/usb/gspca/sunplus.c @@ -255,6 +255,11 @@ static void reg_r(struct gspca_dev *gspca_dev, if (ret < 0) { pr_err("reg_r err %d\n", ret); gspca_dev->usb_err = ret; + /* + * Make sure the buffer is zeroed to avoid uninitialized + * values. + */ + memset(gspca_dev->usb_buf, 0, USB_BUF_SZ); } } diff --git a/drivers/media/usb/gspca/vc032x.c b/drivers/media/usb/gspca/vc032x.c index 588a847ea483..4cb7c92ea132 100644 --- a/drivers/media/usb/gspca/vc032x.c +++ b/drivers/media/usb/gspca/vc032x.c @@ -2906,6 +2906,11 @@ static void reg_r_i(struct gspca_dev *gspca_dev, if (ret < 0) { pr_err("reg_r err %d\n", ret); gspca_dev->usb_err = ret; + /* + * Make sure the buffer is zeroed to avoid uninitialized + * values. + */ + memset(gspca_dev->usb_buf, 0, USB_BUF_SZ); } } static void reg_r(struct gspca_dev *gspca_dev, diff --git a/drivers/media/usb/gspca/w996Xcf.c b/drivers/media/usb/gspca/w996Xcf.c index 16b679c2de21..a8350ee9712f 100644 --- a/drivers/media/usb/gspca/w996Xcf.c +++ b/drivers/media/usb/gspca/w996Xcf.c @@ -133,6 +133,11 @@ static int w9968cf_read_sb(struct sd *sd) } else { pr_err("Read SB reg [01] failed\n"); sd->gspca_dev.usb_err = ret; + /* + * Make sure the buffer is zeroed to avoid uninitialized + * values. 
+ */ + memset(sd->gspca_dev.usb_buf, 0, 2); } udelay(W9968CF_I2C_BUS_DELAY); diff --git a/drivers/media/usb/hdpvr/hdpvr-core.c b/drivers/media/usb/hdpvr/hdpvr-core.c index 9b9d894d29bc..b75c18a012a7 100644 --- a/drivers/media/usb/hdpvr/hdpvr-core.c +++ b/drivers/media/usb/hdpvr/hdpvr-core.c @@ -137,6 +137,7 @@ static int device_authorization(struct hdpvr_device *dev) dev->fw_ver = dev->usbc_buf[1]; + dev->usbc_buf[46] = '\0'; v4l2_info(&dev->v4l2_dev, "firmware version 0x%x dated %s\n", dev->fw_ver, &dev->usbc_buf[2]); @@ -271,6 +272,7 @@ static int hdpvr_probe(struct usb_interface *interface, #endif size_t buffer_size; int i; + int dev_num; int retval = -ENOMEM; /* allocate memory for our device state and initialize it */ @@ -368,8 +370,17 @@ static int hdpvr_probe(struct usb_interface *interface, } #endif + dev_num = atomic_inc_return(&dev_nr); + if (dev_num >= HDPVR_MAX) { + v4l2_err(&dev->v4l2_dev, + "max device number reached, device register failed\n"); + atomic_dec(&dev_nr); + retval = -ENODEV; + goto reg_fail; + } + retval = hdpvr_register_videodev(dev, &interface->dev, - video_nr[atomic_inc_return(&dev_nr)]); + video_nr[dev_num]); if (retval < 0) { v4l2_err(&dev->v4l2_dev, "registering videodev failed\n"); goto reg_fail; diff --git a/drivers/media/usb/ttusb-dec/ttusb_dec.c b/drivers/media/usb/ttusb-dec/ttusb_dec.c index 1d0afa340f47..3198f9624b7c 100644 --- a/drivers/media/usb/ttusb-dec/ttusb_dec.c +++ b/drivers/media/usb/ttusb-dec/ttusb_dec.c @@ -319,7 +319,7 @@ static int ttusb_dec_send_command(struct ttusb_dec *dec, const u8 command, dprintk("%s\n", __func__); - b = kmalloc(COMMAND_PACKET_SIZE + 4, GFP_KERNEL); + b = kzalloc(COMMAND_PACKET_SIZE + 4, GFP_KERNEL); if (!b) return -ENOMEM; diff --git a/drivers/media/v4l2-core/videobuf-core.c b/drivers/media/v4l2-core/videobuf-core.c index 7ef3e4d22bf6..939fc11cf080 100644 --- a/drivers/media/v4l2-core/videobuf-core.c +++ b/drivers/media/v4l2-core/videobuf-core.c @@ -1123,7 +1123,6 @@ __poll_t videobuf_poll_stream(struct file *file, struct videobuf_buffer *buf = NULL; __poll_t rc = 0; - poll_wait(file, &buf->done, wait); videobuf_queue_lock(q); if (q->streaming) { if (!list_empty(&q->stream)) @@ -1143,7 +1142,9 @@ __poll_t videobuf_poll_stream(struct file *file, } buf = q->read_buf; } - if (!buf) + if (buf) + poll_wait(file, &buf->done, wait); + else rc = EPOLLERR; if (0 == rc) { diff --git a/drivers/mmc/core/sdio_irq.c b/drivers/mmc/core/sdio_irq.c index 0bcc5e83bd1a..40109a615922 100644 --- a/drivers/mmc/core/sdio_irq.c +++ b/drivers/mmc/core/sdio_irq.c @@ -31,6 +31,7 @@ static int process_sdio_pending_irqs(struct mmc_host *host) { struct mmc_card *card = host->card; int i, ret, count; + bool sdio_irq_pending = host->sdio_irq_pending; unsigned char pending; struct sdio_func *func; @@ -38,13 +39,16 @@ static int process_sdio_pending_irqs(struct mmc_host *host) if (mmc_card_suspended(card)) return 0; + /* Clear the flag to indicate that we have processed the IRQ. */ + host->sdio_irq_pending = false; + /* * Optimization, if there is only 1 function interrupt registered * and we know an IRQ was signaled then call irq handler directly. * Otherwise do the full probe. 
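The run of gspca changes above (konica, nw80x, ov519, ov534, se401, sn9c20x, sonixb, sonixj, spca1528, sq930x, sunplus, vc032x, w996Xcf) all apply the same rule: when the control transfer behind reg_r() fails, the shared usb_buf is zeroed (or the read bails out) so callers never act on stale or uninitialized register data. Reduced to its core the pattern looks like this; the wrapper, request number and timeout below are hypothetical:

#include <linux/string.h>
#include <linux/usb.h>

static int example_reg_read(struct usb_device *udev, u16 reg,
			    u8 *buf, u16 len)
{
	int ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
				  0x01,	/* hypothetical request */
				  USB_DIR_IN | USB_TYPE_VENDOR |
					USB_RECIP_DEVICE,
				  0, reg, buf, len, 500 /* ms */);

	if (ret < 0)
		memset(buf, 0, len);	/* never hand back stale bytes */

	return ret;
}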
*/ func = card->sdio_single_irq; - if (func && host->sdio_irq_pending) { + if (func && sdio_irq_pending) { func->irq_handler(func); return 1; } @@ -96,7 +100,6 @@ static void sdio_run_irqs(struct mmc_host *host) { mmc_claim_host(host); if (host->sdio_irqs) { - host->sdio_irq_pending = true; process_sdio_pending_irqs(host); if (host->ops->ack_sdio_irq) host->ops->ack_sdio_irq(host); @@ -114,6 +117,7 @@ void sdio_irq_work(struct work_struct *work) void sdio_signal_irq(struct mmc_host *host) { + host->sdio_irq_pending = true; queue_delayed_work(system_wq, &host->sdio_irq_work, 0); } EXPORT_SYMBOL_GPL(sdio_signal_irq); @@ -159,7 +163,6 @@ static int sdio_irq_thread(void *_host) if (ret) break; ret = process_sdio_pending_irqs(host); - host->sdio_irq_pending = false; mmc_release_host(host); /* diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c index eea52e2c5a0c..79c55c7b4afd 100644 --- a/drivers/mmc/host/dw_mmc.c +++ b/drivers/mmc/host/dw_mmc.c @@ -3460,6 +3460,10 @@ int dw_mci_runtime_resume(struct device *dev) /* Force setup bus to guarantee available clock output */ dw_mci_setup_bus(host->slot, true); + /* Re-enable SDIO interrupts. */ + if (sdio_irq_claimed(host->slot->mmc)) + __dw_mci_enable_sdio_irq(host->slot, 1); + /* Now that slots are all setup, we can enable card detect */ dw_mci_enable_cd(host); diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c index 33f4b6387ef7..978c8ccce7e3 100644 --- a/drivers/mmc/host/mtk-sd.c +++ b/drivers/mmc/host/mtk-sd.c @@ -2421,6 +2421,9 @@ static void msdc_restore_reg(struct msdc_host *host) } else { writel(host->save_para.pad_tune, host->base + tune_reg); } + + if (sdio_irq_claimed(host->mmc)) + __msdc_enable_sdio_irq(host, 1); } static int msdc_runtime_suspend(struct device *dev) diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index a5dc5aae973e..c66e66fbaeb4 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -1849,7 +1849,9 @@ void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing) ctrl_2 |= SDHCI_CTRL_UHS_SDR104; else if (timing == MMC_TIMING_UHS_SDR12) ctrl_2 |= SDHCI_CTRL_UHS_SDR12; - else if (timing == MMC_TIMING_UHS_SDR25) + else if (timing == MMC_TIMING_SD_HS || + timing == MMC_TIMING_MMC_HS || + timing == MMC_TIMING_UHS_SDR25) ctrl_2 |= SDHCI_CTRL_UHS_SDR25; else if (timing == MMC_TIMING_UHS_SDR50) ctrl_2 |= SDHCI_CTRL_UHS_SDR50; diff --git a/drivers/mtd/nand/raw/stm32_fmc2_nand.c b/drivers/mtd/nand/raw/stm32_fmc2_nand.c index e63acc077c18..8cc852dc7d54 100644 --- a/drivers/mtd/nand/raw/stm32_fmc2_nand.c +++ b/drivers/mtd/nand/raw/stm32_fmc2_nand.c @@ -1427,21 +1427,16 @@ static void stm32_fmc2_calc_timings(struct nand_chip *chip, struct stm32_fmc2_timings *tims = &nand->timings; unsigned long hclk = clk_get_rate(fmc2->clk); unsigned long hclkp = NSEC_PER_SEC / (hclk / 1000); - int tar, tclr, thiz, twait, tset_mem, tset_att, thold_mem, thold_att; - - tar = hclkp; - if (tar < sdrt->tAR_min) - tar = sdrt->tAR_min; - tims->tar = DIV_ROUND_UP(tar, hclkp) - 1; - if (tims->tar > FMC2_PCR_TIMING_MASK) - tims->tar = FMC2_PCR_TIMING_MASK; - - tclr = hclkp; - if (tclr < sdrt->tCLR_min) - tclr = sdrt->tCLR_min; - tims->tclr = DIV_ROUND_UP(tclr, hclkp) - 1; - if (tims->tclr > FMC2_PCR_TIMING_MASK) - tims->tclr = FMC2_PCR_TIMING_MASK; + unsigned long timing, tar, tclr, thiz, twait; + unsigned long tset_mem, tset_att, thold_mem, thold_att; + + tar = max_t(unsigned long, hclkp, sdrt->tAR_min); + timing = DIV_ROUND_UP(tar, hclkp) - 1; + tims->tar = min_t(unsigned long, 
timing, FMC2_PCR_TIMING_MASK); + + tclr = max_t(unsigned long, hclkp, sdrt->tCLR_min); + timing = DIV_ROUND_UP(tclr, hclkp) - 1; + tims->tclr = min_t(unsigned long, timing, FMC2_PCR_TIMING_MASK); tims->thiz = FMC2_THIZ; thiz = (tims->thiz + 1) * hclkp; @@ -1451,18 +1446,11 @@ static void stm32_fmc2_calc_timings(struct nand_chip *chip, * tWAIT > tWP * tWAIT > tREA + tIO */ - twait = hclkp; - if (twait < sdrt->tRP_min) - twait = sdrt->tRP_min; - if (twait < sdrt->tWP_min) - twait = sdrt->tWP_min; - if (twait < sdrt->tREA_max + FMC2_TIO) - twait = sdrt->tREA_max + FMC2_TIO; - tims->twait = DIV_ROUND_UP(twait, hclkp); - if (tims->twait == 0) - tims->twait = 1; - else if (tims->twait > FMC2_PMEM_PATT_TIMING_MASK) - tims->twait = FMC2_PMEM_PATT_TIMING_MASK; + twait = max_t(unsigned long, hclkp, sdrt->tRP_min); + twait = max_t(unsigned long, twait, sdrt->tWP_min); + twait = max_t(unsigned long, twait, sdrt->tREA_max + FMC2_TIO); + timing = DIV_ROUND_UP(twait, hclkp); + tims->twait = clamp_val(timing, 1, FMC2_PMEM_PATT_TIMING_MASK); /* * tSETUP_MEM > tCS - tWAIT @@ -1477,20 +1465,15 @@ static void stm32_fmc2_calc_timings(struct nand_chip *chip, if (twait > thiz && (sdrt->tDS_min > twait - thiz) && (tset_mem < sdrt->tDS_min - (twait - thiz))) tset_mem = sdrt->tDS_min - (twait - thiz); - tims->tset_mem = DIV_ROUND_UP(tset_mem, hclkp); - if (tims->tset_mem == 0) - tims->tset_mem = 1; - else if (tims->tset_mem > FMC2_PMEM_PATT_TIMING_MASK) - tims->tset_mem = FMC2_PMEM_PATT_TIMING_MASK; + timing = DIV_ROUND_UP(tset_mem, hclkp); + tims->tset_mem = clamp_val(timing, 1, FMC2_PMEM_PATT_TIMING_MASK); /* * tHOLD_MEM > tCH * tHOLD_MEM > tREH - tSETUP_MEM * tHOLD_MEM > max(tRC, tWC) - (tSETUP_MEM + tWAIT) */ - thold_mem = hclkp; - if (thold_mem < sdrt->tCH_min) - thold_mem = sdrt->tCH_min; + thold_mem = max_t(unsigned long, hclkp, sdrt->tCH_min); if (sdrt->tREH_min > tset_mem && (thold_mem < sdrt->tREH_min - tset_mem)) thold_mem = sdrt->tREH_min - tset_mem; @@ -1500,11 +1483,8 @@ static void stm32_fmc2_calc_timings(struct nand_chip *chip, if ((sdrt->tWC_min > tset_mem + twait) && (thold_mem < sdrt->tWC_min - (tset_mem + twait))) thold_mem = sdrt->tWC_min - (tset_mem + twait); - tims->thold_mem = DIV_ROUND_UP(thold_mem, hclkp); - if (tims->thold_mem == 0) - tims->thold_mem = 1; - else if (tims->thold_mem > FMC2_PMEM_PATT_TIMING_MASK) - tims->thold_mem = FMC2_PMEM_PATT_TIMING_MASK; + timing = DIV_ROUND_UP(thold_mem, hclkp); + tims->thold_mem = clamp_val(timing, 1, FMC2_PMEM_PATT_TIMING_MASK); /* * tSETUP_ATT > tCS - tWAIT @@ -1526,11 +1506,8 @@ static void stm32_fmc2_calc_timings(struct nand_chip *chip, if (twait > thiz && (sdrt->tDS_min > twait - thiz) && (tset_att < sdrt->tDS_min - (twait - thiz))) tset_att = sdrt->tDS_min - (twait - thiz); - tims->tset_att = DIV_ROUND_UP(tset_att, hclkp); - if (tims->tset_att == 0) - tims->tset_att = 1; - else if (tims->tset_att > FMC2_PMEM_PATT_TIMING_MASK) - tims->tset_att = FMC2_PMEM_PATT_TIMING_MASK; + timing = DIV_ROUND_UP(tset_att, hclkp); + tims->tset_att = clamp_val(timing, 1, FMC2_PMEM_PATT_TIMING_MASK); /* * tHOLD_ATT > tALH @@ -1545,17 +1522,11 @@ static void stm32_fmc2_calc_timings(struct nand_chip *chip, * tHOLD_ATT > tRC - (tSETUP_ATT + tWAIT) * tHOLD_ATT > tWC - (tSETUP_ATT + tWAIT) */ - thold_att = hclkp; - if (thold_att < sdrt->tALH_min) - thold_att = sdrt->tALH_min; - if (thold_att < sdrt->tCH_min) - thold_att = sdrt->tCH_min; - if (thold_att < sdrt->tCLH_min) - thold_att = sdrt->tCLH_min; - if (thold_att < sdrt->tCOH_min) - thold_att = sdrt->tCOH_min; - if 
(thold_att < sdrt->tDH_min) - thold_att = sdrt->tDH_min; + thold_att = max_t(unsigned long, hclkp, sdrt->tALH_min); + thold_att = max_t(unsigned long, thold_att, sdrt->tCH_min); + thold_att = max_t(unsigned long, thold_att, sdrt->tCLH_min); + thold_att = max_t(unsigned long, thold_att, sdrt->tCOH_min); + thold_att = max_t(unsigned long, thold_att, sdrt->tDH_min); if ((sdrt->tWB_max + FMC2_TIO + FMC2_TSYNC > tset_mem) && (thold_att < sdrt->tWB_max + FMC2_TIO + FMC2_TSYNC - tset_mem)) thold_att = sdrt->tWB_max + FMC2_TIO + FMC2_TSYNC - tset_mem; @@ -1574,11 +1545,8 @@ static void stm32_fmc2_calc_timings(struct nand_chip *chip, if ((sdrt->tWC_min > tset_att + twait) && (thold_att < sdrt->tWC_min - (tset_att + twait))) thold_att = sdrt->tWC_min - (tset_att + twait); - tims->thold_att = DIV_ROUND_UP(thold_att, hclkp); - if (tims->thold_att == 0) - tims->thold_att = 1; - else if (tims->thold_att > FMC2_PMEM_PATT_TIMING_MASK) - tims->thold_att = FMC2_PMEM_PATT_TIMING_MASK; + timing = DIV_ROUND_UP(thold_att, hclkp); + tims->thold_att = clamp_val(timing, 1, FMC2_PMEM_PATT_TIMING_MASK); } static int stm32_fmc2_setup_interface(struct nand_chip *chip, int chipnr, diff --git a/drivers/net/arcnet/arcnet.c b/drivers/net/arcnet/arcnet.c index 8459115d9d4e..553776cc1d29 100644 --- a/drivers/net/arcnet/arcnet.c +++ b/drivers/net/arcnet/arcnet.c @@ -1063,31 +1063,34 @@ EXPORT_SYMBOL(arcnet_interrupt); static void arcnet_rx(struct net_device *dev, int bufnum) { struct arcnet_local *lp = netdev_priv(dev); - struct archdr pkt; + union { + struct archdr pkt; + char buf[512]; + } rxdata; struct arc_rfc1201 *soft; int length, ofs; - soft = &pkt.soft.rfc1201; + soft = &rxdata.pkt.soft.rfc1201; - lp->hw.copy_from_card(dev, bufnum, 0, &pkt, ARC_HDR_SIZE); - if (pkt.hard.offset[0]) { - ofs = pkt.hard.offset[0]; + lp->hw.copy_from_card(dev, bufnum, 0, &rxdata.pkt, ARC_HDR_SIZE); + if (rxdata.pkt.hard.offset[0]) { + ofs = rxdata.pkt.hard.offset[0]; length = 256 - ofs; } else { - ofs = pkt.hard.offset[1]; + ofs = rxdata.pkt.hard.offset[1]; length = 512 - ofs; } /* get the full header, if possible */ - if (sizeof(pkt.soft) <= length) { - lp->hw.copy_from_card(dev, bufnum, ofs, soft, sizeof(pkt.soft)); + if (sizeof(rxdata.pkt.soft) <= length) { + lp->hw.copy_from_card(dev, bufnum, ofs, soft, sizeof(rxdata.pkt.soft)); } else { - memset(&pkt.soft, 0, sizeof(pkt.soft)); + memset(&rxdata.pkt.soft, 0, sizeof(rxdata.pkt.soft)); lp->hw.copy_from_card(dev, bufnum, ofs, soft, length); } arc_printk(D_DURING, dev, "Buffer #%d: received packet from %02Xh to %02Xh (%d+4 bytes)\n", - bufnum, pkt.hard.source, pkt.hard.dest, length); + bufnum, rxdata.pkt.hard.source, rxdata.pkt.hard.dest, length); dev->stats.rx_packets++; dev->stats.rx_bytes += length + ARC_HDR_SIZE; @@ -1096,13 +1099,13 @@ static void arcnet_rx(struct net_device *dev, int bufnum) if (arc_proto_map[soft->proto]->is_ip) { if (BUGLVL(D_PROTO)) { struct ArcProto - *oldp = arc_proto_map[lp->default_proto[pkt.hard.source]], + *oldp = arc_proto_map[lp->default_proto[rxdata.pkt.hard.source]], *newp = arc_proto_map[soft->proto]; if (oldp != newp) { arc_printk(D_PROTO, dev, "got protocol %02Xh; encap for host %02Xh is now '%c' (was '%c')\n", - soft->proto, pkt.hard.source, + soft->proto, rxdata.pkt.hard.source, newp->suffix, oldp->suffix); } } @@ -1111,10 +1114,10 @@ static void arcnet_rx(struct net_device *dev, int bufnum) lp->default_proto[0] = soft->proto; /* in striking contrast, the following isn't a hack. 
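The stm32_fmc2 timing rework above is purely mechanical: each open-coded chain of "if (t < min) t = min; ... if (reg > MASK) reg = MASK;" becomes a max_t() ladder followed by clamp_val() on the register value, with identical results. The helper below condenses one such conversion; the parameter names are illustrative:

#include <linux/kernel.h>

static unsigned long example_timing_cycles(unsigned long hclkp,
					   unsigned long t_min,
					   unsigned long reg_max)
{
	unsigned long t = max_t(unsigned long, hclkp, t_min);

	/* cycles = ceil(t / hclkp), bounded to the register field */
	return clamp_val(DIV_ROUND_UP(t, hclkp), 1, reg_max);
}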
*/ - lp->default_proto[pkt.hard.source] = soft->proto; + lp->default_proto[rxdata.pkt.hard.source] = soft->proto; } /* call the protocol-specific receiver. */ - arc_proto_map[soft->proto]->rx(dev, bufnum, &pkt, length); + arc_proto_map[soft->proto]->rx(dev, bufnum, &rxdata.pkt, length); } static void null_rx(struct net_device *dev, int bufnum, diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index 395b05701480..a1fab77b2096 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c @@ -1429,6 +1429,16 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) else phy_reg |= 0xFA; e1e_wphy_locked(hw, I217_PLL_CLOCK_GATE_REG, phy_reg); + + if (speed == SPEED_1000) { + hw->phy.ops.read_reg_locked(hw, HV_PM_CTRL, + &phy_reg); + + phy_reg |= HV_PM_CTRL_K1_CLK_REQ; + + hw->phy.ops.write_reg_locked(hw, HV_PM_CTRL, + phy_reg); + } } hw->phy.ops.release(hw); diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h index eb09c755fa17..1502895eb45d 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.h +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h @@ -210,7 +210,7 @@ /* PHY Power Management Control */ #define HV_PM_CTRL PHY_REG(770, 17) -#define HV_PM_CTRL_PLL_STOP_IN_K1_GIGA 0x100 +#define HV_PM_CTRL_K1_CLK_REQ 0x200 #define HV_PM_CTRL_K1_ENABLE 0x4000 #define I217_PLL_CLOCK_GATE_REG PHY_REG(772, 28) diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 9ebbe3da61bb..d22491ce73e6 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -2583,6 +2583,10 @@ static void i40e_sync_filters_subtask(struct i40e_pf *pf) return; if (!test_and_clear_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state)) return; + if (test_and_set_bit(__I40E_VF_DISABLE, pf->state)) { + set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state); + return; + } for (v = 0; v < pf->num_alloc_vsi; v++) { if (pf->vsi[v] && @@ -2597,6 +2601,7 @@ static void i40e_sync_filters_subtask(struct i40e_pf *pf) } } } + clear_bit(__I40E_VF_DISABLE, pf->state); } /** diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c index 9ac854c2b371..697321898e84 100644 --- a/drivers/net/ethernet/marvell/skge.c +++ b/drivers/net/ethernet/marvell/skge.c @@ -3108,7 +3108,7 @@ static struct sk_buff *skge_rx_get(struct net_device *dev, skb_put(skb, len); if (dev->features & NETIF_F_RXCSUM) { - skb->csum = csum; + skb->csum = le16_to_cpu(csum); skb->ip_summed = CHECKSUM_COMPLETE; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c index 94304abc49e9..39e90b873319 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c @@ -399,10 +399,10 @@ add_ethtool_flow_rule(struct mlx5e_priv *priv, struct mlx5_flow_table *ft, struct ethtool_rx_flow_spec *fs) { + struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND }; struct mlx5_flow_destination *dst = NULL; - struct mlx5_flow_act flow_act = {0}; - struct mlx5_flow_spec *spec; struct mlx5_flow_handle *rule; + struct mlx5_flow_spec *spec; int err = 0; spec = kvzalloc(sizeof(*spec), GFP_KERNEL); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 00b2d4a86159..98be5fe33674 100644 --- 
a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -1369,46 +1369,63 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv, return err; } - if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) { - struct flow_match_ipv4_addrs match; + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL)) { + struct flow_match_control match; + u16 addr_type; - flow_rule_match_enc_ipv4_addrs(rule, &match); - MLX5_SET(fte_match_set_lyr_2_4, headers_c, - src_ipv4_src_ipv6.ipv4_layout.ipv4, - ntohl(match.mask->src)); - MLX5_SET(fte_match_set_lyr_2_4, headers_v, - src_ipv4_src_ipv6.ipv4_layout.ipv4, - ntohl(match.key->src)); - - MLX5_SET(fte_match_set_lyr_2_4, headers_c, - dst_ipv4_dst_ipv6.ipv4_layout.ipv4, - ntohl(match.mask->dst)); - MLX5_SET(fte_match_set_lyr_2_4, headers_v, - dst_ipv4_dst_ipv6.ipv4_layout.ipv4, - ntohl(match.key->dst)); - - MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype); - MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IP); - } else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS)) { - struct flow_match_ipv6_addrs match; + flow_rule_match_enc_control(rule, &match); + addr_type = match.key->addr_type; - flow_rule_match_enc_ipv6_addrs(rule, &match); - memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, - src_ipv4_src_ipv6.ipv6_layout.ipv6), - &match.mask->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6)); - memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, - src_ipv4_src_ipv6.ipv6_layout.ipv6), - &match.key->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6)); + /* For tunnel addr_type used same key id`s as for non-tunnel */ + if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { + struct flow_match_ipv4_addrs match; - memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, - dst_ipv4_dst_ipv6.ipv6_layout.ipv6), - &match.mask->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6)); - memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, - dst_ipv4_dst_ipv6.ipv6_layout.ipv6), - &match.key->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6)); + flow_rule_match_enc_ipv4_addrs(rule, &match); + MLX5_SET(fte_match_set_lyr_2_4, headers_c, + src_ipv4_src_ipv6.ipv4_layout.ipv4, + ntohl(match.mask->src)); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, + src_ipv4_src_ipv6.ipv4_layout.ipv4, + ntohl(match.key->src)); - MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype); - MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IPV6); + MLX5_SET(fte_match_set_lyr_2_4, headers_c, + dst_ipv4_dst_ipv6.ipv4_layout.ipv4, + ntohl(match.mask->dst)); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, + dst_ipv4_dst_ipv6.ipv4_layout.ipv4, + ntohl(match.key->dst)); + + MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, + ethertype); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, + ETH_P_IP); + } else if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) { + struct flow_match_ipv6_addrs match; + + flow_rule_match_enc_ipv6_addrs(rule, &match); + memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, + src_ipv4_src_ipv6.ipv6_layout.ipv6), + &match.mask->src, MLX5_FLD_SZ_BYTES(ipv6_layout, + ipv6)); + memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, + src_ipv4_src_ipv6.ipv6_layout.ipv6), + &match.key->src, MLX5_FLD_SZ_BYTES(ipv6_layout, + ipv6)); + + memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, + dst_ipv4_dst_ipv6.ipv6_layout.ipv6), + &match.mask->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, + ipv6)); + memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, + dst_ipv4_dst_ipv6.ipv6_layout.ipv6), + &match.key->dst, 
MLX5_FLD_SZ_BYTES(ipv6_layout, + ipv6)); + + MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, + ethertype); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, + ETH_P_IPV6); + } } if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IP)) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index b15b27a497fc..fda4964c5cf4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -1554,6 +1554,7 @@ static const struct pci_device_id mlx5_core_pci_table[] = { { PCI_VDEVICE(MELLANOX, 0x101e), MLX5_PCI_DEV_IS_VF}, /* ConnectX Family mlx5Gen Virtual Function */ { PCI_VDEVICE(MELLANOX, 0xa2d2) }, /* BlueField integrated ConnectX-5 network controller */ { PCI_VDEVICE(MELLANOX, 0xa2d3), MLX5_PCI_DEV_IS_VF}, /* BlueField integrated ConnectX-5 network controller VF */ + { PCI_VDEVICE(MELLANOX, 0xa2d6) }, /* BlueField-2 integrated ConnectX-6 Dx network controller */ { 0, } }; diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c index eb846133943b..acb02e1513f2 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.c +++ b/drivers/net/ethernet/netronome/nfp/flower/main.c @@ -400,6 +400,7 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app, repr_priv = kzalloc(sizeof(*repr_priv), GFP_KERNEL); if (!repr_priv) { err = -ENOMEM; + nfp_repr_free(repr); goto err_reprs_clean; } @@ -413,6 +414,7 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app, port = nfp_port_alloc(app, port_type, repr); if (IS_ERR(port)) { err = PTR_ERR(port); + kfree(repr_priv); nfp_repr_free(repr); goto err_reprs_clean; } @@ -433,6 +435,7 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app, err = nfp_repr_init(app, repr, port_id, port, priv->nn->dp.netdev); if (err) { + kfree(repr_priv); nfp_port_free(port); nfp_repr_free(repr); goto err_reprs_clean; @@ -515,6 +518,7 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv) repr_priv = kzalloc(sizeof(*repr_priv), GFP_KERNEL); if (!repr_priv) { err = -ENOMEM; + nfp_repr_free(repr); goto err_reprs_clean; } @@ -525,11 +529,13 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv) port = nfp_port_alloc(app, NFP_PORT_PHYS_PORT, repr); if (IS_ERR(port)) { err = PTR_ERR(port); + kfree(repr_priv); nfp_repr_free(repr); goto err_reprs_clean; } err = nfp_port_init_phy_port(app->pf, app, port, i); if (err) { + kfree(repr_priv); nfp_port_free(port); nfp_repr_free(repr); goto err_reprs_clean; @@ -542,6 +548,7 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv) err = nfp_repr_init(app, repr, cmsg_port_id, port, priv->nn->dp.netdev); if (err) { + kfree(repr_priv); nfp_port_free(port); nfp_repr_free(repr); goto err_reprs_clean; diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c index f7e11f1b0426..b0c8be127bee 100644 --- a/drivers/net/ethernet/nxp/lpc_eth.c +++ b/drivers/net/ethernet/nxp/lpc_eth.c @@ -1344,13 +1344,14 @@ static int lpc_eth_drv_probe(struct platform_device *pdev) pldat->dma_buff_base_p = dma_handle; netdev_dbg(ndev, "IO address space :%pR\n", res); - netdev_dbg(ndev, "IO address size :%d\n", resource_size(res)); + netdev_dbg(ndev, "IO address size :%zd\n", + (size_t)resource_size(res)); netdev_dbg(ndev, "IO address (mapped) :0x%p\n", pldat->net_base); netdev_dbg(ndev, "IRQ number :%d\n", ndev->irq); - netdev_dbg(ndev, "DMA buffer size :%d\n", pldat->dma_buff_size); - netdev_dbg(ndev, "DMA buffer P 
address :0x%08x\n", - pldat->dma_buff_base_p); + netdev_dbg(ndev, "DMA buffer size :%zd\n", pldat->dma_buff_size); + netdev_dbg(ndev, "DMA buffer P address :%pad\n", + &pldat->dma_buff_base_p); netdev_dbg(ndev, "DMA buffer V address :0x%p\n", pldat->dma_buff_base_v); @@ -1397,8 +1398,8 @@ static int lpc_eth_drv_probe(struct platform_device *pdev) if (ret) goto err_out_unregister_netdev; - netdev_info(ndev, "LPC mac at 0x%08x irq %d\n", - res->start, ndev->irq); + netdev_info(ndev, "LPC mac at 0x%08lx irq %d\n", + (unsigned long)res->start, ndev->irq); device_init_wakeup(dev, 1); device_set_wakeup_enable(dev, 0); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index b19ab09cb18f..5c4408bdc843 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -1532,13 +1532,15 @@ static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv) for (queue = 0; queue < rx_count; queue++) { struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; struct page_pool_params pp_params = { 0 }; + unsigned int num_pages; rx_q->queue_index = queue; rx_q->priv_data = priv; pp_params.flags = PP_FLAG_DMA_MAP; pp_params.pool_size = DMA_RX_SIZE; - pp_params.order = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE); + num_pages = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE); + pp_params.order = ilog2(num_pages); pp_params.nid = dev_to_node(priv->device); pp_params.dev = priv->device; pp_params.dma_dir = DMA_FROM_DEVICE; diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c index 8f46aa1ddec0..cb7637364b40 100644 --- a/drivers/net/macsec.c +++ b/drivers/net/macsec.c @@ -1235,6 +1235,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb) macsec_rxsa_put(rx_sa); macsec_rxsc_put(rx_sc); + skb_orphan(skb); ret = gro_cells_receive(&macsec->gro_cells, skb); if (ret == NET_RX_SUCCESS) count_rx(dev, skb->len); diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index 3c8186f269f9..2fea5541c35a 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c @@ -763,6 +763,8 @@ static int ksz9031_get_features(struct phy_device *phydev) * Whenever the device's Asymmetric Pause capability is set to 1, * link-up may fail after a link-up to link-down transition. * + * The Errata Sheet is for ksz9031, but ksz9021 has the same issue + * * Workaround: * Do not enable the Asymmetric Pause capability bit. */ @@ -1076,6 +1078,7 @@ static struct phy_driver ksphy_driver[] = { /* PHY_GBIT_FEATURES */ .driver_data = &ksz9021_type, .probe = kszphy_probe, + .get_features = ksz9031_get_features, .config_init = ksz9021_config_init, .ack_interrupt = kszphy_ack_interrupt, .config_intr = kszphy_config_intr, diff --git a/drivers/net/phy/national.c b/drivers/net/phy/national.c index a221dd552c3c..a5bf0874c7d8 100644 --- a/drivers/net/phy/national.c +++ b/drivers/net/phy/national.c @@ -105,14 +105,17 @@ static void ns_giga_speed_fallback(struct phy_device *phydev, int mode) static void ns_10_base_t_hdx_loopack(struct phy_device *phydev, int disable) { + u16 lb_dis = BIT(1); + if (disable) - ns_exp_write(phydev, 0x1c0, ns_exp_read(phydev, 0x1c0) | 1); + ns_exp_write(phydev, 0x1c0, + ns_exp_read(phydev, 0x1c0) | lb_dis); else ns_exp_write(phydev, 0x1c0, - ns_exp_read(phydev, 0x1c0) & 0xfffe); + ns_exp_read(phydev, 0x1c0) & ~lb_dis); pr_debug("10BASE-T HDX loopback %s\n", - (ns_exp_read(phydev, 0x1c0) & 0x0001) ? "off" : "on"); + (ns_exp_read(phydev, 0x1c0) & lb_dis) ? 
"off" : "on"); } static int ns_config_init(struct phy_device *phydev) diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c index a30e41a56085..9a1b006904a7 100644 --- a/drivers/net/ppp/ppp_generic.c +++ b/drivers/net/ppp/ppp_generic.c @@ -1415,6 +1415,8 @@ static void __ppp_xmit_process(struct ppp *ppp, struct sk_buff *skb) netif_wake_queue(ppp->dev); else netif_stop_queue(ppp->dev); + } else { + kfree_skb(skb); } ppp_xmit_unlock(ppp); } diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index 50c05d0f44cb..00cab3f43a4c 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c @@ -681,8 +681,12 @@ cdc_ncm_find_endpoints(struct usbnet *dev, struct usb_interface *intf) u8 ep; for (ep = 0; ep < intf->cur_altsetting->desc.bNumEndpoints; ep++) { - e = intf->cur_altsetting->endpoint + ep; + + /* ignore endpoints which cannot transfer data */ + if (!usb_endpoint_maxp(&e->desc)) + continue; + switch (e->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) { case USB_ENDPOINT_XFER_INT: if (usb_endpoint_dir_in(&e->desc)) { diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index 72514c46b478..ef1d667b0108 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -100,6 +100,11 @@ int usbnet_get_endpoints(struct usbnet *dev, struct usb_interface *intf) int intr = 0; e = alt->endpoint + ep; + + /* ignore endpoints which cannot transfer data */ + if (!usb_endpoint_maxp(&e->desc)) + continue; + switch (e->desc.bmAttributes) { case USB_ENDPOINT_XFER_INT: if (!usb_endpoint_dir_in(&e->desc)) @@ -339,6 +344,8 @@ void usbnet_update_max_qlen(struct usbnet *dev) { enum usb_device_speed speed = dev->udev->speed; + if (!dev->rx_urb_size || !dev->hard_mtu) + goto insanity; switch (speed) { case USB_SPEED_HIGH: dev->rx_qlen = MAX_QUEUE_MEMORY / dev->rx_urb_size; @@ -355,6 +362,7 @@ void usbnet_update_max_qlen(struct usbnet *dev) dev->tx_qlen = 5 * MAX_QUEUE_MEMORY / dev->hard_mtu; break; default: +insanity: dev->rx_qlen = dev->tx_qlen = 4; } } diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index 6e84328bdd40..a4b38a980c3c 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c @@ -1154,7 +1154,8 @@ static int vrf_fib_rule(const struct net_device *dev, __u8 family, bool add_it) struct sk_buff *skb; int err; - if (family == AF_INET6 && !ipv6_mod_enabled()) + if ((family == AF_INET6 || family == RTNL_FAMILY_IP6MR) && + !ipv6_mod_enabled()) return 0; skb = nlmsg_new(vrf_fib_rule_nl_size(), GFP_KERNEL); diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c index 2985bb17decd..4d5d10c01064 100644 --- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c +++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c @@ -841,7 +841,7 @@ static int ath10k_wmi_tlv_op_pull_ch_info_ev(struct ath10k *ar, struct wmi_ch_info_ev_arg *arg) { const void **tb; - const struct wmi_chan_info_event *ev; + const struct wmi_tlv_chan_info_event *ev; int ret; tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC); diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.h b/drivers/net/wireless/ath/ath10k/wmi-tlv.h index d691f06e58f2..649b229a41e9 100644 --- a/drivers/net/wireless/ath/ath10k/wmi-tlv.h +++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.h @@ -1615,6 +1615,22 @@ struct chan_info_params { #define WMI_TLV_FLAG_MGMT_BUNDLE_TX_COMPL BIT(9) +struct wmi_tlv_chan_info_event { + __le32 err_code; + __le32 freq; + __le32 cmd_flags; + __le32 noise_floor; + __le32 rx_clear_count; + __le32 cycle_count; + __le32 chan_tx_pwr_range; + __le32 
chan_tx_pwr_tp; + __le32 rx_frame_count; + __le32 my_bss_rx_cycle_count; + __le32 rx_11b_mode_data_duration; + __le32 tx_frame_cnt; + __le32 mac_clk_mhz; +} __packed; + struct wmi_tlv_mgmt_tx_compl_ev { __le32 desc_id; __le32 status; diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h index 838768c98adc..e80dbe7e8f4c 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.h +++ b/drivers/net/wireless/ath/ath10k/wmi.h @@ -6533,14 +6533,6 @@ struct wmi_chan_info_event { __le32 noise_floor; __le32 rx_clear_count; __le32 cycle_count; - __le32 chan_tx_pwr_range; - __le32 chan_tx_pwr_tp; - __le32 rx_frame_count; - __le32 my_bss_rx_cycle_count; - __le32 rx_11b_mode_data_duration; - __le32 tx_frame_cnt; - __le32 mac_clk_mhz; - } __packed; struct wmi_10_4_chan_info_event { diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index 5de54d1559dd..8b0b464a1f21 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -887,11 +887,13 @@ static bool iwl_mvm_sar_geo_support(struct iwl_mvm *mvm) * firmware versions. Unfortunately, we don't have a TLV API * flag to rely on, so rely on the major version which is in * the first byte of ucode_ver. This was implemented - * initially on version 38 and then backported to 36, 29 and - * 17. + * initially on version 38 and then backported to 29 and 17. + * The intention was to have it in 36 as well, but not all + * 8000 family got this feature enabled. The 8000 family is + * the only one using version 36, so skip this version + * entirely. */ return IWL_UCODE_SERIAL(mvm->fw->ucode_ver) >= 38 || - IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 36 || IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 29 || IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 17; } diff --git a/drivers/net/wireless/marvell/libertas/if_usb.c b/drivers/net/wireless/marvell/libertas/if_usb.c index afac2481909b..20436a289d5c 100644 --- a/drivers/net/wireless/marvell/libertas/if_usb.c +++ b/drivers/net/wireless/marvell/libertas/if_usb.c @@ -50,7 +50,8 @@ static const struct lbs_fw_table fw_table[] = { { MODEL_8388, "libertas/usb8388_v5.bin", NULL }, { MODEL_8388, "libertas/usb8388.bin", NULL }, { MODEL_8388, "usb8388.bin", NULL }, - { MODEL_8682, "libertas/usb8682.bin", NULL } + { MODEL_8682, "libertas/usb8682.bin", NULL }, + { 0, NULL, NULL } }; static const struct usb_device_id if_usb_table[] = { diff --git a/drivers/net/wireless/mediatek/mt76/mmio.c b/drivers/net/wireless/mediatek/mt76/mmio.c index 38368d19aa6f..83c96a47914f 100644 --- a/drivers/net/wireless/mediatek/mt76/mmio.c +++ b/drivers/net/wireless/mediatek/mt76/mmio.c @@ -43,7 +43,7 @@ static u32 mt76_mmio_rmw(struct mt76_dev *dev, u32 offset, u32 mask, u32 val) static void mt76_mmio_copy(struct mt76_dev *dev, u32 offset, const void *data, int len) { - __iowrite32_copy(dev->mmio.regs + offset, data, len >> 2); + __iowrite32_copy(dev->mmio.regs + offset, data, DIV_ROUND_UP(len, 4)); } static int mt76_mmio_wr_rp(struct mt76_dev *dev, u32 base, diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c index cdad2c8dc297..b941fa4a1bcd 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c @@ -257,9 +257,8 @@ static int mt7615_driver_own(struct mt7615_dev *dev) static int mt7615_load_patch(struct mt7615_dev *dev) { - const struct firmware *fw; const struct mt7615_patch_hdr *hdr; - const char *firmware = MT7615_ROM_PATCH; 
+ const struct firmware *fw = NULL; int len, ret, sem; sem = mt7615_mcu_patch_sem_ctrl(dev, 1); @@ -273,9 +272,9 @@ static int mt7615_load_patch(struct mt7615_dev *dev) return -EAGAIN; } - ret = request_firmware(&fw, firmware, dev->mt76.dev); + ret = request_firmware(&fw, MT7615_ROM_PATCH, dev->mt76.dev); if (ret) - return ret; + goto out; if (!fw || !fw->data || fw->size < sizeof(*hdr)) { dev_err(dev->mt76.dev, "Invalid firmware\n"); @@ -339,14 +338,12 @@ static u32 gen_dl_mode(u8 feature_set, bool is_cr4) static int mt7615_load_ram(struct mt7615_dev *dev) { - const struct firmware *fw; const struct mt7615_fw_trailer *hdr; - const char *n9_firmware = MT7615_FIRMWARE_N9; - const char *cr4_firmware = MT7615_FIRMWARE_CR4; u32 n9_ilm_addr, offset; int i, ret; + const struct firmware *fw; - ret = request_firmware(&fw, n9_firmware, dev->mt76.dev); + ret = request_firmware(&fw, MT7615_FIRMWARE_N9, dev->mt76.dev); if (ret) return ret; @@ -394,7 +391,7 @@ static int mt7615_load_ram(struct mt7615_dev *dev) release_firmware(fw); - ret = request_firmware(&fw, cr4_firmware, dev->mt76.dev); + ret = request_firmware(&fw, MT7615_FIRMWARE_CR4, dev->mt76.dev); if (ret) return ret; diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h index f02ffcffe637..f83615dbe1c5 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h @@ -25,9 +25,9 @@ #define MT7615_RX_RING_SIZE 1024 #define MT7615_RX_MCU_RING_SIZE 512 -#define MT7615_FIRMWARE_CR4 "mt7615_cr4.bin" -#define MT7615_FIRMWARE_N9 "mt7615_n9.bin" -#define MT7615_ROM_PATCH "mt7615_rom_patch.bin" +#define MT7615_FIRMWARE_CR4 "mediatek/mt7615_cr4.bin" +#define MT7615_FIRMWARE_N9 "mediatek/mt7615_n9.bin" +#define MT7615_ROM_PATCH "mediatek/mt7615_rom_patch.bin" #define MT7615_EEPROM_SIZE 1024 #define MT7615_TOKEN_SIZE 4096 diff --git a/drivers/net/wireless/mediatek/mt76/usb.c b/drivers/net/wireless/mediatek/mt76/usb.c index fb87ce7fbdf6..185eea83aada 100644 --- a/drivers/net/wireless/mediatek/mt76/usb.c +++ b/drivers/net/wireless/mediatek/mt76/usb.c @@ -164,7 +164,7 @@ static void mt76u_copy(struct mt76_dev *dev, u32 offset, int i, ret; mutex_lock(&usb->usb_ctrl_mtx); - for (i = 0; i < (len / 4); i++) { + for (i = 0; i < DIV_ROUND_UP(len, 4); i++) { put_unaligned_le32(val[i], usb->data); ret = __mt76u_vendor_request(dev, MT_VEND_MULTI_WRITE, USB_DIR_OUT | USB_TYPE_VENDOR, diff --git a/drivers/net/wireless/realtek/rtw88/pci.c b/drivers/net/wireless/realtek/rtw88/pci.c index 353871c27779..23dd06afef3d 100644 --- a/drivers/net/wireless/realtek/rtw88/pci.c +++ b/drivers/net/wireless/realtek/rtw88/pci.c @@ -206,6 +206,23 @@ static int rtw_pci_reset_rx_desc(struct rtw_dev *rtwdev, struct sk_buff *skb, return 0; } +static void rtw_pci_sync_rx_desc_device(struct rtw_dev *rtwdev, dma_addr_t dma, + struct rtw_pci_rx_ring *rx_ring, + u32 idx, u32 desc_sz) +{ + struct device *dev = rtwdev->dev; + struct rtw_pci_rx_buffer_desc *buf_desc; + int buf_sz = RTK_PCI_RX_BUF_SIZE; + + dma_sync_single_for_device(dev, dma, buf_sz, DMA_FROM_DEVICE); + + buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head + + idx * desc_sz); + memset(buf_desc, 0, sizeof(*buf_desc)); + buf_desc->buf_size = cpu_to_le16(RTK_PCI_RX_BUF_SIZE); + buf_desc->dma = cpu_to_le32(dma); +} + static int rtw_pci_init_rx_ring(struct rtw_dev *rtwdev, struct rtw_pci_rx_ring *rx_ring, u8 desc_size, u32 len) @@ -765,6 +782,7 @@ static void rtw_pci_rx_isr(struct rtw_dev *rtwdev, struct 
rtw_pci *rtwpci, u32 pkt_offset; u32 pkt_desc_sz = chip->rx_pkt_desc_sz; u32 buf_desc_sz = chip->rx_buf_desc_sz; + u32 new_len; u8 *rx_desc; dma_addr_t dma; @@ -783,8 +801,8 @@ static void rtw_pci_rx_isr(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci, rtw_pci_dma_check(rtwdev, ring, cur_rp); skb = ring->buf[cur_rp]; dma = *((dma_addr_t *)skb->cb); - pci_unmap_single(rtwpci->pdev, dma, RTK_PCI_RX_BUF_SIZE, - PCI_DMA_FROMDEVICE); + dma_sync_single_for_cpu(rtwdev->dev, dma, RTK_PCI_RX_BUF_SIZE, + DMA_FROM_DEVICE); rx_desc = skb->data; chip->ops->query_rx_desc(rtwdev, rx_desc, &pkt_stat, &rx_status); @@ -792,40 +810,35 @@ static void rtw_pci_rx_isr(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci, pkt_offset = pkt_desc_sz + pkt_stat.drv_info_sz + pkt_stat.shift; - if (pkt_stat.is_c2h) { - /* keep rx_desc, halmac needs it */ - skb_put(skb, pkt_stat.pkt_len + pkt_offset); + /* allocate a new skb for this frame, + * discard the frame if none available + */ + new_len = pkt_stat.pkt_len + pkt_offset; + new = dev_alloc_skb(new_len); + if (WARN_ONCE(!new, "rx routine starvation\n")) + goto next_rp; - /* pass offset for further operation */ - *((u32 *)skb->cb) = pkt_offset; - skb_queue_tail(&rtwdev->c2h_queue, skb); + /* put the DMA data including rx_desc from phy to new skb */ + skb_put_data(new, skb->data, new_len); + + if (pkt_stat.is_c2h) { + /* pass rx_desc & offset for further operation */ + *((u32 *)new->cb) = pkt_offset; + skb_queue_tail(&rtwdev->c2h_queue, new); ieee80211_queue_work(rtwdev->hw, &rtwdev->c2h_work); } else { - /* remove rx_desc, maybe use skb_pull? */ - skb_put(skb, pkt_stat.pkt_len); - skb_reserve(skb, pkt_offset); - - /* alloc a smaller skb to mac80211 */ - new = dev_alloc_skb(pkt_stat.pkt_len); - if (!new) { - new = skb; - } else { - skb_put_data(new, skb->data, skb->len); - dev_kfree_skb_any(skb); - } - /* TODO: merge into rx.c */ - rtw_rx_stats(rtwdev, pkt_stat.vif, skb); + /* remove rx_desc */ + skb_pull(new, pkt_offset); + + rtw_rx_stats(rtwdev, pkt_stat.vif, new); memcpy(new->cb, &rx_status, sizeof(rx_status)); ieee80211_rx_irqsafe(rtwdev->hw, new); } - /* skb delivered to mac80211, alloc a new one in rx ring */ - new = dev_alloc_skb(RTK_PCI_RX_BUF_SIZE); - if (WARN(!new, "rx routine starvation\n")) - return; - - ring->buf[cur_rp] = new; - rtw_pci_reset_rx_desc(rtwdev, new, ring, cur_rp, buf_desc_sz); +next_rp: + /* new skb delivered to mac80211, re-enable original skb DMA */ + rtw_pci_sync_rx_desc_device(rtwdev, dma, ring, cur_rp, + buf_desc_sz); /* host read next element in ring */ if (++cur_rp >= ring->r.len) diff --git a/drivers/net/wireless/zydas/zd1211rw/zd_mac.c b/drivers/net/wireless/zydas/zd1211rw/zd_mac.c index da7e63fca9f5..a9999d10ae81 100644 --- a/drivers/net/wireless/zydas/zd1211rw/zd_mac.c +++ b/drivers/net/wireless/zydas/zd1211rw/zd_mac.c @@ -223,7 +223,6 @@ void zd_mac_clear(struct zd_mac *mac) { flush_workqueue(zd_workqueue); zd_chip_clear(&mac->chip); - lockdep_assert_held(&mac->lock); ZD_MEMCLEAR(mac, sizeof(struct zd_mac)); } diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c index af831d3d15d0..30de7efef003 100644 --- a/drivers/nvme/host/multipath.c +++ b/drivers/nvme/host/multipath.c @@ -509,14 +509,16 @@ static int nvme_update_ana_state(struct nvme_ctrl *ctrl, down_write(&ctrl->namespaces_rwsem); list_for_each_entry(ns, &ctrl->namespaces, list) { - if (ns->head->ns_id != le32_to_cpu(desc->nsids[n])) + unsigned nsid = le32_to_cpu(desc->nsids[n]); + + if (ns->head->ns_id < nsid) continue; - nvme_update_ns_ana_state(desc, ns); 
+ if (ns->head->ns_id == nsid) + nvme_update_ns_ana_state(desc, ns); if (++n == nr_nsids) break; } up_write(&ctrl->namespaces_rwsem); - WARN_ON_ONCE(n < nr_nsids); return 0; } diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c index 4dc12ea52f23..51800a9ce9a9 100644 --- a/drivers/nvme/target/admin-cmd.c +++ b/drivers/nvme/target/admin-cmd.c @@ -81,9 +81,11 @@ static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req, goto out; host_reads = part_stat_read(ns->bdev->bd_part, ios[READ]); - data_units_read = part_stat_read(ns->bdev->bd_part, sectors[READ]); + data_units_read = DIV_ROUND_UP(part_stat_read(ns->bdev->bd_part, + sectors[READ]), 1000); host_writes = part_stat_read(ns->bdev->bd_part, ios[WRITE]); - data_units_written = part_stat_read(ns->bdev->bd_part, sectors[WRITE]); + data_units_written = DIV_ROUND_UP(part_stat_read(ns->bdev->bd_part, + sectors[WRITE]), 1000); put_unaligned_le64(host_reads, &slog->host_reads[0]); put_unaligned_le64(data_units_read, &slog->data_units_read[0]); @@ -111,11 +113,11 @@ static u16 nvmet_get_smart_log_all(struct nvmet_req *req, if (!ns->bdev) continue; host_reads += part_stat_read(ns->bdev->bd_part, ios[READ]); - data_units_read += - part_stat_read(ns->bdev->bd_part, sectors[READ]); + data_units_read += DIV_ROUND_UP( + part_stat_read(ns->bdev->bd_part, sectors[READ]), 1000); host_writes += part_stat_read(ns->bdev->bd_part, ios[WRITE]); - data_units_written += - part_stat_read(ns->bdev->bd_part, sectors[WRITE]); + data_units_written += DIV_ROUND_UP( + part_stat_read(ns->bdev->bd_part, sectors[WRITE]), 1000); } rcu_read_unlock(); diff --git a/drivers/parisc/dino.c b/drivers/parisc/dino.c index 3c730103e637..14be463e25b0 100644 --- a/drivers/parisc/dino.c +++ b/drivers/parisc/dino.c @@ -156,6 +156,15 @@ static inline struct dino_device *DINO_DEV(struct pci_hba_data *hba) return container_of(hba, struct dino_device, hba); } +/* Check if PCI device is behind a Card-mode Dino. 
*/ +static int pci_dev_is_behind_card_dino(struct pci_dev *dev) +{ + struct dino_device *dino_dev; + + dino_dev = DINO_DEV(parisc_walk_tree(dev->bus->bridge)); + return is_card_dino(&dino_dev->hba.dev->id); +} + /* * Dino Configuration Space Accessor Functions */ @@ -437,6 +446,21 @@ static void quirk_cirrus_cardbus(struct pci_dev *dev) } DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_CIRRUS, PCI_DEVICE_ID_CIRRUS_6832, quirk_cirrus_cardbus ); +#ifdef CONFIG_TULIP +static void pci_fixup_tulip(struct pci_dev *dev) +{ + if (!pci_dev_is_behind_card_dino(dev)) + return; + if (!(pci_resource_flags(dev, 1) & IORESOURCE_MEM)) + return; + pr_warn("%s: HP HSC-PCI Cards with card-mode Dino not yet supported.\n", + pci_name(dev)); + /* Disable this card by zeroing the PCI resources */ + memset(&dev->resource[0], 0, sizeof(dev->resource[0])); + memset(&dev->resource[1], 0, sizeof(dev->resource[1])); +} +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_DEC, PCI_ANY_ID, pci_fixup_tulip); +#endif /* CONFIG_TULIP */ static void __init dino_bios_init(void) diff --git a/drivers/platform/chrome/cros_ec_rpmsg.c b/drivers/platform/chrome/cros_ec_rpmsg.c index 5d3fb2abad1d..bec19d4814ab 100644 --- a/drivers/platform/chrome/cros_ec_rpmsg.c +++ b/drivers/platform/chrome/cros_ec_rpmsg.c @@ -41,6 +41,7 @@ struct cros_ec_rpmsg { struct rpmsg_device *rpdev; struct completion xfer_ack; struct work_struct host_event_work; + struct rpmsg_endpoint *ept; }; /** @@ -72,7 +73,6 @@ static int cros_ec_pkt_xfer_rpmsg(struct cros_ec_device *ec_dev, struct cros_ec_command *ec_msg) { struct cros_ec_rpmsg *ec_rpmsg = ec_dev->priv; - struct rpmsg_device *rpdev = ec_rpmsg->rpdev; struct ec_host_response *response; unsigned long timeout; int len; @@ -85,7 +85,7 @@ static int cros_ec_pkt_xfer_rpmsg(struct cros_ec_device *ec_dev, dev_dbg(ec_dev->dev, "prepared, len=%d\n", len); reinit_completion(&ec_rpmsg->xfer_ack); - ret = rpmsg_send(rpdev->ept, ec_dev->dout, len); + ret = rpmsg_send(ec_rpmsg->ept, ec_dev->dout, len); if (ret) { dev_err(ec_dev->dev, "rpmsg send failed\n"); return ret; @@ -196,11 +196,24 @@ static int cros_ec_rpmsg_callback(struct rpmsg_device *rpdev, void *data, return 0; } +static struct rpmsg_endpoint * +cros_ec_rpmsg_create_ept(struct rpmsg_device *rpdev) +{ + struct rpmsg_channel_info chinfo = {}; + + strscpy(chinfo.name, rpdev->id.name, RPMSG_NAME_SIZE); + chinfo.src = rpdev->src; + chinfo.dst = RPMSG_ADDR_ANY; + + return rpmsg_create_ept(rpdev, cros_ec_rpmsg_callback, NULL, chinfo); +} + static int cros_ec_rpmsg_probe(struct rpmsg_device *rpdev) { struct device *dev = &rpdev->dev; struct cros_ec_rpmsg *ec_rpmsg; struct cros_ec_device *ec_dev; + int ret; ec_dev = devm_kzalloc(dev, sizeof(*ec_dev), GFP_KERNEL); if (!ec_dev) @@ -225,7 +238,18 @@ static int cros_ec_rpmsg_probe(struct rpmsg_device *rpdev) INIT_WORK(&ec_rpmsg->host_event_work, cros_ec_rpmsg_host_event_function); - return cros_ec_register(ec_dev); + ec_rpmsg->ept = cros_ec_rpmsg_create_ept(rpdev); + if (!ec_rpmsg->ept) + return -ENOMEM; + + ret = cros_ec_register(ec_dev); + if (ret < 0) { + rpmsg_destroy_ept(ec_rpmsg->ept); + cancel_work_sync(&ec_rpmsg->host_event_work); + return ret; + } + + return 0; } static void cros_ec_rpmsg_remove(struct rpmsg_device *rpdev) @@ -233,6 +257,7 @@ static void cros_ec_rpmsg_remove(struct rpmsg_device *rpdev) struct cros_ec_device *ec_dev = dev_get_drvdata(&rpdev->dev); struct cros_ec_rpmsg *ec_rpmsg = ec_dev->priv; + rpmsg_destroy_ept(ec_rpmsg->ept); cancel_work_sync(&ec_rpmsg->host_event_work); } @@ -249,7 +274,6 @@ static struct 
rpmsg_driver cros_ec_driver_rpmsg = { }, .probe = cros_ec_rpmsg_probe, .remove = cros_ec_rpmsg_remove, - .callback = cros_ec_rpmsg_callback, }; module_rpmsg_driver(cros_ec_driver_rpmsg); diff --git a/drivers/platform/x86/intel_int0002_vgpio.c b/drivers/platform/x86/intel_int0002_vgpio.c index d9542c661ddc..9ea1a2a19f86 100644 --- a/drivers/platform/x86/intel_int0002_vgpio.c +++ b/drivers/platform/x86/intel_int0002_vgpio.c @@ -144,6 +144,7 @@ static struct irq_chip int0002_cht_irqchip = { * No set_wake, on CHT the IRQ is typically shared with the ACPI SCI * and we don't want to mess with the ACPI SCI irq settings. */ + .flags = IRQCHIP_SKIP_SET_WAKE, }; static const struct x86_cpu_id int0002_cpu_ids[] = { diff --git a/drivers/platform/x86/intel_pmc_core.c b/drivers/platform/x86/intel_pmc_core.c index c510d0d72475..3b6b8dcc4767 100644 --- a/drivers/platform/x86/intel_pmc_core.c +++ b/drivers/platform/x86/intel_pmc_core.c @@ -878,10 +878,14 @@ static int pmc_core_probe(struct platform_device *pdev) if (pmcdev->map == &spt_reg_map && !pci_dev_present(pmc_pci_ids)) pmcdev->map = &cnp_reg_map; - if (lpit_read_residency_count_address(&slp_s0_addr)) + if (lpit_read_residency_count_address(&slp_s0_addr)) { pmcdev->base_addr = PMC_BASE_ADDR_DEFAULT; - else + + if (page_is_ram(PHYS_PFN(pmcdev->base_addr))) + return -ENODEV; + } else { pmcdev->base_addr = slp_s0_addr - pmcdev->map->slp_s0_offset; + } pmcdev->regbase = ioremap(pmcdev->base_addr, pmcdev->map->regmap_length); diff --git a/drivers/platform/x86/intel_pmc_core_pltdrv.c b/drivers/platform/x86/intel_pmc_core_pltdrv.c index a8754a6db1b8..186540014c48 100644 --- a/drivers/platform/x86/intel_pmc_core_pltdrv.c +++ b/drivers/platform/x86/intel_pmc_core_pltdrv.c @@ -18,8 +18,16 @@ #include <asm/cpu_device_id.h> #include <asm/intel-family.h> +static void intel_pmc_core_release(struct device *dev) +{ + /* Nothing to do. */ +} + static struct platform_device pmc_core_device = { .name = "intel_pmc_core", + .dev = { + .release = intel_pmc_core_release, + }, }; /* diff --git a/drivers/ras/Makefile b/drivers/ras/Makefile index ef6777e14d3d..6f0404f50107 100644 --- a/drivers/ras/Makefile +++ b/drivers/ras/Makefile @@ -1,3 +1,4 @@ # SPDX-License-Identifier: GPL-2.0-only -obj-$(CONFIG_RAS) += ras.o debugfs.o +obj-$(CONFIG_RAS) += ras.o +obj-$(CONFIG_DEBUG_FS) += debugfs.o obj-$(CONFIG_RAS_CEC) += cec.o diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index e0c0cf462004..1b35b8311650 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c @@ -5640,7 +5640,7 @@ static int __init regulator_init(void) /* init early to allow our consumers to complete system booting */ core_initcall(regulator_init); -static int __init regulator_late_cleanup(struct device *dev, void *data) +static int regulator_late_cleanup(struct device *dev, void *data) { struct regulator_dev *rdev = dev_to_rdev(dev); const struct regulator_ops *ops = rdev->desc->ops; @@ -5689,17 +5689,8 @@ static int __init regulator_late_cleanup(struct device *dev, void *data) return 0; } -static int __init regulator_init_complete(void) +static void regulator_init_complete_work_function(struct work_struct *work) { - /* - * Since DT doesn't provide an idiomatic mechanism for - * enabling full constraints and since it's much more natural - * with DT to provide them just assume that a DT enabled - * system has full constraints. 
- */ - if (of_have_populated_dt()) - has_full_constraints = true; - /* * Regulators may had failed to resolve their input supplies * when were registered, either because the input supply was @@ -5717,6 +5708,35 @@ static int __init regulator_init_complete(void) */ class_for_each_device(®ulator_class, NULL, NULL, regulator_late_cleanup); +} + +static DECLARE_DELAYED_WORK(regulator_init_complete_work, + regulator_init_complete_work_function); + +static int __init regulator_init_complete(void) +{ + /* + * Since DT doesn't provide an idiomatic mechanism for + * enabling full constraints and since it's much more natural + * with DT to provide them just assume that a DT enabled + * system has full constraints. + */ + if (of_have_populated_dt()) + has_full_constraints = true; + + /* + * We punt completion for an arbitrary amount of time since + * systems like distros will load many drivers from userspace + * so consumers might not always be ready yet, this is + * particularly an issue with laptops where this might bounce + * the display off then on. Ideally we'd get a notification + * from userspace when this happens but we don't so just wait + * a bit and hope we waited long enough. It'd be better if + * we'd only do this on systems that need it, and a kernel + * command line option might be useful. + */ + schedule_delayed_work(®ulator_init_complete_work, + msecs_to_jiffies(30000)); return 0; } diff --git a/drivers/regulator/lm363x-regulator.c b/drivers/regulator/lm363x-regulator.c index 5647e2f97ff8..4b9f618b07e9 100644 --- a/drivers/regulator/lm363x-regulator.c +++ b/drivers/regulator/lm363x-regulator.c @@ -30,13 +30,13 @@ /* LM3632 */ #define LM3632_BOOST_VSEL_MAX 0x26 -#define LM3632_LDO_VSEL_MAX 0x29 +#define LM3632_LDO_VSEL_MAX 0x28 #define LM3632_VBOOST_MIN 4500000 #define LM3632_VLDO_MIN 4000000 /* LM36274 */ #define LM36274_BOOST_VSEL_MAX 0x3f -#define LM36274_LDO_VSEL_MAX 0x34 +#define LM36274_LDO_VSEL_MAX 0x32 #define LM36274_VOLTAGE_MIN 4000000 /* Common */ @@ -226,7 +226,7 @@ static const struct regulator_desc lm363x_regulator_desc[] = { .of_match = "vboost", .id = LM36274_BOOST, .ops = &lm363x_boost_voltage_table_ops, - .n_voltages = LM36274_BOOST_VSEL_MAX, + .n_voltages = LM36274_BOOST_VSEL_MAX + 1, .min_uV = LM36274_VOLTAGE_MIN, .uV_step = LM363X_STEP_50mV, .type = REGULATOR_VOLTAGE, @@ -239,7 +239,7 @@ static const struct regulator_desc lm363x_regulator_desc[] = { .of_match = "vpos", .id = LM36274_LDO_POS, .ops = &lm363x_regulator_voltage_table_ops, - .n_voltages = LM36274_LDO_VSEL_MAX, + .n_voltages = LM36274_LDO_VSEL_MAX + 1, .min_uV = LM36274_VOLTAGE_MIN, .uV_step = LM363X_STEP_50mV, .type = REGULATOR_VOLTAGE, @@ -254,7 +254,7 @@ static const struct regulator_desc lm363x_regulator_desc[] = { .of_match = "vneg", .id = LM36274_LDO_NEG, .ops = &lm363x_regulator_voltage_table_ops, - .n_voltages = LM36274_LDO_VSEL_MAX, + .n_voltages = LM36274_LDO_VSEL_MAX + 1, .min_uV = LM36274_VOLTAGE_MIN, .uV_step = LM363X_STEP_50mV, .type = REGULATOR_VOLTAGE, diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c index 65f1fe343c64..5efc959493ec 100644 --- a/drivers/scsi/device_handler/scsi_dh_rdac.c +++ b/drivers/scsi/device_handler/scsi_dh_rdac.c @@ -546,6 +546,8 @@ static void send_mode_select(struct work_struct *work) spin_unlock(&ctlr->ms_lock); retry: + memset(cdb, 0, sizeof(cdb)); + data_size = rdac_failover_get(ctlr, &list, cdb); RDAC_LOG(RDAC_LOG_FAILOVER, sdev, "array %s, ctlr %d, " diff --git a/drivers/scsi/qla2xxx/qla_init.c 
b/drivers/scsi/qla2xxx/qla_init.c index da83034d4759..afcd9a885884 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -289,8 +289,13 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport, struct srb_iocb *lio; int rval = QLA_FUNCTION_FAILED; - if (!vha->flags.online) - goto done; + if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT) || + fcport->loop_id == FC_NO_LOOP_ID) { + ql_log(ql_log_warn, vha, 0xffff, + "%s: %8phC - not sending command.\n", + __func__, fcport->port_name); + return rval; + } sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); if (!sp) @@ -1262,8 +1267,13 @@ int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt) struct port_database_24xx *pd; struct qla_hw_data *ha = vha->hw; - if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT)) + if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT) || + fcport->loop_id == FC_NO_LOOP_ID) { + ql_log(ql_log_warn, vha, 0xffff, + "%s: %8phC - not sending command.\n", + __func__, fcport->port_name); return rval; + } fcport->disc_state = DSC_GPDB; @@ -1953,8 +1963,11 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea) return; } - if (fcport->disc_state == DSC_DELETE_PEND) + if ((fcport->disc_state == DSC_DELETE_PEND) || + (fcport->disc_state == DSC_DELETED)) { + set_bit(RELOGIN_NEEDED, &vha->dpc_flags); return; + } if (ea->sp->gen2 != fcport->login_gen) { /* target side must have changed it. */ @@ -6698,8 +6711,10 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha) } /* Clear all async request states across all VPs. */ - list_for_each_entry(fcport, &vha->vp_fcports, list) + list_for_each_entry(fcport, &vha->vp_fcports, list) { fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT); + fcport->scan_state = 0; + } spin_lock_irqsave(&ha->vport_slock, flags); list_for_each_entry(vp, &ha->vp_list, list) { atomic_inc(&vp->vref_count); diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 98e60a34afd9..4fda308c3ef5 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -5086,6 +5086,7 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e) if (fcport) { fcport->id_changed = 1; fcport->scan_state = QLA_FCPORT_FOUND; + fcport->chip_reset = vha->hw->base_qpair->chip_reset; memcpy(fcport->node_name, e->u.new_sess.node_name, WWN_SIZE); if (pla) { diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index 1c1f63be6eed..459c28aa3b94 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -1209,7 +1209,6 @@ static void qla24xx_chk_fcp_state(struct fc_port *sess) sess->logout_on_delete = 0; sess->logo_ack_needed = 0; sess->fw_login_state = DSC_LS_PORT_UNAVAIL; - sess->scan_state = 0; } } diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 11e64b50497f..4e88d7e9cf9a 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -1089,6 +1089,18 @@ static void scsi_initialize_rq(struct request *rq) cmd->retries = 0; } +/* + * Only called when the request isn't completed by SCSI, and not freed by + * SCSI + */ +static void scsi_cleanup_rq(struct request *rq) +{ + if (rq->rq_flags & RQF_DONTPREP) { + scsi_mq_uninit_cmd(blk_mq_rq_to_pdu(rq)); + rq->rq_flags &= ~RQF_DONTPREP; + } +} + /* Add a command to the list used by the aacraid and dpt_i2o drivers */ void scsi_add_cmd_to_list(struct scsi_cmnd *cmd) { @@ -1821,6 +1833,7 @@ static const struct blk_mq_ops scsi_mq_ops = { 
.init_request = scsi_mq_init_request, .exit_request = scsi_mq_exit_request, .initialize_rq_fn = scsi_initialize_rq, + .cleanup_rq = scsi_cleanup_rq, .busy = scsi_mq_lld_busy, .map_queues = scsi_map_queues, }; diff --git a/drivers/soc/amlogic/meson-clk-measure.c b/drivers/soc/amlogic/meson-clk-measure.c index 19d4cbc93a17..c470e24f1dfa 100644 --- a/drivers/soc/amlogic/meson-clk-measure.c +++ b/drivers/soc/amlogic/meson-clk-measure.c @@ -11,6 +11,8 @@ #include <linux/debugfs.h> #include <linux/regmap.h> +static DEFINE_MUTEX(measure_lock); + #define MSR_CLK_DUTY 0x0 #define MSR_CLK_REG0 0x4 #define MSR_CLK_REG1 0x8 @@ -360,6 +362,10 @@ static int meson_measure_id(struct meson_msr_id *clk_msr_id, unsigned int val; int ret; + ret = mutex_lock_interruptible(&measure_lock); + if (ret) + return ret; + regmap_write(priv->regmap, MSR_CLK_REG0, 0); /* Set measurement duration */ @@ -377,8 +383,10 @@ static int meson_measure_id(struct meson_msr_id *clk_msr_id, ret = regmap_read_poll_timeout(priv->regmap, MSR_CLK_REG0, val, !(val & MSR_BUSY), 10, 10000); - if (ret) + if (ret) { + mutex_unlock(&measure_lock); return ret; + } /* Disable */ regmap_update_bits(priv->regmap, MSR_CLK_REG0, MSR_ENABLE, 0); @@ -386,6 +394,8 @@ static int meson_measure_id(struct meson_msr_id *clk_msr_id, /* Get the value in multiple of gate time counts */ regmap_read(priv->regmap, MSR_CLK_REG2, &val); + mutex_unlock(&measure_lock); + if (val >= MSR_VAL_MASK) return -EINVAL; diff --git a/drivers/soc/renesas/Kconfig b/drivers/soc/renesas/Kconfig index 2bbf49e5d441..9583c542c47f 100644 --- a/drivers/soc/renesas/Kconfig +++ b/drivers/soc/renesas/Kconfig @@ -55,6 +55,7 @@ config ARCH_EMEV2 config ARCH_R7S72100 bool "RZ/A1H (R7S72100)" + select ARM_ERRATA_754322 select PM select PM_GENERIC_DOMAINS select RENESAS_OSTM @@ -78,6 +79,7 @@ config ARCH_R8A73A4 config ARCH_R8A7740 bool "R-Mobile A1 (R8A77400)" select ARCH_RMOBILE + select ARM_ERRATA_754322 select RENESAS_INTC_IRQPIN config ARCH_R8A7743 @@ -105,10 +107,12 @@ config ARCH_R8A77470 config ARCH_R8A7778 bool "R-Car M1A (R8A77781)" select ARCH_RCAR_GEN1 + select ARM_ERRATA_754322 config ARCH_R8A7779 bool "R-Car H1 (R8A77790)" select ARCH_RCAR_GEN1 + select ARM_ERRATA_754322 select HAVE_ARM_SCU if SMP select HAVE_ARM_TWD if SMP select SYSC_R8A7779 @@ -152,6 +156,7 @@ config ARCH_R9A06G032 config ARCH_SH73A0 bool "SH-Mobile AG5 (R8A73A00)" select ARCH_RMOBILE + select ARM_ERRATA_754322 select HAVE_ARM_SCU if SMP select HAVE_ARM_TWD if SMP select RENESAS_INTC_IRQPIN diff --git a/drivers/soc/renesas/rmobile-sysc.c b/drivers/soc/renesas/rmobile-sysc.c index 421ae1c887d8..54b616ad4a62 100644 --- a/drivers/soc/renesas/rmobile-sysc.c +++ b/drivers/soc/renesas/rmobile-sysc.c @@ -48,12 +48,8 @@ struct rmobile_pm_domain *to_rmobile_pd(struct generic_pm_domain *d) static int rmobile_pd_power_down(struct generic_pm_domain *genpd) { struct rmobile_pm_domain *rmobile_pd = to_rmobile_pd(genpd); - unsigned int mask; + unsigned int mask = BIT(rmobile_pd->bit_shift); - if (rmobile_pd->bit_shift == ~0) - return -EBUSY; - - mask = BIT(rmobile_pd->bit_shift); if (rmobile_pd->suspend) { int ret = rmobile_pd->suspend(); @@ -80,14 +76,10 @@ static int rmobile_pd_power_down(struct generic_pm_domain *genpd) static int __rmobile_pd_power_up(struct rmobile_pm_domain *rmobile_pd) { - unsigned int mask; + unsigned int mask = BIT(rmobile_pd->bit_shift); unsigned int retry_count; int ret = 0; - if (rmobile_pd->bit_shift == ~0) - return 0; - - mask = BIT(rmobile_pd->bit_shift); if (__raw_readl(rmobile_pd->base + 
PSTR) & mask) return ret; @@ -122,11 +114,15 @@ static void rmobile_init_pm_domain(struct rmobile_pm_domain *rmobile_pd) struct dev_power_governor *gov = rmobile_pd->gov; genpd->flags |= GENPD_FLAG_PM_CLK | GENPD_FLAG_ACTIVE_WAKEUP; - genpd->power_off = rmobile_pd_power_down; - genpd->power_on = rmobile_pd_power_up; - genpd->attach_dev = cpg_mstp_attach_dev; - genpd->detach_dev = cpg_mstp_detach_dev; - __rmobile_pd_power_up(rmobile_pd); + genpd->attach_dev = cpg_mstp_attach_dev; + genpd->detach_dev = cpg_mstp_detach_dev; + + if (!(genpd->flags & GENPD_FLAG_ALWAYS_ON)) { + genpd->power_off = rmobile_pd_power_down; + genpd->power_on = rmobile_pd_power_up; + __rmobile_pd_power_up(rmobile_pd); + } + pm_genpd_init(genpd, gov ? : &simple_qos_governor, false); } @@ -270,6 +266,11 @@ static void __init rmobile_setup_pm_domain(struct device_node *np, break; case PD_NORMAL: + if (pd->bit_shift == ~0) { + /* Top-level always-on domain */ + pr_debug("PM domain %s is always-on domain\n", name); + pd->genpd.flags |= GENPD_FLAG_ALWAYS_ON; + } break; } diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c index 840b1b8ff3dc..dfdcebb38830 100644 --- a/drivers/spi/spi-bcm2835.c +++ b/drivers/spi/spi-bcm2835.c @@ -319,6 +319,13 @@ static void bcm2835_spi_reset_hw(struct spi_controller *ctlr) BCM2835_SPI_CS_INTD | BCM2835_SPI_CS_DMAEN | BCM2835_SPI_CS_TA); + /* + * Transmission sometimes breaks unless the DONE bit is written at the + * end of every transfer. The spec says it's a RO bit. Either the + * spec is wrong and the bit is actually of type RW1C, or it's a + * hardware erratum. + */ + cs |= BCM2835_SPI_CS_DONE; /* and reset RX/TX FIFOS */ cs |= BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX; @@ -477,7 +484,9 @@ static void bcm2835_spi_transfer_prologue(struct spi_controller *ctlr, bcm2835_wr_fifo_count(bs, bs->rx_prologue); bcm2835_wait_tx_fifo_empty(bs); bcm2835_rd_fifo_count(bs, bs->rx_prologue); - bcm2835_spi_reset_hw(ctlr); + bcm2835_wr(bs, BCM2835_SPI_CS, cs | BCM2835_SPI_CS_CLEAR_RX + | BCM2835_SPI_CS_CLEAR_TX + | BCM2835_SPI_CS_DONE); dma_sync_single_for_device(ctlr->dma_rx->device->dev, sg_dma_address(&tfr->rx_sg.sgl[0]), @@ -498,7 +507,8 @@ static void bcm2835_spi_transfer_prologue(struct spi_controller *ctlr, | BCM2835_SPI_CS_DMAEN); bcm2835_wr_fifo_count(bs, tx_remaining); bcm2835_wait_tx_fifo_empty(bs); - bcm2835_wr(bs, BCM2835_SPI_CS, cs | BCM2835_SPI_CS_CLEAR_TX); + bcm2835_wr(bs, BCM2835_SPI_CS, cs | BCM2835_SPI_CS_CLEAR_TX + | BCM2835_SPI_CS_DONE); } if (likely(!bs->tx_spillover)) { diff --git a/drivers/spi/spi-dw-mmio.c b/drivers/spi/spi-dw-mmio.c index 18c06568805e..86789dbaf577 100644 --- a/drivers/spi/spi-dw-mmio.c +++ b/drivers/spi/spi-dw-mmio.c @@ -172,8 +172,10 @@ static int dw_spi_mmio_probe(struct platform_device *pdev) /* Optional clock needed to access the registers */ dwsmmio->pclk = devm_clk_get_optional(&pdev->dev, "pclk"); - if (IS_ERR(dwsmmio->pclk)) - return PTR_ERR(dwsmmio->pclk); + if (IS_ERR(dwsmmio->pclk)) { + ret = PTR_ERR(dwsmmio->pclk); + goto out_clk; + } ret = clk_prepare_enable(dwsmmio->pclk); if (ret) goto out_clk; diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c index 53335ccc98f6..545fc8189fb0 100644 --- a/drivers/spi/spi-fsl-dspi.c +++ b/drivers/spi/spi-fsl-dspi.c @@ -886,9 +886,11 @@ static irqreturn_t dspi_interrupt(int irq, void *dev_id) trans_mode); } } + + return IRQ_HANDLED; } - return IRQ_HANDLED; + return IRQ_NONE; } static const struct of_device_id fsl_dspi_dt_ids[] = { diff --git a/drivers/staging/erofs/zmap.c 
b/drivers/staging/erofs/zmap.c index 9c0bd65c46bf..c2359321ca13 100644 --- a/drivers/staging/erofs/zmap.c +++ b/drivers/staging/erofs/zmap.c @@ -86,12 +86,11 @@ static int fill_inode_lazy(struct inode *inode) vi->z_physical_clusterbits[1] = vi->z_logical_clusterbits + ((h->h_clusterbits >> 5) & 7); + set_bit(EROFS_V_Z_INITED_BIT, &vi->flags); unmap_done: kunmap_atomic(kaddr); unlock_page(page); put_page(page); - - set_bit(EROFS_V_Z_INITED_BIT, &vi->flags); out_unlock: clear_and_wake_up_bit(EROFS_V_BL_Z_BIT, &vi->flags); return err; diff --git a/drivers/staging/media/hantro/hantro_drv.c b/drivers/staging/media/hantro/hantro_drv.c index c3665f0e87a2..46dcb46bb927 100644 --- a/drivers/staging/media/hantro/hantro_drv.c +++ b/drivers/staging/media/hantro/hantro_drv.c @@ -724,6 +724,7 @@ static int hantro_probe(struct platform_device *pdev) dev_err(vpu->dev, "Could not set DMA coherent mask.\n"); return ret; } + vb2_dma_contig_set_max_seg_size(&pdev->dev, DMA_BIT_MASK(32)); for (i = 0; i < vpu->variant->num_irqs; i++) { const char *irq_name = vpu->variant->irqs[i].name; diff --git a/drivers/staging/media/imx/imx6-mipi-csi2.c b/drivers/staging/media/imx/imx6-mipi-csi2.c index f29e28df36ed..bfa4b254c4e4 100644 --- a/drivers/staging/media/imx/imx6-mipi-csi2.c +++ b/drivers/staging/media/imx/imx6-mipi-csi2.c @@ -243,7 +243,7 @@ static int __maybe_unused csi2_dphy_wait_ulp(struct csi2_dev *csi2) } /* Waits for low-power LP-11 state on data and clock lanes. */ -static int csi2_dphy_wait_stopstate(struct csi2_dev *csi2) +static void csi2_dphy_wait_stopstate(struct csi2_dev *csi2) { u32 mask, reg; int ret; @@ -254,11 +254,9 @@ static int csi2_dphy_wait_stopstate(struct csi2_dev *csi2) ret = readl_poll_timeout(csi2->base + CSI2_PHY_STATE, reg, (reg & mask) == mask, 0, 500000); if (ret) { - v4l2_err(&csi2->sd, "LP-11 timeout, phy_state = 0x%08x\n", reg); - return ret; + v4l2_warn(&csi2->sd, "LP-11 wait timeout, likely a sensor driver bug, expect capture failures.\n"); + v4l2_warn(&csi2->sd, "phy_state = 0x%08x\n", reg); } - - return 0; } /* Wait for active clock on the clock lane. 
*/ @@ -316,9 +314,7 @@ static int csi2_start(struct csi2_dev *csi2) csi2_enable(csi2, true); /* Step 5 */ - ret = csi2_dphy_wait_stopstate(csi2); - if (ret) - goto err_assert_reset; + csi2_dphy_wait_stopstate(csi2); /* Step 6 */ ret = v4l2_subdev_call(csi2->src_sd, video, s_stream, 1); diff --git a/drivers/staging/media/tegra-vde/Kconfig b/drivers/staging/media/tegra-vde/Kconfig index 2e7f644ae591..ba49ea50b8c0 100644 --- a/drivers/staging/media/tegra-vde/Kconfig +++ b/drivers/staging/media/tegra-vde/Kconfig @@ -3,7 +3,7 @@ config TEGRA_VDE tristate "NVIDIA Tegra Video Decoder Engine driver" depends on ARCH_TEGRA || COMPILE_TEST select DMA_SHARED_BUFFER - select IOMMU_IOVA if IOMMU_SUPPORT + select IOMMU_IOVA if (IOMMU_SUPPORT || COMPILE_TEST) select SRAM help Say Y here to enable support for the NVIDIA Tegra video decoder diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c index 04a22663b4fb..51d97ec4f58f 100644 --- a/drivers/video/fbdev/efifb.c +++ b/drivers/video/fbdev/efifb.c @@ -122,28 +122,13 @@ static void efifb_copy_bmp(u8 *src, u32 *dst, int width, struct screen_info *si) */ static bool efifb_bgrt_sanity_check(struct screen_info *si, u32 bmp_width) { - static const int default_resolutions[][2] = { - { 800, 600 }, - { 1024, 768 }, - { 1280, 1024 }, - }; - u32 i, right_margin; - - for (i = 0; i < ARRAY_SIZE(default_resolutions); i++) { - if (default_resolutions[i][0] == si->lfb_width && - default_resolutions[i][1] == si->lfb_height) - break; - } - /* If not a default resolution used for textmode, this should be fine */ - if (i >= ARRAY_SIZE(default_resolutions)) - return true; - - /* If the right margin is 5 times smaller then the left one, reject */ - right_margin = si->lfb_width - (bgrt_tab.image_offset_x + bmp_width); - if (right_margin < (bgrt_tab.image_offset_x / 5)) - return false; + /* + * All x86 firmwares horizontally center the image (the yoffset + * calculations differ between boards, but xoffset is predictable). + */ + u32 expected_xoffset = (si->lfb_width - bmp_width) / 2; - return true; + return bgrt_tab.image_offset_x == expected_xoffset; } #else static bool efifb_bgrt_sanity_check(struct screen_info *si, u32 bmp_width) diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index d4e11b2e04f6..f131651502b8 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c @@ -1141,7 +1141,8 @@ static int load_elf_binary(struct linux_binprm *bprm) * (since it grows up, and may collide early with the stack * growing down), and into the unused ELF_ET_DYN_BASE region. 
*/ - if (IS_ENABLED(CONFIG_ARCH_HAS_ELF_RANDOMIZE) && !interpreter) + if (IS_ENABLED(CONFIG_ARCH_HAS_ELF_RANDOMIZE) && + loc->elf_ex.e_type == ET_DYN && !interpreter) current->mm->brk = current->mm->start_brk = ELF_ET_DYN_BASE; diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index 5df76c17775a..322ec4b839ed 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -1343,6 +1343,7 @@ get_old_root(struct btrfs_root *root, u64 time_seq) struct tree_mod_elem *tm; struct extent_buffer *eb = NULL; struct extent_buffer *eb_root; + u64 eb_root_owner = 0; struct extent_buffer *old; struct tree_mod_root *old_root = NULL; u64 old_generation = 0; @@ -1380,6 +1381,7 @@ get_old_root(struct btrfs_root *root, u64 time_seq) free_extent_buffer(old); } } else if (old_root) { + eb_root_owner = btrfs_header_owner(eb_root); btrfs_tree_read_unlock(eb_root); free_extent_buffer(eb_root); eb = alloc_dummy_extent_buffer(fs_info, logical); @@ -1396,7 +1398,7 @@ get_old_root(struct btrfs_root *root, u64 time_seq) if (old_root) { btrfs_set_header_bytenr(eb, eb->start); btrfs_set_header_backref_rev(eb, BTRFS_MIXED_BACKREF_REV); - btrfs_set_header_owner(eb, btrfs_header_owner(eb_root)); + btrfs_set_header_owner(eb, eb_root_owner); btrfs_set_header_level(eb, old_root->level); btrfs_set_header_generation(eb, old_generation); } @@ -5475,6 +5477,7 @@ int btrfs_compare_trees(struct btrfs_root *left_root, advance_left = advance_right = 0; while (1) { + cond_resched(); if (advance_left && !left_end_reached) { ret = tree_advance(left_path, &left_level, left_root_level, diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 94660063a162..d9541d58ce3d 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -43,6 +43,7 @@ extern struct kmem_cache *btrfs_trans_handle_cachep; extern struct kmem_cache *btrfs_bit_radix_cachep; extern struct kmem_cache *btrfs_path_cachep; extern struct kmem_cache *btrfs_free_space_cachep; +extern struct kmem_cache *btrfs_free_space_bitmap_cachep; struct btrfs_ordered_sum; struct btrfs_ref; diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c index 43fdb2992956..6858a05606dd 100644 --- a/fs/btrfs/delayed-inode.c +++ b/fs/btrfs/delayed-inode.c @@ -474,6 +474,9 @@ static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item) struct rb_root_cached *root; struct btrfs_delayed_root *delayed_root; + /* Not associated with any delayed_node */ + if (!delayed_item->delayed_node) + return; delayed_root = delayed_item->delayed_node->root->fs_info->delayed_root; BUG_ON(!delayed_root); @@ -1525,7 +1528,12 @@ int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans, * we have reserved enough space when we start a new transaction, * so reserving metadata failure is impossible. 
*/ - BUG_ON(ret); + if (ret < 0) { + btrfs_err(trans->fs_info, +"metadata reservation failed for delayed dir item deltiona, should have been reserved"); + btrfs_release_delayed_item(item); + goto end; + } mutex_lock(&node->mutex); ret = __btrfs_add_delayed_deletion_item(node, item); @@ -1534,7 +1542,8 @@ int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans, "err add delayed dir index item(index: %llu) into the deletion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)", index, node->root->root_key.objectid, node->inode_id, ret); - BUG(); + btrfs_delayed_item_release_metadata(dir->root, item); + btrfs_release_delayed_item(item); } mutex_unlock(&node->mutex); end: diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 97beb351a10c..65af7eb3f7bd 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -416,6 +416,16 @@ int btrfs_verify_level_key(struct extent_buffer *eb, int level, */ if (btrfs_header_generation(eb) > fs_info->last_trans_committed) return 0; + + /* We have @first_key, so this @eb must have at least one item */ + if (btrfs_header_nritems(eb) == 0) { + btrfs_err(fs_info, + "invalid tree nritems, bytenr=%llu nritems=0 expect >0", + eb->start); + WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG)); + return -EUCLEAN; + } + if (found_level) btrfs_node_key_to_cpu(eb, &found_key, 0); else diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 8b7eb22d508a..ef2f80825c82 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -5751,6 +5751,14 @@ static noinline int find_free_extent(struct btrfs_fs_info *fs_info, */ if ((flags & extra) && !(block_group->flags & extra)) goto loop; + + /* + * This block group has different flags than we want. + * It's possible that we have MIXED_GROUP flag but no + * block group is mixed. Just skip such block group. + */ + btrfs_release_block_group(block_group, delalloc); + continue; } have_block_group: diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index eeb75281894e..3e0c8fcb658f 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -3745,11 +3745,20 @@ static noinline_for_stack int lock_extent_buffer_for_io(struct extent_buffer *eb static void set_btree_ioerr(struct page *page) { struct extent_buffer *eb = (struct extent_buffer *)page->private; + struct btrfs_fs_info *fs_info; SetPageError(page); if (test_and_set_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)) return; + /* + * If we error out, we should add back the dirty_metadata_bytes + * to make it consistent. + */ + fs_info = eb->fs_info; + percpu_counter_add_batch(&fs_info->dirty_metadata_bytes, + eb->len, fs_info->dirty_metadata_batch); + /* * If writeback for a btree extent that doesn't belong to a log tree * failed, increment the counter transaction->eb_write_errors. 
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 062be9dde4c6..52ad985cc7f9 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -764,7 +764,8 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode, } else { ASSERT(num_bitmaps); num_bitmaps--; - e->bitmap = kzalloc(PAGE_SIZE, GFP_NOFS); + e->bitmap = kmem_cache_zalloc( + btrfs_free_space_bitmap_cachep, GFP_NOFS); if (!e->bitmap) { kmem_cache_free( btrfs_free_space_cachep, e); @@ -1881,7 +1882,7 @@ static void free_bitmap(struct btrfs_free_space_ctl *ctl, struct btrfs_free_space *bitmap_info) { unlink_free_space(ctl, bitmap_info); - kfree(bitmap_info->bitmap); + kmem_cache_free(btrfs_free_space_bitmap_cachep, bitmap_info->bitmap); kmem_cache_free(btrfs_free_space_cachep, bitmap_info); ctl->total_bitmaps--; ctl->op->recalc_thresholds(ctl); @@ -2135,7 +2136,8 @@ static int insert_into_bitmap(struct btrfs_free_space_ctl *ctl, } /* allocate the bitmap */ - info->bitmap = kzalloc(PAGE_SIZE, GFP_NOFS); + info->bitmap = kmem_cache_zalloc(btrfs_free_space_bitmap_cachep, + GFP_NOFS); spin_lock(&ctl->tree_lock); if (!info->bitmap) { ret = -ENOMEM; @@ -2146,7 +2148,9 @@ static int insert_into_bitmap(struct btrfs_free_space_ctl *ctl, out: if (info) { - kfree(info->bitmap); + if (info->bitmap) + kmem_cache_free(btrfs_free_space_bitmap_cachep, + info->bitmap); kmem_cache_free(btrfs_free_space_cachep, info); } @@ -2802,7 +2806,8 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group, if (entry->bytes == 0) { ctl->free_extents--; if (entry->bitmap) { - kfree(entry->bitmap); + kmem_cache_free(btrfs_free_space_bitmap_cachep, + entry->bitmap); ctl->total_bitmaps--; ctl->op->recalc_thresholds(ctl); } @@ -3606,7 +3611,7 @@ int test_add_free_space_entry(struct btrfs_block_group_cache *cache, } if (!map) { - map = kzalloc(PAGE_SIZE, GFP_NOFS); + map = kmem_cache_zalloc(btrfs_free_space_bitmap_cachep, GFP_NOFS); if (!map) { kmem_cache_free(btrfs_free_space_cachep, info); return -ENOMEM; @@ -3635,7 +3640,8 @@ int test_add_free_space_entry(struct btrfs_block_group_cache *cache, if (info) kmem_cache_free(btrfs_free_space_cachep, info); - kfree(map); + if (map) + kmem_cache_free(btrfs_free_space_bitmap_cachep, map); return 0; } diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index ee582a36653d..d51d9466feb0 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -74,6 +74,7 @@ static struct kmem_cache *btrfs_inode_cachep; struct kmem_cache *btrfs_trans_handle_cachep; struct kmem_cache *btrfs_path_cachep; struct kmem_cache *btrfs_free_space_cachep; +struct kmem_cache *btrfs_free_space_bitmap_cachep; static int btrfs_setsize(struct inode *inode, struct iattr *attr); static int btrfs_truncate(struct inode *inode, bool skip_writeback); @@ -9380,6 +9381,7 @@ void __cold btrfs_destroy_cachep(void) kmem_cache_destroy(btrfs_trans_handle_cachep); kmem_cache_destroy(btrfs_path_cachep); kmem_cache_destroy(btrfs_free_space_cachep); + kmem_cache_destroy(btrfs_free_space_bitmap_cachep); } int __init btrfs_init_cachep(void) @@ -9409,6 +9411,12 @@ int __init btrfs_init_cachep(void) if (!btrfs_free_space_cachep) goto fail; + btrfs_free_space_bitmap_cachep = kmem_cache_create("btrfs_free_space_bitmap", + PAGE_SIZE, PAGE_SIZE, + SLAB_RED_ZONE, NULL); + if (!btrfs_free_space_bitmap_cachep) + goto fail; + return 0; fail: btrfs_destroy_cachep(); diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c index f8a3c1b0a15a..001efc9ba1e7 100644 --- a/fs/btrfs/qgroup.c +++ b/fs/btrfs/qgroup.c @@ 
-3154,9 +3154,6 @@ static void btrfs_qgroup_rescan_worker(struct btrfs_work *work) btrfs_free_path(path); mutex_lock(&fs_info->qgroup_rescan_lock); - if (!btrfs_fs_closing(fs_info)) - fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN; - if (err > 0 && fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT) { fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT; @@ -3172,16 +3169,30 @@ static void btrfs_qgroup_rescan_worker(struct btrfs_work *work) trans = btrfs_start_transaction(fs_info->quota_root, 1); if (IS_ERR(trans)) { err = PTR_ERR(trans); + trans = NULL; btrfs_err(fs_info, "fail to start transaction for status update: %d", err); - goto done; } - ret = update_qgroup_status_item(trans); - if (ret < 0) { - err = ret; - btrfs_err(fs_info, "fail to update qgroup status: %d", err); + + mutex_lock(&fs_info->qgroup_rescan_lock); + if (!btrfs_fs_closing(fs_info)) + fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN; + if (trans) { + ret = update_qgroup_status_item(trans); + if (ret < 0) { + err = ret; + btrfs_err(fs_info, "fail to update qgroup status: %d", + err); + } } + fs_info->qgroup_rescan_running = false; + complete_all(&fs_info->qgroup_rescan_completion); + mutex_unlock(&fs_info->qgroup_rescan_lock); + + if (!trans) + return; + btrfs_end_transaction(trans); if (btrfs_fs_closing(fs_info)) { @@ -3192,12 +3203,6 @@ static void btrfs_qgroup_rescan_worker(struct btrfs_work *work) } else { btrfs_err(fs_info, "qgroup scan failed with %d", err); } - -done: - mutex_lock(&fs_info->qgroup_rescan_lock); - fs_info->qgroup_rescan_running = false; - mutex_unlock(&fs_info->qgroup_rescan_lock); - complete_all(&fs_info->qgroup_rescan_completion); } /* @@ -3425,6 +3430,9 @@ int btrfs_qgroup_reserve_data(struct inode *inode, while ((unode = ulist_next(&reserved->range_changed, &uiter))) clear_extent_bit(&BTRFS_I(inode)->io_tree, unode->val, unode->aux, EXTENT_QGROUP_RESERVED, 0, 0, NULL); + /* Also free data bytes of already reserved one */ + btrfs_qgroup_free_refroot(root->fs_info, root->root_key.objectid, + orig_reserved, BTRFS_QGROUP_RSV_DATA); extent_changeset_release(reserved); return ret; } @@ -3469,7 +3477,7 @@ static int qgroup_free_reserved_data(struct inode *inode, * EXTENT_QGROUP_RESERVED, we won't double free. * So not need to rush. */ - ret = clear_record_extent_bits(&BTRFS_I(inode)->io_failure_tree, + ret = clear_record_extent_bits(&BTRFS_I(inode)->io_tree, free_start, free_start + free_len - 1, EXTENT_QGROUP_RESERVED, &changeset); if (ret < 0) diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c index ccd5706199d7..9634cae1e1b1 100644 --- a/fs/btrfs/tree-checker.c +++ b/fs/btrfs/tree-checker.c @@ -821,6 +821,95 @@ static int check_inode_item(struct extent_buffer *leaf, return 0; } +static int check_root_item(struct extent_buffer *leaf, struct btrfs_key *key, + int slot) +{ + struct btrfs_fs_info *fs_info = leaf->fs_info; + struct btrfs_root_item ri; + const u64 valid_root_flags = BTRFS_ROOT_SUBVOL_RDONLY | + BTRFS_ROOT_SUBVOL_DEAD; + + /* No such tree id */ + if (key->objectid == 0) { + generic_err(leaf, slot, "invalid root id 0"); + return -EUCLEAN; + } + + /* + * Some older kernel may create ROOT_ITEM with non-zero offset, so here + * we only check offset for reloc tree whose key->offset must be a + * valid tree. 
+ */ + if (key->objectid == BTRFS_TREE_RELOC_OBJECTID && key->offset == 0) { + generic_err(leaf, slot, "invalid root id 0 for reloc tree"); + return -EUCLEAN; + } + + if (btrfs_item_size_nr(leaf, slot) != sizeof(ri)) { + generic_err(leaf, slot, + "invalid root item size, have %u expect %zu", + btrfs_item_size_nr(leaf, slot), sizeof(ri)); + return -EUCLEAN; + } + + read_extent_buffer(leaf, &ri, btrfs_item_ptr_offset(leaf, slot), + sizeof(ri)); + + /* Generation related */ + if (btrfs_root_generation(&ri) > + btrfs_super_generation(fs_info->super_copy) + 1) { + generic_err(leaf, slot, + "invalid root generation, have %llu expect (0, %llu]", + btrfs_root_generation(&ri), + btrfs_super_generation(fs_info->super_copy) + 1); + return -EUCLEAN; + } + if (btrfs_root_generation_v2(&ri) > + btrfs_super_generation(fs_info->super_copy) + 1) { + generic_err(leaf, slot, + "invalid root v2 generation, have %llu expect (0, %llu]", + btrfs_root_generation_v2(&ri), + btrfs_super_generation(fs_info->super_copy) + 1); + return -EUCLEAN; + } + if (btrfs_root_last_snapshot(&ri) > + btrfs_super_generation(fs_info->super_copy) + 1) { + generic_err(leaf, slot, + "invalid root last_snapshot, have %llu expect (0, %llu]", + btrfs_root_last_snapshot(&ri), + btrfs_super_generation(fs_info->super_copy) + 1); + return -EUCLEAN; + } + + /* Alignment and level check */ + if (!IS_ALIGNED(btrfs_root_bytenr(&ri), fs_info->sectorsize)) { + generic_err(leaf, slot, + "invalid root bytenr, have %llu expect to be aligned to %u", + btrfs_root_bytenr(&ri), fs_info->sectorsize); + return -EUCLEAN; + } + if (btrfs_root_level(&ri) >= BTRFS_MAX_LEVEL) { + generic_err(leaf, slot, + "invalid root level, have %u expect [0, %u]", + btrfs_root_level(&ri), BTRFS_MAX_LEVEL - 1); + return -EUCLEAN; + } + if (ri.drop_level >= BTRFS_MAX_LEVEL) { + generic_err(leaf, slot, + "invalid root level, have %u expect [0, %u]", + ri.drop_level, BTRFS_MAX_LEVEL - 1); + return -EUCLEAN; + } + + /* Flags check */ + if (btrfs_root_flags(&ri) & ~valid_root_flags) { + generic_err(leaf, slot, + "invalid root flags, have 0x%llx expect mask 0x%llx", + btrfs_root_flags(&ri), valid_root_flags); + return -EUCLEAN; + } + return 0; +} + /* * Common point to switch the item-specific validation. */ @@ -856,6 +945,9 @@ static int check_leaf_item(struct extent_buffer *leaf, case BTRFS_INODE_ITEM_KEY: ret = check_inode_item(leaf, key, slot); break; + case BTRFS_ROOT_ITEM_KEY: + ret = check_root_item(leaf, key, slot); + break; } return ret; } @@ -899,6 +991,12 @@ static int check_leaf(struct extent_buffer *leaf, bool check_item_data) owner); return -EUCLEAN; } + /* Unknown tree */ + if (owner == 0) { + generic_err(leaf, 0, + "invalid owner, root 0 is not defined"); + return -EUCLEAN; + } return 0; } diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index a447d3ec48d5..e821a0e97cd8 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -4072,7 +4072,13 @@ int btrfs_balance(struct btrfs_fs_info *fs_info, } num_devices = btrfs_num_devices(fs_info); - allowed = 0; + + /* + * SINGLE profile on-disk has no profile bit, but in-memory we have a + * special bit for it, to make it easier to distinguish. Thus we need + * to set it manually, or balance would refuse the profile.
+ */ + allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE; for (i = 0; i < ARRAY_SIZE(btrfs_raid_array); i++) if (num_devices >= btrfs_raid_array[i].devs_min) allowed |= btrfs_raid_array[i].bg_flag; diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index 3289b566463f..64e33e7bff1e 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -433,6 +433,8 @@ cifs_show_options(struct seq_file *s, struct dentry *root) cifs_show_security(s, tcon->ses); cifs_show_cache_flavor(s, cifs_sb); + if (tcon->no_lease) + seq_puts(s, ",nolease"); if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER) seq_puts(s, ",multiuser"); else if (tcon->ses->user_name) diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index fe610e7e3670..5ef5a16c01d2 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h @@ -576,6 +576,7 @@ struct smb_vol { bool noblocksnd:1; bool noautotune:1; bool nostrictsync:1; /* do not force expensive SMBflush on every sync */ + bool no_lease:1; /* disable requesting leases */ bool fsc:1; /* enable fscache */ bool mfsymlinks:1; /* use Minshall+French Symlinks */ bool multiuser:1; @@ -1082,6 +1083,7 @@ struct cifs_tcon { bool need_reopen_files:1; /* need to reopen tcon file handles */ bool use_resilient:1; /* use resilient instead of durable handles */ bool use_persistent:1; /* use persistent instead of durable handles */ + bool no_lease:1; /* Do not request leases on files or directories */ __le32 capabilities; __u32 share_flags; __u32 maximal_access; diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 5299effa6f7d..8ee57d1f507f 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -74,7 +74,7 @@ enum { Opt_user_xattr, Opt_nouser_xattr, Opt_forceuid, Opt_noforceuid, Opt_forcegid, Opt_noforcegid, - Opt_noblocksend, Opt_noautotune, + Opt_noblocksend, Opt_noautotune, Opt_nolease, Opt_hard, Opt_soft, Opt_perm, Opt_noperm, Opt_mapposix, Opt_nomapposix, Opt_mapchars, Opt_nomapchars, Opt_sfu, @@ -134,6 +134,7 @@ static const match_table_t cifs_mount_option_tokens = { { Opt_noforcegid, "noforcegid" }, { Opt_noblocksend, "noblocksend" }, { Opt_noautotune, "noautotune" }, + { Opt_nolease, "nolease" }, { Opt_hard, "hard" }, { Opt_soft, "soft" }, { Opt_perm, "perm" }, @@ -1713,6 +1714,9 @@ cifs_parse_mount_options(const char *mountdata, const char *devname, case Opt_noautotune: vol->noautotune = 1; break; + case Opt_nolease: + vol->no_lease = 1; + break; case Opt_hard: vol->retry = 1; break; @@ -3250,6 +3254,8 @@ static int match_tcon(struct cifs_tcon *tcon, struct smb_vol *volume_info) return 0; if (tcon->handle_timeout != volume_info->handle_timeout) return 0; + if (tcon->no_lease != volume_info->no_lease) + return 0; return 1; } @@ -3464,6 +3470,7 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb_vol *volume_info) tcon->nocase = volume_info->nocase; tcon->nohandlecache = volume_info->nohandlecache; tcon->local_lease = volume_info->local_lease; + tcon->no_lease = volume_info->no_lease; INIT_LIST_HEAD(&tcon->pending_opens); spin_lock(&cifs_tcp_ses_lock); diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c index 64a5864127be..7e8e8826c26f 100644 --- a/fs/cifs/smb2ops.c +++ b/fs/cifs/smb2ops.c @@ -656,6 +656,15 @@ int open_shroot(unsigned int xid, struct cifs_tcon *tcon, struct cifs_fid *pfid) return 0; } + /* + * We do not hold the lock for the open because in case + * SMB2_open needs to reconnect, it will end up calling + * cifs_mark_open_files_invalid() which takes the lock again + * thus causing a deadlock + */ + + mutex_unlock(&tcon->crfid.fid_mutex); + if (smb3_encryption_required(tcon)) flags |= 
CIFS_TRANSFORM_REQ; @@ -677,7 +686,7 @@ int open_shroot(unsigned int xid, struct cifs_tcon *tcon, struct cifs_fid *pfid) rc = SMB2_open_init(tcon, &rqst[0], &oplock, &oparms, &utf16_path); if (rc) - goto oshr_exit; + goto oshr_free; smb2_set_next_command(tcon, &rqst[0]); memset(&qi_iov, 0, sizeof(qi_iov)); @@ -690,18 +699,10 @@ int open_shroot(unsigned int xid, struct cifs_tcon *tcon, struct cifs_fid *pfid) sizeof(struct smb2_file_all_info) + PATH_MAX * 2, 0, NULL); if (rc) - goto oshr_exit; + goto oshr_free; smb2_set_related(&rqst[1]); - /* - * We do not hold the lock for the open because in case - * SMB2_open needs to reconnect, it will end up calling - * cifs_mark_open_files_invalid() which takes the lock again - * thus causing a deadlock - */ - - mutex_unlock(&tcon->crfid.fid_mutex); rc = compound_send_recv(xid, ses, flags, 2, rqst, resp_buftype, rsp_iov); mutex_lock(&tcon->crfid.fid_mutex); @@ -742,6 +743,8 @@ int open_shroot(unsigned int xid, struct cifs_tcon *tcon, struct cifs_fid *pfid) if (rc) goto oshr_exit; + atomic_inc(&tcon->num_remote_opens); + o_rsp = (struct smb2_create_rsp *)rsp_iov[0].iov_base; oparms.fid->persistent_fid = o_rsp->PersistentFileId; oparms.fid->volatile_fid = o_rsp->VolatileFileId; @@ -1167,6 +1170,7 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon, rc = compound_send_recv(xid, ses, flags, 3, rqst, resp_buftype, rsp_iov); + /* no need to bump num_remote_opens because handle immediately closed */ sea_exit: kfree(ea); @@ -1488,6 +1492,8 @@ smb2_ioctl_query_info(const unsigned int xid, resp_buftype, rsp_iov); if (rc) goto iqinf_exit; + + /* No need to bump num_remote_opens since handle immediately closed */ if (qi.flags & PASSTHRU_FSCTL) { pqi = (struct smb_query_info __user *)arg; io_rsp = (struct smb2_ioctl_rsp *)rsp_iov[1].iov_base; @@ -3295,6 +3301,11 @@ smb21_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock, if (oplock == SMB2_OPLOCK_LEVEL_NOCHANGE) return; + /* Check if the server granted an oplock rather than a lease */ + if (oplock & SMB2_OPLOCK_LEVEL_EXCLUSIVE) + return smb2_set_oplock_level(cinode, oplock, epoch, + purge_cache); + if (oplock & SMB2_LEASE_READ_CACHING_HE) { new_oplock |= CIFS_CACHE_READ_FLG; strcat(message, "R"); diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c index 31e4a1b0b170..0aa40129dfb5 100644 --- a/fs/cifs/smb2pdu.c +++ b/fs/cifs/smb2pdu.c @@ -2351,6 +2351,7 @@ int smb311_posix_mkdir(const unsigned int xid, struct inode *inode, rqst.rq_iov = iov; rqst.rq_nvec = n_iov; + /* no need to inc num_remote_opens because we close it just below */ trace_smb3_posix_mkdir_enter(xid, tcon->tid, ses->Suid, CREATE_NOT_FILE, FILE_WRITE_ATTRIBUTES); /* resource #4: response buffer */ @@ -2458,7 +2459,7 @@ SMB2_open_init(struct cifs_tcon *tcon, struct smb_rqst *rqst, __u8 *oplock, iov[1].iov_len = uni_path_len; iov[1].iov_base = path; - if (!server->oplocks) + if ((!server->oplocks) || (tcon->no_lease)) *oplock = SMB2_OPLOCK_LEVEL_NONE; if (!(server->capabilities & SMB2_GLOBAL_CAP_LEASING) || diff --git a/fs/cifs/xattr.c b/fs/cifs/xattr.c index 9076150758d8..db4ba8f6077e 100644 --- a/fs/cifs/xattr.c +++ b/fs/cifs/xattr.c @@ -31,7 +31,7 @@ #include "cifs_fs_sb.h" #include "cifs_unicode.h" -#define MAX_EA_VALUE_SIZE 65535 +#define MAX_EA_VALUE_SIZE CIFSMaxBufSize #define CIFS_XATTR_CIFS_ACL "system.cifs_acl" #define CIFS_XATTR_ATTRIB "cifs.dosattrib" /* full name: user.cifs.dosattrib */ #define CIFS_XATTR_CREATETIME "cifs.creationtime" /* user.cifs.creationtime */ diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index 
92266a2da7d6..f203bf989a4c 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c @@ -3813,8 +3813,8 @@ static int ext4_convert_unwritten_extents_endio(handle_t *handle, * illegal. */ if (ee_block != map->m_lblk || ee_len > map->m_len) { -#ifdef EXT4_DEBUG - ext4_warning("Inode (%ld) finished: extent logical block %llu," +#ifdef CONFIG_EXT4_DEBUG + ext4_warning(inode->i_sb, "Inode (%ld) finished: extent logical block %llu," " len %u; IO logical block %llu, len %u", inode->i_ino, (unsigned long long)ee_block, ee_len, (unsigned long long)map->m_lblk, map->m_len); diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 006b7a2070bf..723b0d1a3881 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -4297,6 +4297,15 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length) trace_ext4_punch_hole(inode, offset, length, 0); + ext4_clear_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA); + if (ext4_has_inline_data(inode)) { + down_write(&EXT4_I(inode)->i_mmap_sem); + ret = ext4_convert_inline_data(inode); + up_write(&EXT4_I(inode)->i_mmap_sem); + if (ret) + return ret; + } + /* * Write out all dirty pages to avoid race conditions * Then release them. diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index ea8237513dfa..186468fba82e 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c @@ -377,7 +377,7 @@ static void queue_request(struct fuse_iqueue *fiq, struct fuse_req *req) req->in.h.len = sizeof(struct fuse_in_header) + len_args(req->in.numargs, (struct fuse_arg *) req->in.args); list_add_tail(&req->list, &fiq->pending); - wake_up_locked(&fiq->waitq); + wake_up(&fiq->waitq); kill_fasync(&fiq->fasync, SIGIO, POLL_IN); } @@ -389,16 +389,16 @@ void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget, forget->forget_one.nodeid = nodeid; forget->forget_one.nlookup = nlookup; - spin_lock(&fiq->waitq.lock); + spin_lock(&fiq->lock); if (fiq->connected) { fiq->forget_list_tail->next = forget; fiq->forget_list_tail = forget; - wake_up_locked(&fiq->waitq); + wake_up(&fiq->waitq); kill_fasync(&fiq->fasync, SIGIO, POLL_IN); } else { kfree(forget); } - spin_unlock(&fiq->waitq.lock); + spin_unlock(&fiq->lock); } static void flush_bg_queue(struct fuse_conn *fc) @@ -412,10 +412,10 @@ static void flush_bg_queue(struct fuse_conn *fc) req = list_first_entry(&fc->bg_queue, struct fuse_req, list); list_del(&req->list); fc->active_background++; - spin_lock(&fiq->waitq.lock); + spin_lock(&fiq->lock); req->in.h.unique = fuse_get_unique(fiq); queue_request(fiq, req); - spin_unlock(&fiq->waitq.lock); + spin_unlock(&fiq->lock); } } @@ -439,9 +439,9 @@ static void request_end(struct fuse_conn *fc, struct fuse_req *req) * smp_mb() from queue_interrupt(). 
*/ if (!list_empty(&req->intr_entry)) { - spin_lock(&fiq->waitq.lock); + spin_lock(&fiq->lock); list_del_init(&req->intr_entry); - spin_unlock(&fiq->waitq.lock); + spin_unlock(&fiq->lock); } WARN_ON(test_bit(FR_PENDING, &req->flags)); WARN_ON(test_bit(FR_SENT, &req->flags)); @@ -483,10 +483,10 @@ static void request_end(struct fuse_conn *fc, struct fuse_req *req) static int queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req) { - spin_lock(&fiq->waitq.lock); + spin_lock(&fiq->lock); /* Check for we've sent request to interrupt this req */ if (unlikely(!test_bit(FR_INTERRUPTED, &req->flags))) { - spin_unlock(&fiq->waitq.lock); + spin_unlock(&fiq->lock); return -EINVAL; } @@ -499,13 +499,13 @@ static int queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req) smp_mb(); if (test_bit(FR_FINISHED, &req->flags)) { list_del_init(&req->intr_entry); - spin_unlock(&fiq->waitq.lock); + spin_unlock(&fiq->lock); return 0; } - wake_up_locked(&fiq->waitq); + wake_up(&fiq->waitq); kill_fasync(&fiq->fasync, SIGIO, POLL_IN); } - spin_unlock(&fiq->waitq.lock); + spin_unlock(&fiq->lock); return 0; } @@ -535,16 +535,16 @@ static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req) if (!err) return; - spin_lock(&fiq->waitq.lock); + spin_lock(&fiq->lock); /* Request is not yet in userspace, bail out */ if (test_bit(FR_PENDING, &req->flags)) { list_del(&req->list); - spin_unlock(&fiq->waitq.lock); + spin_unlock(&fiq->lock); __fuse_put_request(req); req->out.h.error = -EINTR; return; } - spin_unlock(&fiq->waitq.lock); + spin_unlock(&fiq->lock); } /* @@ -559,9 +559,9 @@ static void __fuse_request_send(struct fuse_conn *fc, struct fuse_req *req) struct fuse_iqueue *fiq = &fc->iq; BUG_ON(test_bit(FR_BACKGROUND, &req->flags)); - spin_lock(&fiq->waitq.lock); + spin_lock(&fiq->lock); if (!fiq->connected) { - spin_unlock(&fiq->waitq.lock); + spin_unlock(&fiq->lock); req->out.h.error = -ENOTCONN; } else { req->in.h.unique = fuse_get_unique(fiq); @@ -569,7 +569,7 @@ static void __fuse_request_send(struct fuse_conn *fc, struct fuse_req *req) /* acquire extra reference, since request is still needed after request_end() */ __fuse_get_request(req); - spin_unlock(&fiq->waitq.lock); + spin_unlock(&fiq->lock); request_wait_answer(fc, req); /* Pairs with smp_wmb() in request_end() */ @@ -700,12 +700,12 @@ static int fuse_request_send_notify_reply(struct fuse_conn *fc, __clear_bit(FR_ISREPLY, &req->flags); req->in.h.unique = unique; - spin_lock(&fiq->waitq.lock); + spin_lock(&fiq->lock); if (fiq->connected) { queue_request(fiq, req); err = 0; } - spin_unlock(&fiq->waitq.lock); + spin_unlock(&fiq->lock); return err; } @@ -1149,12 +1149,12 @@ static int request_pending(struct fuse_iqueue *fiq) * Unlike other requests this is assembled on demand, without a need * to allocate a separate fuse_req structure. 
* - * Called with fiq->waitq.lock held, releases it + * Called with fiq->lock held, releases it */ static int fuse_read_interrupt(struct fuse_iqueue *fiq, struct fuse_copy_state *cs, size_t nbytes, struct fuse_req *req) -__releases(fiq->waitq.lock) +__releases(fiq->lock) { struct fuse_in_header ih; struct fuse_interrupt_in arg; @@ -1169,7 +1169,7 @@ __releases(fiq->waitq.lock) ih.unique = (req->in.h.unique | FUSE_INT_REQ_BIT); arg.unique = req->in.h.unique; - spin_unlock(&fiq->waitq.lock); + spin_unlock(&fiq->lock); if (nbytes < reqsize) return -EINVAL; @@ -1206,7 +1206,7 @@ static struct fuse_forget_link *dequeue_forget(struct fuse_iqueue *fiq, static int fuse_read_single_forget(struct fuse_iqueue *fiq, struct fuse_copy_state *cs, size_t nbytes) -__releases(fiq->waitq.lock) +__releases(fiq->lock) { int err; struct fuse_forget_link *forget = dequeue_forget(fiq, 1, NULL); @@ -1220,7 +1220,7 @@ __releases(fiq->waitq.lock) .len = sizeof(ih) + sizeof(arg), }; - spin_unlock(&fiq->waitq.lock); + spin_unlock(&fiq->lock); kfree(forget); if (nbytes < ih.len) return -EINVAL; @@ -1238,7 +1238,7 @@ __releases(fiq->waitq.lock) static int fuse_read_batch_forget(struct fuse_iqueue *fiq, struct fuse_copy_state *cs, size_t nbytes) -__releases(fiq->waitq.lock) +__releases(fiq->lock) { int err; unsigned max_forgets; @@ -1252,13 +1252,13 @@ __releases(fiq->waitq.lock) }; if (nbytes < ih.len) { - spin_unlock(&fiq->waitq.lock); + spin_unlock(&fiq->lock); return -EINVAL; } max_forgets = (nbytes - ih.len) / sizeof(struct fuse_forget_one); head = dequeue_forget(fiq, max_forgets, &count); - spin_unlock(&fiq->waitq.lock); + spin_unlock(&fiq->lock); arg.count = count; ih.len += count * sizeof(struct fuse_forget_one); @@ -1288,7 +1288,7 @@ __releases(fiq->waitq.lock) static int fuse_read_forget(struct fuse_conn *fc, struct fuse_iqueue *fiq, struct fuse_copy_state *cs, size_t nbytes) -__releases(fiq->waitq.lock) +__releases(fiq->lock) { if (fc->minor < 16 || fiq->forget_list_head.next->next == NULL) return fuse_read_single_forget(fiq, cs, nbytes); @@ -1318,16 +1318,19 @@ static ssize_t fuse_dev_do_read(struct fuse_dev *fud, struct file *file, unsigned int hash; restart: - spin_lock(&fiq->waitq.lock); - err = -EAGAIN; - if ((file->f_flags & O_NONBLOCK) && fiq->connected && - !request_pending(fiq)) - goto err_unlock; + for (;;) { + spin_lock(&fiq->lock); + if (!fiq->connected || request_pending(fiq)) + break; + spin_unlock(&fiq->lock); - err = wait_event_interruptible_exclusive_locked(fiq->waitq, + if (file->f_flags & O_NONBLOCK) + return -EAGAIN; + err = wait_event_interruptible_exclusive(fiq->waitq, !fiq->connected || request_pending(fiq)); - if (err) - goto err_unlock; + if (err) + return err; + } if (!fiq->connected) { err = fc->aborted ? 
-ECONNABORTED : -ENODEV; @@ -1351,7 +1354,7 @@ static ssize_t fuse_dev_do_read(struct fuse_dev *fud, struct file *file, req = list_entry(fiq->pending.next, struct fuse_req, list); clear_bit(FR_PENDING, &req->flags); list_del_init(&req->list); - spin_unlock(&fiq->waitq.lock); + spin_unlock(&fiq->lock); in = &req->in; reqsize = in->h.len; @@ -1409,7 +1412,7 @@ static ssize_t fuse_dev_do_read(struct fuse_dev *fud, struct file *file, return err; err_unlock: - spin_unlock(&fiq->waitq.lock); + spin_unlock(&fiq->lock); return err; } @@ -2121,12 +2124,12 @@ static __poll_t fuse_dev_poll(struct file *file, poll_table *wait) fiq = &fud->fc->iq; poll_wait(file, &fiq->waitq, wait); - spin_lock(&fiq->waitq.lock); + spin_lock(&fiq->lock); if (!fiq->connected) mask = EPOLLERR; else if (request_pending(fiq)) mask |= EPOLLIN | EPOLLRDNORM; - spin_unlock(&fiq->waitq.lock); + spin_unlock(&fiq->lock); return mask; } @@ -2221,15 +2224,15 @@ void fuse_abort_conn(struct fuse_conn *fc) flush_bg_queue(fc); spin_unlock(&fc->bg_lock); - spin_lock(&fiq->waitq.lock); + spin_lock(&fiq->lock); fiq->connected = 0; list_for_each_entry(req, &fiq->pending, list) clear_bit(FR_PENDING, &req->flags); list_splice_tail_init(&fiq->pending, &to_end); while (forget_pending(fiq)) kfree(dequeue_forget(fiq, 1, NULL)); - wake_up_all_locked(&fiq->waitq); - spin_unlock(&fiq->waitq.lock); + wake_up_all(&fiq->waitq); + spin_unlock(&fiq->lock); kill_fasync(&fiq->fasync, SIGIO, POLL_IN); end_polls(fc); wake_up_all(&fc->blocked_waitq); diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 5ae2828beb00..91c99724dee0 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -1767,6 +1767,7 @@ static int fuse_writepage(struct page *page, struct writeback_control *wbc) WARN_ON(wbc->sync_mode == WB_SYNC_ALL); redirty_page_for_writepage(wbc, page); + unlock_page(page); return 0; } diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h index 24dbca777775..89bdc41e0d86 100644 --- a/fs/fuse/fuse_i.h +++ b/fs/fuse/fuse_i.h @@ -450,6 +450,9 @@ struct fuse_iqueue { /** Connection established */ unsigned connected; + /** Lock protecting accesses to members of this structure */ + spinlock_t lock; + /** Readers of the connection are waiting on this */ wait_queue_head_t waitq; diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c index 4bb885b0f032..987877860c01 100644 --- a/fs/fuse/inode.c +++ b/fs/fuse/inode.c @@ -582,6 +582,7 @@ static int fuse_show_options(struct seq_file *m, struct dentry *root) static void fuse_iqueue_init(struct fuse_iqueue *fiq) { memset(fiq, 0, sizeof(struct fuse_iqueue)); + spin_lock_init(&fiq->lock); init_waitqueue_head(&fiq->waitq); INIT_LIST_HEAD(&fiq->pending); INIT_LIST_HEAD(&fiq->interrupts); diff --git a/fs/fuse/readdir.c b/fs/fuse/readdir.c index 574d03f8a573..b2da3de6a78e 100644 --- a/fs/fuse/readdir.c +++ b/fs/fuse/readdir.c @@ -372,11 +372,13 @@ static enum fuse_parse_result fuse_parse_cache(struct fuse_file *ff, for (;;) { struct fuse_dirent *dirent = addr + offset; unsigned int nbytes = size - offset; - size_t reclen = FUSE_DIRENT_SIZE(dirent); + size_t reclen; if (nbytes < FUSE_NAME_OFFSET || !dirent->namelen) break; + reclen = FUSE_DIRENT_SIZE(dirent); /* derefs ->namelen */ + if (WARN_ON(dirent->namelen > FUSE_NAME_MAX)) return FOUND_ERR; if (WARN_ON(reclen > nbytes)) diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c index 4f8b5fd6c81f..b7ba5e194965 100644 --- a/fs/gfs2/bmap.c +++ b/fs/gfs2/bmap.c @@ -1680,6 +1680,7 @@ static int sweep_bh_for_rgrps(struct gfs2_inode *ip, struct gfs2_holder *rd_gh, brelse(dibh); up_write(&ip->i_rw_mutex); 
gfs2_trans_end(sdp); + buf_in_tr = false; } gfs2_glock_dq_uninit(rd_gh); cond_resched(); diff --git a/fs/io_uring.c b/fs/io_uring.c index cfb48bd088e1..06d048341fa4 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -288,6 +288,7 @@ struct io_ring_ctx { struct sqe_submit { const struct io_uring_sqe *sqe; unsigned short index; + u32 sequence; bool has_user; bool needs_lock; bool needs_fixed_file; @@ -2040,7 +2041,7 @@ static int io_req_set_file(struct io_ring_ctx *ctx, const struct sqe_submit *s, if (flags & IOSQE_IO_DRAIN) { req->flags |= REQ_F_IO_DRAIN; - req->sequence = ctx->cached_sq_head - 1; + req->sequence = s->sequence; } if (!io_op_needs_file(s->sqe)) @@ -2247,6 +2248,7 @@ static bool io_get_sqring(struct io_ring_ctx *ctx, struct sqe_submit *s) if (head < ctx->sq_entries) { s->index = head; s->sqe = &ctx->sq_sqes[head]; + s->sequence = ctx->cached_sq_head; ctx->cached_sq_head++; return true; } diff --git a/fs/overlayfs/export.c b/fs/overlayfs/export.c index cb8ec1f65c03..73c9775215b3 100644 --- a/fs/overlayfs/export.c +++ b/fs/overlayfs/export.c @@ -227,9 +227,8 @@ static int ovl_d_to_fh(struct dentry *dentry, char *buf, int buflen) /* Encode an upper or lower file handle */ fh = ovl_encode_real_fh(enc_lower ? ovl_dentry_lower(dentry) : ovl_dentry_upper(dentry), !enc_lower); - err = PTR_ERR(fh); if (IS_ERR(fh)) - goto fail; + return PTR_ERR(fh); err = -EOVERFLOW; if (fh->len > buflen) diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c index 7663aeb85fa3..bc14781886bf 100644 --- a/fs/overlayfs/inode.c +++ b/fs/overlayfs/inode.c @@ -383,7 +383,8 @@ static bool ovl_can_list(const char *s) return true; /* Never list trusted.overlay, list other trusted for superuser only */ - return !ovl_is_private_xattr(s) && capable(CAP_SYS_ADMIN); + return !ovl_is_private_xattr(s) && + ns_capable_noaudit(&init_user_ns, CAP_SYS_ADMIN); } ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size) diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c index 28101bbc0b78..d952d5962e93 100644 --- a/fs/xfs/xfs_file.c +++ b/fs/xfs/xfs_file.c @@ -28,6 +28,7 @@ #include <linux/falloc.h> #include <linux/backing-dev.h> #include <linux/mman.h> +#include <linux/fadvise.h> static const struct vm_operations_struct xfs_file_vm_ops; @@ -933,6 +934,30 @@ xfs_file_fallocate( return error; } +STATIC int +xfs_file_fadvise( + struct file *file, + loff_t start, + loff_t end, + int advice) +{ + struct xfs_inode *ip = XFS_I(file_inode(file)); + int ret; + int lockflags = 0; + + /* + * Operations creating pages in page cache need protection from hole + * punching and similar ops + */ + if (advice == POSIX_FADV_WILLNEED) { + lockflags = XFS_IOLOCK_SHARED; + xfs_ilock(ip, lockflags); + } + ret = generic_fadvise(file, start, end, advice); + if (lockflags) + xfs_iunlock(ip, lockflags); + return ret; +} STATIC loff_t xfs_file_remap_range( @@ -1232,6 +1257,7 @@ const struct file_operations xfs_file_operations = { .fsync = xfs_file_fsync, .get_unmapped_area = thp_get_unmapped_area, .fallocate = xfs_file_fallocate, + .fadvise = xfs_file_fadvise, .remap_file_range = xfs_file_remap_range, }; diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 3fa1fa59f9b2..ab25e69a15d1 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -140,6 +140,7 @@ typedef int (poll_fn)(struct blk_mq_hw_ctx *); typedef int (map_queues_fn)(struct blk_mq_tag_set *set); typedef bool (busy_fn)(struct request_queue *); typedef void (complete_fn)(struct request *); +typedef void (cleanup_rq_fn)(struct request *); struct 
blk_mq_ops { @@ -200,6 +201,12 @@ struct blk_mq_ops { /* Called from inside blk_get_request() */ void (*initialize_rq_fn)(struct request *rq); + /* + * Called before freeing one request which isn't completed yet, + * and usually for freeing the driver private data + */ + cleanup_rq_fn *cleanup_rq; + /* * If set, returns whether or not this queue currently is busy */ @@ -366,4 +373,10 @@ static inline blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx, BLK_QC_T_INTERNAL; } +static inline void blk_mq_cleanup_rq(struct request *rq) +{ + if (rq->q->mq_ops->cleanup_rq) + rq->q->mq_ops->cleanup_rq(rq); +} + #endif diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 1ef375dafb1c..ae51050c5094 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -202,9 +202,12 @@ struct request { #ifdef CONFIG_BLK_WBT unsigned short wbt_flags; #endif -#ifdef CONFIG_BLK_DEV_THROTTLING_LOW - unsigned short throtl_size; -#endif + /* + * rq sectors used for blk stats. It has the same value + * with blk_rq_sectors(rq), except that it never be zeroed + * by completion. + */ + unsigned short stats_sectors; /* * Number of scatter-gather DMA addr+len pairs after @@ -903,6 +906,7 @@ static inline struct request_queue *bdev_get_queue(struct block_device *bdev) * blk_rq_err_bytes() : bytes left till the next error boundary * blk_rq_sectors() : sectors left in the entire request * blk_rq_cur_sectors() : sectors left in the current segment + * blk_rq_stats_sectors() : sectors of the entire request used for stats */ static inline sector_t blk_rq_pos(const struct request *rq) { @@ -931,6 +935,11 @@ static inline unsigned int blk_rq_cur_sectors(const struct request *rq) return blk_rq_cur_bytes(rq) >> SECTOR_SHIFT; } +static inline unsigned int blk_rq_stats_sectors(const struct request *rq) +{ + return rq->stats_sectors; +} + #ifdef CONFIG_BLK_DEV_ZONED static inline unsigned int blk_rq_zone_no(struct request *rq) { diff --git a/include/linux/bug.h b/include/linux/bug.h index fe5916550da8..f639bd0122f3 100644 --- a/include/linux/bug.h +++ b/include/linux/bug.h @@ -47,6 +47,11 @@ void generic_bug_clear_once(void); #else /* !CONFIG_GENERIC_BUG */ +static inline void *find_bug(unsigned long bugaddr) +{ + return NULL; +} + static inline enum bug_trap_type report_bug(unsigned long bug_addr, struct pt_regs *regs) { diff --git a/include/linux/fs.h b/include/linux/fs.h index 997a530ff4e9..bc1b40fb0db7 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -3531,6 +3531,8 @@ extern void inode_nohighmem(struct inode *inode); /* mm/fadvise.c */ extern int vfs_fadvise(struct file *file, loff_t offset, loff_t len, int advice); +extern int generic_fadvise(struct file *file, loff_t offset, loff_t len, + int advice); #if defined(CONFIG_IO_URING) extern struct sock *io_uring_get_socket(struct file *file); diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index 4a351cb7f20f..cf87c673cbb8 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h @@ -493,6 +493,15 @@ void mmc_command_done(struct mmc_host *host, struct mmc_request *mrq); void mmc_cqe_request_done(struct mmc_host *host, struct mmc_request *mrq); +/* + * May be called from host driver's system/runtime suspend/resume callbacks, + * to know if SDIO IRQs has been claimed. 
+ */ +static inline bool sdio_irq_claimed(struct mmc_host *host) +{ + return host->sdio_irqs > 0; +} + static inline void mmc_signal_sdio_irq(struct mmc_host *host) { host->ops->enable_sdio_irq(host, 0); diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index c842735a4f45..4b97f427cc92 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -548,6 +548,7 @@ #define PCI_DEVICE_ID_AMD_17H_DF_F3 0x1463 #define PCI_DEVICE_ID_AMD_17H_M10H_DF_F3 0x15eb #define PCI_DEVICE_ID_AMD_17H_M30H_DF_F3 0x1493 +#define PCI_DEVICE_ID_AMD_17H_M70H_DF_F3 0x1443 #define PCI_DEVICE_ID_AMD_CNB17H_F3 0x1703 #define PCI_DEVICE_ID_AMD_LANCE 0x2000 #define PCI_DEVICE_ID_AMD_LANCE_HOME 0x2001 diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h index dc905a4ff8d7..185d94829701 100644 --- a/include/linux/quotaops.h +++ b/include/linux/quotaops.h @@ -22,7 +22,7 @@ static inline struct quota_info *sb_dqopt(struct super_block *sb) /* i_mutex must being held */ static inline bool is_quota_modification(struct inode *inode, struct iattr *ia) { - return (ia->ia_valid & ATTR_SIZE && ia->ia_size != inode->i_size) || + return (ia->ia_valid & ATTR_SIZE) || (ia->ia_valid & ATTR_UID && !uid_eq(ia->ia_uid, inode->i_uid)) || (ia->ia_valid & ATTR_GID && !gid_eq(ia->ia_gid, inode->i_gid)); } diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h index 13e108bcc9eb..d783e15ba898 100644 --- a/include/linux/sunrpc/xprt.h +++ b/include/linux/sunrpc/xprt.h @@ -352,6 +352,7 @@ bool xprt_prepare_transmit(struct rpc_task *task); void xprt_request_enqueue_transmit(struct rpc_task *task); void xprt_request_enqueue_receive(struct rpc_task *task); void xprt_request_wait_receive(struct rpc_task *task); +void xprt_request_dequeue_xprt(struct rpc_task *task); bool xprt_request_need_retransmit(struct rpc_task *task); void xprt_transmit(struct rpc_task *task); void xprt_end_transmit(struct rpc_task *task); diff --git a/include/net/route.h b/include/net/route.h index dfce19c9fa96..6c516840380d 100644 --- a/include/net/route.h +++ b/include/net/route.h @@ -53,10 +53,11 @@ struct rtable { unsigned int rt_flags; __u16 rt_type; __u8 rt_is_input; - u8 rt_gw_family; + __u8 rt_uses_gateway; int rt_iif; + u8 rt_gw_family; /* Info on neighbour */ union { __be32 rt_gw4; diff --git a/kernel/jump_label.c b/kernel/jump_label.c index df3008419a1d..cdb3ffab128b 100644 --- a/kernel/jump_label.c +++ b/kernel/jump_label.c @@ -407,7 +407,9 @@ static bool jump_label_can_update(struct jump_entry *entry, bool init) return false; if (!kernel_text_address(jump_entry_code(entry))) { - WARN_ONCE(1, "can't patch jump_label at %pS", (void *)jump_entry_code(entry)); + WARN_ONCE(!jump_entry_is_init(entry), + "can't patch jump_label at %pS", + (void *)jump_entry_code(entry)); return false; } diff --git a/kernel/kprobes.c b/kernel/kprobes.c index d9770a5393c8..ebe8315a756a 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -1514,7 +1514,8 @@ static int check_kprobe_address_safe(struct kprobe *p, /* Ensure it is not in reserved area nor out of text */ if (!kernel_text_address((unsigned long) p->addr) || within_kprobe_blacklist((unsigned long) p->addr) || - jump_label_text_reserved(p->addr, p->addr)) { + jump_label_text_reserved(p->addr, p->addr) || + find_bug((unsigned long)p->addr)) { ret = -EINVAL; goto out; } diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c index 1888f6a3b694..424abf802f02 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c @@ -3274,7 +3274,7 @@ bool kmsg_dump_get_buffer(struct 
kmsg_dumper *dumper, bool syslog, /* move first record forward until length fits into the buffer */ seq = dumper->cur_seq; idx = dumper->cur_idx; - while (l > size && seq < dumper->next_seq) { + while (l >= size && seq < dumper->next_seq) { struct printk_log *msg = log_from_idx(idx); l -= msg_print_text(msg, true, time, NULL, 0); diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index a14e5fbbea46..5efdce756fdf 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -3234,13 +3234,13 @@ static int __init rcu_spawn_gp_kthread(void) t = kthread_create(rcu_gp_kthread, NULL, "%s", rcu_state.name); if (WARN_ONCE(IS_ERR(t), "%s: Could not start grace-period kthread, OOM is now expected behavior\n", __func__)) return 0; - rnp = rcu_get_root(); - raw_spin_lock_irqsave_rcu_node(rnp, flags); - rcu_state.gp_kthread = t; if (kthread_prio) { sp.sched_priority = kthread_prio; sched_setscheduler_nocheck(t, SCHED_FIFO, &sp); } + rnp = rcu_get_root(); + raw_spin_lock_irqsave_rcu_node(rnp, flags); + rcu_state.gp_kthread = t; raw_spin_unlock_irqrestore_rcu_node(rnp, flags); wake_up_process(t); rcu_spawn_nocb_kthreads(); diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h index af7e7b9c86af..513b403b683b 100644 --- a/kernel/rcu/tree_exp.h +++ b/kernel/rcu/tree_exp.h @@ -792,6 +792,7 @@ static int rcu_print_task_exp_stall(struct rcu_node *rnp) */ void synchronize_rcu_expedited(void) { + bool boottime = (rcu_scheduler_active == RCU_SCHEDULER_INIT); struct rcu_exp_work rew; struct rcu_node *rnp; unsigned long s; @@ -817,7 +818,7 @@ void synchronize_rcu_expedited(void) return; /* Someone else did our work for us. */ /* Ensure that load happens before action based on it. */ - if (unlikely(rcu_scheduler_active == RCU_SCHEDULER_INIT)) { + if (unlikely(boottime)) { /* Direct call during scheduler init and early_initcalls(). */ rcu_exp_sel_wait_wake(s); } else { @@ -835,5 +836,8 @@ void synchronize_rcu_expedited(void) /* Let the next expedited grace period start. */ mutex_unlock(&rcu_state.exp_mutex); + + if (likely(!boottime)) + destroy_work_on_stack(&rew.rew_work); } EXPORT_SYMBOL_GPL(synchronize_rcu_expedited); diff --git a/kernel/sched/core.c b/kernel/sched/core.c index df9f1fe5689b..d38f007afea7 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -3486,8 +3486,36 @@ void scheduler_tick(void) struct tick_work { int cpu; + atomic_t state; struct delayed_work work; }; +/* Values for ->state, see diagram below. */ +#define TICK_SCHED_REMOTE_OFFLINE 0 +#define TICK_SCHED_REMOTE_OFFLINING 1 +#define TICK_SCHED_REMOTE_RUNNING 2 + +/* + * State diagram for ->state: + * + * + * TICK_SCHED_REMOTE_OFFLINE + * | ^ + * | | + * | | sched_tick_remote() + * | | + * | | + * +--TICK_SCHED_REMOTE_OFFLINING + * | ^ + * | | + * sched_tick_start() | | sched_tick_stop() + * | | + * V | + * TICK_SCHED_REMOTE_RUNNING + * + * + * Other transitions get WARN_ON_ONCE(), except that sched_tick_remote() + * and sched_tick_start() are happy to leave the state in RUNNING. 
+ */ static struct tick_work __percpu *tick_work_cpu; @@ -3500,6 +3528,7 @@ static void sched_tick_remote(struct work_struct *work) struct task_struct *curr; struct rq_flags rf; u64 delta; + int os; /* * Handle the tick only if it appears the remote CPU is running in full @@ -3513,7 +3542,7 @@ static void sched_tick_remote(struct work_struct *work) rq_lock_irq(rq, &rf); curr = rq->curr; - if (is_idle_task(curr)) + if (is_idle_task(curr) || cpu_is_offline(cpu)) goto out_unlock; update_rq_clock(rq); @@ -3533,13 +3562,18 @@ static void sched_tick_remote(struct work_struct *work) /* * Run the remote tick once per second (1Hz). This arbitrary * frequency is large enough to avoid overload but short enough - * to keep scheduler internal stats reasonably up to date. + * to keep scheduler internal stats reasonably up to date. But + * first update state to reflect hotplug activity if required. */ - queue_delayed_work(system_unbound_wq, dwork, HZ); + os = atomic_fetch_add_unless(&twork->state, -1, TICK_SCHED_REMOTE_RUNNING); + WARN_ON_ONCE(os == TICK_SCHED_REMOTE_OFFLINE); + if (os == TICK_SCHED_REMOTE_RUNNING) + queue_delayed_work(system_unbound_wq, dwork, HZ); } static void sched_tick_start(int cpu) { + int os; struct tick_work *twork; if (housekeeping_cpu(cpu, HK_FLAG_TICK)) @@ -3548,15 +3582,20 @@ static void sched_tick_start(int cpu) WARN_ON_ONCE(!tick_work_cpu); twork = per_cpu_ptr(tick_work_cpu, cpu); - twork->cpu = cpu; - INIT_DELAYED_WORK(&twork->work, sched_tick_remote); - queue_delayed_work(system_unbound_wq, &twork->work, HZ); + os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_RUNNING); + WARN_ON_ONCE(os == TICK_SCHED_REMOTE_RUNNING); + if (os == TICK_SCHED_REMOTE_OFFLINE) { + twork->cpu = cpu; + INIT_DELAYED_WORK(&twork->work, sched_tick_remote); + queue_delayed_work(system_unbound_wq, &twork->work, HZ); + } } #ifdef CONFIG_HOTPLUG_CPU static void sched_tick_stop(int cpu) { struct tick_work *twork; + int os; if (housekeeping_cpu(cpu, HK_FLAG_TICK)) return; @@ -3564,7 +3603,10 @@ static void sched_tick_stop(int cpu) WARN_ON_ONCE(!tick_work_cpu); twork = per_cpu_ptr(tick_work_cpu, cpu); - cancel_delayed_work_sync(&twork->work); + /* There cannot be competing actions, but don't rely on stop-machine. */ + os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_OFFLINING); + WARN_ON_ONCE(os != TICK_SCHED_REMOTE_RUNNING); + /* Don't cancel, as this would mess up the state machine. 
*/ } #endif /* CONFIG_HOTPLUG_CPU */ @@ -3572,7 +3614,6 @@ int __init sched_tick_offload_init(void) { tick_work_cpu = alloc_percpu(struct tick_work); BUG_ON(!tick_work_cpu); - return 0; } @@ -6939,10 +6980,6 @@ static int cpu_cgroup_can_attach(struct cgroup_taskset *tset) #ifdef CONFIG_RT_GROUP_SCHED if (!sched_rt_can_attach(css_tg(css), task)) return -EINVAL; -#else - /* We don't support RT-tasks being in separate groups */ - if (task->sched_class != &fair_sched_class) - return -EINVAL; #endif /* * Serialize against wake_up_new_task() such that if its diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c index 867b4bb6d4be..b03ca2f73713 100644 --- a/kernel/sched/cpufreq_schedutil.c +++ b/kernel/sched/cpufreq_schedutil.c @@ -117,6 +117,7 @@ static void sugov_fast_switch(struct sugov_policy *sg_policy, u64 time, unsigned int next_freq) { struct cpufreq_policy *policy = sg_policy->policy; + int cpu; if (!sugov_update_next_freq(sg_policy, time, next_freq)) return; @@ -126,7 +127,11 @@ static void sugov_fast_switch(struct sugov_policy *sg_policy, u64 time, return; policy->cur = next_freq; - trace_cpu_frequency(next_freq, smp_processor_id()); + + if (trace_cpu_frequency_enabled()) { + for_each_cpu(cpu, policy->cpus) + trace_cpu_frequency(next_freq, cpu); + } } static void sugov_deferred_update(struct sugov_policy *sg_policy, u64 time, diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index 46122edd8552..20951112b6cd 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c @@ -529,6 +529,7 @@ static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq); static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p) { struct rq *later_rq = NULL; + struct dl_bw *dl_b; later_rq = find_lock_later_rq(p, rq); if (!later_rq) { @@ -557,6 +558,38 @@ static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p double_lock_balance(rq, later_rq); } + if (p->dl.dl_non_contending || p->dl.dl_throttled) { + /* + * Inactive timer is armed (or callback is running, but + * waiting for us to release rq locks). In any case, when it + * will fire (or continue), it will see running_bw of this + * task migrated to later_rq (and correctly handle it). + */ + sub_running_bw(&p->dl, &rq->dl); + sub_rq_bw(&p->dl, &rq->dl); + + add_rq_bw(&p->dl, &later_rq->dl); + add_running_bw(&p->dl, &later_rq->dl); + } else { + sub_rq_bw(&p->dl, &rq->dl); + add_rq_bw(&p->dl, &later_rq->dl); + } + + /* + * And we finally need to fixup root_domain(s) bandwidth accounting, + * since p is still hanging out in the old (now moved to default) root + * domain. + */ + dl_b = &rq->rd->dl_bw; + raw_spin_lock(&dl_b->lock); + __dl_sub(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span)); + raw_spin_unlock(&dl_b->lock); + + dl_b = &later_rq->rd->dl_bw; + raw_spin_lock(&dl_b->lock); + __dl_add(dl_b, p->dl.dl_bw, cpumask_weight(later_rq->rd->span)); + raw_spin_unlock(&dl_b->lock); + set_task_cpu(p, later_rq->cpu); double_unlock_balance(later_rq, rq); diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 500f5db0de0b..86cfc5d5129c 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -9052,9 +9052,10 @@ static int load_balance(int this_cpu, struct rq *this_rq, out_balanced: /* * We reach balance although we may have faced some affinity - * constraints. Clear the imbalance flag if it was set. + * constraints. Clear the imbalance flag only if other tasks got + * a chance to move and fix the imbalance. 
*/ - if (sd_parent) { + if (sd_parent && !(env.flags & LBF_ALL_PINNED)) { int *group_imbalance = &sd_parent->groups->sgc->imbalance; if (*group_imbalance) @@ -10300,18 +10301,18 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent) void online_fair_sched_group(struct task_group *tg) { struct sched_entity *se; + struct rq_flags rf; struct rq *rq; int i; for_each_possible_cpu(i) { rq = cpu_rq(i); se = tg->se[i]; - - raw_spin_lock_irq(&rq->lock); + rq_lock_irq(rq, &rf); update_rq_clock(rq); attach_entity_cfs_rq(se); sync_throttle(tg, i); - raw_spin_unlock_irq(&rq->lock); + rq_unlock_irq(rq, &rf); } } diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c index 80940939b733..e4bc4aa739b8 100644 --- a/kernel/sched/idle.c +++ b/kernel/sched/idle.c @@ -241,13 +241,14 @@ static void do_idle(void) check_pgt_cache(); rmb(); + local_irq_disable(); + if (cpu_is_offline(cpu)) { - tick_nohz_idle_stop_tick_protected(); + tick_nohz_idle_stop_tick(); cpuhp_report_idle_dead(); arch_cpu_idle_dead(); } - local_irq_disable(); arch_cpu_idle_enter(); /* diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c index 6e52b67b420e..517e3719027e 100644 --- a/kernel/sched/psi.c +++ b/kernel/sched/psi.c @@ -1198,7 +1198,7 @@ static ssize_t psi_write(struct file *file, const char __user *user_buf, if (static_branch_likely(&psi_disabled)) return -EOPNOTSUPP; - buf_size = min(nbytes, (sizeof(buf) - 1)); + buf_size = min(nbytes, sizeof(buf)); if (copy_from_user(buf, user_buf, buf_size)) return -EFAULT; diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c index 57518efc3810..b7d75a9e8ccf 100644 --- a/kernel/time/alarmtimer.c +++ b/kernel/time/alarmtimer.c @@ -672,7 +672,7 @@ static int alarm_timer_create(struct k_itimer *new_timer) enum alarmtimer_type type; if (!alarmtimer_get_rtcdev()) - return -ENOTSUPP; + return -EOPNOTSUPP; if (!capable(CAP_WAKE_ALARM)) return -EPERM; @@ -790,7 +790,7 @@ static int alarm_timer_nsleep(const clockid_t which_clock, int flags, int ret = 0; if (!alarmtimer_get_rtcdev()) - return -ENOTSUPP; + return -EOPNOTSUPP; if (flags & ~TIMER_ABSTIME) return -EINVAL; diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c index 0a426f4e3125..5bbad147a90c 100644 --- a/kernel/time/posix-cpu-timers.c +++ b/kernel/time/posix-cpu-timers.c @@ -375,7 +375,8 @@ static int posix_cpu_timer_del(struct k_itimer *timer) struct sighand_struct *sighand; struct task_struct *p = timer->it.cpu.task; - WARN_ON_ONCE(p == NULL); + if (WARN_ON_ONCE(!p)) + return -EINVAL; /* * Protect against sighand release/switch in exit/exec and process/ @@ -580,7 +581,8 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags, u64 old_expires, new_expires, old_incr, val; int ret; - WARN_ON_ONCE(p == NULL); + if (WARN_ON_ONCE(!p)) + return -EINVAL; /* * Use the to_ktime conversion because that clamps the maximum @@ -715,10 +717,11 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags, static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec64 *itp) { - u64 now; struct task_struct *p = timer->it.cpu.task; + u64 now; - WARN_ON_ONCE(p == NULL); + if (WARN_ON_ONCE(!p)) + return; /* * Easy part: convert the reload time. 
@@ -1000,12 +1003,13 @@ static void check_process_timers(struct task_struct *tsk, */ static void posix_cpu_timer_rearm(struct k_itimer *timer) { + struct task_struct *p = timer->it.cpu.task; struct sighand_struct *sighand; unsigned long flags; - struct task_struct *p = timer->it.cpu.task; u64 now; - WARN_ON_ONCE(p == NULL); + if (WARN_ON_ONCE(!p)) + return; /* * Fetch the current sample and update the timer's expiry time. @@ -1202,7 +1206,9 @@ void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx, u64 now; int ret; - WARN_ON_ONCE(clock_idx == CPUCLOCK_SCHED); + if (WARN_ON_ONCE(clock_idx >= CPUCLOCK_SCHED)) + return; + ret = cpu_timer_sample_group(clock_idx, tsk, &now); if (oldval && ret != -EINVAL) { diff --git a/lib/lzo/lzo1x_compress.c b/lib/lzo/lzo1x_compress.c index ba16c08e8cb9..717c940112f9 100644 --- a/lib/lzo/lzo1x_compress.c +++ b/lib/lzo/lzo1x_compress.c @@ -83,17 +83,19 @@ lzo1x_1_do_compress(const unsigned char *in, size_t in_len, ALIGN((uintptr_t)ir, 4)) && (ir < limit) && (*ir == 0)) ir++; - for (; (ir + 4) <= limit; ir += 4) { - dv = *((u32 *)ir); - if (dv) { + if (IS_ALIGNED((uintptr_t)ir, 4)) { + for (; (ir + 4) <= limit; ir += 4) { + dv = *((u32 *)ir); + if (dv) { # if defined(__LITTLE_ENDIAN) - ir += __builtin_ctz(dv) >> 3; + ir += __builtin_ctz(dv) >> 3; # elif defined(__BIG_ENDIAN) - ir += __builtin_clz(dv) >> 3; + ir += __builtin_clz(dv) >> 3; # else # error "missing endian definition" # endif - break; + break; + } } } #endif diff --git a/mm/compaction.c b/mm/compaction.c index 952dc2fb24e5..1e994920e6ff 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -2078,6 +2078,17 @@ compact_zone(struct compact_control *cc, struct capture_control *capc) const bool sync = cc->mode != MIGRATE_ASYNC; bool update_cached; + /* + * These counters track activities during zone compaction. Initialize + * them before compacting a new zone. 
+ */ + cc->total_migrate_scanned = 0; + cc->total_free_scanned = 0; + cc->nr_migratepages = 0; + cc->nr_freepages = 0; + INIT_LIST_HEAD(&cc->freepages); + INIT_LIST_HEAD(&cc->migratepages); + cc->migratetype = gfpflags_to_migratetype(cc->gfp_mask); ret = compaction_suitable(cc->zone, cc->order, cc->alloc_flags, cc->classzone_idx); @@ -2281,10 +2292,6 @@ static enum compact_result compact_zone_order(struct zone *zone, int order, { enum compact_result ret; struct compact_control cc = { - .nr_freepages = 0, - .nr_migratepages = 0, - .total_migrate_scanned = 0, - .total_free_scanned = 0, .order = order, .search_order = order, .gfp_mask = gfp_mask, @@ -2305,8 +2312,6 @@ static enum compact_result compact_zone_order(struct zone *zone, int order, if (capture) current->capture_control = &capc; - INIT_LIST_HEAD(&cc.freepages); - INIT_LIST_HEAD(&cc.migratepages); ret = compact_zone(&cc, &capc); @@ -2408,8 +2413,6 @@ static void compact_node(int nid) struct zone *zone; struct compact_control cc = { .order = -1, - .total_migrate_scanned = 0, - .total_free_scanned = 0, .mode = MIGRATE_SYNC, .ignore_skip_hint = true, .whole_zone = true, @@ -2423,11 +2426,7 @@ static void compact_node(int nid) if (!populated_zone(zone)) continue; - cc.nr_freepages = 0; - cc.nr_migratepages = 0; cc.zone = zone; - INIT_LIST_HEAD(&cc.freepages); - INIT_LIST_HEAD(&cc.migratepages); compact_zone(&cc, NULL); @@ -2529,8 +2528,6 @@ static void kcompactd_do_work(pg_data_t *pgdat) struct compact_control cc = { .order = pgdat->kcompactd_max_order, .search_order = pgdat->kcompactd_max_order, - .total_migrate_scanned = 0, - .total_free_scanned = 0, .classzone_idx = pgdat->kcompactd_classzone_idx, .mode = MIGRATE_SYNC_LIGHT, .ignore_skip_hint = false, @@ -2554,16 +2551,10 @@ static void kcompactd_do_work(pg_data_t *pgdat) COMPACT_CONTINUE) continue; - cc.nr_freepages = 0; - cc.nr_migratepages = 0; - cc.total_migrate_scanned = 0; - cc.total_free_scanned = 0; - cc.zone = zone; - INIT_LIST_HEAD(&cc.freepages); - INIT_LIST_HEAD(&cc.migratepages); - if (kthread_should_stop()) return; + + cc.zone = zone; status = compact_zone(&cc, NULL); if (status == COMPACT_SUCCESS) { diff --git a/mm/fadvise.c b/mm/fadvise.c index 467bcd032037..4f17c83db575 100644 --- a/mm/fadvise.c +++ b/mm/fadvise.c @@ -27,8 +27,7 @@ * deactivate the pages and clear PG_Referenced. 
*/ -static int generic_fadvise(struct file *file, loff_t offset, loff_t len, - int advice) +int generic_fadvise(struct file *file, loff_t offset, loff_t len, int advice) { struct inode *inode; struct address_space *mapping; @@ -178,6 +177,7 @@ static int generic_fadvise(struct file *file, loff_t offset, loff_t len, } return 0; } +EXPORT_SYMBOL(generic_fadvise); int vfs_fadvise(struct file *file, loff_t offset, loff_t len, int advice) { diff --git a/mm/madvise.c b/mm/madvise.c index 968df3aa069f..bac973b9f2cc 100644 --- a/mm/madvise.c +++ b/mm/madvise.c @@ -14,6 +14,7 @@ #include <linux/userfaultfd_k.h> #include <linux/hugetlb.h> #include <linux/falloc.h> +#include <linux/fadvise.h> #include <linux/sched.h> #include <linux/ksm.h> #include <linux/fs.h> @@ -275,6 +276,7 @@ static long madvise_willneed(struct vm_area_struct *vma, unsigned long start, unsigned long end) { struct file *file = vma->vm_file; + loff_t offset; *prev = vma; #ifdef CONFIG_SWAP @@ -298,12 +300,20 @@ static long madvise_willneed(struct vm_area_struct *vma, return 0; } - start = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; - if (end > vma->vm_end) - end = vma->vm_end; - end = ((end - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; - - force_page_cache_readahead(file->f_mapping, file, start, end - start); + /* + * Filesystem's fadvise may need to take various locks. We need to + * explicitly grab a reference because the vma (and hence the + * vma's reference to the file) can go away as soon as we drop + * mmap_sem. + */ + *prev = NULL; /* tell sys_madvise we drop mmap_sem */ + get_file(file); + up_read(&current->mm->mmap_sem); + offset = (loff_t)(start - vma->vm_start) + + ((loff_t)vma->vm_pgoff << PAGE_SHIFT); + vfs_fadvise(file, offset, end - start, POSIX_FADV_WILLNEED); + fput(file); + down_read(&current->mm->mmap_sem); return 0; } diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 9ec5e12486a7..e18108b2b786 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -2821,6 +2821,16 @@ int __memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order, if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !page_counter_try_charge(&memcg->kmem, nr_pages, &counter)) { + + /* + * Enforce __GFP_NOFAIL allocation because callers are not + * prepared to see failures and likely do not have any failure + * handling code. + */ + if (gfp & __GFP_NOFAIL) { + page_counter_charge(&memcg->kmem, nr_pages); + return 0; + } cancel_charge(memcg, nr_pages); return -ENOMEM; } diff --git a/mm/oom_kill.c b/mm/oom_kill.c index eda2e2a0bdc6..26804abe99d6 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c @@ -1068,9 +1068,10 @@ bool out_of_memory(struct oom_control *oc) * The OOM killer does not compensate for IO-less reclaim. * pagefault_out_of_memory lost its gfp context so we have to * make sure exclude 0 mask - all other users should have at least - * ___GFP_DIRECT_RECLAIM to get here. + * ___GFP_DIRECT_RECLAIM to get here. But mem_cgroup_oom() has to + * invoke the OOM killer even if it is a GFP_NOFS allocation.
*/ - if (oc->gfp_mask && !(oc->gfp_mask & __GFP_FS)) + if (oc->gfp_mask && !(oc->gfp_mask & __GFP_FS) && !is_memcg_oom(oc)) return true; /* diff --git a/mm/z3fold.c b/mm/z3fold.c index ed19d98c9dcd..05bdf90646e7 100644 --- a/mm/z3fold.c +++ b/mm/z3fold.c @@ -295,14 +295,11 @@ static void z3fold_unregister_migration(struct z3fold_pool *pool) } /* Initializes the z3fold header of a newly allocated z3fold page */ -static struct z3fold_header *init_z3fold_page(struct page *page, +static struct z3fold_header *init_z3fold_page(struct page *page, bool headless, struct z3fold_pool *pool, gfp_t gfp) { struct z3fold_header *zhdr = page_address(page); - struct z3fold_buddy_slots *slots = alloc_slots(pool, gfp); - - if (!slots) - return NULL; + struct z3fold_buddy_slots *slots; INIT_LIST_HEAD(&page->lru); clear_bit(PAGE_HEADLESS, &page->private); @@ -310,6 +307,12 @@ static struct z3fold_header *init_z3fold_page(struct page *page, clear_bit(NEEDS_COMPACTING, &page->private); clear_bit(PAGE_STALE, &page->private); clear_bit(PAGE_CLAIMED, &page->private); + if (headless) + return zhdr; + + slots = alloc_slots(pool, gfp); + if (!slots) + return NULL; spin_lock_init(&zhdr->page_lock); kref_init(&zhdr->refcount); @@ -366,9 +369,10 @@ static inline int __idx(struct z3fold_header *zhdr, enum buddy bud) * Encodes the handle of a particular buddy within a z3fold page * Pool lock should be held as this function accesses first_num */ -static unsigned long encode_handle(struct z3fold_header *zhdr, enum buddy bud) +static unsigned long __encode_handle(struct z3fold_header *zhdr, + struct z3fold_buddy_slots *slots, + enum buddy bud) { - struct z3fold_buddy_slots *slots; unsigned long h = (unsigned long)zhdr; int idx = 0; @@ -385,11 +389,15 @@ static unsigned long encode_handle(struct z3fold_header *zhdr, enum buddy bud) if (bud == LAST) h |= (zhdr->last_chunks << BUDDY_SHIFT); - slots = zhdr->slots; slots->slot[idx] = h; return (unsigned long)&slots->slot[idx]; } +static unsigned long encode_handle(struct z3fold_header *zhdr, enum buddy bud) +{ + return __encode_handle(zhdr, zhdr->slots, bud); +} + /* Returns the z3fold page where a given handle is stored */ static inline struct z3fold_header *handle_to_z3fold_header(unsigned long h) { @@ -624,6 +632,7 @@ static void do_compact_page(struct z3fold_header *zhdr, bool locked) } if (unlikely(PageIsolated(page) || + test_bit(PAGE_CLAIMED, &page->private) || test_bit(PAGE_STALE, &page->private))) { z3fold_page_unlock(zhdr); return; @@ -924,7 +933,7 @@ static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp, if (!page) return -ENOMEM; - zhdr = init_z3fold_page(page, pool, gfp); + zhdr = init_z3fold_page(page, bud == HEADLESS, pool, gfp); if (!zhdr) { __free_page(page); return -ENOMEM; @@ -1100,6 +1109,7 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries) struct z3fold_header *zhdr = NULL; struct page *page = NULL; struct list_head *pos; + struct z3fold_buddy_slots slots; unsigned long first_handle = 0, middle_handle = 0, last_handle = 0; spin_lock(&pool->lock); @@ -1118,16 +1128,22 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries) /* this bit could have been set by free, in which case * we pass over to the next page in the pool. 
*/ - if (test_and_set_bit(PAGE_CLAIMED, &page->private)) + if (test_and_set_bit(PAGE_CLAIMED, &page->private)) { + page = NULL; continue; + } - if (unlikely(PageIsolated(page))) + if (unlikely(PageIsolated(page))) { + clear_bit(PAGE_CLAIMED, &page->private); + page = NULL; continue; + } + zhdr = page_address(page); if (test_bit(PAGE_HEADLESS, &page->private)) break; - zhdr = page_address(page); if (!z3fold_page_trylock(zhdr)) { + clear_bit(PAGE_CLAIMED, &page->private); zhdr = NULL; continue; /* can't evict at this point */ } @@ -1145,26 +1161,30 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries) if (!test_bit(PAGE_HEADLESS, &page->private)) { /* - * We need encode the handles before unlocking, since - * we can race with free that will set - * (first|last)_chunks to 0 + * We need encode the handles before unlocking, and + * use our local slots structure because z3fold_free + * can zero out zhdr->slots and we can't do much + * about that */ first_handle = 0; last_handle = 0; middle_handle = 0; if (zhdr->first_chunks) - first_handle = encode_handle(zhdr, FIRST); + first_handle = __encode_handle(zhdr, &slots, + FIRST); if (zhdr->middle_chunks) - middle_handle = encode_handle(zhdr, MIDDLE); + middle_handle = __encode_handle(zhdr, &slots, + MIDDLE); if (zhdr->last_chunks) - last_handle = encode_handle(zhdr, LAST); + last_handle = __encode_handle(zhdr, &slots, + LAST); /* * it's safe to unlock here because we hold a * reference to this page */ z3fold_page_unlock(zhdr); } else { - first_handle = encode_handle(zhdr, HEADLESS); + first_handle = __encode_handle(zhdr, &slots, HEADLESS); last_handle = middle_handle = 0; } @@ -1194,9 +1214,9 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries) spin_lock(&pool->lock); list_add(&page->lru, &pool->lru); spin_unlock(&pool->lock); + clear_bit(PAGE_CLAIMED, &page->private); } else { z3fold_page_lock(zhdr); - clear_bit(PAGE_CLAIMED, &page->private); if (kref_put(&zhdr->refcount, release_z3fold_page_locked)) { atomic64_dec(&pool->pages_nr); @@ -1211,6 +1231,7 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries) list_add(&page->lru, &pool->lru); spin_unlock(&pool->lock); z3fold_page_unlock(zhdr); + clear_bit(PAGE_CLAIMED, &page->private); } /* We started off locked to we need to lock the pool back */ @@ -1315,7 +1336,8 @@ static bool z3fold_page_isolate(struct page *page, isolate_mode_t mode) VM_BUG_ON_PAGE(!PageMovable(page), page); VM_BUG_ON_PAGE(PageIsolated(page), page); - if (test_bit(PAGE_HEADLESS, &page->private)) + if (test_bit(PAGE_HEADLESS, &page->private) || + test_bit(PAGE_CLAIMED, &page->private)) return false; zhdr = page_address(page); diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c index a8cb6b2e20c1..5a203acdcae5 100644 --- a/net/appletalk/ddp.c +++ b/net/appletalk/ddp.c @@ -1023,6 +1023,11 @@ static int atalk_create(struct net *net, struct socket *sock, int protocol, */ if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM) goto out; + + rc = -EPERM; + if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW)) + goto out; + rc = -ENOMEM; sk = sk_alloc(net, PF_APPLETALK, GFP_KERNEL, &ddp_proto, kern); if (!sk) diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c index ca5207767dc2..bb222b882b67 100644 --- a/net/ax25/af_ax25.c +++ b/net/ax25/af_ax25.c @@ -855,6 +855,8 @@ static int ax25_create(struct net *net, struct socket *sock, int protocol, break; case SOCK_RAW: + if (!capable(CAP_NET_RAW)) + return -EPERM; break; default: return 
-ESOCKTNOSUPPORT; diff --git a/net/ieee802154/socket.c b/net/ieee802154/socket.c index badc5cfe4dc6..d93d4531aa9b 100644 --- a/net/ieee802154/socket.c +++ b/net/ieee802154/socket.c @@ -1008,6 +1008,9 @@ static int ieee802154_create(struct net *net, struct socket *sock, switch (sock->type) { case SOCK_RAW: + rc = -EPERM; + if (!capable(CAP_NET_RAW)) + goto out; proto = &ieee802154_raw_prot; ops = &ieee802154_raw_ops; break; diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index f5c163d4771b..a9183543ca30 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c @@ -560,7 +560,7 @@ struct dst_entry *inet_csk_route_req(const struct sock *sk, rt = ip_route_output_flow(net, fl4, sk); if (IS_ERR(rt)) goto no_route; - if (opt && opt->opt.is_strictroute && rt->rt_gw_family) + if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway) goto route_err; rcu_read_unlock(); return &rt->dst; @@ -598,7 +598,7 @@ struct dst_entry *inet_csk_route_child_sock(const struct sock *sk, rt = ip_route_output_flow(net, fl4, sk); if (IS_ERR(rt)) goto no_route; - if (opt && opt->opt.is_strictroute && rt->rt_gw_family) + if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway) goto route_err; return &rt->dst; diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c index 06f6f280b9ff..00ec819f949b 100644 --- a/net/ipv4/ip_forward.c +++ b/net/ipv4/ip_forward.c @@ -123,7 +123,7 @@ int ip_forward(struct sk_buff *skb) rt = skb_rtable(skb); - if (opt->is_strictroute && rt->rt_gw_family) + if (opt->is_strictroute && rt->rt_uses_gateway) goto sr_failed; IPCB(skb)->flags |= IPSKB_FORWARDED; diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index cc7ef0d05bbd..da521790cd63 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -499,7 +499,7 @@ int __ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl, skb_dst_set_noref(skb, &rt->dst); packet_routed: - if (inet_opt && inet_opt->opt.is_strictroute && rt->rt_gw_family) + if (inet_opt && inet_opt->opt.is_strictroute && rt->rt_uses_gateway) goto no_route; /* OK, we know where to send it, allocate and build IP header. 
*/ diff --git a/net/ipv4/route.c b/net/ipv4/route.c index b6a6f18c3dd1..7dcce724c78b 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -635,6 +635,7 @@ static void fill_route_from_fnhe(struct rtable *rt, struct fib_nh_exception *fnh if (fnhe->fnhe_gw) { rt->rt_flags |= RTCF_REDIRECTED; + rt->rt_uses_gateway = 1; rt->rt_gw_family = AF_INET; rt->rt_gw4 = fnhe->fnhe_gw; } @@ -1313,7 +1314,7 @@ static unsigned int ipv4_mtu(const struct dst_entry *dst) mtu = READ_ONCE(dst->dev->mtu); if (unlikely(ip_mtu_locked(dst))) { - if (rt->rt_gw_family && mtu > 576) + if (rt->rt_uses_gateway && mtu > 576) mtu = 576; } @@ -1569,6 +1570,7 @@ static void rt_set_nexthop(struct rtable *rt, __be32 daddr, struct fib_nh_common *nhc = FIB_RES_NHC(*res); if (nhc->nhc_gw_family && nhc->nhc_scope == RT_SCOPE_LINK) { + rt->rt_uses_gateway = 1; rt->rt_gw_family = nhc->nhc_gw_family; /* only INET and INET6 are supported */ if (likely(nhc->nhc_gw_family == AF_INET)) @@ -1634,6 +1636,7 @@ struct rtable *rt_dst_alloc(struct net_device *dev, rt->rt_iif = 0; rt->rt_pmtu = 0; rt->rt_mtu_locked = 0; + rt->rt_uses_gateway = 0; rt->rt_gw_family = 0; rt->rt_gw4 = 0; INIT_LIST_HEAD(&rt->rt_uncached); @@ -2694,6 +2697,7 @@ struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_or rt->rt_genid = rt_genid_ipv4(net); rt->rt_flags = ort->rt_flags; rt->rt_type = ort->rt_type; + rt->rt_uses_gateway = ort->rt_uses_gateway; rt->rt_gw_family = ort->rt_gw_family; if (rt->rt_gw_family == AF_INET) rt->rt_gw4 = ort->rt_gw4; @@ -2778,21 +2782,23 @@ static int rt_fill_info(struct net *net, __be32 dst, __be32 src, if (nla_put_in_addr(skb, RTA_PREFSRC, fl4->saddr)) goto nla_put_failure; } - if (rt->rt_gw_family == AF_INET && - nla_put_in_addr(skb, RTA_GATEWAY, rt->rt_gw4)) { - goto nla_put_failure; - } else if (rt->rt_gw_family == AF_INET6) { - int alen = sizeof(struct in6_addr); - struct nlattr *nla; - struct rtvia *via; - - nla = nla_reserve(skb, RTA_VIA, alen + 2); - if (!nla) + if (rt->rt_uses_gateway) { + if (rt->rt_gw_family == AF_INET && + nla_put_in_addr(skb, RTA_GATEWAY, rt->rt_gw4)) { goto nla_put_failure; - - via = nla_data(nla); - via->rtvia_family = AF_INET6; - memcpy(via->rtvia_addr, &rt->rt_gw6, alen); + } else if (rt->rt_gw_family == AF_INET6) { + int alen = sizeof(struct in6_addr); + struct nlattr *nla; + struct rtvia *via; + + nla = nla_reserve(skb, RTA_VIA, alen + 2); + if (!nla) + goto nla_put_failure; + + via = nla_data(nla); + via->rtvia_family = AF_INET6; + memcpy(via->rtvia_addr, &rt->rt_gw6, alen); + } } expires = rt->dst.expires; diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c index 56be7d27f208..00ade9c185ea 100644 --- a/net/ipv4/tcp_bbr.c +++ b/net/ipv4/tcp_bbr.c @@ -386,7 +386,7 @@ static u32 bbr_bdp(struct sock *sk, u32 bw, int gain) * which allows 2 outstanding 2-packet sequences, to try to keep pipe * full even with ACK-every-other-packet delayed ACKs. */ -static u32 bbr_quantization_budget(struct sock *sk, u32 cwnd, int gain) +static u32 bbr_quantization_budget(struct sock *sk, u32 cwnd) { struct bbr *bbr = inet_csk_ca(sk); @@ -397,7 +397,7 @@ static u32 bbr_quantization_budget(struct sock *sk, u32 cwnd, int gain) cwnd = (cwnd + 1) & ~1U; /* Ensure gain cycling gets inflight above BDP even for small BDPs. 
*/ - if (bbr->mode == BBR_PROBE_BW && gain > BBR_UNIT) + if (bbr->mode == BBR_PROBE_BW && bbr->cycle_idx == 0) cwnd += 2; return cwnd; @@ -409,7 +409,7 @@ static u32 bbr_inflight(struct sock *sk, u32 bw, int gain) u32 inflight; inflight = bbr_bdp(sk, bw, gain); - inflight = bbr_quantization_budget(sk, inflight, gain); + inflight = bbr_quantization_budget(sk, inflight); return inflight; } @@ -529,7 +529,7 @@ static void bbr_set_cwnd(struct sock *sk, const struct rate_sample *rs, * due to aggregation (of data and/or ACKs) visible in the ACK stream. */ target_cwnd += bbr_ack_aggregation_cwnd(sk); - target_cwnd = bbr_quantization_budget(sk, target_cwnd, gain); + target_cwnd = bbr_quantization_budget(sk, target_cwnd); /* If we're below target cwnd, slow start cwnd toward target cwnd. */ if (bbr_full_bw_reached(sk)) /* only cut cwnd if we filled the pipe */ diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index c801cd37cc2a..3e8b38c73d8c 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c @@ -210,7 +210,7 @@ static int tcp_write_timeout(struct sock *sk) struct inet_connection_sock *icsk = inet_csk(sk); struct tcp_sock *tp = tcp_sk(sk); struct net *net = sock_net(sk); - bool expired, do_reset; + bool expired = false, do_reset; int retry_until; if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) { @@ -242,9 +242,10 @@ static int tcp_write_timeout(struct sock *sk) if (tcp_out_of_resources(sk, do_reset)) return 1; } + } + if (!expired) expired = retransmits_timed_out(sk, retry_until, icsk->icsk_user_timeout); - } tcp_fastopen_active_detect_blackhole(sk, expired); if (BPF_SOCK_OPS_TEST_FLAG(tp, BPF_SOCK_OPS_RTO_CB_FLAG)) diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c index cdef8f9a3b01..35b84b52b702 100644 --- a/net/ipv4/xfrm4_policy.c +++ b/net/ipv4/xfrm4_policy.c @@ -85,6 +85,7 @@ static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev, xdst->u.rt.rt_flags = rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST | RTCF_LOCAL); xdst->u.rt.rt_type = rt->rt_type; + xdst->u.rt.rt_uses_gateway = rt->rt_uses_gateway; xdst->u.rt.rt_gw_family = rt->rt_gw_family; if (rt->rt_gw_family == AF_INET) xdst->u.rt.rt_gw4 = rt->rt_gw4; diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c index d22b6c140f23..f9e8fe3ff0c5 100644 --- a/net/ipv6/fib6_rules.c +++ b/net/ipv6/fib6_rules.c @@ -287,7 +287,8 @@ static bool fib6_rule_suppress(struct fib_rule *rule, struct fib_lookup_arg *arg return false; suppress_route: - ip6_rt_put(rt); + if (!(arg->flags & FIB_LOOKUP_NOREF)) + ip6_rt_put(rt); return true; } diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index 87f47bc55c5e..6e2af411cd9c 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c @@ -318,7 +318,7 @@ struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6, if (rt->dst.error == -EAGAIN) { ip6_rt_put_flags(rt, flags); rt = net->ipv6.ip6_null_entry; - if (!(flags | RT6_LOOKUP_F_DST_NOREF)) + if (!(flags & RT6_LOOKUP_F_DST_NOREF)) dst_hold(&rt->dst); } diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c index 9b8742947aff..8dfea26536c9 100644 --- a/net/nfc/llcp_sock.c +++ b/net/nfc/llcp_sock.c @@ -1004,10 +1004,13 @@ static int llcp_sock_create(struct net *net, struct socket *sock, sock->type != SOCK_RAW) return -ESOCKTNOSUPPORT; - if (sock->type == SOCK_RAW) + if (sock->type == SOCK_RAW) { + if (!capable(CAP_NET_RAW)) + return -EPERM; sock->ops = &llcp_rawsock_ops; - else + } else { sock->ops = &llcp_sock_ops; + } sk = nfc_llcp_sock_alloc(sock, sock->type, GFP_ATOMIC, kern); if (sk == 
NULL) diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c index d01410e52097..f1e7041a5a60 100644 --- a/net/openvswitch/datapath.c +++ b/net/openvswitch/datapath.c @@ -2263,7 +2263,7 @@ static const struct nla_policy vport_policy[OVS_VPORT_ATTR_MAX + 1] = { [OVS_VPORT_ATTR_STATS] = { .len = sizeof(struct ovs_vport_stats) }, [OVS_VPORT_ATTR_PORT_NO] = { .type = NLA_U32 }, [OVS_VPORT_ATTR_TYPE] = { .type = NLA_U32 }, - [OVS_VPORT_ATTR_UPCALL_PID] = { .type = NLA_U32 }, + [OVS_VPORT_ATTR_UPCALL_PID] = { .type = NLA_UNSPEC }, [OVS_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED }, [OVS_VPORT_ATTR_IFINDEX] = { .type = NLA_U32 }, [OVS_VPORT_ATTR_NETNSID] = { .type = NLA_S32 }, diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c index 6c8b0f6d28f9..88f98f27ad88 100644 --- a/net/qrtr/qrtr.c +++ b/net/qrtr/qrtr.c @@ -150,6 +150,7 @@ static void __qrtr_node_release(struct kref *kref) list_del(&node->item); mutex_unlock(&qrtr_node_lock); + cancel_work_sync(&node->work); skb_queue_purge(&node->rx_queue); kfree(node); } diff --git a/net/rds/bind.c b/net/rds/bind.c index 05464fd7c17a..93e336535d3b 100644 --- a/net/rds/bind.c +++ b/net/rds/bind.c @@ -244,7 +244,8 @@ int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) */ if (rs->rs_transport) { trans = rs->rs_transport; - if (trans->laddr_check(sock_net(sock->sk), + if (!trans->laddr_check || + trans->laddr_check(sock_net(sock->sk), binding_addr, scope_id) != 0) { ret = -ENOPROTOOPT; goto out; @@ -263,6 +264,8 @@ int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) sock_set_flag(sk, SOCK_RCU_FREE); ret = rds_add_bound(rs, binding_addr, &port, scope_id); + if (ret) + rs->rs_transport = NULL; out: release_sock(sk); diff --git a/net/sched/act_api.c b/net/sched/act_api.c index 339712296164..2558f00f6b3e 100644 --- a/net/sched/act_api.c +++ b/net/sched/act_api.c @@ -831,6 +831,15 @@ static struct tc_cookie *nla_memdup_cookie(struct nlattr **tb) return c; } +static const struct nla_policy tcf_action_policy[TCA_ACT_MAX + 1] = { + [TCA_ACT_KIND] = { .type = NLA_NUL_STRING, + .len = IFNAMSIZ - 1 }, + [TCA_ACT_INDEX] = { .type = NLA_U32 }, + [TCA_ACT_COOKIE] = { .type = NLA_BINARY, + .len = TC_COOKIE_MAX_SIZE }, + [TCA_ACT_OPTIONS] = { .type = NLA_NESTED }, +}; + struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp, struct nlattr *nla, struct nlattr *est, char *name, int ovr, int bind, @@ -846,8 +855,8 @@ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp, int err; if (name == NULL) { - err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX, nla, NULL, - extack); + err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX, nla, + tcf_action_policy, extack); if (err < 0) goto err_out; err = -EINVAL; @@ -856,18 +865,9 @@ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp, NL_SET_ERR_MSG(extack, "TC action kind must be specified"); goto err_out; } - if (nla_strlcpy(act_name, kind, IFNAMSIZ) >= IFNAMSIZ) { - NL_SET_ERR_MSG(extack, "TC action name too long"); - goto err_out; - } - if (tb[TCA_ACT_COOKIE]) { - int cklen = nla_len(tb[TCA_ACT_COOKIE]); - - if (cklen > TC_COOKIE_MAX_SIZE) { - NL_SET_ERR_MSG(extack, "TC cookie size above the maximum"); - goto err_out; - } + nla_strlcpy(act_name, kind, IFNAMSIZ); + if (tb[TCA_ACT_COOKIE]) { cookie = nla_memdup_cookie(tb); if (!cookie) { NL_SET_ERR_MSG(extack, "No memory to generate TC cookie"); @@ -1098,7 +1098,8 @@ static struct tc_action *tcf_action_get_1(struct net *net, struct nlattr *nla, int index; int err; - err = 
nla_parse_nested_deprecated(tb, TCA_ACT_MAX, nla, NULL, extack); + err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX, nla, + tcf_action_policy, extack); if (err < 0) goto err_out; @@ -1152,7 +1153,8 @@ static int tca_action_flush(struct net *net, struct nlattr *nla, b = skb_tail_pointer(skb); - err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX, nla, NULL, extack); + err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX, nla, + tcf_action_policy, extack); if (err < 0) goto err_out; @@ -1440,7 +1442,7 @@ static struct nlattr *find_dump_kind(struct nlattr **nla) if (tb[1] == NULL) return NULL; - if (nla_parse_nested_deprecated(tb2, TCA_ACT_MAX, tb[1], NULL, NULL) < 0) + if (nla_parse_nested_deprecated(tb2, TCA_ACT_MAX, tb[1], tcf_action_policy, NULL) < 0) return NULL; kind = tb2[TCA_ACT_KIND]; diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c index 10229124a992..86344fd2ff1f 100644 --- a/net/sched/act_sample.c +++ b/net/sched/act_sample.c @@ -146,6 +146,7 @@ static bool tcf_sample_dev_ok_push(struct net_device *dev) case ARPHRD_TUNNEL6: case ARPHRD_SIT: case ARPHRD_IPGRE: + case ARPHRD_IP6GRE: case ARPHRD_VOID: case ARPHRD_NONE: return false; diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index efd3cfb80a2a..9aef93300f1c 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -3027,8 +3027,10 @@ static int tc_dump_chain(struct sk_buff *skb, struct netlink_callback *cb) void tcf_exts_destroy(struct tcf_exts *exts) { #ifdef CONFIG_NET_CLS_ACT - tcf_action_destroy(exts->actions, TCA_ACT_UNBIND); - kfree(exts->actions); + if (exts->actions) { + tcf_action_destroy(exts->actions, TCA_ACT_UNBIND); + kfree(exts->actions); + } exts->nr_actions = 0; #endif } diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index 1047825d9f48..81d58b280612 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c @@ -1390,7 +1390,8 @@ check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w) } const struct nla_policy rtm_tca_policy[TCA_MAX + 1] = { - [TCA_KIND] = { .type = NLA_STRING }, + [TCA_KIND] = { .type = NLA_NUL_STRING, + .len = IFNAMSIZ - 1 }, [TCA_RATE] = { .type = NLA_BINARY, .len = sizeof(struct tc_estimator) }, [TCA_STAB] = { .type = NLA_NESTED }, diff --git a/net/sched/sch_cbs.c b/net/sched/sch_cbs.c index 810645b5c086..4a403d35438f 100644 --- a/net/sched/sch_cbs.c +++ b/net/sched/sch_cbs.c @@ -392,7 +392,6 @@ static int cbs_init(struct Qdisc *sch, struct nlattr *opt, { struct cbs_sched_data *q = qdisc_priv(sch); struct net_device *dev = qdisc_dev(sch); - int err; if (!opt) { NL_SET_ERR_MSG(extack, "Missing CBS qdisc options which are mandatory"); @@ -404,6 +403,10 @@ static int cbs_init(struct Qdisc *sch, struct nlattr *opt, if (!q->qdisc) return -ENOMEM; + spin_lock(&cbs_list_lock); + list_add(&q->cbs_list, &cbs_list); + spin_unlock(&cbs_list_lock); + qdisc_hash_add(q->qdisc, false); q->queue = sch->dev_queue - netdev_get_tx_queue(dev, 0); @@ -413,17 +416,7 @@ static int cbs_init(struct Qdisc *sch, struct nlattr *opt, qdisc_watchdog_init(&q->watchdog, sch); - err = cbs_change(sch, opt, extack); - if (err) - return err; - - if (!q->offload) { - spin_lock(&cbs_list_lock); - list_add(&q->cbs_list, &cbs_list); - spin_unlock(&cbs_list_lock); - } - - return 0; + return cbs_change(sch, opt, extack); } static void cbs_destroy(struct Qdisc *sch) @@ -431,15 +424,18 @@ static void cbs_destroy(struct Qdisc *sch) struct cbs_sched_data *q = qdisc_priv(sch); struct net_device *dev = qdisc_dev(sch); - spin_lock(&cbs_list_lock); - list_del(&q->cbs_list); - 
spin_unlock(&cbs_list_lock); + /* Nothing to do if we couldn't create the underlying qdisc */ + if (!q->qdisc) + return; qdisc_watchdog_cancel(&q->watchdog); cbs_disable_offload(dev, q); - if (q->qdisc) - qdisc_put(q->qdisc); + spin_lock(&cbs_list_lock); + list_del(&q->cbs_list); + spin_unlock(&cbs_list_lock); + + qdisc_put(q->qdisc); } static int cbs_dump(struct Qdisc *sch, struct sk_buff *skb) diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c index b17f2ed970e2..f5cb35e550f8 100644 --- a/net/sched/sch_netem.c +++ b/net/sched/sch_netem.c @@ -777,7 +777,7 @@ static int get_dist_table(struct Qdisc *sch, struct disttable **tbl, struct disttable *d; int i; - if (n > NETEM_DIST_MAX) + if (!n || n > NETEM_DIST_MAX) return -EINVAL; d = kvmalloc(sizeof(struct disttable) + n * sizeof(s16), GFP_KERNEL); diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index a07b516e503a..7a75f34ad393 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c @@ -1862,6 +1862,7 @@ rpc_xdr_encode(struct rpc_task *task) req->rq_rbuffer, req->rq_rcvsize); + req->rq_reply_bytes_recvd = 0; req->rq_snd_buf.head[0].iov_len = 0; xdr_init_encode(&xdr, &req->rq_snd_buf, req->rq_snd_buf.head[0].iov_base, req); @@ -1881,6 +1882,8 @@ call_encode(struct rpc_task *task) if (!rpc_task_need_encode(task)) goto out; dprint_status(task); + /* Dequeue task from the receive queue while we're encoding */ + xprt_request_dequeue_xprt(task); /* Encode here so that rpcsec_gss can use correct sequence number. */ rpc_xdr_encode(task); /* Did the encode result in an error condition? */ @@ -2518,9 +2521,6 @@ call_decode(struct rpc_task *task) return; case -EAGAIN: task->tk_status = 0; - xdr_free_bvec(&req->rq_rcv_buf); - req->rq_reply_bytes_recvd = 0; - req->rq_rcv_buf.len = 0; if (task->tk_client->cl_discrtry) xprt_conditional_disconnect(req->rq_xprt, req->rq_connect_cookie); diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c index 48c93b9e525e..b256806d69cd 100644 --- a/net/sunrpc/xdr.c +++ b/net/sunrpc/xdr.c @@ -1237,16 +1237,29 @@ xdr_encode_word(struct xdr_buf *buf, unsigned int base, u32 obj) EXPORT_SYMBOL_GPL(xdr_encode_word); /* If the netobj starting offset bytes from the start of xdr_buf is contained - * entirely in the head or the tail, set object to point to it; otherwise - * try to find space for it at the end of the tail, copy it there, and - * set obj to point to it. */ + * entirely in the head, pages, or tail, set object to point to it; otherwise + * shift the buffer until it is contained entirely within the pages or tail. + */ int xdr_buf_read_netobj(struct xdr_buf *buf, struct xdr_netobj *obj, unsigned int offset) { struct xdr_buf subbuf; + unsigned int boundary; if (xdr_decode_word(buf, offset, &obj->len)) return -EFAULT; - if (xdr_buf_subsegment(buf, &subbuf, offset + 4, obj->len)) + offset += 4; + + /* Is the obj partially in the head? */ + boundary = buf->head[0].iov_len; + if (offset < boundary && (offset + obj->len) > boundary) + xdr_shift_buf(buf, boundary - offset); + + /* Is the obj partially in the pages? */ + boundary += buf->page_len; + if (offset < boundary && (offset + obj->len) > boundary) + xdr_shrink_pagelen(buf, boundary - offset); + + if (xdr_buf_subsegment(buf, &subbuf, offset, obj->len)) return -EFAULT; /* Is the obj contained entirely in the head? 
*/ @@ -1258,11 +1271,7 @@ int xdr_buf_read_netobj(struct xdr_buf *buf, struct xdr_netobj *obj, unsigned in if (subbuf.tail[0].iov_len == obj->len) return 0; - /* use end of tail as storage for obj: - * (We don't copy to the beginning because then we'd have - * to worry about doing a potentially overlapping copy. - * This assumes the object is at most half the length of the - * tail.) */ + /* Find a contiguous area in @buf to hold all of @obj */ if (obj->len > buf->buflen - buf->len) return -ENOMEM; if (buf->tail[0].iov_len != 0) diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index 2e71f5455c6c..20631d64312c 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c @@ -1323,6 +1323,36 @@ xprt_request_dequeue_transmit(struct rpc_task *task) spin_unlock(&xprt->queue_lock); } +/** + * xprt_request_dequeue_xprt - remove a task from the transmit+receive queue + * @task: pointer to rpc_task + * + * Remove a task from the transmit and receive queues, and ensure that + * it is not pinned by the receive work item. + */ +void +xprt_request_dequeue_xprt(struct rpc_task *task) +{ + struct rpc_rqst *req = task->tk_rqstp; + struct rpc_xprt *xprt = req->rq_xprt; + + if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate) || + test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) || + xprt_is_pinned_rqst(req)) { + spin_lock(&xprt->queue_lock); + xprt_request_dequeue_transmit_locked(task); + xprt_request_dequeue_receive_locked(task); + while (xprt_is_pinned_rqst(req)) { + set_bit(RPC_TASK_MSG_PIN_WAIT, &task->tk_runstate); + spin_unlock(&xprt->queue_lock); + xprt_wait_on_pinned_rqst(req); + spin_lock(&xprt->queue_lock); + clear_bit(RPC_TASK_MSG_PIN_WAIT, &task->tk_runstate); + } + spin_unlock(&xprt->queue_lock); + } +} + /** * xprt_request_prepare - prepare an encoded request for transport * @req: pointer to rpc_rqst @@ -1747,28 +1777,6 @@ void xprt_retry_reserve(struct rpc_task *task) xprt_do_reserve(xprt, task); } -static void -xprt_request_dequeue_all(struct rpc_task *task, struct rpc_rqst *req) -{ - struct rpc_xprt *xprt = req->rq_xprt; - - if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate) || - test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) || - xprt_is_pinned_rqst(req)) { - spin_lock(&xprt->queue_lock); - xprt_request_dequeue_transmit_locked(task); - xprt_request_dequeue_receive_locked(task); - while (xprt_is_pinned_rqst(req)) { - set_bit(RPC_TASK_MSG_PIN_WAIT, &task->tk_runstate); - spin_unlock(&xprt->queue_lock); - xprt_wait_on_pinned_rqst(req); - spin_lock(&xprt->queue_lock); - clear_bit(RPC_TASK_MSG_PIN_WAIT, &task->tk_runstate); - } - spin_unlock(&xprt->queue_lock); - } -} - /** * xprt_release - release an RPC request slot * @task: task which is finished with the slot @@ -1788,7 +1796,7 @@ void xprt_release(struct rpc_task *task) } xprt = req->rq_xprt; - xprt_request_dequeue_all(task, req); + xprt_request_dequeue_xprt(task); spin_lock(&xprt->transport_lock); xprt->ops->release_xprt(xprt, task); if (xprt->ops->release_request) diff --git a/net/wireless/util.c b/net/wireless/util.c index e74837824cea..f68818dbac1a 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c @@ -960,6 +960,7 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev, } cfg80211_process_rdev_events(rdev); + cfg80211_mlme_purge_registrations(dev->ieee80211_ptr); } err = rdev_change_virtual_intf(rdev, dev, ntype, params); diff --git a/scripts/Makefile.kasan b/scripts/Makefile.kasan index 6410bd22fe38..03757cc60e06 100644 --- a/scripts/Makefile.kasan +++ b/scripts/Makefile.kasan @@ -1,4 +1,9 @@ # 
SPDX-License-Identifier: GPL-2.0 +ifdef CONFIG_KASAN +CFLAGS_KASAN_NOSANITIZE := -fno-builtin +KASAN_SHADOW_OFFSET ?= $(CONFIG_KASAN_SHADOW_OFFSET) +endif + ifdef CONFIG_KASAN_GENERIC ifdef CONFIG_KASAN_INLINE @@ -7,8 +12,6 @@ else call_threshold := 0 endif -KASAN_SHADOW_OFFSET ?= $(CONFIG_KASAN_SHADOW_OFFSET) - CFLAGS_KASAN_MINIMAL := -fsanitize=kernel-address cc-param = $(call cc-option, -mllvm -$(1), $(call cc-option, --param $(1))) @@ -45,7 +48,3 @@ CFLAGS_KASAN := -fsanitize=kernel-hwaddress \ $(instrumentation_flags) endif # CONFIG_KASAN_SW_TAGS - -ifdef CONFIG_KASAN -CFLAGS_KASAN_NOSANITIZE := -fno-builtin -endif diff --git a/scripts/gcc-plugins/randomize_layout_plugin.c b/scripts/gcc-plugins/randomize_layout_plugin.c index 6d5bbd31db7f..bd29e4e7a524 100644 --- a/scripts/gcc-plugins/randomize_layout_plugin.c +++ b/scripts/gcc-plugins/randomize_layout_plugin.c @@ -443,13 +443,13 @@ static int is_pure_ops_struct(const_tree node) if (node == fieldtype) continue; - if (!is_fptr(fieldtype)) - return 0; - - if (code != RECORD_TYPE && code != UNION_TYPE) + if (code == RECORD_TYPE || code == UNION_TYPE) { + if (!is_pure_ops_struct(fieldtype)) + return 0; continue; + } - if (!is_pure_ops_struct(fieldtype)) + if (!is_fptr(fieldtype)) return 0; } diff --git a/security/keys/trusted.c b/security/keys/trusted.c index ade699131065..1fbd77816610 100644 --- a/security/keys/trusted.c +++ b/security/keys/trusted.c @@ -1228,11 +1228,16 @@ static int __init trusted_shash_alloc(void) static int __init init_digests(void) { + int i; + digests = kcalloc(chip->nr_allocated_banks, sizeof(*digests), GFP_KERNEL); if (!digests) return -ENOMEM; + for (i = 0; i < chip->nr_allocated_banks; i++) + digests[i].alg_id = chip->allocated_banks[i].alg_id; + return 0; } diff --git a/sound/firewire/motu/motu.c b/sound/firewire/motu/motu.c index 03cda2166ea3..72908b4de77c 100644 --- a/sound/firewire/motu/motu.c +++ b/sound/firewire/motu/motu.c @@ -247,6 +247,17 @@ static const struct snd_motu_spec motu_audio_express = { .analog_out_ports = 4, }; +static const struct snd_motu_spec motu_4pre = { + .name = "4pre", + .protocol = &snd_motu_protocol_v3, + .flags = SND_MOTU_SPEC_SUPPORT_CLOCK_X2 | + SND_MOTU_SPEC_TX_MICINST_CHUNK | + SND_MOTU_SPEC_TX_RETURN_CHUNK | + SND_MOTU_SPEC_RX_SEPARETED_MAIN, + .analog_in_ports = 2, + .analog_out_ports = 2, +}; + #define SND_MOTU_DEV_ENTRY(model, data) \ { \ .match_flags = IEEE1394_MATCH_VENDOR_ID | \ @@ -265,6 +276,7 @@ static const struct ieee1394_device_id motu_id_table[] = { SND_MOTU_DEV_ENTRY(0x000015, &motu_828mk3), /* FireWire only. */ SND_MOTU_DEV_ENTRY(0x000035, &motu_828mk3), /* Hybrid. 
*/ SND_MOTU_DEV_ENTRY(0x000033, &motu_audio_express), + SND_MOTU_DEV_ENTRY(0x000045, &motu_4pre), { } }; MODULE_DEVICE_TABLE(ieee1394, motu_id_table); diff --git a/sound/firewire/tascam/tascam-pcm.c b/sound/firewire/tascam/tascam-pcm.c index b5ced5415e40..2377732caa52 100644 --- a/sound/firewire/tascam/tascam-pcm.c +++ b/sound/firewire/tascam/tascam-pcm.c @@ -56,6 +56,9 @@ static int pcm_open(struct snd_pcm_substream *substream) goto err_locked; err = snd_tscm_stream_get_clock(tscm, &clock); + if (err < 0) + goto err_locked; + if (clock != SND_TSCM_CLOCK_INTERNAL || amdtp_stream_pcm_running(&tscm->rx_stream) || amdtp_stream_pcm_running(&tscm->tx_stream)) { diff --git a/sound/firewire/tascam/tascam-stream.c b/sound/firewire/tascam/tascam-stream.c index e852e46ebe6f..ccfa92fbc145 100644 --- a/sound/firewire/tascam/tascam-stream.c +++ b/sound/firewire/tascam/tascam-stream.c @@ -8,20 +8,37 @@ #include <linux/delay.h> #include "tascam.h" +#define CLOCK_STATUS_MASK 0xffff0000 +#define CLOCK_CONFIG_MASK 0x0000ffff + #define CALLBACK_TIMEOUT 500 static int get_clock(struct snd_tscm *tscm, u32 *data) { + int trial = 0; __be32 reg; int err; - err = snd_fw_transaction(tscm->unit, TCODE_READ_QUADLET_REQUEST, - TSCM_ADDR_BASE + TSCM_OFFSET_CLOCK_STATUS, - &reg, sizeof(reg), 0); - if (err >= 0) + while (trial++ < 5) { + err = snd_fw_transaction(tscm->unit, TCODE_READ_QUADLET_REQUEST, + TSCM_ADDR_BASE + TSCM_OFFSET_CLOCK_STATUS, + &reg, sizeof(reg), 0); + if (err < 0) + return err; + *data = be32_to_cpu(reg); + if (*data & CLOCK_STATUS_MASK) + break; - return err; + // In intermediate state after changing clock status. + msleep(50); + } + + // Still in the intermediate state. + if (trial >= 5) + return -EAGAIN; + + return 0; } static int set_clock(struct snd_tscm *tscm, unsigned int rate, @@ -34,7 +51,7 @@ static int set_clock(struct snd_tscm *tscm, unsigned int rate, err = get_clock(tscm, &data); if (err < 0) return err; - data &= 0x0000ffff; + data &= CLOCK_CONFIG_MASK; if (rate > 0) { data &= 0x000000ff; @@ -79,17 +96,14 @@ static int set_clock(struct snd_tscm *tscm, unsigned int rate, int snd_tscm_stream_get_rate(struct snd_tscm *tscm, unsigned int *rate) { - u32 data = 0x0; - unsigned int trials = 0; + u32 data; int err; - while (data == 0x0 || trials++ < 5) { - err = get_clock(tscm, &data); - if (err < 0) - return err; + err = get_clock(tscm, &data); + if (err < 0) + return err; - data = (data & 0xff000000) >> 24; - } + data = (data & 0xff000000) >> 24; /* Check base rate.
*/ if ((data & 0x0f) == 0x01) diff --git a/sound/hda/hdac_controller.c b/sound/hda/hdac_controller.c index 3b0110545070..196bbc85699e 100644 --- a/sound/hda/hdac_controller.c +++ b/sound/hda/hdac_controller.c @@ -447,6 +447,8 @@ static void azx_int_disable(struct hdac_bus *bus) list_for_each_entry(azx_dev, &bus->stream_list, list) snd_hdac_stream_updateb(azx_dev, SD_CTL, SD_INT_MASK, 0); + synchronize_irq(bus->irq); + /* disable SIE for all streams */ snd_hdac_chip_writeb(bus, INTCTL, 0); diff --git a/sound/i2c/other/ak4xxx-adda.c b/sound/i2c/other/ak4xxx-adda.c index 5f59316f982a..7d15093844b9 100644 --- a/sound/i2c/other/ak4xxx-adda.c +++ b/sound/i2c/other/ak4xxx-adda.c @@ -775,11 +775,12 @@ static int build_adc_controls(struct snd_akm4xxx *ak) return err; memset(&knew, 0, sizeof(knew)); - knew.name = ak->adc_info[mixer_ch].selector_name; - if (!knew.name) { + if (!ak->adc_info || + !ak->adc_info[mixer_ch].selector_name) { knew.name = "Capture Channel"; knew.index = mixer_ch + ak->idx_offset * 2; - } + } else + knew.name = ak->adc_info[mixer_ch].selector_name; knew.iface = SNDRV_CTL_ELEM_IFACE_MIXER; knew.info = ak4xxx_capture_source_info; diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c index 51f10ed9bc43..a2fb19129219 100644 --- a/sound/pci/hda/hda_codec.c +++ b/sound/pci/hda/hda_codec.c @@ -846,7 +846,13 @@ static void snd_hda_codec_dev_release(struct device *dev) snd_hda_sysfs_clear(codec); kfree(codec->modelname); kfree(codec->wcaps); - kfree(codec); + + /* + * In the case of ASoC HD-audio, hda_codec is device managed. + * It will be freed when the ASoC device is removed. + */ + if (codec->core.type == HDA_DEV_LEGACY) + kfree(codec); } #define DEV_NAME_LEN 31 diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c index 48d863736b3c..a5a2e9fe7785 100644 --- a/sound/pci/hda/hda_controller.c +++ b/sound/pci/hda/hda_controller.c @@ -869,10 +869,13 @@ static int azx_rirb_get_response(struct hdac_bus *bus, unsigned int addr, */ if (hbus->allow_bus_reset && !hbus->response_reset && !hbus->in_reset) { hbus->response_reset = 1; + dev_err(chip->card->dev, + "No response from codec, resetting bus: last cmd=0x%08x\n", + bus->last_cmd[addr]); return -EAGAIN; /* give a chance to retry */ } - dev_err(chip->card->dev, + dev_WARN(chip->card->dev, "azx_get_response timeout, switching to single_cmd mode: last cmd=0x%08x\n", bus->last_cmd[addr]); chip->single_cmd = 1; diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index b0de3e3b33e5..783f9a9c40ec 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -1349,9 +1349,9 @@ static int azx_free(struct azx *chip) } if (bus->chip_init) { + azx_stop_chip(chip); azx_clear_irq_pending(chip); azx_stop_all_streams(chip); - azx_stop_chip(chip); } if (bus->irq >= 0) diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c index bea7b0961080..36240def9bf5 100644 --- a/sound/pci/hda/patch_hdmi.c +++ b/sound/pci/hda/patch_hdmi.c @@ -1421,7 +1421,7 @@ static void hdmi_pcm_reset_pin(struct hdmi_spec *spec, /* update per_pin ELD from the given new ELD; * setup info frame and notification accordingly */ -static void update_eld(struct hda_codec *codec, +static bool update_eld(struct hda_codec *codec, struct hdmi_spec_per_pin *per_pin, struct hdmi_eld *eld) { @@ -1452,18 +1452,22 @@ static void update_eld(struct hda_codec *codec, snd_hdmi_show_eld(codec, &eld->info); eld_changed = (pin_eld->eld_valid != eld->eld_valid); - if (eld->eld_valid && pin_eld->eld_valid) + eld_changed |= 
(pin_eld->monitor_present != eld->monitor_present); + if (!eld_changed && eld->eld_valid && pin_eld->eld_valid) if (pin_eld->eld_size != eld->eld_size || memcmp(pin_eld->eld_buffer, eld->eld_buffer, eld->eld_size) != 0) eld_changed = true; - pin_eld->monitor_present = eld->monitor_present; - pin_eld->eld_valid = eld->eld_valid; - pin_eld->eld_size = eld->eld_size; - if (eld->eld_valid) - memcpy(pin_eld->eld_buffer, eld->eld_buffer, eld->eld_size); - pin_eld->info = eld->info; + if (eld_changed) { + pin_eld->monitor_present = eld->monitor_present; + pin_eld->eld_valid = eld->eld_valid; + pin_eld->eld_size = eld->eld_size; + if (eld->eld_valid) + memcpy(pin_eld->eld_buffer, eld->eld_buffer, + eld->eld_size); + pin_eld->info = eld->info; + } /* * Re-setup pin and infoframe. This is needed e.g. when @@ -1481,6 +1485,7 @@ static void update_eld(struct hda_codec *codec, SNDRV_CTL_EVENT_MASK_VALUE | SNDRV_CTL_EVENT_MASK_INFO, &get_hdmi_pcm(spec, pcm_idx)->eld_ctl->id); + return eld_changed; } /* update ELD and jack state via HD-audio verbs */ @@ -1582,6 +1587,7 @@ static void sync_eld_via_acomp(struct hda_codec *codec, struct hdmi_spec *spec = codec->spec; struct hdmi_eld *eld = &spec->temp_eld; struct snd_jack *jack = NULL; + bool changed; int size; mutex_lock(&per_pin->lock); @@ -1608,15 +1614,13 @@ static void sync_eld_via_acomp(struct hda_codec *codec, * disconnected event. Jack must be fetched before update_eld() */ jack = pin_idx_to_jack(codec, per_pin); - update_eld(codec, per_pin, eld); + changed = update_eld(codec, per_pin, eld); if (jack == NULL) jack = pin_idx_to_jack(codec, per_pin); - if (jack == NULL) - goto unlock; - snd_jack_report(jack, - (eld->monitor_present && eld->eld_valid) ? + if (changed && jack) + snd_jack_report(jack, + (eld->monitor_present && eld->eld_valid) ? 
SND_JACK_AVOUT : 0); - unlock: mutex_unlock(&per_pin->lock); } @@ -2612,6 +2616,8 @@ static void i915_pin_cvt_fixup(struct hda_codec *codec, /* precondition and allocation for Intel codecs */ static int alloc_intel_hdmi(struct hda_codec *codec) { + int err; + /* requires i915 binding */ if (!codec->bus->core.audio_component) { codec_info(codec, "No i915 binding for Intel HDMI/DP codec\n"); @@ -2620,7 +2626,12 @@ static int alloc_intel_hdmi(struct hda_codec *codec) return -ENODEV; } - return alloc_generic_hdmi(codec); + err = alloc_generic_hdmi(codec); + if (err < 0) + return err; + /* no need to handle unsol events */ + codec->patch_ops.unsol_event = NULL; + return 0; } /* parse and post-process for Intel codecs */ diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index c1ddfd2fac52..36aee8ad2054 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -1058,6 +1058,9 @@ static const struct snd_pci_quirk beep_white_list[] = { SND_PCI_QUIRK(0x1043, 0x834a, "EeePC", 1), SND_PCI_QUIRK(0x1458, 0xa002, "GA-MA790X", 1), SND_PCI_QUIRK(0x8086, 0xd613, "Intel", 1), + /* blacklist -- no beep available */ + SND_PCI_QUIRK(0x17aa, 0x309e, "Lenovo ThinkCentre M73", 0), + SND_PCI_QUIRK(0x17aa, 0x30a3, "Lenovo ThinkCentre M93", 0), {} }; @@ -3755,6 +3758,72 @@ static void alc269_x101_hp_automute_hook(struct hda_codec *codec, vref); } +/* + * Magic sequence to make Huawei Matebook X right speaker working (bko#197801) + */ +struct hda_alc298_mbxinit { + unsigned char value_0x23; + unsigned char value_0x25; +}; + +static void alc298_huawei_mbx_stereo_seq(struct hda_codec *codec, + const struct hda_alc298_mbxinit *initval, + bool first) +{ + snd_hda_codec_write(codec, 0x06, 0, AC_VERB_SET_DIGI_CONVERT_3, 0x0); + alc_write_coef_idx(codec, 0x26, 0xb000); + + if (first) + snd_hda_codec_write(codec, 0x21, 0, AC_VERB_GET_PIN_SENSE, 0x0); + + snd_hda_codec_write(codec, 0x6, 0, AC_VERB_SET_DIGI_CONVERT_3, 0x80); + alc_write_coef_idx(codec, 0x26, 0xf000); + alc_write_coef_idx(codec, 0x23, initval->value_0x23); + + if (initval->value_0x23 != 0x1e) + alc_write_coef_idx(codec, 0x25, initval->value_0x25); + + snd_hda_codec_write(codec, 0x20, 0, AC_VERB_SET_COEF_INDEX, 0x26); + snd_hda_codec_write(codec, 0x20, 0, AC_VERB_SET_PROC_COEF, 0xb010); +} + +static void alc298_fixup_huawei_mbx_stereo(struct hda_codec *codec, + const struct hda_fixup *fix, + int action) +{ + /* Initialization magic */ + static const struct hda_alc298_mbxinit dac_init[] = { + {0x0c, 0x00}, {0x0d, 0x00}, {0x0e, 0x00}, {0x0f, 0x00}, + {0x10, 0x00}, {0x1a, 0x40}, {0x1b, 0x82}, {0x1c, 0x00}, + {0x1d, 0x00}, {0x1e, 0x00}, {0x1f, 0x00}, + {0x20, 0xc2}, {0x21, 0xc8}, {0x22, 0x26}, {0x23, 0x24}, + {0x27, 0xff}, {0x28, 0xff}, {0x29, 0xff}, {0x2a, 0x8f}, + {0x2b, 0x02}, {0x2c, 0x48}, {0x2d, 0x34}, {0x2e, 0x00}, + {0x2f, 0x00}, + {0x30, 0x00}, {0x31, 0x00}, {0x32, 0x00}, {0x33, 0x00}, + {0x34, 0x00}, {0x35, 0x01}, {0x36, 0x93}, {0x37, 0x0c}, + {0x38, 0x00}, {0x39, 0x00}, {0x3a, 0xf8}, {0x38, 0x80}, + {} + }; + const struct hda_alc298_mbxinit *seq; + + if (action != HDA_FIXUP_ACT_INIT) + return; + + /* Start */ + snd_hda_codec_write(codec, 0x06, 0, AC_VERB_SET_DIGI_CONVERT_3, 0x00); + snd_hda_codec_write(codec, 0x06, 0, AC_VERB_SET_DIGI_CONVERT_3, 0x80); + alc_write_coef_idx(codec, 0x26, 0xf000); + alc_write_coef_idx(codec, 0x22, 0x31); + alc_write_coef_idx(codec, 0x23, 0x0b); + alc_write_coef_idx(codec, 0x25, 0x00); + snd_hda_codec_write(codec, 0x20, 0, AC_VERB_SET_COEF_INDEX, 0x26); + 
snd_hda_codec_write(codec, 0x20, 0, AC_VERB_SET_PROC_COEF, 0xb010); + + for (seq = dac_init; seq->value_0x23; seq++) + alc298_huawei_mbx_stereo_seq(codec, seq, seq == dac_init); +} + static void alc269_fixup_x101_headset_mic(struct hda_codec *codec, const struct hda_fixup *fix, int action) { @@ -5780,6 +5849,7 @@ enum { ALC255_FIXUP_DUMMY_LINEOUT_VERB, ALC255_FIXUP_DELL_HEADSET_MIC, ALC256_FIXUP_HUAWEI_MACH_WX9_PINS, + ALC298_FIXUP_HUAWEI_MBX_STEREO, ALC295_FIXUP_HP_X360, ALC221_FIXUP_HP_HEADSET_MIC, ALC285_FIXUP_LENOVO_HEADPHONE_NOISE, @@ -5800,6 +5870,7 @@ enum { ALC256_FIXUP_ASUS_MIC_NO_PRESENCE, ALC299_FIXUP_PREDATOR_SPK, ALC294_FIXUP_ASUS_INTSPK_HEADSET_MIC, + ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE, }; static const struct hda_fixup alc269_fixups[] = { @@ -6089,6 +6160,12 @@ static const struct hda_fixup alc269_fixups[] = { .chained = true, .chain_id = ALC255_FIXUP_MIC_MUTE_LED }, + [ALC298_FIXUP_HUAWEI_MBX_STEREO] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc298_fixup_huawei_mbx_stereo, + .chained = true, + .chain_id = ALC255_FIXUP_MIC_MUTE_LED + }, [ALC269_FIXUP_ASUS_X101_FUNC] = { .type = HDA_FIXUP_FUNC, .v.func = alc269_fixup_x101_headset_mic, @@ -6850,6 +6927,16 @@ static const struct hda_fixup alc269_fixups[] = { .chained = true, .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC }, + [ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE] = { + .type = HDA_FIXUP_PINS, + .v.pins = (const struct hda_pintbl[]) { + { 0x19, 0x04a11040 }, + { 0x21, 0x04211020 }, + { } + }, + .chained = true, + .chain_id = ALC256_FIXUP_ASUS_HEADSET_MODE + }, }; static const struct snd_pci_quirk alc269_fixup_tbl[] = { @@ -7113,6 +7200,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD), SND_PCI_QUIRK(0x19e5, 0x3204, "Huawei MACH-WX9", ALC256_FIXUP_HUAWEI_MACH_WX9_PINS), SND_PCI_QUIRK(0x1b7d, 0xa831, "Ordissimo EVE2 ", ALC269VB_FIXUP_ORDISSIMO_EVE2), /* Also known as Malata PC-B1303 */ + SND_PCI_QUIRK(0x10ec, 0x118c, "Medion EE4254 MD62100", ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE), #if 0 /* Below is a quirk table taken from the old code. 
@@ -7280,6 +7368,8 @@ static const struct hda_model_fixup alc269_fixup_models[] = { {.id = ALC225_FIXUP_HEADSET_JACK, .name = "alc-headset-jack"}, {.id = ALC295_FIXUP_CHROME_BOOK, .name = "alc-chrome-book"}, {.id = ALC299_FIXUP_PREDATOR_SPK, .name = "predator-spk"}, + {.id = ALC298_FIXUP_HUAWEI_MBX_STEREO, .name = "huawei-mbx-stereo"}, + {.id = ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE, .name = "alc256-medion-headset"}, {} }; #define ALC225_STANDARD_PINS \ diff --git a/sound/soc/atmel/mchp-i2s-mcc.c b/sound/soc/atmel/mchp-i2s-mcc.c index 86495883ca3f..ab7d5f98e759 100644 --- a/sound/soc/atmel/mchp-i2s-mcc.c +++ b/sound/soc/atmel/mchp-i2s-mcc.c @@ -670,8 +670,13 @@ static int mchp_i2s_mcc_hw_params(struct snd_pcm_substream *substream, } ret = regmap_write(dev->regmap, MCHP_I2SMCC_MRA, mra); - if (ret < 0) + if (ret < 0) { + if (dev->gclk_use) { + clk_unprepare(dev->gclk); + dev->gclk_use = 0; + } return ret; + } return regmap_write(dev->regmap, MCHP_I2SMCC_MRB, mrb); } @@ -686,31 +691,37 @@ static int mchp_i2s_mcc_hw_free(struct snd_pcm_substream *substream, err = wait_event_interruptible_timeout(dev->wq_txrdy, dev->tx_rdy, msecs_to_jiffies(500)); + if (err == 0) { + dev_warn_once(dev->dev, + "Timeout waiting for Tx ready\n"); + regmap_write(dev->regmap, MCHP_I2SMCC_IDRA, + MCHP_I2SMCC_INT_TXRDY_MASK(dev->channels)); + dev->tx_rdy = 1; + } } else { err = wait_event_interruptible_timeout(dev->wq_rxrdy, dev->rx_rdy, msecs_to_jiffies(500)); - } - - if (err == 0) { - u32 idra; - - dev_warn_once(dev->dev, "Timeout waiting for %s\n", - is_playback ? "Tx ready" : "Rx ready"); - if (is_playback) - idra = MCHP_I2SMCC_INT_TXRDY_MASK(dev->channels); - else - idra = MCHP_I2SMCC_INT_RXRDY_MASK(dev->channels); - regmap_write(dev->regmap, MCHP_I2SMCC_IDRA, idra); + if (err == 0) { + dev_warn_once(dev->dev, + "Timeout waiting for Rx ready\n"); + regmap_write(dev->regmap, MCHP_I2SMCC_IDRA, + MCHP_I2SMCC_INT_RXRDY_MASK(dev->channels)); + dev->rx_rdy = 1; + } } if (!mchp_i2s_mcc_is_running(dev)) { regmap_write(dev->regmap, MCHP_I2SMCC_CR, MCHP_I2SMCC_CR_CKDIS); if (dev->gclk_running) { - clk_disable_unprepare(dev->gclk); + clk_disable(dev->gclk); dev->gclk_running = 0; } + if (dev->gclk_use) { + clk_unprepare(dev->gclk); + dev->gclk_use = 0; + } } return 0; @@ -809,6 +820,8 @@ static int mchp_i2s_mcc_dai_probe(struct snd_soc_dai *dai) init_waitqueue_head(&dev->wq_txrdy); init_waitqueue_head(&dev->wq_rxrdy); + dev->tx_rdy = 1; + dev->rx_rdy = 1; snd_soc_dai_init_dma_data(dai, &dev->playback, &dev->capture); diff --git a/sound/soc/codecs/es8316.c b/sound/soc/codecs/es8316.c index 6db002cc2058..96d04896193f 100644 --- a/sound/soc/codecs/es8316.c +++ b/sound/soc/codecs/es8316.c @@ -51,7 +51,10 @@ static const SNDRV_CTL_TLVD_DECLARE_DB_SCALE(adc_vol_tlv, -9600, 50, 1); static const SNDRV_CTL_TLVD_DECLARE_DB_SCALE(alc_max_gain_tlv, -650, 150, 0); static const SNDRV_CTL_TLVD_DECLARE_DB_SCALE(alc_min_gain_tlv, -1200, 150, 0); static const SNDRV_CTL_TLVD_DECLARE_DB_SCALE(alc_target_tlv, -1650, 150, 0); -static const SNDRV_CTL_TLVD_DECLARE_DB_SCALE(hpmixer_gain_tlv, -1200, 150, 0); +static const SNDRV_CTL_TLVD_DECLARE_DB_RANGE(hpmixer_gain_tlv, + 0, 4, TLV_DB_SCALE_ITEM(-1200, 150, 0), + 8, 11, TLV_DB_SCALE_ITEM(-450, 150, 0), +); static const SNDRV_CTL_TLVD_DECLARE_DB_RANGE(adc_pga_gain_tlv, 0, 0, TLV_DB_SCALE_ITEM(-350, 0, 0), @@ -89,7 +92,7 @@ static const struct snd_kcontrol_new es8316_snd_controls[] = { SOC_DOUBLE_TLV("Headphone Playback Volume", ES8316_CPHP_ICAL_VOL, 4, 0, 3, 1, hpout_vol_tlv), 
SOC_DOUBLE_TLV("Headphone Mixer Volume", ES8316_HPMIX_VOL, - 0, 4, 7, 0, hpmixer_gain_tlv), + 0, 4, 11, 0, hpmixer_gain_tlv), SOC_ENUM("Playback Polarity", dacpol), SOC_DOUBLE_R_TLV("DAC Playback Volume", ES8316_DAC_VOLL, diff --git a/sound/soc/codecs/hdac_hda.c b/sound/soc/codecs/hdac_hda.c index 7d4940256914..91242b6f8ea7 100644 --- a/sound/soc/codecs/hdac_hda.c +++ b/sound/soc/codecs/hdac_hda.c @@ -495,6 +495,10 @@ static int hdac_hda_dev_probe(struct hdac_device *hdev) static int hdac_hda_dev_remove(struct hdac_device *hdev) { + struct hdac_hda_priv *hda_pvt; + + hda_pvt = dev_get_drvdata(&hdev->dev); + cancel_delayed_work_sync(&hda_pvt->codec.jackpoll_work); return 0; } diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c index a6a4748c97f9..7cbaedffa1ef 100644 --- a/sound/soc/codecs/sgtl5000.c +++ b/sound/soc/codecs/sgtl5000.c @@ -1173,12 +1173,17 @@ static int sgtl5000_set_power_regs(struct snd_soc_component *component) SGTL5000_INT_OSC_EN); /* Enable VDDC charge pump */ ana_pwr |= SGTL5000_VDDC_CHRGPMP_POWERUP; - } else if (vddio >= 3100 && vdda >= 3100) { + } else { ana_pwr &= ~SGTL5000_VDDC_CHRGPMP_POWERUP; - /* VDDC use VDDIO rail */ - lreg_ctrl |= SGTL5000_VDDC_ASSN_OVRD; - lreg_ctrl |= SGTL5000_VDDC_MAN_ASSN_VDDIO << - SGTL5000_VDDC_MAN_ASSN_SHIFT; + /* + * if vddio == vdda the source of charge pump should be + * assigned manually to VDDIO + */ + if (vddio == vdda) { + lreg_ctrl |= SGTL5000_VDDC_ASSN_OVRD; + lreg_ctrl |= SGTL5000_VDDC_MAN_ASSN_VDDIO << + SGTL5000_VDDC_MAN_ASSN_SHIFT; + } } snd_soc_component_write(component, SGTL5000_CHIP_LINREG_CTRL, lreg_ctrl); @@ -1288,6 +1293,7 @@ static int sgtl5000_probe(struct snd_soc_component *component) int ret; u16 reg; struct sgtl5000_priv *sgtl5000 = snd_soc_component_get_drvdata(component); + unsigned int zcd_mask = SGTL5000_HP_ZCD_EN | SGTL5000_ADC_ZCD_EN; /* power up sgtl5000 */ ret = sgtl5000_set_power_regs(component); @@ -1315,9 +1321,8 @@ static int sgtl5000_probe(struct snd_soc_component *component) 0x1f); snd_soc_component_write(component, SGTL5000_CHIP_PAD_STRENGTH, reg); - snd_soc_component_write(component, SGTL5000_CHIP_ANA_CTRL, - SGTL5000_HP_ZCD_EN | - SGTL5000_ADC_ZCD_EN); + snd_soc_component_update_bits(component, SGTL5000_CHIP_ANA_CTRL, + zcd_mask, zcd_mask); snd_soc_component_update_bits(component, SGTL5000_CHIP_MIC_CTRL, SGTL5000_BIAS_R_MASK, diff --git a/sound/soc/codecs/tlv320aic31xx.c b/sound/soc/codecs/tlv320aic31xx.c index 9b37e98da0db..26a4f6cd3288 100644 --- a/sound/soc/codecs/tlv320aic31xx.c +++ b/sound/soc/codecs/tlv320aic31xx.c @@ -1553,7 +1553,8 @@ static int aic31xx_i2c_probe(struct i2c_client *i2c, aic31xx->gpio_reset = devm_gpiod_get_optional(aic31xx->dev, "reset", GPIOD_OUT_LOW); if (IS_ERR(aic31xx->gpio_reset)) { - dev_err(aic31xx->dev, "not able to acquire gpio\n"); + if (PTR_ERR(aic31xx->gpio_reset) != -EPROBE_DEFER) + dev_err(aic31xx->dev, "not able to acquire gpio\n"); return PTR_ERR(aic31xx->gpio_reset); } @@ -1564,7 +1565,9 @@ static int aic31xx_i2c_probe(struct i2c_client *i2c, ARRAY_SIZE(aic31xx->supplies), aic31xx->supplies); if (ret) { - dev_err(aic31xx->dev, "Failed to request supplies: %d\n", ret); + if (ret != -EPROBE_DEFER) + dev_err(aic31xx->dev, + "Failed to request supplies: %d\n", ret); return ret; } diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c index fa862af25c1a..085855f9b08d 100644 --- a/sound/soc/fsl/fsl_ssi.c +++ b/sound/soc/fsl/fsl_ssi.c @@ -799,15 +799,6 @@ static int fsl_ssi_hw_params(struct snd_pcm_substream *substream, u32 wl = 
SSI_SxCCR_WL(sample_size); int ret; - /* - * SSI is properly configured if it is enabled and running in - * the synchronous mode; Note that AC97 mode is an exception - * that should set separate configurations for STCCR and SRCCR - * despite running in the synchronous mode. - */ - if (ssi->streams && ssi->synchronous) - return 0; - if (fsl_ssi_is_i2s_master(ssi)) { ret = fsl_ssi_set_bclk(substream, dai, hw_params); if (ret) @@ -823,6 +814,15 @@ static int fsl_ssi_hw_params(struct snd_pcm_substream *substream, } } + /* + * SSI is properly configured if it is enabled and running in + * the synchronous mode; Note that AC97 mode is an exception + * that should set separate configurations for STCCR and SRCCR + * despite running in the synchronous mode. + */ + if (ssi->streams && ssi->synchronous) + return 0; + if (!fsl_ssi_is_ac97(ssi)) { /* * Keep the ssi->i2s_net intact while having a local variable diff --git a/sound/soc/intel/common/sst-acpi.c b/sound/soc/intel/common/sst-acpi.c index 0e8e0a7a11df..5854868650b9 100644 --- a/sound/soc/intel/common/sst-acpi.c +++ b/sound/soc/intel/common/sst-acpi.c @@ -141,11 +141,12 @@ static int sst_acpi_probe(struct platform_device *pdev) } platform_set_drvdata(pdev, sst_acpi); + mach->pdata = sst_pdata; /* register machine driver */ sst_acpi->pdev_mach = platform_device_register_data(dev, mach->drv_name, -1, - sst_pdata, sizeof(*sst_pdata)); + mach, sizeof(*mach)); if (IS_ERR(sst_acpi->pdev_mach)) return PTR_ERR(sst_acpi->pdev_mach); diff --git a/sound/soc/intel/common/sst-ipc.c b/sound/soc/intel/common/sst-ipc.c index ef5b66af1cd2..3a66121ee9bb 100644 --- a/sound/soc/intel/common/sst-ipc.c +++ b/sound/soc/intel/common/sst-ipc.c @@ -222,6 +222,8 @@ struct ipc_message *sst_ipc_reply_find_msg(struct sst_generic_ipc *ipc, if (ipc->ops.reply_msg_match != NULL) header = ipc->ops.reply_msg_match(header, &mask); + else + mask = (u64)-1; if (list_empty(&ipc->rx_list)) { dev_err(ipc->dev, "error: rx list empty but received 0x%llx\n", diff --git a/sound/soc/intel/skylake/skl-debug.c b/sound/soc/intel/skylake/skl-debug.c index b9b4a72a4334..b28a9c2b0380 100644 --- a/sound/soc/intel/skylake/skl-debug.c +++ b/sound/soc/intel/skylake/skl-debug.c @@ -188,7 +188,7 @@ static ssize_t fw_softreg_read(struct file *file, char __user *user_buf, memset(d->fw_read_buff, 0, FW_REG_BUF); if (w0_stat_sz > 0) - __iowrite32_copy(d->fw_read_buff, fw_reg_addr, w0_stat_sz >> 2); + __ioread32_copy(d->fw_read_buff, fw_reg_addr, w0_stat_sz >> 2); for (offset = 0; offset < FW_REG_SIZE; offset += 16) { ret += snprintf(tmp + ret, FW_REG_BUF - ret, "%#.4x: ", offset); diff --git a/sound/soc/intel/skylake/skl-nhlt.c b/sound/soc/intel/skylake/skl-nhlt.c index 1132109cb992..e01815cec6fd 100644 --- a/sound/soc/intel/skylake/skl-nhlt.c +++ b/sound/soc/intel/skylake/skl-nhlt.c @@ -225,7 +225,7 @@ int skl_nhlt_update_topology_bin(struct skl *skl) struct hdac_bus *bus = skl_to_bus(skl); struct device *dev = bus->dev; - dev_dbg(dev, "oem_id %.6s, oem_table_id %8s oem_revision %d\n", + dev_dbg(dev, "oem_id %.6s, oem_table_id %.8s oem_revision %d\n", nhlt->header.oem_id, nhlt->header.oem_table_id, nhlt->header.oem_revision); diff --git a/sound/soc/sh/rcar/adg.c b/sound/soc/sh/rcar/adg.c index fce4e050a9b7..b9aacf3d3b29 100644 --- a/sound/soc/sh/rcar/adg.c +++ b/sound/soc/sh/rcar/adg.c @@ -30,6 +30,7 @@ struct rsnd_adg { struct clk *clkout[CLKOUTMAX]; struct clk_onecell_data onecell; struct rsnd_mod mod; + int clk_rate[CLKMAX]; u32 flags; u32 ckr; u32 rbga; @@ -114,9 +115,9 @@ static void 
__rsnd_adg_get_timesel_ratio(struct rsnd_priv *priv,
 	unsigned int val, en;
 	unsigned int min, diff;
 	unsigned int sel_rate[] = {
-		clk_get_rate(adg->clk[CLKA]),	/* 0000: CLKA */
-		clk_get_rate(adg->clk[CLKB]),	/* 0001: CLKB */
-		clk_get_rate(adg->clk[CLKC]),	/* 0010: CLKC */
+		adg->clk_rate[CLKA],	/* 0000: CLKA */
+		adg->clk_rate[CLKB],	/* 0001: CLKB */
+		adg->clk_rate[CLKC],	/* 0010: CLKC */
 		adg->rbga_rate_for_441khz,	/* 0011: RBGA */
 		adg->rbgb_rate_for_48khz,	/* 0100: RBGB */
 	};
@@ -302,7 +303,7 @@ int rsnd_adg_clk_query(struct rsnd_priv *priv, unsigned int rate)
 	 * AUDIO_CLKA/AUDIO_CLKB/AUDIO_CLKC/AUDIO_CLKI.
 	 */
 	for_each_rsnd_clk(clk, adg, i) {
-		if (rate == clk_get_rate(clk))
+		if (rate == adg->clk_rate[i])
 			return sel_table[i];
 	}
 
@@ -369,10 +370,18 @@ void rsnd_adg_clk_control(struct rsnd_priv *priv, int enable)
 	for_each_rsnd_clk(clk, adg, i) {
 		ret = 0;
-		if (enable)
+		if (enable) {
 			ret = clk_prepare_enable(clk);
-		else
+
+			/*
+			 * We shouldn't use clk_get_rate() under
+			 * atomic context. Let's keep it when
+			 * rsnd_adg_clk_enable() was called
+			 */
+			adg->clk_rate[i] = clk_get_rate(adg->clk[i]);
+		} else {
 			clk_disable_unprepare(clk);
+		}
 
 		if (ret < 0)
 			dev_warn(dev, "can't use clk %d\n", i);
diff --git a/sound/soc/soc-generic-dmaengine-pcm.c b/sound/soc/soc-generic-dmaengine-pcm.c
index 748f5f641002..d93db2c2b527 100644
--- a/sound/soc/soc-generic-dmaengine-pcm.c
+++ b/sound/soc/soc-generic-dmaengine-pcm.c
@@ -306,6 +306,12 @@ static int dmaengine_pcm_new(struct snd_soc_pcm_runtime *rtd)
 
 		if (!dmaengine_pcm_can_report_residue(dev, pcm->chan[i]))
 			pcm->flags |= SND_DMAENGINE_PCM_FLAG_NO_RESIDUE;
+
+		if (rtd->pcm->streams[i].pcm->name[0] == '\0') {
+			strncpy(rtd->pcm->streams[i].pcm->name,
+				rtd->pcm->streams[i].pcm->id,
+				sizeof(rtd->pcm->streams[i].pcm->name));
+		}
 	}
 
 	return 0;
diff --git a/sound/soc/sof/intel/hda-codec.c b/sound/soc/sof/intel/hda-codec.c
index b8b37f082309..0d8437b080bf 100644
--- a/sound/soc/sof/intel/hda-codec.c
+++ b/sound/soc/sof/intel/hda-codec.c
@@ -62,8 +62,7 @@ static int hda_codec_probe(struct snd_sof_dev *sdev, int address)
 		 address, resp);
 
 #if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA_AUDIO_CODEC)
-	/* snd_hdac_ext_bus_device_exit will use kfree to free hdev */
-	hda_priv = kzalloc(sizeof(*hda_priv), GFP_KERNEL);
+	hda_priv = devm_kzalloc(sdev->dev, sizeof(*hda_priv), GFP_KERNEL);
 	if (!hda_priv)
 		return -ENOMEM;
 
@@ -82,8 +81,7 @@ static int hda_codec_probe(struct snd_sof_dev *sdev, int address)
 
 	return 0;
 #else
-	/* snd_hdac_ext_bus_device_exit will use kfree to free hdev */
-	hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
+	hdev = devm_kzalloc(sdev->dev, sizeof(*hdev), GFP_KERNEL);
 	if (!hdev)
 		return -ENOMEM;
 
diff --git a/sound/soc/sof/pcm.c b/sound/soc/sof/pcm.c
index 334e9d59b1ba..3b8955e755b2 100644
--- a/sound/soc/sof/pcm.c
+++ b/sound/soc/sof/pcm.c
@@ -208,12 +208,11 @@ static int sof_pcm_hw_params(struct snd_pcm_substream *substream,
 	if (ret < 0)
 		return ret;
 
+	spcm->prepared[substream->stream] = true;
+
 	/* save pcm hw_params */
 	memcpy(&spcm->params[substream->stream], params, sizeof(*params));
 
-	/* clear hw_params_upon_resume flag */
-	spcm->hw_params_upon_resume[substream->stream] = 0;
-
 	return ret;
 }
 
@@ -236,6 +235,9 @@ static int sof_pcm_hw_free(struct snd_pcm_substream *substream)
 	if (!spcm)
 		return -EINVAL;
 
+	if (!spcm->prepared[substream->stream])
+		return 0;
+
 	dev_dbg(sdev->dev, "pcm: free stream %d dir %d\n", spcm->pcm.pcm_id,
 		substream->stream);
 
@@ -258,6 +260,8 @@
 	if (ret < 0)
 		dev_err(sdev->dev, "error: platform hw free failed\n");
 
+	spcm->prepared[substream->stream] = false;
+
 	return ret;
 }
 
@@ -278,11 +282,7 @@ static int sof_pcm_prepare(struct snd_pcm_substream *substream)
 	if (!spcm)
 		return -EINVAL;
 
-	/*
-	 * check if hw_params needs to be set-up again.
-	 * This is only needed when resuming from system sleep.
-	 */
-	if (!spcm->hw_params_upon_resume[substream->stream])
+	if (spcm->prepared[substream->stream])
 		return 0;
 
 	dev_dbg(sdev->dev, "pcm: prepare stream %d dir %d\n", spcm->pcm.pcm_id,
@@ -311,6 +311,7 @@ static int sof_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
 	struct snd_sof_pcm *spcm;
 	struct sof_ipc_stream stream;
 	struct sof_ipc_reply reply;
+	bool reset_hw_params = false;
 	int ret;
 
 	/* nothing to do for BE */
@@ -351,6 +352,7 @@ static int sof_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
 	case SNDRV_PCM_TRIGGER_SUSPEND:
 	case SNDRV_PCM_TRIGGER_STOP:
 		stream.hdr.cmd |= SOF_IPC_STREAM_TRIG_STOP;
+		reset_hw_params = true;
 		break;
 	default:
 		dev_err(sdev->dev, "error: unhandled trigger cmd %d\n", cmd);
@@ -363,17 +365,17 @@ static int sof_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
 	ret = sof_ipc_tx_message(sdev->ipc, stream.hdr.cmd, &stream,
 				 sizeof(stream), &reply, sizeof(reply));
 
-	if (ret < 0 || cmd != SNDRV_PCM_TRIGGER_SUSPEND)
+	if (ret < 0 || !reset_hw_params)
 		return ret;
 
 	/*
-	 * The hw_free op is usually called when the pcm stream is closed.
-	 * Since the stream is not closed during suspend, the DSP needs to be
-	 * notified explicitly to free pcm to prevent errors upon resume.
+	 * In case of stream is stopped, DSP must be reprogrammed upon
+	 * restart, so free PCM here.
 	 */
 	stream.hdr.size = sizeof(stream);
 	stream.hdr.cmd = SOF_IPC_GLB_STREAM_MSG | SOF_IPC_STREAM_PCM_FREE;
 	stream.comp_id = spcm->stream[substream->stream].comp_id;
+	spcm->prepared[substream->stream] = false;
 
 	/* send IPC to the DSP */
 	return sof_ipc_tx_message(sdev->ipc, stream.hdr.cmd, &stream,
@@ -481,6 +483,7 @@ static int sof_pcm_open(struct snd_pcm_substream *substream)
 	spcm->stream[substream->stream].posn.host_posn = 0;
 	spcm->stream[substream->stream].posn.dai_posn = 0;
 	spcm->stream[substream->stream].substream = substream;
+	spcm->prepared[substream->stream] = false;
 
 	ret = snd_sof_pcm_platform_open(sdev, substream);
 	if (ret < 0)
diff --git a/sound/soc/sof/pm.c b/sound/soc/sof/pm.c
index 278abfd10490..48c6d78d72e2 100644
--- a/sound/soc/sof/pm.c
+++ b/sound/soc/sof/pm.c
@@ -233,7 +233,7 @@ static int sof_set_hw_params_upon_resume(struct snd_sof_dev *sdev)
 			state = substream->runtime->status->state;
 
 			if (state == SNDRV_PCM_STATE_SUSPENDED)
-				spcm->hw_params_upon_resume[dir] = 1;
+				spcm->prepared[dir] = false;
 		}
 	}
 
diff --git a/sound/soc/sof/sof-pci-dev.c b/sound/soc/sof/sof-pci-dev.c
index 65d1bac4c6b8..6fd3df7c57a3 100644
--- a/sound/soc/sof/sof-pci-dev.c
+++ b/sound/soc/sof/sof-pci-dev.c
@@ -223,6 +223,9 @@ static void sof_pci_probe_complete(struct device *dev)
 	 */
 	pm_runtime_allow(dev);
 
+	/* mark last_busy for pm_runtime to make sure not suspend immediately */
+	pm_runtime_mark_last_busy(dev);
+
 	/* follow recommendation in pci-driver.c to decrement usage counter */
 	pm_runtime_put_noidle(dev);
 }
diff --git a/sound/soc/sof/sof-priv.h b/sound/soc/sof/sof-priv.h
index b8c0b2a22684..fa5cb7d2a660 100644
--- a/sound/soc/sof/sof-priv.h
+++ b/sound/soc/sof/sof-priv.h
@@ -297,7 +297,7 @@ struct snd_sof_pcm {
 	struct snd_sof_pcm_stream stream[2];
 	struct list_head list;	/* list in sdev pcm list */
 	struct snd_pcm_hw_params params[2];
-	int hw_params_upon_resume[2]; /* set up hw_params upon resume */
+	bool prepared[2]; /* PCM_PARAMS set successfully */
 };
 
 /* ALSA SOF Kcontrol device */
diff --git a/sound/soc/sunxi/sun4i-i2s.c b/sound/soc/sunxi/sun4i-i2s.c
index 7fa5c61169db..ab8cb83c8b1a 100644
--- a/sound/soc/sunxi/sun4i-i2s.c
+++ b/sound/soc/sunxi/sun4i-i2s.c
@@ -222,10 +222,11 @@ static const struct sun4i_i2s_clk_div sun4i_i2s_mclk_div[] = {
 };
 
 static int sun4i_i2s_get_bclk_div(struct sun4i_i2s *i2s,
-				  unsigned int oversample_rate,
+				  unsigned long parent_rate,
+				  unsigned int sampling_rate,
 				  unsigned int word_size)
 {
-	int div = oversample_rate / word_size / 2;
+	int div = parent_rate / sampling_rate / word_size / 2;
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(sun4i_i2s_bclk_div); i++) {
@@ -315,8 +316,8 @@ static int sun4i_i2s_set_clk_rate(struct snd_soc_dai *dai,
 		return -EINVAL;
 	}
 
-	bclk_div = sun4i_i2s_get_bclk_div(i2s, oversample_rate,
-					  word_size);
+	bclk_div = sun4i_i2s_get_bclk_div(i2s, i2s->mclk_freq,
+					  rate, word_size);
 	if (bclk_div < 0) {
 		dev_err(dai->dev, "Unsupported BCLK divider: %d\n", bclk_div);
 		return -EINVAL;
 	}
diff --git a/sound/soc/uniphier/aio-cpu.c b/sound/soc/uniphier/aio-cpu.c
index ee90e6c3937c..2ae582a99b63 100644
--- a/sound/soc/uniphier/aio-cpu.c
+++ b/sound/soc/uniphier/aio-cpu.c
@@ -424,8 +424,11 @@ int uniphier_aio_dai_suspend(struct snd_soc_dai *dai)
 {
 	struct uniphier_aio *aio = uniphier_priv(dai);
 
-	reset_control_assert(aio->chip->rst);
-	clk_disable_unprepare(aio->chip->clk);
+	aio->chip->num_wup_aios--;
+	if (!aio->chip->num_wup_aios) {
+		reset_control_assert(aio->chip->rst);
+		clk_disable_unprepare(aio->chip->clk);
+	}
 
 	return 0;
 }
@@ -439,13 +442,15 @@ int uniphier_aio_dai_resume(struct snd_soc_dai *dai)
 	if (!aio->chip->active)
 		return 0;
 
-	ret = clk_prepare_enable(aio->chip->clk);
-	if (ret)
-		return ret;
+	if (!aio->chip->num_wup_aios) {
+		ret = clk_prepare_enable(aio->chip->clk);
+		if (ret)
+			return ret;
 
-	ret = reset_control_deassert(aio->chip->rst);
-	if (ret)
-		goto err_out_clock;
+		ret = reset_control_deassert(aio->chip->rst);
+		if (ret)
+			goto err_out_clock;
+	}
 
 	aio_iecout_set_enable(aio->chip, true);
 	aio_chip_init(aio->chip);
@@ -458,7 +463,7 @@ int uniphier_aio_dai_resume(struct snd_soc_dai *dai)
 
 		ret = aio_init(sub);
 		if (ret)
-			goto err_out_clock;
+			goto err_out_reset;
 
 		if (!sub->setting)
 			continue;
@@ -466,11 +471,16 @@ int uniphier_aio_dai_resume(struct snd_soc_dai *dai)
 		aio_port_reset(sub);
 		aio_src_reset(sub);
 	}
+	aio->chip->num_wup_aios++;
 
 	return 0;
 
+err_out_reset:
+	if (!aio->chip->num_wup_aios)
+		reset_control_assert(aio->chip->rst);
 err_out_clock:
-	clk_disable_unprepare(aio->chip->clk);
+	if (!aio->chip->num_wup_aios)
+		clk_disable_unprepare(aio->chip->clk);
 
 	return ret;
 }
@@ -619,6 +629,7 @@ int uniphier_aio_probe(struct platform_device *pdev)
 		return PTR_ERR(chip->rst);
 
 	chip->num_aios = chip->chip_spec->num_dais;
+	chip->num_wup_aios = chip->num_aios;
 	chip->aios = devm_kcalloc(dev,
 				  chip->num_aios, sizeof(struct uniphier_aio),
 				  GFP_KERNEL);
diff --git a/sound/soc/uniphier/aio.h b/sound/soc/uniphier/aio.h
index ca6ccbae0ee8..a7ff7e556429 100644
--- a/sound/soc/uniphier/aio.h
+++ b/sound/soc/uniphier/aio.h
@@ -285,6 +285,7 @@ struct uniphier_aio_chip {
 
 	struct uniphier_aio *aios;
 	int num_aios;
+	int num_wup_aios;
 
 	struct uniphier_aio_pll *plls;
 	int num_plls;
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
index e4bbf79de956..33cd26763c0e 100644
--- a/sound/usb/pcm.c
+++ b/sound/usb/pcm.c
@@ -457,6 +457,7 @@ static int set_sync_endpoint(struct snd_usb_substream *subs,
 	}
 	ep = get_endpoint(alts, 1)->bEndpointAddress;
 	if (get_endpoint(alts, 0)->bLength >= USB_DT_ENDPOINT_AUDIO_SIZE &&
+	    get_endpoint(alts, 0)->bSynchAddress != 0 &&
 	    ((is_playback && ep != (unsigned int)(get_endpoint(alts, 0)->bSynchAddress | USB_DIR_IN)) ||
 	     (!is_playback && ep != (unsigned int)(get_endpoint(alts, 0)->bSynchAddress & ~USB_DIR_IN)))) {
 		dev_err(&dev->dev,
diff --git a/tools/include/uapi/asm/bitsperlong.h b/tools/include/uapi/asm/bitsperlong.h
index 57aaeaf8e192..edba4d93e9e6 100644
--- a/tools/include/uapi/asm/bitsperlong.h
+++ b/tools/include/uapi/asm/bitsperlong.h
@@ -1,22 +1,22 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 #if defined(__i386__) || defined(__x86_64__)
-#include "../../arch/x86/include/uapi/asm/bitsperlong.h"
+#include "../../../arch/x86/include/uapi/asm/bitsperlong.h"
 #elif defined(__aarch64__)
-#include "../../arch/arm64/include/uapi/asm/bitsperlong.h"
+#include "../../../arch/arm64/include/uapi/asm/bitsperlong.h"
 #elif defined(__powerpc__)
-#include "../../arch/powerpc/include/uapi/asm/bitsperlong.h"
+#include "../../../arch/powerpc/include/uapi/asm/bitsperlong.h"
 #elif defined(__s390__)
-#include "../../arch/s390/include/uapi/asm/bitsperlong.h"
+#include "../../../arch/s390/include/uapi/asm/bitsperlong.h"
 #elif defined(__sparc__)
-#include "../../arch/sparc/include/uapi/asm/bitsperlong.h"
+#include "../../../arch/sparc/include/uapi/asm/bitsperlong.h"
 #elif defined(__mips__)
-#include "../../arch/mips/include/uapi/asm/bitsperlong.h"
+#include "../../../arch/mips/include/uapi/asm/bitsperlong.h"
 #elif defined(__ia64__)
-#include "../../arch/ia64/include/uapi/asm/bitsperlong.h"
+#include "../../../arch/ia64/include/uapi/asm/bitsperlong.h"
#elif defined(__riscv)
-#include "../../arch/riscv/include/uapi/asm/bitsperlong.h"
+#include "../../../arch/riscv/include/uapi/asm/bitsperlong.h"
 #elif defined(__alpha__)
-#include "../../arch/alpha/include/uapi/asm/bitsperlong.h"
+#include "../../../arch/alpha/include/uapi/asm/bitsperlong.h"
 #else
 #include <asm-generic/bitsperlong.h>
 #endif
diff --git a/tools/lib/traceevent/Makefile b/tools/lib/traceevent/Makefile
index 3292c290654f..86ce17a1f7fb 100644
--- a/tools/lib/traceevent/Makefile
+++ b/tools/lib/traceevent/Makefile
@@ -62,15 +62,15 @@ set_plugin_dir := 1
 
 # Set plugin_dir to preffered global plugin location
 # If we install under $HOME directory we go under
-# $(HOME)/.traceevent/plugins
+# $(HOME)/.local/lib/traceevent/plugins
 #
 # We dont set PLUGIN_DIR in case we install under $HOME
 # directory, because by default the code looks under:
-# $(HOME)/.traceevent/plugins by default.
+# $(HOME)/.local/lib/traceevent/plugins by default.
 #
 ifeq ($(plugin_dir),)
 ifeq ($(prefix),$(HOME))
-override plugin_dir = $(HOME)/.traceevent/plugins
+override plugin_dir = $(HOME)/.local/lib/traceevent/plugins
 set_plugin_dir := 0
 else
 override plugin_dir = $(libdir)/traceevent/plugins
diff --git a/tools/lib/traceevent/event-plugin.c b/tools/lib/traceevent/event-plugin.c
index 8ca28de9337a..e1f7ddd5a6cf 100644
--- a/tools/lib/traceevent/event-plugin.c
+++ b/tools/lib/traceevent/event-plugin.c
@@ -18,7 +18,7 @@
 #include "event-utils.h"
 #include "trace-seq.h"
 
-#define LOCAL_PLUGIN_DIR ".traceevent/plugins"
+#define LOCAL_PLUGIN_DIR ".local/lib/traceevent/plugins/"
 
 static struct registered_plugin_options {
 	struct registered_plugin_options *next;
diff --git a/tools/perf/arch/x86/util/kvm-stat.c b/tools/perf/arch/x86/util/kvm-stat.c
index 865a9762f22e..3f84403c0983 100644
--- a/tools/perf/arch/x86/util/kvm-stat.c
+++ b/tools/perf/arch/x86/util/kvm-stat.c
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0
 #include <errno.h>
-#include "../../util/kvm-stat.h"
-#include "../../util/evsel.h"
+#include "../../../util/kvm-stat.h"
+#include "../../../util/evsel.h"
 #include <asm/svm.h>
 #include <asm/vmx.h>
 #include <asm/kvm.h>
diff --git a/tools/perf/arch/x86/util/tsc.c b/tools/perf/arch/x86/util/tsc.c
index 950539f9a4f7..b1eb963b4a6e 100644
--- a/tools/perf/arch/x86/util/tsc.c
+++ b/tools/perf/arch/x86/util/tsc.c
@@ -5,10 +5,10 @@
 #include <linux/stddef.h>
 #include <linux/perf_event.h>
 
-#include "../../perf.h"
+#include "../../../perf.h"
 #include <linux/types.h>
-#include "../../util/debug.h"
-#include "../../util/tsc.h"
+#include "../../../util/debug.h"
+#include "../../../util/tsc.h"
 
 int perf_read_tsc_conversion(const struct perf_event_mmap_page *pc,
 			     struct perf_tsc_conversion *tc)
diff --git a/tools/perf/perf.c b/tools/perf/perf.c
index 97e2628ea5dd..d4e4d53e8b44 100644
--- a/tools/perf/perf.c
+++ b/tools/perf/perf.c
@@ -441,6 +441,9 @@ int main(int argc, const char **argv)
 
 	srandom(time(NULL));
 
+	/* Setting $PERF_CONFIG makes perf read _only_ the given config file. */
+	config_exclusive_filename = getenv("PERF_CONFIG");
+
 	err = perf_config(perf_default_config, NULL);
 	if (err)
 		return err;
diff --git a/tools/perf/tests/shell/trace+probe_vfs_getname.sh b/tools/perf/tests/shell/trace+probe_vfs_getname.sh
index 45d269b0157e..11cc2af13f2b 100755
--- a/tools/perf/tests/shell/trace+probe_vfs_getname.sh
+++ b/tools/perf/tests/shell/trace+probe_vfs_getname.sh
@@ -32,6 +32,10 @@ if [ $err -ne 0 ] ; then
 	exit $err
 fi
 
+# Do not use whatever ~/.perfconfig file, it may change the output
+# via trace.{show_timestamp,show_prefix,etc}
+export PERF_CONFIG=/dev/null
+
 trace_open_vfs_getname
 err=$?
 rm -f ${file}
diff --git a/tools/perf/trace/beauty/ioctl.c b/tools/perf/trace/beauty/ioctl.c
index 52242fa4072b..e19eb6ea361d 100644
--- a/tools/perf/trace/beauty/ioctl.c
+++ b/tools/perf/trace/beauty/ioctl.c
@@ -21,7 +21,7 @@ static size_t ioctl__scnprintf_tty_cmd(int nr, int dir, char *bf, size_t size)
 {
 	static const char *ioctl_tty_cmd[] = {
-	"TCGETS", "TCSETS", "TCSETSW", "TCSETSF", "TCGETA", "TCSETA", "TCSETAW",
+	[_IOC_NR(TCGETS)] = "TCGETS", "TCSETS", "TCSETSW", "TCSETSF", "TCGETA", "TCSETA", "TCSETAW",
 	"TCSETAF", "TCSBRK", "TCXONC", "TCFLSH", "TIOCEXCL", "TIOCNXCL", "TIOCSCTTY",
 	"TIOCGPGRP", "TIOCSPGRP", "TIOCOUTQ", "TIOCSTI", "TIOCGWINSZ", "TIOCSWINSZ",
 	"TIOCMGET", "TIOCMBIS", "TIOCMBIC", "TIOCMSET", "TIOCGSOFTCAR", "TIOCSSOFTCAR",
diff --git a/tools/perf/ui/browsers/scripts.c b/tools/perf/ui/browsers/scripts.c
index 4d565cc14076..0355d4aaf2ee 100644
--- a/tools/perf/ui/browsers/scripts.c
+++ b/tools/perf/ui/browsers/scripts.c
@@ -131,8 +131,10 @@ static int list_scripts(char *script_name, bool *custom,
 		int key = ui_browser__input_window("perf script command",
 				"Enter perf script command line (without perf script prefix)",
 				script_args, "", 0);
-		if (key != K_ENTER)
-			return -1;
+		if (key != K_ENTER) {
+			ret = -1;
+			goto out;
+		}
 		sprintf(script_name, "%s script %s", perf, script_args);
 	} else if (choice < num + max_std) {
 		strcpy(script_name, paths[choice]);
diff --git a/tools/perf/ui/helpline.c b/tools/perf/ui/helpline.c
index b3c421429ed4..54bcd08df87e 100644
--- a/tools/perf/ui/helpline.c
+++ b/tools/perf/ui/helpline.c
@@ -3,10 +3,10 @@
 #include <stdlib.h>
 #include <string.h>
 
-#include "../debug.h"
+#include "../util/debug.h"
 #include "helpline.h"
 #include "ui.h"
-#include "../util.h"
+#include "../util/util.h"
 
 char ui_helpline__current[512];
 
diff --git a/tools/perf/ui/util.c b/tools/perf/ui/util.c
index 63bf06e80ab9..9ed76e88a3e4 100644
--- a/tools/perf/ui/util.c
+++ b/tools/perf/ui/util.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 #include "util.h"
-#include "../debug.h"
+#include "../util/debug.h"
 
 
 /*
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index b0364d923f76..070c3bd57882 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -20,6 +20,7 @@
 #include "bpf-event.h"
 #include <signal.h>
 #include <unistd.h>
+#include <sched.h>
 
 #include "parse-events.h"
 #include <subcmd/parse-options.h>
@@ -1870,6 +1871,14 @@ static void *perf_evlist__poll_thread(void *arg)
 	struct perf_evlist *evlist = arg;
 	bool draining = false;
 	int i, done = 0;
+	/*
+	 * In order to read symbols from other namespaces perf to needs to call
+	 * setns(2). This isn't permitted if the struct_fs has multiple users.
+	 * unshare(2) the fs so that we may continue to setns into namespaces
+	 * that we're observing when, for instance, reading the build-ids at
+	 * the end of a 'perf record' session.
+	 */
+	unshare(CLONE_FS);
 
 	while (!done) {
 		bool got_data = false;
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 1903d7ec9797..bf7cf1249553 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -2251,8 +2251,10 @@ static int process_cpu_topology(struct feat_fd *ff, void *data __maybe_unused)
 	/* On s390 the socket_id number is not related to the numbers of cpus.
 	 * The socket_id number might be higher than the numbers of cpus.
 	 * This depends on the configuration.
+	 * AArch64 is the same.
 	 */
-	if (ph->env.arch && !strncmp(ph->env.arch, "s390", 4))
+	if (ph->env.arch && (!strncmp(ph->env.arch, "s390", 4)
+			     || !strncmp(ph->env.arch, "aarch64", 7)))
 		do_core_id_test = false;
 
 	for (i = 0; i < (u32)cpu_nr; i++) {
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index f24fd1954f6c..6bd270a1e93e 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -193,7 +193,10 @@ void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
 	hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
 	hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
 	hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);
-	hists__new_col_len(hists, HISTC_TIME, 12);
+	if (symbol_conf.nanosecs)
+		hists__new_col_len(hists, HISTC_TIME, 16);
+	else
+		hists__new_col_len(hists, HISTC_TIME, 12);
 
 	if (h->srcline) {
 		len = MAX(strlen(h->srcline), strlen(sort_srcline.se_header));
diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c
index 668410b1d426..7666206d06fa 100644
--- a/tools/perf/util/map.c
+++ b/tools/perf/util/map.c
@@ -647,6 +647,7 @@ struct map_groups *map_groups__new(struct machine *machine)
 void map_groups__delete(struct map_groups *mg)
 {
 	map_groups__exit(mg);
+	unwind__finish_access(mg);
 	free(mg);
 }
 
@@ -887,7 +888,7 @@ int map_groups__clone(struct thread *thread, struct map_groups *parent)
 		if (new == NULL)
 			goto out_unlock;
 
-		err = unwind__prepare_access(thread, new, NULL);
+		err = unwind__prepare_access(mg, new, NULL);
 		if (err)
 			goto out_unlock;
 
diff --git a/tools/perf/util/map_groups.h b/tools/perf/util/map_groups.h
index 5f25efa6d6bc..77252e14008f 100644
--- a/tools/perf/util/map_groups.h
+++ b/tools/perf/util/map_groups.h
@@ -31,6 +31,10 @@ struct map_groups {
 	struct maps	 maps;
 	struct machine	 *machine;
 	refcount_t	 refcnt;
+#ifdef HAVE_LIBUNWIND_SUPPORT
+	void				*addr_space;
+	struct unwind_libunwind_ops	*unwind_libunwind_ops;
+#endif
 };
 
 #define KMAP_NAME_LEN 256
diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c
index 590793cc5142..bbf7816cba31 100644
--- a/tools/perf/util/thread.c
+++ b/tools/perf/util/thread.c
@@ -105,7 +105,6 @@ void thread__delete(struct thread *thread)
 	}
 	up_write(&thread->comm_lock);
 
-	unwind__finish_access(thread);
 	nsinfo__zput(thread->nsinfo);
 	srccode_state_free(&thread->srccode_state);
 
@@ -252,7 +251,7 @@ static int ____thread__set_comm(struct thread *thread, const char *str,
 		list_add(&new->list, &thread->comm_list);
 
 		if (exec)
-			unwind__flush_access(thread);
+			unwind__flush_access(thread->mg);
 	}
 
 	thread->comm_set = true;
@@ -332,7 +331,7 @@ int thread__insert_map(struct thread *thread, struct map *map)
 {
 	int ret;
 
-	ret = unwind__prepare_access(thread, map, NULL);
+	ret = unwind__prepare_access(thread->mg, map, NULL);
 	if (ret)
 		return ret;
 
@@ -352,7 +351,7 @@ static int __thread__prepare_access(struct thread *thread)
 	down_read(&maps->lock);
 
 	for (map = maps__first(maps); map; map = map__next(map)) {
-		err = unwind__prepare_access(thread, map, &initialized);
+		err = unwind__prepare_access(thread->mg, map, &initialized);
 		if (err || initialized)
 			break;
 	}
diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h
index e97ef6977eb9..bf06113be4f3 100644
--- a/tools/perf/util/thread.h
+++ b/tools/perf/util/thread.h
@@ -44,10 +44,6 @@ struct thread {
 	struct thread_stack	*ts;
 	struct nsinfo		*nsinfo;
 	struct srccode_state	srccode_state;
-#ifdef HAVE_LIBUNWIND_SUPPORT
-	void				*addr_space;
-	struct unwind_libunwind_ops *unwind_libunwind_ops;
-#endif
 	bool			filter;
 	int			filter_entry_depth;
 };
diff --git a/tools/perf/util/unwind-libunwind-local.c b/tools/perf/util/unwind-libunwind-local.c
index 71a788921b62..ebdbb056510c 100644
--- a/tools/perf/util/unwind-libunwind-local.c
+++ b/tools/perf/util/unwind-libunwind-local.c
@@ -616,26 +616,26 @@ static unw_accessors_t accessors = {
 	.get_proc_name		= get_proc_name,
 };
 
-static int _unwind__prepare_access(struct thread *thread)
+static int _unwind__prepare_access(struct map_groups *mg)
 {
-	thread->addr_space = unw_create_addr_space(&accessors, 0);
-	if (!thread->addr_space) {
+	mg->addr_space = unw_create_addr_space(&accessors, 0);
+	if (!mg->addr_space) {
 		pr_err("unwind: Can't create unwind address space.\n");
 		return -ENOMEM;
 	}
 
-	unw_set_caching_policy(thread->addr_space, UNW_CACHE_GLOBAL);
+	unw_set_caching_policy(mg->addr_space, UNW_CACHE_GLOBAL);
 	return 0;
 }
 
-static void _unwind__flush_access(struct thread *thread)
+static void _unwind__flush_access(struct map_groups *mg)
 {
-	unw_flush_cache(thread->addr_space, 0, 0);
+	unw_flush_cache(mg->addr_space, 0, 0);
 }
 
-static void _unwind__finish_access(struct thread *thread)
+static void _unwind__finish_access(struct map_groups *mg)
 {
-	unw_destroy_addr_space(thread->addr_space);
+	unw_destroy_addr_space(mg->addr_space);
 }
 
 static int get_entries(struct unwind_info *ui, unwind_entry_cb_t cb,
@@ -660,7 +660,7 @@ static int get_entries(struct unwind_info *ui, unwind_entry_cb_t cb,
 	 */
 	if (max_stack - 1 > 0) {
 		WARN_ONCE(!ui->thread, "WARNING: ui->thread is NULL");
-		addr_space = ui->thread->addr_space;
+		addr_space = ui->thread->mg->addr_space;
 
 		if (addr_space == NULL)
 			return -1;
diff --git a/tools/perf/util/unwind-libunwind.c b/tools/perf/util/unwind-libunwind.c
index c0811977d7d5..b843f9d0a9ea 100644
--- a/tools/perf/util/unwind-libunwind.c
+++ b/tools/perf/util/unwind-libunwind.c
@@ -11,13 +11,13 @@ struct unwind_libunwind_ops __weak *local_unwind_libunwind_ops;
 struct unwind_libunwind_ops __weak *x86_32_unwind_libunwind_ops;
 struct unwind_libunwind_ops __weak *arm64_unwind_libunwind_ops;
 
-static void unwind__register_ops(struct thread *thread,
+static void unwind__register_ops(struct map_groups *mg,
 				 struct unwind_libunwind_ops *ops)
 {
-	thread->unwind_libunwind_ops = ops;
+	mg->unwind_libunwind_ops = ops;
 }
 
-int unwind__prepare_access(struct thread *thread, struct map *map,
+int unwind__prepare_access(struct map_groups *mg, struct map *map,
 			   bool *initialized)
 {
 	const char *arch;
@@ -28,7 +28,7 @@ int unwind__prepare_access(struct thread *thread, struct map *map,
 	if (!dwarf_callchain_users)
 		return 0;
 
-	if (thread->addr_space) {
+	if (mg->addr_space) {
 		pr_debug("unwind: thread map already set, dso=%s\n",
 			 map->dso->name);
 		if (initialized)
@@ -37,14 +37,14 @@ int unwind__prepare_access(struct thread *thread, struct map *map,
 	}
 
 	/* env->arch is NULL for live-mode (i.e. perf top) */
-	if (!thread->mg->machine->env || !thread->mg->machine->env->arch)
+	if (!mg->machine->env || !mg->machine->env->arch)
 		goto out_register;
 
-	dso_type = dso__type(map->dso, thread->mg->machine);
+	dso_type = dso__type(map->dso, mg->machine);
 	if (dso_type == DSO__TYPE_UNKNOWN)
 		return 0;
 
-	arch = perf_env__arch(thread->mg->machine->env);
+	arch = perf_env__arch(mg->machine->env);
 
 	if (!strcmp(arch, "x86")) {
 		if (dso_type != DSO__TYPE_64BIT)
@@ -59,37 +59,37 @@ int unwind__prepare_access(struct thread *thread, struct map *map,
 		return 0;
 	}
 out_register:
-	unwind__register_ops(thread, ops);
+	unwind__register_ops(mg, ops);
 
-	err = thread->unwind_libunwind_ops->prepare_access(thread);
+	err = mg->unwind_libunwind_ops->prepare_access(mg);
 	if (initialized)
 		*initialized = err ? false : true;
 
 	return err;
 }
 
-void unwind__flush_access(struct thread *thread)
+void unwind__flush_access(struct map_groups *mg)
 {
 	if (!dwarf_callchain_users)
 		return;
 
-	if (thread->unwind_libunwind_ops)
-		thread->unwind_libunwind_ops->flush_access(thread);
+	if (mg->unwind_libunwind_ops)
+		mg->unwind_libunwind_ops->flush_access(mg);
 }
 
-void unwind__finish_access(struct thread *thread)
+void unwind__finish_access(struct map_groups *mg)
 {
 	if (!dwarf_callchain_users)
 		return;
 
-	if (thread->unwind_libunwind_ops)
-		thread->unwind_libunwind_ops->finish_access(thread);
+	if (mg->unwind_libunwind_ops)
+		mg->unwind_libunwind_ops->finish_access(mg);
 }
 
 int unwind__get_entries(unwind_entry_cb_t cb, void *arg,
 			struct thread *thread,
 			struct perf_sample *data, int max_stack)
 {
-	if (thread->unwind_libunwind_ops)
-		return thread->unwind_libunwind_ops->get_entries(cb, arg, thread, data, max_stack);
+	if (thread->mg->unwind_libunwind_ops)
+		return thread->mg->unwind_libunwind_ops->get_entries(cb, arg, thread, data, max_stack);
 	return 0;
 }
diff --git a/tools/perf/util/unwind.h b/tools/perf/util/unwind.h
index 8a44a1569a21..3a7d00c20d86 100644
--- a/tools/perf/util/unwind.h
+++ b/tools/perf/util/unwind.h
@@ -6,6 +6,7 @@
 #include <linux/types.h>
 
 struct map;
+struct map_groups;
 struct perf_sample;
 struct symbol;
 struct thread;
@@ -19,9 +20,9 @@ struct unwind_entry {
 typedef int (*unwind_entry_cb_t)(struct unwind_entry *entry, void *arg);
 
 struct unwind_libunwind_ops {
-	int (*prepare_access)(struct thread *thread);
-	void (*flush_access)(struct thread *thread);
-	void (*finish_access)(struct thread *thread);
+	int (*prepare_access)(struct map_groups *mg);
+	void (*flush_access)(struct map_groups *mg);
+	void (*finish_access)(struct map_groups *mg);
 	int (*get_entries)(unwind_entry_cb_t cb, void *arg,
 			   struct thread *thread,
 			   struct perf_sample *data, int max_stack);
@@ -46,20 +47,20 @@ int unwind__get_entries(unwind_entry_cb_t cb, void *arg,
 #endif
 
 int LIBUNWIND__ARCH_REG_ID(int regnum);
-int unwind__prepare_access(struct thread *thread, struct map *map,
+int unwind__prepare_access(struct map_groups *mg, struct map *map,
 			   bool *initialized);
-void unwind__flush_access(struct thread *thread);
-void unwind__finish_access(struct thread *thread);
+void unwind__flush_access(struct map_groups *mg);
+void unwind__finish_access(struct map_groups *mg);
 #else
-static inline int unwind__prepare_access(struct thread *thread __maybe_unused,
+static inline int unwind__prepare_access(struct map_groups *mg __maybe_unused,
 					 struct map *map __maybe_unused,
 					 bool *initialized __maybe_unused)
 {
 	return 0;
 }
 
-static inline void unwind__flush_access(struct thread *thread __maybe_unused) {}
-static inline void unwind__finish_access(struct thread *thread __maybe_unused) {}
+static inline void unwind__flush_access(struct map_groups *mg __maybe_unused) {}
+static inline void unwind__finish_access(struct map_groups *mg __maybe_unused) {}
 #endif
 #else
 static inline int
@@ -72,14 +73,14 @@ unwind__get_entries(unwind_entry_cb_t cb __maybe_unused,
 	return 0;
 }
 
-static inline int unwind__prepare_access(struct thread *thread __maybe_unused,
+static inline int unwind__prepare_access(struct map_groups *mg __maybe_unused,
 					 struct map *map __maybe_unused,
 					 bool *initialized __maybe_unused)
 {
 	return 0;
 }
 
-static inline void unwind__flush_access(struct thread *thread __maybe_unused) {}
-static inline void unwind__finish_access(struct thread *thread __maybe_unused) {}
+static inline void unwind__flush_access(struct map_groups *mg __maybe_unused) {}
+static inline void unwind__finish_access(struct map_groups *mg __maybe_unused) {}
 #endif /* HAVE_DWARF_UNWIND_SUPPORT */
 
 #endif /* __UNWIND_H */
diff --git a/tools/perf/util/xyarray.h b/tools/perf/util/xyarray.h
index 7ffe562e7ae7..2627b038b6f2 100644
--- a/tools/perf/util/xyarray.h
+++ b/tools/perf/util/xyarray.h
@@ -2,6 +2,7 @@
 #ifndef _PERF_XYARRAY_H_
 #define _PERF_XYARRAY_H_ 1
 
+#include <linux/compiler.h>
 #include <sys/types.h>
 
 struct xyarray {
@@ -10,7 +11,7 @@ struct xyarray {
 	size_t entries;
 	size_t max_x;
 	size_t max_y;
-	char contents[];
+	char contents[] __aligned(8);
 };
 
 struct xyarray *xyarray__new(int xlen, int ylen, size_t entry_size);
diff --git a/tools/power/x86/intel-speed-select/isst-config.c b/tools/power/x86/intel-speed-select/isst-config.c
index 91c5ad1685a1..6a10dea01eef 100644
--- a/tools/power/x86/intel-speed-select/isst-config.c
+++ b/tools/power/x86/intel-speed-select/isst-config.c
@@ -603,6 +603,10 @@ static int isst_fill_platform_info(void)
 
 	close(fd);
 
+	if (isst_platform_info.api_version > supported_api_ver) {
+		printf("Incompatible API versions; Upgrade of tool is required\n");
+		return -1;
+	}
 	return 0;
 }
 
@@ -1529,6 +1533,7 @@ static void cmdline(int argc, char **argv)
 {
 	int opt;
 	int option_index = 0;
+	int ret;
 
 	static struct option long_options[] = {
 		{ "cpu", required_argument, 0, 'c' },
@@ -1590,13 +1595,14 @@ static void cmdline(int argc, char **argv)
 	set_max_cpu_num();
 	set_cpu_present_cpu_mask();
 	set_cpu_target_cpu_mask();
-	isst_fill_platform_info();
-	if (isst_platform_info.api_version > supported_api_ver) {
-		printf("Incompatible API versions; Upgrade of tool is required\n");
-		exit(0);
-	}
+	ret = isst_fill_platform_info();
+	if (ret)
+		goto out;
 
 	process_command(argc, argv);
+out:
+	free_cpu_set(present_cpumask);
+	free_cpu_set(target_cpumask);
 }
 
 int main(int argc, char **argv)
diff --git a/tools/testing/selftests/net/fib_nexthop_multiprefix.sh b/tools/testing/selftests/net/fib_nexthop_multiprefix.sh
index e6828732843e..9dc35a16e415 100755
--- a/tools/testing/selftests/net/fib_nexthop_multiprefix.sh
+++ b/tools/testing/selftests/net/fib_nexthop_multiprefix.sh
@@ -15,6 +15,8 @@
 PAUSE_ON_FAIL=no
 VERBOSE=0
 
+which ping6 > /dev/null 2>&1 && ping6=$(which ping6) || ping6=$(which ping)
+
 ################################################################################
 # helpers
 
@@ -200,7 +202,7 @@ validate_v6_exception()
 	local rc
 
 	if [ ${ping_sz} != "0" ]; then
-		run_cmd ip netns exec h0 ping6 -s ${ping_sz} -c5 -w5 ${dst}
+		run_cmd ip netns exec h0 ${ping6} -s ${ping_sz} -c5 -w5 ${dst}
 	fi
 
 	if [ "$VERBOSE" = "1" ]; then
@@ -243,7 +245,7 @@ do
 		run_cmd taskset -c ${c} ip netns exec h0 ping -c1 -w1 172.16.10${i}.1
 		[ $? -ne 0 ] && printf "\nERROR: ping to h${i} failed\n" && ret=1
 
-		run_cmd taskset -c ${c} ip netns exec h0 ping6 -c1 -w1 2001:db8:10${i}::1
+		run_cmd taskset -c ${c} ip netns exec h0 ${ping6} -c1 -w1 2001:db8:10${i}::1
 		[ $? -ne 0 ] && printf "\nERROR: ping6 to h${i} failed\n" && ret=1
 
 		[ $ret -ne 0 ] && break
diff --git a/tools/testing/selftests/net/fib_tests.sh b/tools/testing/selftests/net/fib_tests.sh
index 4465fc2dae14..c4ba0ff4a53f 100755
--- a/tools/testing/selftests/net/fib_tests.sh
+++ b/tools/testing/selftests/net/fib_tests.sh
@@ -9,7 +9,7 @@ ret=0
 ksft_skip=4
 
 # all tests in this script. Can be overridden with -t option
-TESTS="unregister down carrier nexthop ipv6_rt ipv4_rt ipv6_addr_metric ipv4_addr_metric ipv6_route_metrics ipv4_route_metrics ipv4_route_v6_gw rp_filter"
+TESTS="unregister down carrier nexthop suppress ipv6_rt ipv4_rt ipv6_addr_metric ipv4_addr_metric ipv6_route_metrics ipv4_route_metrics ipv4_route_v6_gw rp_filter"
 
 VERBOSE=0
 PAUSE_ON_FAIL=no
@@ -17,6 +17,8 @@ PAUSE=no
 IP="ip -netns ns1"
 NS_EXEC="ip netns exec ns1"
 
+which ping6 > /dev/null 2>&1 && ping6=$(which ping6) || ping6=$(which ping)
+
 log_test()
 {
 	local rc=$1
@@ -614,6 +616,20 @@ fib_nexthop_test()
 	cleanup
 }
 
+fib_suppress_test()
+{
+	$IP link add dummy1 type dummy
+	$IP link set dummy1 up
+	$IP -6 route add default dev dummy1
+	$IP -6 rule add table main suppress_prefixlength 0
+	ping -f -c 1000 -W 1 1234::1 || true
+	$IP -6 rule del table main suppress_prefixlength 0
+	$IP link del dummy1
+
+	# If we got here without crashing, we're good.
+	return 0
+}
+
 ################################################################################
 # Tests on route add and replace
 
@@ -1086,7 +1102,7 @@ ipv6_route_metrics_test()
 	log_test $rc 0 "Multipath route with mtu metric"
 
 	$IP -6 ro add 2001:db8:104::/64 via 2001:db8:101::2 mtu 1300
-	run_cmd "ip netns exec ns1 ping6 -w1 -c1 -s 1500 2001:db8:104::1"
+	run_cmd "ip netns exec ns1 ${ping6} -w1 -c1 -s 1500 2001:db8:104::1"
 	log_test $? 0 "Using route with mtu metric"
 
 	run_cmd "$IP -6 ro add 2001:db8:114::/64 via 2001:db8:101::2 congctl lock foo"
@@ -1591,6 +1607,7 @@ do
 	fib_carrier_test|carrier)	fib_carrier_test;;
 	fib_rp_filter_test|rp_filter)	fib_rp_filter_test;;
 	fib_nexthop_test|nexthop)	fib_nexthop_test;;
+	fib_suppress_test|suppress)	fib_suppress_test;;
 	ipv6_route_test|ipv6_rt)	ipv6_route_test;;
 	ipv4_route_test|ipv4_rt)	ipv4_route_test;;
 	ipv6_addr_metric)		ipv6_addr_metric_test;;