diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 27ec49af1bf2..2569e7f19b47 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -4749,7 +4749,9 @@ none - Limited to cond_resched() calls voluntary - Limited to cond_resched() and might_sleep() calls full - Any section that isn't explicitly preempt disabled - can be preempted anytime. + can be preempted anytime. Tasks will also yield + contended spinlocks (if the critical section isn't + explicitly preempt disabled beyond the lock itself). print-fatal-signals= [KNL] debug: print fatal signals diff --git a/Documentation/arch/powerpc/kvm-nested.rst b/Documentation/arch/powerpc/kvm-nested.rst index 630602a8aa00..5defd13cc6c1 100644 --- a/Documentation/arch/powerpc/kvm-nested.rst +++ b/Documentation/arch/powerpc/kvm-nested.rst @@ -546,7 +546,9 @@ table information. +--------+-------+----+--------+----------------------------------+ | 0x1052 | 0x08 | RW | T | CTRL | +--------+-------+----+--------+----------------------------------+ -| 0x1053-| | | | Reserved | +| 0x1053 | 0x08 | RW | T | DPDES | ++--------+-------+----+--------+----------------------------------+ +| 0x1054-| | | | Reserved | | 0x1FFF | | | | | +--------+-------+----+--------+----------------------------------+ | 0x2000 | 0x04 | RW | T | CR | diff --git a/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-usb3-uni-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-usb3-uni-phy.yaml index 325585bc881b..212e56eb1bec 100644 --- a/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-usb3-uni-phy.yaml +++ b/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-usb3-uni-phy.yaml @@ -20,7 +20,7 @@ properties: - qcom,ipq8074-qmp-usb3-phy - qcom,ipq9574-qmp-usb3-phy - qcom,msm8996-qmp-usb3-phy - - com,qdu1000-qmp-usb3-uni-phy + - qcom,qdu1000-qmp-usb3-uni-phy - qcom,sa8775p-qmp-usb3-uni-phy - qcom,sc8280xp-qmp-usb3-uni-phy - qcom,sdm845-qmp-usb3-uni-phy diff --git a/Documentation/devicetree/bindings/thermal/thermal-zones.yaml b/Documentation/devicetree/bindings/thermal/thermal-zones.yaml index 68398e7e8655..606b80965a44 100644 --- a/Documentation/devicetree/bindings/thermal/thermal-zones.yaml +++ b/Documentation/devicetree/bindings/thermal/thermal-zones.yaml @@ -49,7 +49,10 @@ properties: to take when the temperature crosses those thresholds. patternProperties: - "^[a-zA-Z][a-zA-Z0-9\\-]{1,12}-thermal$": + # Node name is limited in size due to Linux kernel requirements - 19 + # characters in total (see THERMAL_NAME_LENGTH, including terminating NUL + # byte): + "^[a-zA-Z][a-zA-Z0-9\\-]{1,10}-thermal$": type: object description: Each thermal zone node contains information about how frequently it diff --git a/Documentation/networking/xsk-tx-metadata.rst b/Documentation/networking/xsk-tx-metadata.rst index bd033fe95cca..e76b0cfc32f7 100644 --- a/Documentation/networking/xsk-tx-metadata.rst +++ b/Documentation/networking/xsk-tx-metadata.rst @@ -11,12 +11,16 @@ metadata on the receive side. General Design ============== -The headroom for the metadata is reserved via ``tx_metadata_len`` in -``struct xdp_umem_reg``. The metadata length is therefore the same for -every socket that shares the same umem. The metadata layout is a fixed UAPI, -refer to ``union xsk_tx_metadata`` in ``include/uapi/linux/if_xdp.h``. -Thus, generally, the ``tx_metadata_len`` field above should contain -``sizeof(union xsk_tx_metadata)``. 
+The headroom for the metadata is reserved via ``tx_metadata_len`` and +``XDP_UMEM_TX_METADATA_LEN`` flag in ``struct xdp_umem_reg``. The metadata +length is therefore the same for every socket that shares the same umem. +The metadata layout is a fixed UAPI, refer to ``union xsk_tx_metadata`` in +``include/uapi/linux/if_xdp.h``. Thus, generally, the ``tx_metadata_len`` +field above should contain ``sizeof(union xsk_tx_metadata)``. + +Note that in the original implementation the ``XDP_UMEM_TX_METADATA_LEN`` +flag was not required. Applications might attempt to create a umem +with a flag first and if it fails, do another attempt without a flag. The headroom and the metadata itself should be located right before ``xdp_desc->addr`` in the umem frame. Within a frame, the metadata diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst index a71d91978d9e..eec8df1dde06 100644 --- a/Documentation/virt/kvm/api.rst +++ b/Documentation/virt/kvm/api.rst @@ -1403,6 +1403,12 @@ Instead, an abort (data abort if the cause of the page-table update was a load or a store, instruction abort if it was an instruction fetch) is injected in the guest. +S390: +^^^^^ + +Returns -EINVAL if the VM has the KVM_VM_S390_UCONTROL flag set. +Returns -EINVAL if called on a protected VM. + 4.36 KVM_SET_TSS_ADDR --------------------- @@ -6273,6 +6279,12 @@ state. At VM creation time, all memory is shared, i.e. the PRIVATE attribute is '0' for all gfns. Userspace can control whether memory is shared/private by toggling KVM_MEMORY_ATTRIBUTE_PRIVATE via KVM_SET_MEMORY_ATTRIBUTES as needed. +S390: +^^^^^ + +Returns -EINVAL if the VM has the KVM_VM_S390_UCONTROL flag set. +Returns -EINVAL if called on a protected VM. + 4.141 KVM_SET_MEMORY_ATTRIBUTES ------------------------------- diff --git a/Makefile b/Makefile index 07e1aec72c17..c0af6d8aeb05 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 VERSION = 6 PATCHLEVEL = 10 -SUBLEVEL = 2 +SUBLEVEL = 3 EXTRAVERSION = NAME = Baby Opossum Posse diff --git a/arch/arm/boot/dts/allwinner/Makefile b/arch/arm/boot/dts/allwinner/Makefile index 4247f19b1adc..cd0d044882cf 100644 --- a/arch/arm/boot/dts/allwinner/Makefile +++ b/arch/arm/boot/dts/allwinner/Makefile @@ -261,68 +261,6 @@ dtb-$(CONFIG_MACH_SUN8I) += \ sun8i-v3s-licheepi-zero.dtb \ sun8i-v3s-licheepi-zero-dock.dtb \ sun8i-v40-bananapi-m2-berry.dtb -dtb-$(CONFIG_MACH_SUN8I) += \ - sun8i-a23-evb.dtb \ - sun8i-a23-gt90h-v4.dtb \ - sun8i-a23-inet86dz.dtb \ - sun8i-a23-ippo-q8h-v5.dtb \ - sun8i-a23-ippo-q8h-v1.2.dtb \ - sun8i-a23-polaroid-mid2407pxe03.dtb \ - sun8i-a23-polaroid-mid2809pxe04.dtb \ - sun8i-a23-q8-tablet.dtb \ - sun8i-a33-et-q8-v1.6.dtb \ - sun8i-a33-ga10h-v1.1.dtb \ - sun8i-a33-inet-d978-rev2.dtb \ - sun8i-a33-ippo-q8h-v1.2.dtb \ - sun8i-a33-olinuxino.dtb \ - sun8i-a33-q8-tablet.dtb \ - sun8i-a33-sinlinx-sina33.dtb \ - sun8i-a83t-allwinner-h8homlet-v2.dtb \ - sun8i-a83t-bananapi-m3.dtb \ - sun8i-a83t-cubietruck-plus.dtb \ - sun8i-a83t-tbs-a711.dtb \ - sun8i-h2-plus-bananapi-m2-zero.dtb \ - sun8i-h2-plus-libretech-all-h3-cc.dtb \ - sun8i-h2-plus-orangepi-r1.dtb \ - sun8i-h2-plus-orangepi-zero.dtb \ - sun8i-h3-bananapi-m2-plus.dtb \ - sun8i-h3-bananapi-m2-plus-v1.2.dtb \ - sun8i-h3-beelink-x2.dtb \ - sun8i-h3-libretech-all-h3-cc.dtb \ - sun8i-h3-mapleboard-mp130.dtb \ - sun8i-h3-nanopi-duo2.dtb \ - sun8i-h3-nanopi-m1.dtb\ - \ - sun8i-h3-nanopi-m1-plus.dtb \ - sun8i-h3-nanopi-neo.dtb \ - sun8i-h3-nanopi-neo-air.dtb \ - sun8i-h3-nanopi-r1.dtb \ - 
sun8i-h3-orangepi-2.dtb \ - sun8i-h3-orangepi-lite.dtb \ - sun8i-h3-orangepi-one.dtb \ - sun8i-h3-orangepi-pc.dtb \ - sun8i-h3-orangepi-pc-plus.dtb \ - sun8i-h3-orangepi-plus.dtb \ - sun8i-h3-orangepi-plus2e.dtb \ - sun8i-h3-orangepi-zero-plus2.dtb \ - sun8i-h3-rervision-dvk.dtb \ - sun8i-h3-zeropi.dtb \ - sun8i-h3-emlid-neutis-n5h3-devboard.dtb \ - sun8i-r16-bananapi-m2m.dtb \ - sun8i-r16-nintendo-nes-classic.dtb \ - sun8i-r16-nintendo-super-nes-classic.dtb \ - sun8i-r16-parrot.dtb \ - sun8i-r40-bananapi-m2-ultra.dtb \ - sun8i-r40-oka40i-c.dtb \ - sun8i-s3-elimo-initium.dtb \ - sun8i-s3-lichee-zero-plus.dtb \ - sun8i-s3-pinecube.dtb \ - sun8i-t113s-mangopi-mq-r-t113.dtb \ - sun8i-t3-cqa3t-bv3.dtb \ - sun8i-v3-sl631-imx179.dtb \ - sun8i-v3s-licheepi-zero.dtb \ - sun8i-v3s-licheepi-zero-dock.dtb \ - sun8i-v40-bananapi-m2-berry.dtb dtb-$(CONFIG_MACH_SUN9I) += \ sun9i-a80-optimus.dtb \ sun9i-a80-cubieboard4.dtb diff --git a/arch/arm/boot/dts/nxp/imx/imx6q-kontron-samx6i.dtsi b/arch/arm/boot/dts/nxp/imx/imx6q-kontron-samx6i.dtsi index 4d6a0c3e8455..ff062f4fd726 100644 --- a/arch/arm/boot/dts/nxp/imx/imx6q-kontron-samx6i.dtsi +++ b/arch/arm/boot/dts/nxp/imx/imx6q-kontron-samx6i.dtsi @@ -5,31 +5,8 @@ #include "imx6q.dtsi" #include "imx6qdl-kontron-samx6i.dtsi" -#include <dt-bindings/gpio/gpio.h> / { model = "Kontron SMARC sAMX6i Quad/Dual"; compatible = "kontron,imx6q-samx6i", "fsl,imx6q"; }; - -/* Quad/Dual SoMs have 3 chip-select signals */ -&ecspi4 { - cs-gpios = <&gpio3 24 GPIO_ACTIVE_LOW>, - <&gpio3 29 GPIO_ACTIVE_LOW>, - <&gpio3 25 GPIO_ACTIVE_LOW>; -}; - -&pinctrl_ecspi4 { - fsl,pins = < - MX6QDL_PAD_EIM_D21__ECSPI4_SCLK 0x100b1 - MX6QDL_PAD_EIM_D28__ECSPI4_MOSI 0x100b1 - MX6QDL_PAD_EIM_D22__ECSPI4_MISO 0x100b1 - - /* SPI4_IMX_CS2# - connected to internal flash */ - MX6QDL_PAD_EIM_D24__GPIO3_IO24 0x1b0b0 - /* SPI4_IMX_CS0# - connected to SMARC SPI0_CS0# */ - MX6QDL_PAD_EIM_D29__GPIO3_IO29 0x1b0b0 - /* SPI4_CS3# - connected to SMARC SPI0_CS1# */ - MX6QDL_PAD_EIM_D25__GPIO3_IO25 0x1b0b0 - >; -}; diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-kontron-samx6i.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-kontron-samx6i.dtsi index 85aeebc9485d..668d33d1ff0c 100644 --- a/arch/arm/boot/dts/nxp/imx/imx6qdl-kontron-samx6i.dtsi +++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-kontron-samx6i.dtsi @@ -244,7 +244,8 @@ &ecspi4 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_ecspi4>; cs-gpios = <&gpio3 24 GPIO_ACTIVE_LOW>, - <&gpio3 29 GPIO_ACTIVE_LOW>; + <&gpio3 29 GPIO_ACTIVE_LOW>, + <&gpio3 25 GPIO_ACTIVE_LOW>; status = "okay"; /* default boot source: workaround #1 for errata ERR006282 */ @@ -259,7 +260,7 @@ smarc_flash: flash@0 { &fec { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_enet>; - phy-mode = "rgmii"; + phy-connection-type = "rgmii-id"; phy-handle = <ðphy>; mdio { @@ -269,7 +270,7 @@ mdio { ethphy: ethernet-phy@1 { compatible = "ethernet-phy-ieee802.3-c22"; reg = <1>; - reset-gpios = <&gpio1 25 GPIO_ACTIVE_LOW>; + reset-gpios = <&gpio2 1 GPIO_ACTIVE_LOW>; reset-assert-us = <1000>; }; }; @@ -464,6 +465,8 @@ MX6QDL_PAD_EIM_D22__ECSPI4_MISO 0x100b1 MX6QDL_PAD_EIM_D24__GPIO3_IO24 0x1b0b0 /* SPI_IMX_CS0# - connected to SMARC SPI0_CS0# */ MX6QDL_PAD_EIM_D29__GPIO3_IO29 0x1b0b0 + /* SPI4_CS3# - connected to SMARC SPI0_CS1# */ + MX6QDL_PAD_EIM_D25__GPIO3_IO25 0x1b0b0 >; }; @@ -516,7 +519,7 @@ MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b0b0 MX6QDL_PAD_ENET_MDIO__ENET_MDIO 0x1b0b0 MX6QDL_PAD_ENET_MDC__ENET_MDC 0x1b0b0 MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK 0x1b0b0 - MX6QDL_PAD_ENET_CRS_DV__GPIO1_IO25 0x1b0b0 /* 
RST_GBE0_PHY# */ + MX6QDL_PAD_NANDF_D1__GPIO2_IO01 0x1b0b0 /* RST_GBE0_PHY# */ >; }; @@ -729,7 +732,7 @@ &pcie { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_pcie>; wake-up-gpio = <&gpio6 18 GPIO_ACTIVE_HIGH>; - reset-gpio = <&gpio3 13 GPIO_ACTIVE_HIGH>; + reset-gpio = <&gpio3 13 GPIO_ACTIVE_LOW>; }; /* LCD_BKLT_PWM */ @@ -817,5 +820,6 @@ &wdog1 { /* CPLD is feeded by watchdog (hardwired) */ pinctrl-names = "default"; pinctrl-0 = <&pinctrl_wdog1>; + fsl,ext-reset-output; status = "okay"; }; diff --git a/arch/arm/boot/dts/qcom/qcom-msm8226-microsoft-common.dtsi b/arch/arm/boot/dts/qcom/qcom-msm8226-microsoft-common.dtsi index 525d8c608b06..8839b23fc693 100644 --- a/arch/arm/boot/dts/qcom/qcom-msm8226-microsoft-common.dtsi +++ b/arch/arm/boot/dts/qcom/qcom-msm8226-microsoft-common.dtsi @@ -287,6 +287,10 @@ &sdhc_2 { status = "okay"; }; +&smbb { + status = "okay"; +}; + &usb { extcon = <&smbb>; dr_mode = "peripheral"; diff --git a/arch/arm/boot/dts/st/stm32mp151.dtsi b/arch/arm/boot/dts/st/stm32mp151.dtsi index 90c5c72c87ab..4f878ec102c1 100644 --- a/arch/arm/boot/dts/st/stm32mp151.dtsi +++ b/arch/arm/boot/dts/st/stm32mp151.dtsi @@ -50,6 +50,7 @@ timer { <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(1) | IRQ_TYPE_LEVEL_LOW)>, <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(1) | IRQ_TYPE_LEVEL_LOW)>; interrupt-parent = <&intc>; + arm,no-tick-in-suspend; }; clocks { diff --git a/arch/arm/mach-pxa/spitz.c b/arch/arm/mach-pxa/spitz.c index 3c5f5a3cb480..10ab16dcd827 100644 --- a/arch/arm/mach-pxa/spitz.c +++ b/arch/arm/mach-pxa/spitz.c @@ -520,10 +520,8 @@ static struct gpiod_lookup_table spitz_ads7846_gpio_table = { static struct gpiod_lookup_table spitz_lcdcon_gpio_table = { .dev_id = "spi2.1", .table = { - GPIO_LOOKUP("gpio-pxa", SPITZ_GPIO_BACKLIGHT_CONT, - "BL_CONT", GPIO_ACTIVE_LOW), - GPIO_LOOKUP("gpio-pxa", SPITZ_GPIO_BACKLIGHT_ON, - "BL_ON", GPIO_ACTIVE_HIGH), + GPIO_LOOKUP("sharp-scoop.1", 6, "BL_CONT", GPIO_ACTIVE_LOW), + GPIO_LOOKUP("sharp-scoop.1", 7, "BL_ON", GPIO_ACTIVE_HIGH), { }, }, }; @@ -531,10 +529,8 @@ static struct gpiod_lookup_table spitz_lcdcon_gpio_table = { static struct gpiod_lookup_table akita_lcdcon_gpio_table = { .dev_id = "spi2.1", .table = { - GPIO_LOOKUP("gpio-pxa", AKITA_GPIO_BACKLIGHT_CONT, - "BL_CONT", GPIO_ACTIVE_LOW), - GPIO_LOOKUP("gpio-pxa", AKITA_GPIO_BACKLIGHT_ON, - "BL_ON", GPIO_ACTIVE_HIGH), + GPIO_LOOKUP("i2c-max7310", 3, "BL_ON", GPIO_ACTIVE_HIGH), + GPIO_LOOKUP("i2c-max7310", 4, "BL_CONT", GPIO_ACTIVE_LOW), { }, }, }; @@ -964,12 +960,9 @@ static inline void spitz_i2c_init(void) {} static struct gpiod_lookup_table spitz_audio_gpio_table = { .dev_id = "spitz-audio", .table = { - GPIO_LOOKUP("sharp-scoop.0", SPITZ_GPIO_MUTE_L - SPITZ_SCP_GPIO_BASE, - "mute-l", GPIO_ACTIVE_HIGH), - GPIO_LOOKUP("sharp-scoop.0", SPITZ_GPIO_MUTE_R - SPITZ_SCP_GPIO_BASE, - "mute-r", GPIO_ACTIVE_HIGH), - GPIO_LOOKUP("sharp-scoop.1", SPITZ_GPIO_MIC_BIAS - SPITZ_SCP2_GPIO_BASE, - "mic", GPIO_ACTIVE_HIGH), + GPIO_LOOKUP("sharp-scoop.0", 3, "mute-l", GPIO_ACTIVE_HIGH), + GPIO_LOOKUP("sharp-scoop.0", 4, "mute-r", GPIO_ACTIVE_HIGH), + GPIO_LOOKUP("sharp-scoop.1", 8, "mic", GPIO_ACTIVE_HIGH), { }, }, }; @@ -977,12 +970,9 @@ static struct gpiod_lookup_table spitz_audio_gpio_table = { static struct gpiod_lookup_table akita_audio_gpio_table = { .dev_id = "spitz-audio", .table = { - GPIO_LOOKUP("sharp-scoop.0", SPITZ_GPIO_MUTE_L - SPITZ_SCP_GPIO_BASE, - "mute-l", GPIO_ACTIVE_HIGH), - GPIO_LOOKUP("sharp-scoop.0", SPITZ_GPIO_MUTE_R - SPITZ_SCP_GPIO_BASE, - "mute-r", GPIO_ACTIVE_HIGH), - 
GPIO_LOOKUP("i2c-max7310", AKITA_GPIO_MIC_BIAS - AKITA_IOEXP_GPIO_BASE, - "mic", GPIO_ACTIVE_HIGH), + GPIO_LOOKUP("sharp-scoop.0", 3, "mute-l", GPIO_ACTIVE_HIGH), + GPIO_LOOKUP("sharp-scoop.0", 4, "mute-r", GPIO_ACTIVE_HIGH), + GPIO_LOOKUP("i2c-max7310", 2, "mic", GPIO_ACTIVE_HIGH), { }, }, }; diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c index 67c425341a95..ab01b51de559 100644 --- a/arch/arm/mm/fault.c +++ b/arch/arm/mm/fault.c @@ -25,6 +25,8 @@ #include "fault.h" +#ifdef CONFIG_MMU + bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size) { unsigned long addr = (unsigned long)unsafe_src; @@ -32,8 +34,6 @@ bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size) return addr >= TASK_SIZE && ULONG_MAX - addr >= size; } -#ifdef CONFIG_MMU - /* * This is useful to dump out the page tables associated with * 'addr' in mm 'mm'. diff --git a/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi index b058ed78faf0..dbadbdb8f931 100644 --- a/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi @@ -215,6 +215,11 @@ hdmi_tx: hdmi-tx@0 { #sound-dai-cells = <0>; status = "disabled"; + assigned-clocks = <&clkc CLKID_HDMI_SEL>, + <&clkc CLKID_HDMI>; + assigned-clock-parents = <&xtal>, <0>; + assigned-clock-rates = <0>, <24000000>; + /* VPU VENC Input */ hdmi_tx_venc_port: port@0 { reg = <0>; diff --git a/arch/arm64/boot/dts/amlogic/meson-g12.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12.dtsi index e732df3f3114..664912d1beaa 100644 --- a/arch/arm64/boot/dts/amlogic/meson-g12.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-g12.dtsi @@ -363,6 +363,10 @@ ðmac { power-domains = <&pwrc PWRC_G12A_ETH_ID>; }; +&hdmi_tx { + power-domains = <&pwrc PWRC_G12A_VPU_ID>; +}; + &vpu { power-domains = <&pwrc PWRC_G12A_VPU_ID>; }; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi index 12ef6e81c8bd..ed00e67e6923 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi @@ -311,10 +311,16 @@ &hdmi_tx { <&reset RESET_HDMI_SYSTEM_RESET>, <&reset RESET_HDMI_TX>; reset-names = "hdmitx_apb", "hdmitx", "hdmitx_phy"; - clocks = <&clkc CLKID_HDMI_PCLK>, - <&clkc CLKID_CLK81>, + clocks = <&clkc CLKID_HDMI>, + <&clkc CLKID_HDMI_PCLK>, <&clkc CLKID_GCLK_VENCI_INT0>; clock-names = "isfr", "iahb", "venci"; + power-domains = <&pwrc PWRC_GXBB_VPU_ID>; + + assigned-clocks = <&clkc CLKID_HDMI_SEL>, + <&clkc CLKID_HDMI>; + assigned-clock-parents = <&xtal>, <0>; + assigned-clock-rates = <0>, <24000000>; }; &sysctrl { diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi index 17bcfa4702e1..f58d1790de1c 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi @@ -323,10 +323,16 @@ &hdmi_tx { <&reset RESET_HDMI_SYSTEM_RESET>, <&reset RESET_HDMI_TX>; reset-names = "hdmitx_apb", "hdmitx", "hdmitx_phy"; - clocks = <&clkc CLKID_HDMI_PCLK>, - <&clkc CLKID_CLK81>, + clocks = <&clkc CLKID_HDMI>, + <&clkc CLKID_HDMI_PCLK>, <&clkc CLKID_GCLK_VENCI_INT0>; clock-names = "isfr", "iahb", "venci"; + power-domains = <&pwrc PWRC_GXBB_VPU_ID>; + + assigned-clocks = <&clkc CLKID_HDMI_SEL>, + <&clkc CLKID_HDMI>; + assigned-clock-parents = <&xtal>, <0>; + assigned-clock-rates = <0>, <24000000>; }; &sysctrl { diff --git a/arch/arm64/boot/dts/amlogic/meson-sm1.dtsi b/arch/arm64/boot/dts/amlogic/meson-sm1.dtsi index 
643f94d9d08e..13e742ba00be 100644 --- a/arch/arm64/boot/dts/amlogic/meson-sm1.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-sm1.dtsi @@ -339,7 +339,7 @@ tdmin_lb: audio-controller@3c0 { }; spdifin: audio-controller@400 { - compatible = "amlogic,g12a-spdifin", + compatible = "amlogic,sm1-spdifin", "amlogic,axg-spdifin"; reg = <0x0 0x400 0x0 0x30>; #sound-dai-cells = <0>; @@ -353,7 +353,7 @@ spdifin: audio-controller@400 { }; spdifout_a: audio-controller@480 { - compatible = "amlogic,g12a-spdifout", + compatible = "amlogic,sm1-spdifout", "amlogic,axg-spdifout"; reg = <0x0 0x480 0x0 0x50>; #sound-dai-cells = <0>; @@ -518,6 +518,10 @@ &gpio_intc { "amlogic,meson-gpio-intc"; }; +&hdmi_tx { + power-domains = <&pwrc PWRC_SM1_VPU_ID>; +}; + &pcie { power-domains = <&pwrc PWRC_SM1_PCIE_ID>; }; diff --git a/arch/arm64/boot/dts/freescale/imx8mp.dtsi b/arch/arm64/boot/dts/freescale/imx8mp.dtsi index b92abb5a5c53..ee0c864f27e8 100644 --- a/arch/arm64/boot/dts/freescale/imx8mp.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8mp.dtsi @@ -789,6 +789,23 @@ pgc_usb2_phy: power-domain@3 { reg = <IMX8MP_POWER_DOMAIN_USB2_PHY>; }; + pgc_mlmix: power-domain@4 { + #power-domain-cells = <0>; + reg = <IMX8MP_POWER_DOMAIN_MLMIX>; + clocks = <&clk IMX8MP_CLK_ML_AXI>, + <&clk IMX8MP_CLK_ML_AHB>, + <&clk IMX8MP_CLK_NPU_ROOT>; + assigned-clocks = <&clk IMX8MP_CLK_ML_CORE>, + <&clk IMX8MP_CLK_ML_AXI>, + <&clk IMX8MP_CLK_ML_AHB>; + assigned-clock-parents = <&clk IMX8MP_SYS_PLL1_800M>, + <&clk IMX8MP_SYS_PLL1_800M>, + <&clk IMX8MP_SYS_PLL1_800M>; + assigned-clock-rates = <800000000>, + <800000000>, + <300000000>; + }; + pgc_audio: power-domain@5 { #power-domain-cells = <0>; reg = <IMX8MP_POWER_DOMAIN_AUDIOMIX>; @@ -821,6 +838,12 @@ pgc_gpumix: power-domain@7 { assigned-clock-rates = <800000000>, <400000000>; }; + pgc_vpumix: power-domain@8 { + #power-domain-cells = <0>; + reg = <IMX8MP_POWER_DOMAIN_VPUMIX>; + clocks = <&clk IMX8MP_CLK_VPU_ROOT>; + }; + pgc_gpu3d: power-domain@9 { #power-domain-cells = <0>; reg = <IMX8MP_POWER_DOMAIN_GPU3D>; @@ -836,6 +859,28 @@ pgc_mediamix: power-domain@10 { <&clk IMX8MP_CLK_MEDIA_APB_ROOT>; }; + pgc_vpu_g1: power-domain@11 { + #power-domain-cells = <0>; + power-domains = <&pgc_vpumix>; + reg = <IMX8MP_POWER_DOMAIN_VPU_G1>; + clocks = <&clk IMX8MP_CLK_VPU_G1_ROOT>; + }; + + pgc_vpu_g2: power-domain@12 { + #power-domain-cells = <0>; + power-domains = <&pgc_vpumix>; + reg = <IMX8MP_POWER_DOMAIN_VPU_G2>; + clocks = <&clk IMX8MP_CLK_VPU_G2_ROOT>; + + }; + + pgc_vpu_vc8000e: power-domain@13 { + #power-domain-cells = <0>; + power-domains = <&pgc_vpumix>; + reg = <IMX8MP_POWER_DOMAIN_VPU_VC8000E>; + clocks = <&clk IMX8MP_CLK_VPU_VC8KE_ROOT>; + }; + pgc_hdmimix: power-domain@14 { #power-domain-cells = <0>; reg = <IMX8MP_POWER_DOMAIN_HDMIMIX>; @@ -873,50 +918,6 @@ pgc_ispdwp: power-domain@18 { reg = <IMX8MP_POWER_DOMAIN_MEDIAMIX_ISPDWP>; clocks = <&clk IMX8MP_CLK_MEDIA_ISP_ROOT>; }; - - pgc_vpumix: power-domain@19 { - #power-domain-cells = <0>; - reg = <IMX8MP_POWER_DOMAIN_VPUMIX>; - clocks = <&clk IMX8MP_CLK_VPU_ROOT>; - }; - - pgc_vpu_g1: power-domain@20 { - #power-domain-cells = <0>; - power-domains = <&pgc_vpumix>; - reg = <IMX8MP_POWER_DOMAIN_VPU_G1>; - clocks = <&clk IMX8MP_CLK_VPU_G1_ROOT>; - }; - - pgc_vpu_g2: power-domain@21 { - #power-domain-cells = <0>; - power-domains = <&pgc_vpumix>; - reg = <IMX8MP_POWER_DOMAIN_VPU_G2>; - clocks = <&clk IMX8MP_CLK_VPU_G2_ROOT>; - }; - - pgc_vpu_vc8000e: power-domain@22 { - #power-domain-cells = <0>; - power-domains = <&pgc_vpumix>; - reg = 
<IMX8MP_POWER_DOMAIN_VPU_VC8000E>; - clocks = <&clk IMX8MP_CLK_VPU_VC8KE_ROOT>; - }; - - pgc_mlmix: power-domain@24 { - #power-domain-cells = <0>; - reg = <IMX8MP_POWER_DOMAIN_MLMIX>; - clocks = <&clk IMX8MP_CLK_ML_AXI>, - <&clk IMX8MP_CLK_ML_AHB>, - <&clk IMX8MP_CLK_NPU_ROOT>; - assigned-clocks = <&clk IMX8MP_CLK_ML_CORE>, - <&clk IMX8MP_CLK_ML_AXI>, - <&clk IMX8MP_CLK_ML_AHB>; - assigned-clock-parents = <&clk IMX8MP_SYS_PLL1_800M>, - <&clk IMX8MP_SYS_PLL1_800M>, - <&clk IMX8MP_SYS_PLL1_800M>; - assigned-clock-rates = <800000000>, - <800000000>, - <300000000>; - }; }; }; }; diff --git a/arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts b/arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts index 224bb289660c..2791de5b28f6 100644 --- a/arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts +++ b/arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts @@ -329,8 +329,8 @@ asm_sel { /* eMMC is shared pin with parallel NAND */ emmc_pins_default: emmc-pins-default { mux { - function = "emmc", "emmc_rst"; - groups = "emmc"; + function = "emmc"; + groups = "emmc", "emmc_rst"; }; /* "NDL0","NDL1","NDL2","NDL3","NDL4","NDL5","NDL6","NDL7", diff --git a/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts b/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts index 41629769bdc8..8c3e2e2578bc 100644 --- a/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts +++ b/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts @@ -268,8 +268,8 @@ &pio { /* eMMC is shared pin with parallel NAND */ emmc_pins_default: emmc-pins-default { mux { - function = "emmc", "emmc_rst"; - groups = "emmc"; + function = "emmc"; + groups = "emmc", "emmc_rst"; }; /* "NDL0","NDL1","NDL2","NDL3","NDL4","NDL5","NDL6","NDL7", diff --git a/arch/arm64/boot/dts/mediatek/mt7981b.dtsi b/arch/arm64/boot/dts/mediatek/mt7981b.dtsi index 4feff3d1c5f4..178e1e96c3a4 100644 --- a/arch/arm64/boot/dts/mediatek/mt7981b.dtsi +++ b/arch/arm64/boot/dts/mediatek/mt7981b.dtsi @@ -78,10 +78,10 @@ pwm@10048000 { compatible = "mediatek,mt7981-pwm"; reg = <0 0x10048000 0 0x1000>; clocks = <&infracfg CLK_INFRA_PWM_STA>, - <&infracfg CLK_INFRA_PWM_HCK>, - <&infracfg CLK_INFRA_PWM1_CK>, - <&infracfg CLK_INFRA_PWM2_CK>, - <&infracfg CLK_INFRA_PWM3_CK>; + <&infracfg CLK_INFRA_PWM_HCK>, + <&infracfg CLK_INFRA_PWM1_CK>, + <&infracfg CLK_INFRA_PWM2_CK>, + <&infracfg CLK_INFRA_PWM3_CK>; clock-names = "top", "main", "pwm1", "pwm2", "pwm3"; #pwm-cells = <2>; }; diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-audio-da7219.dtsi b/arch/arm64/boot/dts/mediatek/mt8183-kukui-audio-da7219.dtsi index 8b57706ac814..586eee79c73c 100644 --- a/arch/arm64/boot/dts/mediatek/mt8183-kukui-audio-da7219.dtsi +++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-audio-da7219.dtsi @@ -27,7 +27,7 @@ da7219_aad { dlg,btn-cfg = <50>; dlg,mic-det-thr = <500>; dlg,jack-ins-deb = <20>; - dlg,jack-det-rate = "32ms_64ms"; + dlg,jack-det-rate = "32_64"; dlg,jack-rem-deb = <1>; dlg,a-d-btn-thr = <0xa>; diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-pico6.dts b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-pico6.dts index 6a7ae616512d..0d5a11c93c68 100644 --- a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-pico6.dts +++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-pico6.dts @@ -17,7 +17,7 @@ bt_wakeup: bt-wakeup { pinctrl-names = "default"; pinctrl-0 = <&bt_pins_wakeup>; - wobt { + event-wobt { label = "Wake on BT"; gpios = <&pio 42 GPIO_ACTIVE_HIGH>; linux,code = <KEY_WAKEUP>; @@ -47,10 +47,8 @@ trackpad@2c { }; }; -&wifi_wakeup { - wowlan { - gpios = <&pio 113 GPIO_ACTIVE_LOW>; - }; 
+&wifi_wakeup_event { + gpios = <&pio 113 GPIO_ACTIVE_LOW>; }; &wifi_pwrseq { diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi.dtsi b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi.dtsi index 7592e3b86037..fa4ab4d2899f 100644 --- a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi.dtsi +++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi.dtsi @@ -155,21 +155,24 @@ anx_bridge: anx7625@58 { vdd18-supply = <&pp1800_mipibrdg>; vdd33-supply = <&vddio_mipibrdg>; - #address-cells = <1>; - #size-cells = <0>; - port@0 { - reg = <0>; + ports { + #address-cells = <1>; + #size-cells = <0>; - anx7625_in: endpoint { - remote-endpoint = <&dsi_out>; + port@0 { + reg = <0>; + + anx7625_in: endpoint { + remote-endpoint = <&dsi_out>; + }; }; - }; - port@1 { - reg = <1>; + port@1 { + reg = <1>; - anx7625_out: endpoint { - remote-endpoint = <&panel_in>; + anx7625_out: endpoint { + remote-endpoint = <&panel_in>; + }; }; }; diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi b/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi index 100191c6453b..2fbd226bf142 100644 --- a/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi +++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi @@ -152,7 +152,7 @@ wifi_wakeup: wifi-wakeup { pinctrl-names = "default"; pinctrl-0 = <&wifi_pins_wakeup>; - button-wowlan { + wifi_wakeup_event: event-wowlan { label = "Wake on WiFi"; gpios = <&pio 113 GPIO_ACTIVE_HIGH>; linux,code = <KEY_WAKEUP>; @@ -803,7 +803,6 @@ pins-tx { }; pins-rts { pinmux = <PINMUX_GPIO47__FUNC_URTS1>; - output-enable; }; pins-cts { pinmux = <PINMUX_GPIO46__FUNC_UCTS1>; @@ -822,7 +821,6 @@ pins-tx { }; pins-rts { pinmux = <PINMUX_GPIO47__FUNC_URTS1>; - output-enable; }; pins-cts { pinmux = <PINMUX_GPIO46__FUNC_UCTS1>; diff --git a/arch/arm64/boot/dts/mediatek/mt8192-asurada.dtsi b/arch/arm64/boot/dts/mediatek/mt8192-asurada.dtsi index 7a704246678f..08d71ddf3668 100644 --- a/arch/arm64/boot/dts/mediatek/mt8192-asurada.dtsi +++ b/arch/arm64/boot/dts/mediatek/mt8192-asurada.dtsi @@ -147,6 +147,7 @@ pp3300_mipibrdg: regulator-3v3-mipibrdg { regulator-boot-on; gpio = <&pio 127 GPIO_ACTIVE_HIGH>; vin-supply = <&pp3300_g>; + off-on-delay-us = <500000>; }; /* separately switched 3.3V power rail */ diff --git a/arch/arm64/boot/dts/mediatek/mt8192.dtsi b/arch/arm64/boot/dts/mediatek/mt8192.dtsi index 84cbdf6e9eb0..47dea10dd3b8 100644 --- a/arch/arm64/boot/dts/mediatek/mt8192.dtsi +++ b/arch/arm64/boot/dts/mediatek/mt8192.dtsi @@ -2234,7 +2234,7 @@ vpu1_crit: trip-crit { }; }; - gpu0-thermal { + gpu-thermal { polling-delay = <1000>; polling-delay-passive = <250>; thermal-sensors = <&lvts_ap MT8192_AP_GPU0>; diff --git a/arch/arm64/boot/dts/mediatek/mt8195.dtsi b/arch/arm64/boot/dts/mediatek/mt8195.dtsi index 5d8b68f86ce4..2ee45752583c 100644 --- a/arch/arm64/boot/dts/mediatek/mt8195.dtsi +++ b/arch/arm64/boot/dts/mediatek/mt8195.dtsi @@ -3880,7 +3880,7 @@ vpu1_crit: trip-crit { }; }; - gpu0-thermal { + gpu-thermal { polling-delay = <1000>; polling-delay-passive = <250>; thermal-sensors = <&lvts_ap MT8195_AP_GPU0>; diff --git a/arch/arm64/boot/dts/mediatek/mt8395-radxa-nio-12l.dts b/arch/arm64/boot/dts/mediatek/mt8395-radxa-nio-12l.dts index e5d9b671a405..97634cc04e65 100644 --- a/arch/arm64/boot/dts/mediatek/mt8395-radxa-nio-12l.dts +++ b/arch/arm64/boot/dts/mediatek/mt8395-radxa-nio-12l.dts @@ -528,7 +528,7 @@ i2c6_pins: i2c6-pins { pins { pinmux = <PINMUX_GPIO25__FUNC_SDA6>, <PINMUX_GPIO26__FUNC_SCL6>; - bias-pull-up = <MTK_PULL_SET_RSEL_111>; + bias-disable; }; }; diff --git 
a/arch/arm64/boot/dts/qcom/msm8996-xiaomi-common.dtsi b/arch/arm64/boot/dts/qcom/msm8996-xiaomi-common.dtsi index 5ab583be9e0a..0386636a29f0 100644 --- a/arch/arm64/boot/dts/qcom/msm8996-xiaomi-common.dtsi +++ b/arch/arm64/boot/dts/qcom/msm8996-xiaomi-common.dtsi @@ -405,7 +405,6 @@ &usb3_dwc3 { &hsusb_phy1 { status = "okay"; - extcon = <&typec>; vdda-pll-supply = <&vreg_l12a_1p8>; vdda-phy-dpdm-supply = <&vreg_l24a_3p075>; diff --git a/arch/arm64/boot/dts/qcom/msm8996.dtsi b/arch/arm64/boot/dts/qcom/msm8996.dtsi index 6e7a4bb08b35..0717605ac5a0 100644 --- a/arch/arm64/boot/dts/qcom/msm8996.dtsi +++ b/arch/arm64/boot/dts/qcom/msm8996.dtsi @@ -2102,7 +2102,7 @@ ufshc: ufshc@624000 { <&gcc GCC_UFS_RX_SYMBOL_0_CLK>; freq-table-hz = <100000000 200000000>, - <0 0>, + <100000000 200000000>, <0 0>, <0 0>, <0 0>, diff --git a/arch/arm64/boot/dts/qcom/msm8998.dtsi b/arch/arm64/boot/dts/qcom/msm8998.dtsi index 2dbef4b526ab..a88bff737d17 100644 --- a/arch/arm64/boot/dts/qcom/msm8998.dtsi +++ b/arch/arm64/boot/dts/qcom/msm8998.dtsi @@ -1590,7 +1590,6 @@ adreno_smmu: iommu@5040000 { * SoC VDDMX RPM Power Domain in the Adreno driver. */ power-domains = <&gpucc GPU_GX_GDSC>; - status = "disabled"; }; gpucc: clock-controller@5065000 { diff --git a/arch/arm64/boot/dts/qcom/qcm6490-fairphone-fp5.dts b/arch/arm64/boot/dts/qcom/qcm6490-fairphone-fp5.dts index f3432701945f..8cd2fe80dbb2 100644 --- a/arch/arm64/boot/dts/qcom/qcm6490-fairphone-fp5.dts +++ b/arch/arm64/boot/dts/qcom/qcm6490-fairphone-fp5.dts @@ -864,7 +864,6 @@ sw_ctrl_default: sw-ctrl-default-state { }; &uart5 { - compatible = "qcom,geni-debug-uart"; status = "okay"; }; diff --git a/arch/arm64/boot/dts/qcom/qcm6490-idp.dts b/arch/arm64/boot/dts/qcom/qcm6490-idp.dts index 47ca2d000341..107302680f56 100644 --- a/arch/arm64/boot/dts/qcom/qcm6490-idp.dts +++ b/arch/arm64/boot/dts/qcom/qcm6490-idp.dts @@ -658,7 +658,6 @@ &tlmm { }; &uart5 { - compatible = "qcom,geni-debug-uart"; status = "okay"; }; diff --git a/arch/arm64/boot/dts/qcom/qcs6490-rb3gen2.dts b/arch/arm64/boot/dts/qcom/qcs6490-rb3gen2.dts index a085ff5b5fb2..7256b51eb08f 100644 --- a/arch/arm64/boot/dts/qcom/qcs6490-rb3gen2.dts +++ b/arch/arm64/boot/dts/qcom/qcs6490-rb3gen2.dts @@ -632,7 +632,6 @@ &tlmm { }; &uart5 { - compatible = "qcom,geni-debug-uart"; status = "okay"; }; diff --git a/arch/arm64/boot/dts/qcom/qdu1000.dtsi b/arch/arm64/boot/dts/qcom/qdu1000.dtsi index f90f03fa6a24..1da40f4b4f8a 100644 --- a/arch/arm64/boot/dts/qcom/qdu1000.dtsi +++ b/arch/arm64/boot/dts/qcom/qdu1000.dtsi @@ -1478,6 +1478,21 @@ system-cache-controller@19200000 { "llcc7_base", "llcc_broadcast_base"; interrupts = <GIC_SPI 266 IRQ_TYPE_LEVEL_HIGH>; + + nvmem-cells = <&multi_chan_ddr>; + nvmem-cell-names = "multi-chan-ddr"; + }; + + sec_qfprom: efuse@221c8000 { + compatible = "qcom,qdu1000-sec-qfprom", "qcom,sec-qfprom"; + reg = <0 0x221c8000 0 0x1000>; + #address-cells = <1>; + #size-cells = <1>; + + multi_chan_ddr: multi-chan-ddr@12b { + reg = <0x12b 0x1>; + bits = <0 2>; + }; }; }; diff --git a/arch/arm64/boot/dts/qcom/qrb4210-rb2.dts b/arch/arm64/boot/dts/qcom/qrb4210-rb2.dts index cb8a62714a30..1888d99d398b 100644 --- a/arch/arm64/boot/dts/qcom/qrb4210-rb2.dts +++ b/arch/arm64/boot/dts/qcom/qrb4210-rb2.dts @@ -305,7 +305,7 @@ pmi632_ss_in: endpoint { &pmi632_vbus { regulator-min-microamp = <500000>; - regulator-max-microamp = <3000000>; + regulator-max-microamp = <1000000>; status = "okay"; }; @@ -414,6 +414,8 @@ vreg_l9a_1p8: l9 { regulator-min-microvolt = <1800000>; regulator-max-microvolt = 
<1800000>; regulator-allow-set-load; + regulator-always-on; + regulator-boot-on; }; vreg_l10a_1p8: l10 { diff --git a/arch/arm64/boot/dts/qcom/sa8775p.dtsi b/arch/arm64/boot/dts/qcom/sa8775p.dtsi index 1b3dc0ece54d..490e0369f529 100644 --- a/arch/arm64/boot/dts/qcom/sa8775p.dtsi +++ b/arch/arm64/boot/dts/qcom/sa8775p.dtsi @@ -2504,6 +2504,7 @@ ethernet1: ethernet@23000000 { phy-names = "serdes"; iommus = <&apps_smmu 0x140 0xf>; + dma-coherent; snps,tso; snps,pbl = <32>; @@ -2538,6 +2539,7 @@ ethernet0: ethernet@23040000 { phy-names = "serdes"; iommus = <&apps_smmu 0x120 0xf>; + dma-coherent; snps,tso; snps,pbl = <32>; diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r1-kb.dts b/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r1-kb.dts index 919bfaea6189..340cb119d0a0 100644 --- a/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r1-kb.dts +++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r1-kb.dts @@ -12,6 +12,6 @@ / { compatible = "google,lazor-rev1-sku2", "google,lazor-rev2-sku2", "qcom,sc7180"; }; -&keyboard_backlight { +&pwmleds { status = "okay"; }; diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r1-lte.dts b/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r1-lte.dts index eb20157f6af9..d45e60e3eb9e 100644 --- a/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r1-lte.dts +++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r1-lte.dts @@ -17,6 +17,6 @@ &ap_sar_sensor_i2c { status = "okay"; }; -&keyboard_backlight { +&pwmleds { status = "okay"; }; diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r10-kb.dts b/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r10-kb.dts index 45d34718a1bc..e906ce877b8c 100644 --- a/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r10-kb.dts +++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r10-kb.dts @@ -18,6 +18,6 @@ / { compatible = "google,lazor-sku2", "qcom,sc7180"; }; -&keyboard_backlight { +&pwmleds { status = "okay"; }; diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r10-lte.dts b/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r10-lte.dts index 79028d0dd1b0..4b9ee15b09f6 100644 --- a/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r10-lte.dts +++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r10-lte.dts @@ -22,6 +22,6 @@ &ap_sar_sensor_i2c { status = "okay"; }; -&keyboard_backlight { +&pwmleds { status = "okay"; }; diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r3-kb.dts b/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r3-kb.dts index 3459b81c5628..a960553f3994 100644 --- a/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r3-kb.dts +++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r3-kb.dts @@ -21,6 +21,6 @@ / { "qcom,sc7180"; }; -&keyboard_backlight { +&pwmleds { status = "okay"; }; diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r3-lte.dts b/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r3-lte.dts index ff8f47da109d..82bd9ed7e21a 100644 --- a/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r3-lte.dts +++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r3-lte.dts @@ -25,6 +25,6 @@ &ap_sar_sensor_i2c { status = "okay"; }; -&keyboard_backlight { +&pwmleds { status = "okay"; }; diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r9-kb.dts b/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r9-kb.dts index faf527972977..6278c1715d3f 100644 --- a/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r9-kb.dts +++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r9-kb.dts @@ -18,6 +18,6 @@ / { compatible = "google,lazor-rev9-sku2", "qcom,sc7180"; }; -&keyboard_backlight { +&pwmleds { status = "okay"; }; diff --git 
a/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r9-lte.dts b/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r9-lte.dts index d737fd0637fb..0ec1697ae2c9 100644 --- a/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r9-lte.dts +++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r9-lte.dts @@ -22,6 +22,6 @@ &ap_sar_sensor_i2c { status = "okay"; }; -&keyboard_backlight { +&pwmleds { status = "okay"; }; diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor.dtsi b/arch/arm64/boot/dts/qcom/sc7180-trogdor.dtsi index 8513be297120..098a8b4c793e 100644 --- a/arch/arm64/boot/dts/qcom/sc7180-trogdor.dtsi +++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor.dtsi @@ -359,10 +359,11 @@ max98360a: audio-codec-0 { #sound-dai-cells = <0>; }; - pwmleds { + pwmleds: pwmleds { compatible = "pwm-leds"; + status = "disabled"; + keyboard_backlight: led-0 { - status = "disabled"; label = "cros_ec::kbd_backlight"; function = LED_FUNCTION_KBD_BACKLIGHT; pwms = <&cros_ec_pwm 0>; diff --git a/arch/arm64/boot/dts/qcom/sc7180.dtsi b/arch/arm64/boot/dts/qcom/sc7180.dtsi index e9deffe3aaf6..9ab0c98cac05 100644 --- a/arch/arm64/boot/dts/qcom/sc7180.dtsi +++ b/arch/arm64/boot/dts/qcom/sc7180.dtsi @@ -1582,8 +1582,7 @@ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>, }; ufs_mem_phy: phy@1d87000 { - compatible = "qcom,sc7180-qmp-ufs-phy", - "qcom,sm7150-qmp-ufs-phy"; + compatible = "qcom,sc7180-qmp-ufs-phy"; reg = <0 0x01d87000 0 0x1000>; clocks = <&rpmhcc RPMH_CXO_CLK>, <&gcc GCC_UFS_PHY_PHY_AUX_CLK>, diff --git a/arch/arm64/boot/dts/qcom/sc7280-idp.dtsi b/arch/arm64/boot/dts/qcom/sc7280-idp.dtsi index a0059527d9e4..7370aa0dbf0e 100644 --- a/arch/arm64/boot/dts/qcom/sc7280-idp.dtsi +++ b/arch/arm64/boot/dts/qcom/sc7280-idp.dtsi @@ -495,7 +495,6 @@ wcd_tx: codec@0,3 { }; &uart5 { - compatible = "qcom,geni-debug-uart"; status = "okay"; }; diff --git a/arch/arm64/boot/dts/qcom/sc7280-qcard.dtsi b/arch/arm64/boot/dts/qcom/sc7280-qcard.dtsi index f9b96bd2477e..7d1d5bbbbbd9 100644 --- a/arch/arm64/boot/dts/qcom/sc7280-qcard.dtsi +++ b/arch/arm64/boot/dts/qcom/sc7280-qcard.dtsi @@ -427,7 +427,6 @@ wcd_tx: codec@0,3 { }; uart_dbg: &uart5 { - compatible = "qcom,geni-debug-uart"; status = "okay"; }; diff --git a/arch/arm64/boot/dts/qcom/sc7280.dtsi b/arch/arm64/boot/dts/qcom/sc7280.dtsi index 2f7780f629ac..c4a05d7b7ce6 100644 --- a/arch/arm64/boot/dts/qcom/sc7280.dtsi +++ b/arch/arm64/boot/dts/qcom/sc7280.dtsi @@ -1440,12 +1440,12 @@ spi5: spi@994000 { }; uart5: serial@994000 { - compatible = "qcom,geni-uart"; + compatible = "qcom,geni-debug-uart"; reg = <0 0x00994000 0 0x4000>; clocks = <&gcc GCC_QUPV3_WRAP0_S5_CLK>; clock-names = "se"; pinctrl-names = "default"; - pinctrl-0 = <&qup_uart5_cts>, <&qup_uart5_rts>, <&qup_uart5_tx>, <&qup_uart5_rx>; + pinctrl-0 = <&qup_uart5_tx>, <&qup_uart5_rx>; interrupts = <GIC_SPI 606 IRQ_TYPE_LEVEL_HIGH>; power-domains = <&rpmhpd SC7280_CX>; operating-points-v2 = <&qup_opp_table>; @@ -5408,16 +5408,6 @@ qup_uart4_rx: qup-uart4-rx-state { function = "qup04"; }; - qup_uart5_cts: qup-uart5-cts-state { - pins = "gpio20"; - function = "qup05"; - }; - - qup_uart5_rts: qup-uart5-rts-state { - pins = "gpio21"; - function = "qup05"; - }; - qup_uart5_tx: qup-uart5-tx-state { pins = "gpio22"; function = "qup05"; diff --git a/arch/arm64/boot/dts/qcom/sc8180x.dtsi b/arch/arm64/boot/dts/qcom/sc8180x.dtsi index 581a70c34fd2..da69577b6f09 100644 --- a/arch/arm64/boot/dts/qcom/sc8180x.dtsi +++ b/arch/arm64/boot/dts/qcom/sc8180x.dtsi @@ -1890,7 +1890,7 @@ pcie3: pcie@1c08000 { power-domains = <&gcc PCIE_3_GDSC>; interconnects = 
<&aggre2_noc MASTER_PCIE_3 0 &mc_virt SLAVE_EBI_CH0 0>, - <&gem_noc MASTER_AMPSS_M0 0 &config_noc SLAVE_PCIE_0 0>; + <&gem_noc MASTER_AMPSS_M0 0 &config_noc SLAVE_PCIE_3 0>; interconnect-names = "pcie-mem", "cpu-pcie"; phys = <&pcie3_phy>; @@ -2012,7 +2012,7 @@ pcie1: pcie@1c10000 { power-domains = <&gcc PCIE_1_GDSC>; interconnects = <&aggre2_noc MASTER_PCIE_1 0 &mc_virt SLAVE_EBI_CH0 0>, - <&gem_noc MASTER_AMPSS_M0 0 &config_noc SLAVE_PCIE_0 0>; + <&gem_noc MASTER_AMPSS_M0 0 &config_noc SLAVE_PCIE_1 0>; interconnect-names = "pcie-mem", "cpu-pcie"; phys = <&pcie1_phy>; @@ -2134,7 +2134,7 @@ pcie2: pcie@1c18000 { power-domains = <&gcc PCIE_2_GDSC>; interconnects = <&aggre2_noc MASTER_PCIE_2 0 &mc_virt SLAVE_EBI_CH0 0>, - <&gem_noc MASTER_AMPSS_M0 0 &config_noc SLAVE_PCIE_0 0>; + <&gem_noc MASTER_AMPSS_M0 0 &config_noc SLAVE_PCIE_2 0>; interconnect-names = "pcie-mem", "cpu-pcie"; phys = <&pcie2_phy>; @@ -2245,6 +2245,8 @@ ufs_mem_phy: phy-wrapper@1d87000 { resets = <&ufs_mem_hc 0>; reset-names = "ufsphy"; + power-domains = <&gcc UFS_PHY_GDSC>; + #phy-cells = <0>; status = "disabled"; diff --git a/arch/arm64/boot/dts/qcom/sc8280xp-lenovo-thinkpad-x13s.dts b/arch/arm64/boot/dts/qcom/sc8280xp-lenovo-thinkpad-x13s.dts index 4bf99b6b6e5f..6b759e67f4d3 100644 --- a/arch/arm64/boot/dts/qcom/sc8280xp-lenovo-thinkpad-x13s.dts +++ b/arch/arm64/boot/dts/qcom/sc8280xp-lenovo-thinkpad-x13s.dts @@ -299,7 +299,7 @@ linux,cma { thermal-zones { skin-temp-thermal { polling-delay-passive = <250>; - polling-delay = <0>; + thermal-sensors = <&pmk8280_adc_tm 5>; trips { diff --git a/arch/arm64/boot/dts/qcom/sc8280xp-pmics.dtsi b/arch/arm64/boot/dts/qcom/sc8280xp-pmics.dtsi index 945de77911de..1e3babf2e40d 100644 --- a/arch/arm64/boot/dts/qcom/sc8280xp-pmics.dtsi +++ b/arch/arm64/boot/dts/qcom/sc8280xp-pmics.dtsi @@ -14,7 +14,7 @@ / { thermal-zones { pm8280_1_thermal: pm8280-1-thermal { polling-delay-passive = <100>; - polling-delay = <0>; + thermal-sensors = <&pm8280_1_temp_alarm>; trips { @@ -34,7 +34,7 @@ trip1 { pm8280_2_thermal: pm8280-2-thermal { polling-delay-passive = <100>; - polling-delay = <0>; + thermal-sensors = <&pm8280_2_temp_alarm>; trips { diff --git a/arch/arm64/boot/dts/qcom/sc8280xp.dtsi b/arch/arm64/boot/dts/qcom/sc8280xp.dtsi index 59f0a850671a..b0b0ab779446 100644 --- a/arch/arm64/boot/dts/qcom/sc8280xp.dtsi +++ b/arch/arm64/boot/dts/qcom/sc8280xp.dtsi @@ -5833,7 +5833,6 @@ sound: sound { thermal-zones { cpu0-thermal { polling-delay-passive = <250>; - polling-delay = <1000>; thermal-sensors = <&tsens0 1>; @@ -5848,7 +5847,6 @@ cpu-crit { cpu1-thermal { polling-delay-passive = <250>; - polling-delay = <1000>; thermal-sensors = <&tsens0 2>; @@ -5863,7 +5861,6 @@ cpu-crit { cpu2-thermal { polling-delay-passive = <250>; - polling-delay = <1000>; thermal-sensors = <&tsens0 3>; @@ -5878,7 +5875,6 @@ cpu-crit { cpu3-thermal { polling-delay-passive = <250>; - polling-delay = <1000>; thermal-sensors = <&tsens0 4>; @@ -5893,7 +5889,6 @@ cpu-crit { cpu4-thermal { polling-delay-passive = <250>; - polling-delay = <1000>; thermal-sensors = <&tsens0 5>; @@ -5908,7 +5903,6 @@ cpu-crit { cpu5-thermal { polling-delay-passive = <250>; - polling-delay = <1000>; thermal-sensors = <&tsens0 6>; @@ -5923,7 +5917,6 @@ cpu-crit { cpu6-thermal { polling-delay-passive = <250>; - polling-delay = <1000>; thermal-sensors = <&tsens0 7>; @@ -5938,7 +5931,6 @@ cpu-crit { cpu7-thermal { polling-delay-passive = <250>; - polling-delay = <1000>; thermal-sensors = <&tsens0 8>; @@ -5953,7 +5945,6 @@ cpu-crit { cluster0-thermal { 
polling-delay-passive = <250>; - polling-delay = <1000>; thermal-sensors = <&tsens0 9>; @@ -5967,13 +5958,25 @@ cpu-crit { }; gpu-thermal { - polling-delay-passive = <0>; - polling-delay = <0>; + polling-delay-passive = <250>; thermal-sensors = <&tsens2 2>; + cooling-maps { + map0 { + trip = <&gpu_alert0>; + cooling-device = <&gpu THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; + }; + }; + trips { - gpu-crit { + gpu_alert0: trip-point0 { + temperature = <85000>; + hysteresis = <1000>; + type = "passive"; + }; + + trip-point1 { temperature = <110000>; hysteresis = <1000>; type = "critical"; @@ -5983,7 +5986,6 @@ gpu-crit { mem-thermal { polling-delay-passive = <250>; - polling-delay = <1000>; thermal-sensors = <&tsens1 15>; diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi index d817a7751086..4ad82b0eb113 100644 --- a/arch/arm64/boot/dts/qcom/sdm845.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi @@ -2666,6 +2666,8 @@ ufs_mem_phy: phy@1d87000 { "ref_aux", "qref"; + power-domains = <&gcc UFS_PHY_GDSC>; + resets = <&ufs_mem_hc 0>; reset-names = "ufsphy"; diff --git a/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts b/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts index 47dc42f6e936..8e30f8cc0916 100644 --- a/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts +++ b/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts @@ -494,6 +494,7 @@ ecsh: hid@5c { &ipa { qcom,gsi-loader = "self"; memory-region = <&ipa_fw_mem>; + firmware-name = "qcom/sdm850/LENOVO/81JL/ipa_fws.elf"; status = "okay"; }; diff --git a/arch/arm64/boot/dts/qcom/sm6115.dtsi b/arch/arm64/boot/dts/qcom/sm6115.dtsi index b4ce5a322107..8fa3bacfb239 100644 --- a/arch/arm64/boot/dts/qcom/sm6115.dtsi +++ b/arch/arm64/boot/dts/qcom/sm6115.dtsi @@ -1231,6 +1231,8 @@ ufs_mem_phy: phy@4807000 { "ref_aux", "qref"; + power-domains = <&gcc GCC_UFS_PHY_GDSC>; + resets = <&ufs_mem_hc 0>; reset-names = "ufsphy"; diff --git a/arch/arm64/boot/dts/qcom/sm6350.dtsi b/arch/arm64/boot/dts/qcom/sm6350.dtsi index 6c7eac0110ba..60383f0d09f3 100644 --- a/arch/arm64/boot/dts/qcom/sm6350.dtsi +++ b/arch/arm64/boot/dts/qcom/sm6350.dtsi @@ -1197,6 +1197,8 @@ ufs_mem_phy: phy@1d87000 { "ref_aux", "qref"; + power-domains = <&gcc UFS_PHY_GDSC>; + resets = <&ufs_mem_hc 0>; reset-names = "ufsphy"; @@ -1321,6 +1323,7 @@ fastrpc { compatible = "qcom,fastrpc"; qcom,glink-channels = "fastrpcglink-apps-dsp"; label = "adsp"; + qcom,non-secure-domain; #address-cells = <1>; #size-cells = <0>; @@ -1580,6 +1583,7 @@ fastrpc { compatible = "qcom,fastrpc"; qcom,glink-channels = "fastrpcglink-apps-dsp"; label = "cdsp"; + qcom,non-secure-domain; #address-cells = <1>; #size-cells = <0>; diff --git a/arch/arm64/boot/dts/qcom/sm8250.dtsi b/arch/arm64/boot/dts/qcom/sm8250.dtsi index 8ccade628f1f..b2af44bc3b78 100644 --- a/arch/arm64/boot/dts/qcom/sm8250.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8250.dtsi @@ -2580,6 +2580,8 @@ ufs_mem_phy: phy@1d87000 { resets = <&ufs_mem_hc 0>; reset-names = "ufsphy"; + power-domains = <&gcc UFS_PHY_GDSC>; + #phy-cells = <0>; status = "disabled"; diff --git a/arch/arm64/boot/dts/qcom/sm8350.dtsi b/arch/arm64/boot/dts/qcom/sm8350.dtsi index f7c4700f00c3..da936548c2ac 100644 --- a/arch/arm64/boot/dts/qcom/sm8350.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8350.dtsi @@ -1779,6 +1779,8 @@ ufs_mem_phy: phy@1d87000 { "ref_aux", "qref"; + power-domains = <&gcc UFS_PHY_GDSC>; + resets = <&ufs_mem_hc 0>; reset-names = "ufsphy"; diff --git a/arch/arm64/boot/dts/qcom/sm8450.dtsi b/arch/arm64/boot/dts/qcom/sm8450.dtsi index 
616461fcbab9..59428d2ee1ad 100644 --- a/arch/arm64/boot/dts/qcom/sm8450.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8450.dtsi @@ -4429,6 +4429,8 @@ ufs_mem_phy: phy@1d87000 { <&gcc GCC_UFS_PHY_PHY_AUX_CLK>, <&gcc GCC_UFS_0_CLKREF_EN>; + power-domains = <&gcc UFS_PHY_GDSC>; + resets = <&ufs_mem_hc 0>; reset-names = "ufsphy"; diff --git a/arch/arm64/boot/dts/qcom/x1e80100-crd.dts b/arch/arm64/boot/dts/qcom/x1e80100-crd.dts index 7618ae1f8b1c..b063dd28149e 100644 --- a/arch/arm64/boot/dts/qcom/x1e80100-crd.dts +++ b/arch/arm64/boot/dts/qcom/x1e80100-crd.dts @@ -840,7 +840,7 @@ &uart21 { }; &usb_1_ss0_hsphy { - vdd-supply = <&vreg_l2e_0p8>; + vdd-supply = <&vreg_l3j_0p8>; vdda12-supply = <&vreg_l2j_1p2>; phys = <&smb2360_0_eusb2_repeater>; @@ -865,7 +865,7 @@ &usb_1_ss0_dwc3 { }; &usb_1_ss1_hsphy { - vdd-supply = <&vreg_l2e_0p8>; + vdd-supply = <&vreg_l3j_0p8>; vdda12-supply = <&vreg_l2j_1p2>; phys = <&smb2360_1_eusb2_repeater>; @@ -890,7 +890,7 @@ &usb_1_ss1_dwc3 { }; &usb_1_ss2_hsphy { - vdd-supply = <&vreg_l2e_0p8>; + vdd-supply = <&vreg_l3j_0p8>; vdda12-supply = <&vreg_l2j_1p2>; phys = <&smb2360_2_eusb2_repeater>; diff --git a/arch/arm64/boot/dts/qcom/x1e80100-qcp.dts b/arch/arm64/boot/dts/qcom/x1e80100-qcp.dts index 5567636c8b27..df3577fcd93c 100644 --- a/arch/arm64/boot/dts/qcom/x1e80100-qcp.dts +++ b/arch/arm64/boot/dts/qcom/x1e80100-qcp.dts @@ -536,7 +536,7 @@ &uart21 { }; &usb_1_ss0_hsphy { - vdd-supply = <&vreg_l2e_0p8>; + vdd-supply = <&vreg_l3j_0p8>; vdda12-supply = <&vreg_l2j_1p2>; phys = <&smb2360_0_eusb2_repeater>; @@ -561,7 +561,7 @@ &usb_1_ss0_dwc3 { }; &usb_1_ss1_hsphy { - vdd-supply = <&vreg_l2e_0p8>; + vdd-supply = <&vreg_l3j_0p8>; vdda12-supply = <&vreg_l2j_1p2>; phys = <&smb2360_1_eusb2_repeater>; @@ -586,7 +586,7 @@ &usb_1_ss1_dwc3 { }; &usb_1_ss2_hsphy { - vdd-supply = <&vreg_l2e_0p8>; + vdd-supply = <&vreg_l3j_0p8>; vdda12-supply = <&vreg_l2j_1p2>; phys = <&smb2360_2_eusb2_repeater>; diff --git a/arch/arm64/boot/dts/renesas/r8a779a0.dtsi b/arch/arm64/boot/dts/renesas/r8a779a0.dtsi index cfa70b441e32..d76347001cc1 100644 --- a/arch/arm64/boot/dts/renesas/r8a779a0.dtsi +++ b/arch/arm64/boot/dts/renesas/r8a779a0.dtsi @@ -2919,6 +2919,9 @@ timer { interrupts-extended = <&gic GIC_PPI 13 IRQ_TYPE_LEVEL_LOW>, <&gic GIC_PPI 14 IRQ_TYPE_LEVEL_LOW>, <&gic GIC_PPI 11 IRQ_TYPE_LEVEL_LOW>, - <&gic GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>; + <&gic GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>, + <&gic GIC_PPI 12 IRQ_TYPE_LEVEL_LOW>; + interrupt-names = "sec-phys", "phys", "virt", "hyp-phys", + "hyp-virt"; }; }; diff --git a/arch/arm64/boot/dts/renesas/r8a779f0.dtsi b/arch/arm64/boot/dts/renesas/r8a779f0.dtsi index 72cf30341fc4..9629adb47d99 100644 --- a/arch/arm64/boot/dts/renesas/r8a779f0.dtsi +++ b/arch/arm64/boot/dts/renesas/r8a779f0.dtsi @@ -1324,7 +1324,10 @@ timer { interrupts-extended = <&gic GIC_PPI 13 IRQ_TYPE_LEVEL_LOW>, <&gic GIC_PPI 14 IRQ_TYPE_LEVEL_LOW>, <&gic GIC_PPI 11 IRQ_TYPE_LEVEL_LOW>, - <&gic GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>; + <&gic GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>, + <&gic GIC_PPI 12 IRQ_TYPE_LEVEL_LOW>; + interrupt-names = "sec-phys", "phys", "virt", "hyp-phys", + "hyp-virt"; }; ufs30_clk: ufs30-clk { diff --git a/arch/arm64/boot/dts/renesas/r8a779g0.dtsi b/arch/arm64/boot/dts/renesas/r8a779g0.dtsi index 9bc542bc6169..873588a84e15 100644 --- a/arch/arm64/boot/dts/renesas/r8a779g0.dtsi +++ b/arch/arm64/boot/dts/renesas/r8a779g0.dtsi @@ -2359,6 +2359,9 @@ timer { interrupts-extended = <&gic GIC_PPI 13 IRQ_TYPE_LEVEL_LOW>, <&gic GIC_PPI 14 IRQ_TYPE_LEVEL_LOW>, <&gic GIC_PPI 11 
IRQ_TYPE_LEVEL_LOW>, - <&gic GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>; + <&gic GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>, + <&gic GIC_PPI 12 IRQ_TYPE_LEVEL_LOW>; + interrupt-names = "sec-phys", "phys", "virt", "hyp-phys", + "hyp-virt"; }; }; diff --git a/arch/arm64/boot/dts/renesas/r8a779h0.dtsi b/arch/arm64/boot/dts/renesas/r8a779h0.dtsi index 6d791024cabe..792afe1a4574 100644 --- a/arch/arm64/boot/dts/renesas/r8a779h0.dtsi +++ b/arch/arm64/boot/dts/renesas/r8a779h0.dtsi @@ -16,7 +16,6 @@ / { cluster0_opp: opp-table-0 { compatible = "operating-points-v2"; - opp-shared; opp-500000000 { opp-hz = /bits/ 64 <500000000>; diff --git a/arch/arm64/boot/dts/renesas/r9a07g043u.dtsi b/arch/arm64/boot/dts/renesas/r9a07g043u.dtsi index 165bfcfef3bc..18ef297db933 100644 --- a/arch/arm64/boot/dts/renesas/r9a07g043u.dtsi +++ b/arch/arm64/boot/dts/renesas/r9a07g043u.dtsi @@ -50,7 +50,10 @@ timer { interrupts-extended = <&gic GIC_PPI 13 IRQ_TYPE_LEVEL_LOW>, <&gic GIC_PPI 14 IRQ_TYPE_LEVEL_LOW>, <&gic GIC_PPI 11 IRQ_TYPE_LEVEL_LOW>, - <&gic GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>; + <&gic GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>, + <&gic GIC_PPI 12 IRQ_TYPE_LEVEL_LOW>; + interrupt-names = "sec-phys", "phys", "virt", "hyp-phys", + "hyp-virt"; }; }; diff --git a/arch/arm64/boot/dts/renesas/r9a07g044.dtsi b/arch/arm64/boot/dts/renesas/r9a07g044.dtsi index 88634ae43287..1a9891ba6c02 100644 --- a/arch/arm64/boot/dts/renesas/r9a07g044.dtsi +++ b/arch/arm64/boot/dts/renesas/r9a07g044.dtsi @@ -1334,6 +1334,9 @@ timer { interrupts-extended = <&gic GIC_PPI 13 IRQ_TYPE_LEVEL_LOW>, <&gic GIC_PPI 14 IRQ_TYPE_LEVEL_LOW>, <&gic GIC_PPI 11 IRQ_TYPE_LEVEL_LOW>, - <&gic GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>; + <&gic GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>, + <&gic GIC_PPI 12 IRQ_TYPE_LEVEL_LOW>; + interrupt-names = "sec-phys", "phys", "virt", "hyp-phys", + "hyp-virt"; }; }; diff --git a/arch/arm64/boot/dts/renesas/r9a07g054.dtsi b/arch/arm64/boot/dts/renesas/r9a07g054.dtsi index e89bfe4085f5..a2318478a66b 100644 --- a/arch/arm64/boot/dts/renesas/r9a07g054.dtsi +++ b/arch/arm64/boot/dts/renesas/r9a07g054.dtsi @@ -1342,6 +1342,9 @@ timer { interrupts-extended = <&gic GIC_PPI 13 IRQ_TYPE_LEVEL_LOW>, <&gic GIC_PPI 14 IRQ_TYPE_LEVEL_LOW>, <&gic GIC_PPI 11 IRQ_TYPE_LEVEL_LOW>, - <&gic GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>; + <&gic GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>, + <&gic GIC_PPI 12 IRQ_TYPE_LEVEL_LOW>; + interrupt-names = "sec-phys", "phys", "virt", "hyp-phys", + "hyp-virt"; }; }; diff --git a/arch/arm64/boot/dts/renesas/r9a08g045.dtsi b/arch/arm64/boot/dts/renesas/r9a08g045.dtsi index f5f3f4f4c8d6..a2adc4e27ce9 100644 --- a/arch/arm64/boot/dts/renesas/r9a08g045.dtsi +++ b/arch/arm64/boot/dts/renesas/r9a08g045.dtsi @@ -294,6 +294,9 @@ timer { interrupts-extended = <&gic GIC_PPI 13 IRQ_TYPE_LEVEL_LOW>, <&gic GIC_PPI 14 IRQ_TYPE_LEVEL_LOW>, <&gic GIC_PPI 11 IRQ_TYPE_LEVEL_LOW>, - <&gic GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>; + <&gic GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>, + <&gic GIC_PPI 12 IRQ_TYPE_LEVEL_LOW>; + interrupt-names = "sec-phys", "phys", "virt", "hyp-phys", + "hyp-virt"; }; }; diff --git a/arch/arm64/boot/dts/rockchip/rk3308-rock-pi-s.dts b/arch/arm64/boot/dts/rockchip/rk3308-rock-pi-s.dts index 079101cddd65..f1d4118ffb7d 100644 --- a/arch/arm64/boot/dts/rockchip/rk3308-rock-pi-s.dts +++ b/arch/arm64/boot/dts/rockchip/rk3308-rock-pi-s.dts @@ -17,6 +17,7 @@ aliases { ethernet0 = &gmac; mmc0 = &emmc; mmc1 = &sdmmc; + mmc2 = &sdio; }; chosen { @@ -144,11 +145,25 @@ &emmc { &gmac { clock_in_out = "output"; + phy-handle = <&rtl8201f>; phy-supply = <&vcc_io>; - snps,reset-gpio = <&gpio0 RK_PA7 
GPIO_ACTIVE_LOW>; - snps,reset-active-low; - snps,reset-delays-us = <0 50000 50000>; status = "okay"; + + mdio { + compatible = "snps,dwmac-mdio"; + #address-cells = <1>; + #size-cells = <0>; + + rtl8201f: ethernet-phy@1 { + compatible = "ethernet-phy-ieee802.3-c22"; + reg = <1>; + pinctrl-names = "default"; + pinctrl-0 = <&mac_rst>; + reset-assert-us = <20000>; + reset-deassert-us = <50000>; + reset-gpios = <&gpio0 RK_PA7 GPIO_ACTIVE_LOW>; + }; + }; }; &gpio0 { @@ -221,6 +236,26 @@ &pinctrl { pinctrl-names = "default"; pinctrl-0 = <&rtc_32k>; + bluetooth { + bt_reg_on: bt-reg-on { + rockchip,pins = <4 RK_PB3 RK_FUNC_GPIO &pcfg_pull_none>; + }; + + bt_wake_host: bt-wake-host { + rockchip,pins = <4 RK_PB4 RK_FUNC_GPIO &pcfg_pull_down>; + }; + + host_wake_bt: host-wake-bt { + rockchip,pins = <4 RK_PB2 RK_FUNC_GPIO &pcfg_pull_none>; + }; + }; + + gmac { + mac_rst: mac-rst { + rockchip,pins = <0 RK_PA7 RK_FUNC_GPIO &pcfg_pull_none>; + }; + }; + leds { green_led: green-led { rockchip,pins = <0 RK_PA6 RK_FUNC_GPIO &pcfg_pull_none>; @@ -264,15 +299,31 @@ &sdio { cap-sd-highspeed; cap-sdio-irq; keep-power-in-suspend; - max-frequency = <1000000>; + max-frequency = <100000000>; mmc-pwrseq = <&sdio_pwrseq>; + no-mmc; + no-sd; non-removable; - sd-uhs-sdr104; + sd-uhs-sdr50; + vmmc-supply = <&vcc_io>; + vqmmc-supply = <&vcc_1v8>; status = "okay"; + + rtl8723ds: wifi@1 { + reg = <1>; + interrupt-parent = <&gpio0>; + interrupts = <RK_PA0 IRQ_TYPE_LEVEL_HIGH>; + interrupt-names = "host-wake"; + pinctrl-names = "default"; + pinctrl-0 = <&wifi_host_wake>; + }; }; &sdmmc { + cap-mmc-highspeed; cap-sd-highspeed; + disable-wp; + vmmc-supply = <&vcc_io>; status = "okay"; }; @@ -291,16 +342,22 @@ u2phy_otg: otg-port { }; &uart0 { + pinctrl-names = "default"; + pinctrl-0 = <&uart0_xfer>; status = "okay"; }; &uart4 { + uart-has-rtscts; status = "okay"; bluetooth { - compatible = "realtek,rtl8723bs-bt"; - device-wake-gpios = <&gpio4 RK_PB3 GPIO_ACTIVE_HIGH>; + compatible = "realtek,rtl8723ds-bt"; + device-wake-gpios = <&gpio4 RK_PB2 GPIO_ACTIVE_HIGH>; + enable-gpios = <&gpio4 RK_PB3 GPIO_ACTIVE_HIGH>; host-wake-gpios = <&gpio4 RK_PB4 GPIO_ACTIVE_HIGH>; + pinctrl-names = "default"; + pinctrl-0 = <&bt_reg_on &bt_wake_host &host_wake_bt>; }; }; diff --git a/arch/arm64/boot/dts/rockchip/rk3328.dtsi b/arch/arm64/boot/dts/rockchip/rk3328.dtsi index 07dcc949b899..b01efd6d042c 100644 --- a/arch/arm64/boot/dts/rockchip/rk3328.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3328.dtsi @@ -850,8 +850,8 @@ cru: clock-controller@ff440000 { <0>, <24000000>, <24000000>, <24000000>, <15000000>, <15000000>, - <100000000>, <100000000>, - <100000000>, <100000000>, + <300000000>, <100000000>, + <400000000>, <100000000>, <50000000>, <100000000>, <100000000>, <100000000>, <50000000>, <50000000>, diff --git a/arch/arm64/boot/dts/rockchip/rk3566-roc-pc.dts b/arch/arm64/boot/dts/rockchip/rk3566-roc-pc.dts index 63eea27293fe..67e7801bd489 100644 --- a/arch/arm64/boot/dts/rockchip/rk3566-roc-pc.dts +++ b/arch/arm64/boot/dts/rockchip/rk3566-roc-pc.dts @@ -269,7 +269,7 @@ rk809: pmic@20 { vcc9-supply = <&vcc3v3_sys>; codec { - mic-in-differential; + rockchip,mic-in-differential; }; regulators { diff --git a/arch/arm64/boot/dts/rockchip/rk3568-evb1-v10.dts b/arch/arm64/boot/dts/rockchip/rk3568-evb1-v10.dts index 19f8fc369b13..8c3ab07d3807 100644 --- a/arch/arm64/boot/dts/rockchip/rk3568-evb1-v10.dts +++ b/arch/arm64/boot/dts/rockchip/rk3568-evb1-v10.dts @@ -475,7 +475,7 @@ regulator-state-mem { }; codec { - mic-in-differential; + 
rockchip,mic-in-differential; }; }; }; diff --git a/arch/arm64/boot/dts/rockchip/rk3568-fastrhino-r66s.dts b/arch/arm64/boot/dts/rockchip/rk3568-fastrhino-r66s.dts index 58ab7e9971db..b5e67990dd0f 100644 --- a/arch/arm64/boot/dts/rockchip/rk3568-fastrhino-r66s.dts +++ b/arch/arm64/boot/dts/rockchip/rk3568-fastrhino-r66s.dts @@ -11,6 +11,10 @@ aliases { }; }; +&pmu_io_domains { + vccio3-supply = <&vccio_sd>; +}; + &sdmmc0 { bus-width = <4>; cap-mmc-highspeed; diff --git a/arch/arm64/boot/dts/rockchip/rk3568-fastrhino-r66s.dtsi b/arch/arm64/boot/dts/rockchip/rk3568-fastrhino-r66s.dtsi index 89e84e3a9262..25c49bdbadbc 100644 --- a/arch/arm64/boot/dts/rockchip/rk3568-fastrhino-r66s.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3568-fastrhino-r66s.dtsi @@ -39,9 +39,9 @@ status_led: led-status { }; }; - dc_12v: dc-12v-regulator { + vcc12v_dcin: vcc12v-dcin-regulator { compatible = "regulator-fixed"; - regulator-name = "dc_12v"; + regulator-name = "vcc12v_dcin"; regulator-always-on; regulator-boot-on; regulator-min-microvolt = <12000000>; @@ -65,7 +65,7 @@ vcc3v3_sys: vcc3v3-sys-regulator { regulator-boot-on; regulator-min-microvolt = <3300000>; regulator-max-microvolt = <3300000>; - vin-supply = <&dc_12v>; + vin-supply = <&vcc12v_dcin>; }; vcc5v0_sys: vcc5v0-sys-regulator { @@ -75,16 +75,7 @@ vcc5v0_sys: vcc5v0-sys-regulator { regulator-boot-on; regulator-min-microvolt = <5000000>; regulator-max-microvolt = <5000000>; - vin-supply = <&dc_12v>; - }; - - vcc5v0_usb_host: vcc5v0-usb-host-regulator { - compatible = "regulator-fixed"; - regulator-name = "vcc5v0_usb_host"; - regulator-always-on; - regulator-boot-on; - regulator-min-microvolt = <5000000>; - regulator-max-microvolt = <5000000>; + vin-supply = <&vcc12v_dcin>; }; vcc5v0_usb_otg: vcc5v0-usb-otg-regulator { @@ -94,8 +85,9 @@ vcc5v0_usb_otg: vcc5v0-usb-otg-regulator { pinctrl-names = "default"; pinctrl-0 = <&vcc5v0_usb_otg_en>; regulator-name = "vcc5v0_usb_otg"; - regulator-always-on; - regulator-boot-on; + regulator-min-microvolt = <5000000>; + regulator-max-microvolt = <5000000>; + vin-supply = <&vcc5v0_sys>; }; }; @@ -123,6 +115,10 @@ &cpu3 { cpu-supply = <&vdd_cpu>; }; +&display_subsystem { + status = "disabled"; +}; + &gpu { mali-supply = <&vdd_gpu>; status = "okay"; @@ -405,8 +401,8 @@ vcc5v0_usb_otg_en: vcc5v0-usb-otg-en { &pmu_io_domains { pmuio1-supply = <&vcc3v3_pmu>; pmuio2-supply = <&vcc3v3_pmu>; - vccio1-supply = <&vccio_acodec>; - vccio3-supply = <&vccio_sd>; + vccio1-supply = <&vcc_3v3>; + vccio2-supply = <&vcc_1v8>; vccio4-supply = <&vcc_1v8>; vccio5-supply = <&vcc_3v3>; vccio6-supply = <&vcc_1v8>; @@ -429,28 +425,12 @@ &uart2 { status = "okay"; }; -&usb_host0_ehci { - status = "okay"; -}; - -&usb_host0_ohci { - status = "okay"; -}; - &usb_host0_xhci { dr_mode = "host"; extcon = <&usb2phy0>; status = "okay"; }; -&usb_host1_ehci { - status = "okay"; -}; - -&usb_host1_ohci { - status = "okay"; -}; - &usb_host1_xhci { status = "okay"; }; @@ -460,7 +440,7 @@ &usb2phy0 { }; &usb2phy0_host { - phy-supply = <&vcc5v0_usb_host>; + phy-supply = <&vcc5v0_sys>; status = "okay"; }; diff --git a/arch/arm64/boot/dts/rockchip/rk3568-fastrhino-r68s.dts b/arch/arm64/boot/dts/rockchip/rk3568-fastrhino-r68s.dts index e1fe5e442689..ce2a5e1ccefc 100644 --- a/arch/arm64/boot/dts/rockchip/rk3568-fastrhino-r68s.dts +++ b/arch/arm64/boot/dts/rockchip/rk3568-fastrhino-r68s.dts @@ -39,7 +39,7 @@ &gmac0_tx_bus2 &gmac0_rx_bus2 &gmac0_rgmii_clk &gmac0_rgmii_bus>; - snps,reset-gpio = <&gpio0 RK_PB0 GPIO_ACTIVE_LOW>; + snps,reset-gpio = <&gpio1 RK_PB0 
GPIO_ACTIVE_LOW>; snps,reset-active-low; /* Reset time is 15ms, 50ms for rtl8211f */ snps,reset-delays-us = <0 15000 50000>; @@ -61,7 +61,7 @@ &gmac1m1_tx_bus2 &gmac1m1_rx_bus2 &gmac1m1_rgmii_clk &gmac1m1_rgmii_bus>; - snps,reset-gpio = <&gpio0 RK_PB1 GPIO_ACTIVE_LOW>; + snps,reset-gpio = <&gpio1 RK_PB1 GPIO_ACTIVE_LOW>; snps,reset-active-low; /* Reset time is 15ms, 50ms for rtl8211f */ snps,reset-delays-us = <0 15000 50000>; @@ -71,18 +71,18 @@ &gmac1m1_rgmii_clk }; &mdio0 { - rgmii_phy0: ethernet-phy@0 { + rgmii_phy0: ethernet-phy@1 { compatible = "ethernet-phy-ieee802.3-c22"; - reg = <0>; + reg = <0x1>; pinctrl-0 = <ð_phy0_reset_pin>; pinctrl-names = "default"; }; }; &mdio1 { - rgmii_phy1: ethernet-phy@0 { + rgmii_phy1: ethernet-phy@1 { compatible = "ethernet-phy-ieee802.3-c22"; - reg = <0>; + reg = <0x1>; pinctrl-0 = <ð_phy1_reset_pin>; pinctrl-names = "default"; }; @@ -102,6 +102,10 @@ eth_phy1_reset_pin: eth-phy1-reset-pin { }; }; +&pmu_io_domains { + vccio3-supply = <&vcc_3v3>; +}; + &sdhci { bus-width = <8>; max-frequency = <200000000>; diff --git a/arch/arm64/boot/dts/rockchip/rk3568-rock-3a.dts b/arch/arm64/boot/dts/rockchip/rk3568-rock-3a.dts index ebdedea15ad1..59f1403b4fa5 100644 --- a/arch/arm64/boot/dts/rockchip/rk3568-rock-3a.dts +++ b/arch/arm64/boot/dts/rockchip/rk3568-rock-3a.dts @@ -531,10 +531,6 @@ regulator-state-mem { }; }; }; - - codec { - mic-in-differential; - }; }; }; diff --git a/arch/arm64/boot/dts/rockchip/rk356x.dtsi b/arch/arm64/boot/dts/rockchip/rk356x.dtsi index d8543b5557ee..3e2a8bfcafea 100644 --- a/arch/arm64/boot/dts/rockchip/rk356x.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk356x.dtsi @@ -790,6 +790,7 @@ vop_mmu: iommu@fe043e00 { clocks = <&cru ACLK_VOP>, <&cru HCLK_VOP>; clock-names = "aclk", "iface"; #iommu-cells = <0>; + power-domains = <&power RK3568_PD_VO>; status = "disabled"; }; diff --git a/arch/arm64/boot/dts/ti/k3-am62-main.dtsi b/arch/arm64/boot/dts/ti/k3-am62-main.dtsi index 448a59dc53a7..0f2722c4bcc3 100644 --- a/arch/arm64/boot/dts/ti/k3-am62-main.dtsi +++ b/arch/arm64/boot/dts/ti/k3-am62-main.dtsi @@ -141,8 +141,8 @@ main_pktdma: dma-controller@485c0000 { compatible = "ti,am64-dmss-pktdma"; reg = <0x00 0x485c0000 0x00 0x100>, <0x00 0x4a800000 0x00 0x20000>, - <0x00 0x4aa00000 0x00 0x40000>, - <0x00 0x4b800000 0x00 0x400000>, + <0x00 0x4aa00000 0x00 0x20000>, + <0x00 0x4b800000 0x00 0x200000>, <0x00 0x485e0000 0x00 0x10000>, <0x00 0x484a0000 0x00 0x2000>, <0x00 0x484c0000 0x00 0x2000>, diff --git a/arch/arm64/boot/dts/ti/k3-am62-verdin.dtsi b/arch/arm64/boot/dts/ti/k3-am62-verdin.dtsi index 2038c5e04639..359f53f3e019 100644 --- a/arch/arm64/boot/dts/ti/k3-am62-verdin.dtsi +++ b/arch/arm64/boot/dts/ti/k3-am62-verdin.dtsi @@ -1364,8 +1364,6 @@ &mcasp0 { 0 0 0 0 >; tdm-slots = <2>; - rx-num-evt = <32>; - tx-num-evt = <32>; #sound-dai-cells = <0>; status = "disabled"; }; @@ -1382,8 +1380,6 @@ &mcasp1 { 0 0 0 0 >; tdm-slots = <2>; - rx-num-evt = <32>; - tx-num-evt = <32>; #sound-dai-cells = <0>; status = "disabled"; }; diff --git a/arch/arm64/boot/dts/ti/k3-am625-beagleplay.dts b/arch/arm64/boot/dts/ti/k3-am625-beagleplay.dts index 18e3070a8683..70de288d728e 100644 --- a/arch/arm64/boot/dts/ti/k3-am625-beagleplay.dts +++ b/arch/arm64/boot/dts/ti/k3-am625-beagleplay.dts @@ -924,6 +924,4 @@ &mcasp1 { 0 0 0 0 0 0 0 0 >; - tx-num-evt = <32>; - rx-num-evt = <32>; }; diff --git a/arch/arm64/boot/dts/ti/k3-am625-phyboard-lyra-rdk.dts b/arch/arm64/boot/dts/ti/k3-am625-phyboard-lyra-rdk.dts index 50d2573c840e..6c24e4d39ee8 100644 --- 
a/arch/arm64/boot/dts/ti/k3-am625-phyboard-lyra-rdk.dts +++ b/arch/arm64/boot/dts/ti/k3-am625-phyboard-lyra-rdk.dts @@ -441,8 +441,6 @@ &mcasp2 { 0 0 0 0 0 0 0 0 >; - tx-num-evt = <32>; - rx-num-evt = <32>; status = "okay"; }; diff --git a/arch/arm64/boot/dts/ti/k3-am62a-main.dtsi b/arch/arm64/boot/dts/ti/k3-am62a-main.dtsi index bf9c2d9c6439..ce4a2f105630 100644 --- a/arch/arm64/boot/dts/ti/k3-am62a-main.dtsi +++ b/arch/arm64/boot/dts/ti/k3-am62a-main.dtsi @@ -120,8 +120,8 @@ main_pktdma: dma-controller@485c0000 { compatible = "ti,am64-dmss-pktdma"; reg = <0x00 0x485c0000 0x00 0x100>, <0x00 0x4a800000 0x00 0x20000>, - <0x00 0x4aa00000 0x00 0x40000>, - <0x00 0x4b800000 0x00 0x400000>, + <0x00 0x4aa00000 0x00 0x20000>, + <0x00 0x4b800000 0x00 0x200000>, <0x00 0x485e0000 0x00 0x10000>, <0x00 0x484a0000 0x00 0x2000>, <0x00 0x484c0000 0x00 0x2000>, diff --git a/arch/arm64/boot/dts/ti/k3-am62a7-sk.dts b/arch/arm64/boot/dts/ti/k3-am62a7-sk.dts index fa43cd0b631e..e026f65738b3 100644 --- a/arch/arm64/boot/dts/ti/k3-am62a7-sk.dts +++ b/arch/arm64/boot/dts/ti/k3-am62a7-sk.dts @@ -701,8 +701,6 @@ &mcasp1 { 0 0 0 0 0 0 0 0 >; - tx-num-evt = <32>; - rx-num-evt = <32>; }; &dss { diff --git a/arch/arm64/boot/dts/ti/k3-am62p-main.dtsi b/arch/arm64/boot/dts/ti/k3-am62p-main.dtsi index 900d1f9530a2..2b9bc77a0540 100644 --- a/arch/arm64/boot/dts/ti/k3-am62p-main.dtsi +++ b/arch/arm64/boot/dts/ti/k3-am62p-main.dtsi @@ -123,8 +123,8 @@ main_pktdma: dma-controller@485c0000 { compatible = "ti,am64-dmss-pktdma"; reg = <0x00 0x485c0000 0x00 0x100>, <0x00 0x4a800000 0x00 0x20000>, - <0x00 0x4aa00000 0x00 0x40000>, - <0x00 0x4b800000 0x00 0x400000>, + <0x00 0x4aa00000 0x00 0x20000>, + <0x00 0x4b800000 0x00 0x200000>, <0x00 0x485e0000 0x00 0x10000>, <0x00 0x484a0000 0x00 0x2000>, <0x00 0x484c0000 0x00 0x2000>, diff --git a/arch/arm64/boot/dts/ti/k3-am62p5-sk.dts b/arch/arm64/boot/dts/ti/k3-am62p5-sk.dts index 6e7234659111..fb980d46e304 100644 --- a/arch/arm64/boot/dts/ti/k3-am62p5-sk.dts +++ b/arch/arm64/boot/dts/ti/k3-am62p5-sk.dts @@ -207,7 +207,7 @@ main_mcasp1_pins_default: main-mcasp1-default-pins { pinctrl-single,pins = < AM62PX_IOPAD(0x0090, PIN_INPUT, 2) /* (U24) GPMC0_BE0n_CLE.MCASP1_ACLKX */ AM62PX_IOPAD(0x0098, PIN_INPUT, 2) /* (AA24) GPMC0_WAIT0.MCASP1_AFSX */ - AM62PX_IOPAD(0x008c, PIN_INPUT, 2) /* (T25) GPMC0_WEn.MCASP1_AXR0 */ + AM62PX_IOPAD(0x008c, PIN_OUTPUT, 2) /* (T25) GPMC0_WEn.MCASP1_AXR0 */ AM62PX_IOPAD(0x0084, PIN_INPUT, 2) /* (R25) GPMC0_ADVn_ALE.MCASP1_AXR2 */ >; }; @@ -549,8 +549,6 @@ &mcasp1 { 0 0 0 0 0 0 0 0 >; - tx-num-evt = <32>; - rx-num-evt = <32>; }; &fss { diff --git a/arch/arm64/boot/dts/ti/k3-am62x-sk-common.dtsi b/arch/arm64/boot/dts/ti/k3-am62x-sk-common.dtsi index 3c45782ab2b7..63b4e88e3a94 100644 --- a/arch/arm64/boot/dts/ti/k3-am62x-sk-common.dtsi +++ b/arch/arm64/boot/dts/ti/k3-am62x-sk-common.dtsi @@ -504,8 +504,6 @@ &mcasp1 { 0 0 0 0 0 0 0 0 >; - tx-num-evt = <32>; - rx-num-evt = <32>; }; &dss { diff --git a/arch/arm64/boot/dts/ti/k3-am642-hummingboard-t.dts b/arch/arm64/boot/dts/ti/k3-am642-hummingboard-t.dts index 234d76e4e944..5b5e9eeec5ac 100644 --- a/arch/arm64/boot/dts/ti/k3-am642-hummingboard-t.dts +++ b/arch/arm64/boot/dts/ti/k3-am642-hummingboard-t.dts @@ -282,7 +282,6 @@ &main_uart3 { pinctrl-names = "default"; pinctrl-0 = <&main_uart3_default_pins>; uart-has-rtscts; - rs485-rts-active-low; linux,rs485-enabled-at-boot-time; status = "okay"; }; diff --git a/arch/arm64/boot/dts/ti/k3-j722s.dtsi b/arch/arm64/boot/dts/ti/k3-j722s.dtsi index c75744edb143..9132b0232b0b 
100644 --- a/arch/arm64/boot/dts/ti/k3-j722s.dtsi +++ b/arch/arm64/boot/dts/ti/k3-j722s.dtsi @@ -83,6 +83,14 @@ &inta_main_dmss { ti,interrupt-ranges = <7 71 21>; }; +&main_gpio0 { + ti,ngpio = <87>; +}; + +&main_gpio1 { + ti,ngpio = <73>; +}; + &oc_sram { reg = <0x00 0x70000000 0x00 0x40000>; ranges = <0x00 0x00 0x70000000 0x40000>; diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index f8efbc128446..7a4f5604be3f 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -1065,6 +1065,28 @@ static inline bool pgtable_l5_enabled(void) { return false; } #define p4d_offset_kimg(dir,addr) ((p4d_t *)dir) +static inline +p4d_t *p4d_offset_lockless_folded(pgd_t *pgdp, pgd_t pgd, unsigned long addr) +{ + /* + * With runtime folding of the pud, pud_offset_lockless() passes + * the 'pgd_t *' we return here to p4d_to_folded_pud(), which + * will offset the pointer assuming that it points into + * a page-table page. However, the fast GUP path passes us a + * pgd_t allocated on the stack and so we must use the original + * pointer in 'pgdp' to construct the p4d pointer instead of + * using the generic p4d_offset_lockless() implementation. + * + * Note: reusing the original pointer means that we may + * dereference the same (live) page-table entry multiple times. + * This is safe because it is still only loaded once in the + * context of each level and the CPU guarantees same-address + * read-after-read ordering. + */ + return p4d_offset(pgdp, addr); +} +#define p4d_offset_lockless p4d_offset_lockless_folded + #endif /* CONFIG_PGTABLE_LEVELS > 4 */ #define pgd_ERROR(e) \ diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c index 31c8b3094dd7..5de85dccc09c 100644 --- a/arch/arm64/kernel/smp.c +++ b/arch/arm64/kernel/smp.c @@ -767,13 +767,15 @@ void __init smp_prepare_cpus(unsigned int max_cpus) } } -static const char *ipi_types[NR_IPI] __tracepoint_string = { +static const char *ipi_types[MAX_IPI] __tracepoint_string = { [IPI_RESCHEDULE] = "Rescheduling interrupts", [IPI_CALL_FUNC] = "Function call interrupts", [IPI_CPU_STOP] = "CPU stop interrupts", [IPI_CPU_CRASH_STOP] = "CPU stop (for crash dump) interrupts", [IPI_TIMER] = "Timer broadcast interrupts", [IPI_IRQ_WORK] = "IRQ work interrupts", + [IPI_CPU_BACKTRACE] = "CPU backtrace interrupts", + [IPI_KGDB_ROUNDUP] = "KGDB roundup interrupts", }; static void smp_cross_call(const struct cpumask *target, unsigned int ipinr); @@ -784,7 +786,7 @@ int arch_show_interrupts(struct seq_file *p, int prec) { unsigned int cpu, i; - for (i = 0; i < NR_IPI; i++) { + for (i = 0; i < MAX_IPI; i++) { seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i, prec >= 4 ? 
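
For contrast with the arm64 p4d_offset_lockless_folded() helper added above: the generic fallback in include/linux/pgtable.h offsets into the caller's on-stack copy of the pgd, which is exactly what goes wrong once the pud is folded at runtime. Shown here only for reference, approximately:

/*
 * Generic lockless fallback, roughly as in include/linux/pgtable.h: the
 * fast-GUP walker passes both the live pointer and a stack copy, and the
 * default implementation offsets into the copy. The arm64 folded-pud
 * override above must use the live pgdp instead.
 */
#ifndef p4d_offset_lockless
#define p4d_offset_lockless(pgdp, pgd, address) p4d_offset(&(pgd), address)
#endif
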
" " : ""); for_each_online_cpu(cpu) diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c index 720336d28856..1bf483ec971d 100644 --- a/arch/arm64/net/bpf_jit_comp.c +++ b/arch/arm64/net/bpf_jit_comp.c @@ -2141,7 +2141,7 @@ static int prepare_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im, emit(A64_STR64I(A64_R(20), A64_SP, regs_off + 8), ctx); if (flags & BPF_TRAMP_F_CALL_ORIG) { - emit_addr_mov_i64(A64_R(0), (const u64)im, ctx); + emit_a64_mov_i64(A64_R(0), (const u64)im, ctx); emit_call((const u64)__bpf_tramp_enter, ctx); } @@ -2185,7 +2185,7 @@ static int prepare_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im, if (flags & BPF_TRAMP_F_CALL_ORIG) { im->ip_epilogue = ctx->ro_image + ctx->idx; - emit_addr_mov_i64(A64_R(0), (const u64)im, ctx); + emit_a64_mov_i64(A64_R(0), (const u64)im, ctx); emit_call((const u64)__bpf_tramp_exit, ctx); } diff --git a/arch/loongarch/kernel/hw_breakpoint.c b/arch/loongarch/kernel/hw_breakpoint.c index 621ad7634df7..a6e4b605bfa8 100644 --- a/arch/loongarch/kernel/hw_breakpoint.c +++ b/arch/loongarch/kernel/hw_breakpoint.c @@ -221,7 +221,7 @@ static int hw_breakpoint_control(struct perf_event *bp, } enable = csr_read64(LOONGARCH_CSR_CRMD); csr_write64(CSR_CRMD_WE | enable, LOONGARCH_CSR_CRMD); - if (bp->hw.target) + if (bp->hw.target && test_tsk_thread_flag(bp->hw.target, TIF_LOAD_WATCH)) regs->csr_prmd |= CSR_PRMD_PWE; break; case HW_BREAKPOINT_UNINSTALL: diff --git a/arch/loongarch/kernel/ptrace.c b/arch/loongarch/kernel/ptrace.c index 200109de1971..19dc6eff45cc 100644 --- a/arch/loongarch/kernel/ptrace.c +++ b/arch/loongarch/kernel/ptrace.c @@ -589,6 +589,7 @@ static int ptrace_hbp_set_ctrl(unsigned int note_type, struct perf_event *bp; struct perf_event_attr attr; struct arch_hw_breakpoint_ctrl ctrl; + struct thread_info *ti = task_thread_info(tsk); bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx); if (IS_ERR(bp)) @@ -613,8 +614,10 @@ static int ptrace_hbp_set_ctrl(unsigned int note_type, if (err) return err; attr.disabled = 0; + set_ti_thread_flag(ti, TIF_LOAD_WATCH); } else { attr.disabled = 1; + clear_ti_thread_flag(ti, TIF_LOAD_WATCH); } return modify_user_hw_breakpoint(bp, &attr); diff --git a/arch/m68k/amiga/config.c b/arch/m68k/amiga/config.c index d4b170c861bf..0147130dc34e 100644 --- a/arch/m68k/amiga/config.c +++ b/arch/m68k/amiga/config.c @@ -180,6 +180,15 @@ int __init amiga_parse_bootinfo(const struct bi_record *record) dev->slotsize = be16_to_cpu(cd->cd_SlotSize); dev->boardaddr = be32_to_cpu(cd->cd_BoardAddr); dev->boardsize = be32_to_cpu(cd->cd_BoardSize); + + /* CS-LAB Warp 1260 workaround */ + if (be16_to_cpu(dev->rom.er_Manufacturer) == ZORRO_MANUF(ZORRO_PROD_CSLAB_WARP_1260) && + dev->rom.er_Product == ZORRO_PROD(ZORRO_PROD_CSLAB_WARP_1260)) { + + /* turn off all interrupts */ + pr_info("Warp 1260 card detected: applying interrupt storm workaround\n"); + *(uint32_t *)(dev->boardaddr + 0x1000) = 0xfff; + } } else pr_warn("amiga_parse_bootinfo: too many AutoConfig devices\n"); #endif /* CONFIG_ZORRO */ diff --git a/arch/m68k/atari/ataints.c b/arch/m68k/atari/ataints.c index 23256434191c..0465444ceb21 100644 --- a/arch/m68k/atari/ataints.c +++ b/arch/m68k/atari/ataints.c @@ -301,11 +301,7 @@ void __init atari_init_IRQ(void) if (ATARIHW_PRESENT(SCU)) { /* init the SCU if present */ - tt_scu.sys_mask = 0x10; /* enable VBL (for the cursor) and - * disable HSYNC interrupts (who - * needs them?) 
MFP and SCC are - * enabled in VME mask - */ + tt_scu.sys_mask = 0x0; /* disable all interrupts */ tt_scu.vme_mask = 0x60; /* enable MFP and SCC ints */ } else { /* If no SCU and no Hades, the HSYNC interrupt needs to be diff --git a/arch/m68k/include/asm/cmpxchg.h b/arch/m68k/include/asm/cmpxchg.h index d7f3de9c5d6f..4ba14f3535fc 100644 --- a/arch/m68k/include/asm/cmpxchg.h +++ b/arch/m68k/include/asm/cmpxchg.h @@ -32,7 +32,7 @@ static inline unsigned long __arch_xchg(unsigned long x, volatile void * ptr, in x = tmp; break; default: - tmp = __invalid_xchg_size(x, ptr, size); + x = __invalid_xchg_size(x, ptr, size); break; } diff --git a/arch/mips/Makefile b/arch/mips/Makefile index 80aecba24892..5785a3d5ccfb 100644 --- a/arch/mips/Makefile +++ b/arch/mips/Makefile @@ -170,7 +170,7 @@ cflags-$(CONFIG_CPU_NEVADA) += $(call cc-option,-march=rm5200,-march=mips4) \ -Wa,--trap cflags-$(CONFIG_CPU_RM7000) += $(call cc-option,-march=rm7000,-march=mips4) \ -Wa,--trap -cflags-$(CONFIG_CPU_SB1) += $(call cc-option,-march=sb1,-march=mips64r1) \ +cflags-$(CONFIG_CPU_SB1) += $(call cc-option,-march=sb1,-march=mips64) \ -Wa,--trap cflags-$(CONFIG_CPU_SB1) += $(call cc-option,-mno-mdmx) cflags-$(CONFIG_CPU_SB1) += $(call cc-option,-mno-mips3d) diff --git a/arch/mips/boot/dts/loongson/loongson64-2k1000.dtsi b/arch/mips/boot/dts/loongson/loongson64-2k1000.dtsi index ee3e2153dd13..c0be84a6e81f 100644 --- a/arch/mips/boot/dts/loongson/loongson64-2k1000.dtsi +++ b/arch/mips/boot/dts/loongson/loongson64-2k1000.dtsi @@ -23,14 +23,6 @@ cpu0: cpu@0 { }; }; - memory@200000 { - compatible = "memory"; - device_type = "memory"; - reg = <0x00000000 0x00200000 0x00000000 0x0ee00000>, /* 238 MB at 2 MB */ - <0x00000000 0x20000000 0x00000000 0x1f000000>, /* 496 MB at 512 MB */ - <0x00000001 0x10000000 0x00000001 0xb0000000>; /* 6912 MB at 4352MB */ - }; - cpu_clk: cpu_clk { #clock-cells = <0>; compatible = "fixed-clock"; @@ -52,6 +44,13 @@ package0: bus@10000000 { 0 0x40000000 0 0x40000000 0 0x40000000 0xfe 0x00000000 0xfe 0x00000000 0 0x40000000>; + isa@18000000 { + compatible = "isa"; + #size-cells = <1>; + #address-cells = <2>; + ranges = <1 0x0 0x0 0x18000000 0x4000>; + }; + pm: reset-controller@1fe07000 { compatible = "loongson,ls2k-pm"; reg = <0 0x1fe07000 0 0x422>; @@ -137,7 +136,8 @@ gmac@3,0 { <13 IRQ_TYPE_LEVEL_LOW>; interrupt-names = "macirq", "eth_lpi"; interrupt-parent = <&liointc0>; - phy-mode = "rgmii"; + phy-mode = "rgmii-id"; + phy-handle = <&phy1>; mdio { #address-cells = <1>; #size-cells = <0>; @@ -160,7 +160,8 @@ gmac@3,1 { <15 IRQ_TYPE_LEVEL_LOW>; interrupt-names = "macirq", "eth_lpi"; interrupt-parent = <&liointc0>; - phy-mode = "rgmii"; + phy-mode = "rgmii-id"; + phy-handle = <&phy1>; mdio { #address-cells = <1>; #size-cells = <0>; diff --git a/arch/mips/include/asm/mach-loongson64/boot_param.h b/arch/mips/include/asm/mach-loongson64/boot_param.h index e007edd6b60a..9218b3ae3383 100644 --- a/arch/mips/include/asm/mach-loongson64/boot_param.h +++ b/arch/mips/include/asm/mach-loongson64/boot_param.h @@ -42,12 +42,14 @@ enum loongson_cpu_type { Legacy_1B = 0x5, Legacy_2G = 0x6, Legacy_2H = 0x7, + Legacy_2K = 0x8, Loongson_1A = 0x100, Loongson_1B = 0x101, Loongson_2E = 0x200, Loongson_2F = 0x201, Loongson_2G = 0x202, Loongson_2H = 0x203, + Loongson_2K = 0x204, Loongson_3A = 0x300, Loongson_3B = 0x301 }; diff --git a/arch/mips/include/asm/mips-cm.h b/arch/mips/include/asm/mips-cm.h index c2930a75b7e4..1e782275850a 100644 --- a/arch/mips/include/asm/mips-cm.h +++ b/arch/mips/include/asm/mips-cm.h @@ 
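
The m68k __arch_xchg() one-liner above matters because the function ultimately returns x, so the unsupported-size branch has to feed the result of the deliberately undefined __invalid_xchg_size() back into x rather than into the scratch variable. A generic sketch of that link-time size-check trick, with made-up names:

/* Never defined anywhere: referencing it is a deliberate link error. */
extern unsigned long __bad_xchg_size(void);

static inline unsigned long sketch_xchg(volatile unsigned long *ptr,
					unsigned long x, int size)
{
	switch (size) {
	case sizeof(unsigned long):
		/* a real implementation would atomically swap *ptr and x here */
		break;
	default:
		/* assign to x: it is the value the function returns */
		x = __bad_xchg_size();
		break;
	}
	return x;
}
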
-240,6 +240,10 @@ GCR_ACCESSOR_RO(32, 0x0d0, gic_status) GCR_ACCESSOR_RO(32, 0x0f0, cpc_status) #define CM_GCR_CPC_STATUS_EX BIT(0) +/* GCR_ACCESS - Controls core/IOCU access to GCRs */ +GCR_ACCESSOR_RW(32, 0x120, access_cm3) +#define CM_GCR_ACCESS_ACCESSEN GENMASK(7, 0) + /* GCR_L2_CONFIG - Indicates L2 cache configuration when Config5.L2C=1 */ GCR_ACCESSOR_RW(32, 0x130, l2_config) #define CM_GCR_L2_CONFIG_BYPASS BIT(20) diff --git a/arch/mips/kernel/smp-cps.c b/arch/mips/kernel/smp-cps.c index 9cc087dd1c19..395622c37325 100644 --- a/arch/mips/kernel/smp-cps.c +++ b/arch/mips/kernel/smp-cps.c @@ -317,7 +317,10 @@ static void boot_core(unsigned int core, unsigned int vpe_id) write_gcr_co_reset_ext_base(CM_GCR_Cx_RESET_EXT_BASE_UEB); /* Ensure the core can access the GCRs */ - set_gcr_access(1 << core); + if (mips_cm_revision() < CM_REV_CM3) + set_gcr_access(1 << core); + else + set_gcr_access_cm3(1 << core); if (mips_cpc_present()) { /* Reset the core */ diff --git a/arch/mips/loongson64/env.c b/arch/mips/loongson64/env.c index ef3750a6ffac..09ff05269861 100644 --- a/arch/mips/loongson64/env.c +++ b/arch/mips/loongson64/env.c @@ -88,6 +88,12 @@ void __init prom_lefi_init_env(void) cpu_clock_freq = ecpu->cpu_clock_freq; loongson_sysconf.cputype = ecpu->cputype; switch (ecpu->cputype) { + case Legacy_2K: + case Loongson_2K: + smp_group[0] = 0x900000001fe11000; + loongson_sysconf.cores_per_node = 2; + loongson_sysconf.cores_per_package = 2; + break; case Legacy_3A: case Loongson_3A: loongson_sysconf.cores_per_node = 4; @@ -221,6 +227,8 @@ void __init prom_lefi_init_env(void) default: break; } + } else if ((read_c0_prid() & PRID_IMP_MASK) == PRID_IMP_LOONGSON_64R) { + loongson_fdt_blob = __dtb_loongson64_2core_2k1000_begin; } else if ((read_c0_prid() & PRID_IMP_MASK) == PRID_IMP_LOONGSON_64G) { if (loongson_sysconf.bridgetype == LS7A) loongson_fdt_blob = __dtb_loongson64g_4core_ls7a_begin; diff --git a/arch/mips/loongson64/reset.c b/arch/mips/loongson64/reset.c index e01c8d4a805a..3e20ade0503a 100644 --- a/arch/mips/loongson64/reset.c +++ b/arch/mips/loongson64/reset.c @@ -11,6 +11,7 @@ #include <linux/init.h> #include <linux/kexec.h> #include <linux/pm.h> +#include <linux/reboot.h> #include <linux/slab.h> #include <asm/bootinfo.h> @@ -21,36 +22,21 @@ #include <loongson.h> #include <boot_param.h> -static void loongson_restart(char *command) +static int firmware_restart(struct sys_off_data *unusedd) { void (*fw_restart)(void) = (void *)loongson_sysconf.restart_addr; fw_restart(); - while (1) { - if (cpu_wait) - cpu_wait(); - } + return NOTIFY_DONE; } -static void loongson_poweroff(void) +static int firmware_poweroff(struct sys_off_data *unused) { void (*fw_poweroff)(void) = (void *)loongson_sysconf.poweroff_addr; fw_poweroff(); - while (1) { - if (cpu_wait) - cpu_wait(); - } -} - -static void loongson_halt(void) -{ - pr_notice("\n\n** You can safely turn off the power now **\n\n"); - while (1) { - if (cpu_wait) - cpu_wait(); - } + return NOTIFY_DONE; } #ifdef CONFIG_KEXEC_CORE @@ -154,9 +140,17 @@ static void loongson_crash_shutdown(struct pt_regs *regs) static int __init mips_reboot_setup(void) { - _machine_restart = loongson_restart; - _machine_halt = loongson_halt; - pm_power_off = loongson_poweroff; + if (loongson_sysconf.restart_addr) { + register_sys_off_handler(SYS_OFF_MODE_RESTART, + SYS_OFF_PRIO_FIRMWARE, + firmware_restart, NULL); + } + + if (loongson_sysconf.poweroff_addr) { + register_sys_off_handler(SYS_OFF_MODE_POWER_OFF, + SYS_OFF_PRIO_FIRMWARE, + firmware_poweroff, NULL); + } 
#ifdef CONFIG_KEXEC_CORE kexec_argv = kmalloc(KEXEC_ARGV_SIZE, GFP_KERNEL); diff --git a/arch/mips/loongson64/smp.c b/arch/mips/loongson64/smp.c index 5a990cdef91a..66d049cdcf14 100644 --- a/arch/mips/loongson64/smp.c +++ b/arch/mips/loongson64/smp.c @@ -466,12 +466,25 @@ static void loongson3_smp_finish(void) static void __init loongson3_smp_setup(void) { int i = 0, num = 0; /* i: physical id, num: logical id */ + int max_cpus = 0; init_cpu_possible(cpu_none_mask); + for (i = 0; i < ARRAY_SIZE(smp_group); i++) { + if (!smp_group[i]) + break; + max_cpus += loongson_sysconf.cores_per_node; + } + + if (max_cpus < loongson_sysconf.nr_cpus) { + pr_err("SMP Groups are less than the number of CPUs\n"); + loongson_sysconf.nr_cpus = max_cpus ? max_cpus : 1; + } + /* For unified kernel, NR_CPUS is the maximum possible value, * loongson_sysconf.nr_cpus is the really present value */ + i = 0; while (i < loongson_sysconf.nr_cpus) { if (loongson_sysconf.reserved_cpus_mask & (1<<i)) { /* Reserved physical CPU cores */ @@ -492,14 +505,14 @@ static void __init loongson3_smp_setup(void) __cpu_logical_map[num] = -1; num++; } - csr_ipi_probe(); ipi_set0_regs_init(); ipi_clear0_regs_init(); ipi_status0_regs_init(); ipi_en0_regs_init(); ipi_mailbox_buf_init(); - ipi_write_enable(0); + if (smp_group[0]) + ipi_write_enable(0); cpu_set_core(&cpu_data[0], cpu_logical_map(0) % loongson_sysconf.cores_per_package); @@ -818,6 +831,9 @@ static int loongson3_disable_clock(unsigned int cpu) uint64_t core_id = cpu_core(&cpu_data[cpu]); uint64_t package_id = cpu_data[cpu].package; + if (!loongson_chipcfg[package_id] || !loongson_freqctrl[package_id]) + return 0; + if ((read_c0_prid() & PRID_REV_MASK) == PRID_REV_LOONGSON3A_R1) { LOONGSON_CHIPCFG(package_id) &= ~(1 << (12 + core_id)); } else { @@ -832,6 +848,9 @@ static int loongson3_enable_clock(unsigned int cpu) uint64_t core_id = cpu_core(&cpu_data[cpu]); uint64_t package_id = cpu_data[cpu].package; + if (!loongson_chipcfg[package_id] || !loongson_freqctrl[package_id]) + return 0; + if ((read_c0_prid() & PRID_REV_MASK) == PRID_REV_LOONGSON3A_R1) { LOONGSON_CHIPCFG(package_id) |= 1 << (12 + core_id); } else { diff --git a/arch/mips/pci/pcie-octeon.c b/arch/mips/pci/pcie-octeon.c old mode 100755 new mode 100644 diff --git a/arch/mips/sgi-ip30/ip30-console.c b/arch/mips/sgi-ip30/ip30-console.c index 7c6dcf6e73f7..a5f10097b985 100644 --- a/arch/mips/sgi-ip30/ip30-console.c +++ b/arch/mips/sgi-ip30/ip30-console.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 #include <linux/io.h> +#include <linux/processor.h> #include <asm/sn/ioc3.h> #include <asm/setup.h> diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig index dc9b902de8ea..9656e956ed13 100644 --- a/arch/parisc/Kconfig +++ b/arch/parisc/Kconfig @@ -86,6 +86,7 @@ config PARISC select HAVE_SOFTIRQ_ON_OWN_STACK if IRQSTACKS select TRACE_IRQFLAGS_SUPPORT select HAVE_FUNCTION_DESCRIPTORS if 64BIT + select PCI_MSI_ARCH_FALLBACKS if PCI_MSI help The PA-RISC microprocessor is designed by Hewlett-Packard and used diff --git a/arch/powerpc/configs/85xx-hw.config b/arch/powerpc/configs/85xx-hw.config index 524db76f47b7..8aff83217397 100644 --- a/arch/powerpc/configs/85xx-hw.config +++ b/arch/powerpc/configs/85xx-hw.config @@ -24,6 +24,7 @@ CONFIG_FS_ENET=y CONFIG_FSL_CORENET_CF=y CONFIG_FSL_DMA=y CONFIG_FSL_HV_MANAGER=y +CONFIG_FSL_IFC=y CONFIG_FSL_PQ_MDIO=y CONFIG_FSL_RIO=y CONFIG_FSL_XGMAC_MDIO=y @@ -58,6 +59,7 @@ CONFIG_INPUT_FF_MEMLESS=m CONFIG_MARVELL_PHY=y CONFIG_MDIO_BUS_MUX_GPIO=y CONFIG_MDIO_BUS_MUX_MMIOREG=y 
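
The loongson64/reset.c hunk above replaces the legacy _machine_restart/_machine_halt/pm_power_off hooks with the generic sys-off handler API, which also lets registration be skipped when the firmware did not provide an entry point. A minimal registration sketch under the same assumptions (the firmware entry pointer and function names here are illustrative, not the Loongson ones):

#include <linux/err.h>
#include <linux/notifier.h>
#include <linux/reboot.h>

/* Illustrative stand-in for a firmware-provided power-off routine. */
static void (*example_fw_poweroff_entry)(void);

static int example_fw_poweroff(struct sys_off_data *unused)
{
	example_fw_poweroff_entry();
	return NOTIFY_DONE;	/* if we ever return, later handlers still run */
}

static int __init example_reboot_setup(void)
{
	struct sys_off_handler *h;

	if (!example_fw_poweroff_entry)
		return 0;	/* nothing to register */

	/* SYS_OFF_PRIO_FIRMWARE handlers run after higher-priority ones. */
	h = register_sys_off_handler(SYS_OFF_MODE_POWER_OFF,
				     SYS_OFF_PRIO_FIRMWARE,
				     example_fw_poweroff, NULL);
	return PTR_ERR_OR_ZERO(h);
}
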
+CONFIG_MEMORY=y CONFIG_MMC_SDHCI_OF_ESDHC=y CONFIG_MMC_SDHCI_PLTFM=y CONFIG_MMC_SDHCI=y diff --git a/arch/powerpc/include/asm/guest-state-buffer.h b/arch/powerpc/include/asm/guest-state-buffer.h index 808149f31576..d107abe1468f 100644 --- a/arch/powerpc/include/asm/guest-state-buffer.h +++ b/arch/powerpc/include/asm/guest-state-buffer.h @@ -81,6 +81,7 @@ #define KVMPPC_GSID_HASHKEYR 0x1050 #define KVMPPC_GSID_HASHPKEYR 0x1051 #define KVMPPC_GSID_CTRL 0x1052 +#define KVMPPC_GSID_DPDES 0x1053 #define KVMPPC_GSID_CR 0x2000 #define KVMPPC_GSID_PIDR 0x2001 @@ -110,7 +111,7 @@ #define KVMPPC_GSE_META_COUNT (KVMPPC_GSE_META_END - KVMPPC_GSE_META_START + 1) #define KVMPPC_GSE_DW_REGS_START KVMPPC_GSID_GPR(0) -#define KVMPPC_GSE_DW_REGS_END KVMPPC_GSID_CTRL +#define KVMPPC_GSE_DW_REGS_END KVMPPC_GSID_DPDES #define KVMPPC_GSE_DW_REGS_COUNT \ (KVMPPC_GSE_DW_REGS_END - KVMPPC_GSE_DW_REGS_START + 1) diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h index 3e1e2a698c9e..10618622d7ef 100644 --- a/arch/powerpc/include/asm/kvm_book3s.h +++ b/arch/powerpc/include/asm/kvm_book3s.h @@ -594,6 +594,7 @@ static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu) \ KVMPPC_BOOK3S_VCORE_ACCESSOR(vtb, 64, KVMPPC_GSID_VTB) +KVMPPC_BOOK3S_VCORE_ACCESSOR(dpdes, 64, KVMPPC_GSID_DPDES) KVMPPC_BOOK3S_VCORE_ACCESSOR_GET(arch_compat, 32, KVMPPC_GSID_LOGICAL_PVR) KVMPPC_BOOK3S_VCORE_ACCESSOR_GET(lpcr, 64, KVMPPC_GSID_LPCR) KVMPPC_BOOK3S_VCORE_ACCESSOR_SET(tb_offset, 64, KVMPPC_GSID_TB_OFFSET) diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index 60819751e55e..0be07ed407c7 100644 --- a/arch/powerpc/kernel/prom.c +++ b/arch/powerpc/kernel/prom.c @@ -331,6 +331,7 @@ static int __init early_init_dt_scan_cpus(unsigned long node, void *data) { const char *type = of_get_flat_dt_prop(node, "device_type", NULL); + const __be32 *cpu_version = NULL; const __be32 *prop; const __be32 *intserv; int i, nthreads; @@ -420,7 +421,7 @@ static int __init early_init_dt_scan_cpus(unsigned long node, prop = of_get_flat_dt_prop(node, "cpu-version", NULL); if (prop && (be32_to_cpup(prop) & 0xff000000) == 0x0f000000) { identify_cpu(0, be32_to_cpup(prop)); - seq_buf_printf(&ppc_hw_desc, "0x%04x ", be32_to_cpup(prop)); + cpu_version = prop; } check_cpu_feature_properties(node); @@ -431,6 +432,12 @@ static int __init early_init_dt_scan_cpus(unsigned long node, } identical_pvr_fixup(node); + + // We can now add the CPU name & PVR to the hardware description + seq_buf_printf(&ppc_hw_desc, "%s 0x%04lx ", cur_cpu_spec->cpu_name, mfspr(SPRN_PVR)); + if (cpu_version) + seq_buf_printf(&ppc_hw_desc, "0x%04x ", be32_to_cpup(cpu_version)); + init_mmu_slb_size(node); #ifdef CONFIG_PPC64 @@ -881,9 +888,6 @@ void __init early_init_devtree(void *params) dt_cpu_ftrs_scan(); - // We can now add the CPU name & PVR to the hardware description - seq_buf_printf(&ppc_hw_desc, "%s 0x%04lx ", cur_cpu_spec->cpu_name, mfspr(SPRN_PVR)); - /* Retrieve CPU related informations from the flat tree * (altivec support, boot CPU ID, ...) */ diff --git a/arch/powerpc/kexec/core_64.c b/arch/powerpc/kexec/core_64.c index 72b12bc10f90..222aa326dace 100644 --- a/arch/powerpc/kexec/core_64.c +++ b/arch/powerpc/kexec/core_64.c @@ -467,9 +467,15 @@ static int add_node_props(void *fdt, int node_offset, const struct device_node * * @fdt: Flattened device tree of the kernel. * * Returns 0 on success, negative errno on error. + * + * Note: expecting no subnodes under /cpus/<node> with device_type == "cpu". 
+ * If this changes, update this function to include them. */ int update_cpus_node(void *fdt) { + int prev_node_offset; + const char *device_type; + const struct fdt_property *prop; struct device_node *cpus_node, *dn; int cpus_offset, cpus_subnode_offset, ret = 0; @@ -480,30 +486,44 @@ int update_cpus_node(void *fdt) return cpus_offset; } - if (cpus_offset > 0) { - ret = fdt_del_node(fdt, cpus_offset); + prev_node_offset = cpus_offset; + /* Delete sub-nodes of /cpus node with device_type == "cpu" */ + for (cpus_subnode_offset = fdt_first_subnode(fdt, cpus_offset); cpus_subnode_offset >= 0;) { + /* Ignore nodes that do not have a device_type property or device_type != "cpu" */ + prop = fdt_get_property(fdt, cpus_subnode_offset, "device_type", NULL); + if (!prop || strcmp(prop->data, "cpu")) { + prev_node_offset = cpus_subnode_offset; + goto next_node; + } + + ret = fdt_del_node(fdt, cpus_subnode_offset); if (ret < 0) { - pr_err("Error deleting /cpus node: %s\n", fdt_strerror(ret)); - return -EINVAL; + pr_err("Failed to delete a cpus sub-node: %s\n", fdt_strerror(ret)); + return ret; } +next_node: + if (prev_node_offset == cpus_offset) + cpus_subnode_offset = fdt_first_subnode(fdt, cpus_offset); + else + cpus_subnode_offset = fdt_next_subnode(fdt, prev_node_offset); } - /* Add cpus node to fdt */ - cpus_offset = fdt_add_subnode(fdt, fdt_path_offset(fdt, "/"), "cpus"); - if (cpus_offset < 0) { - pr_err("Error creating /cpus node: %s\n", fdt_strerror(cpus_offset)); + cpus_node = of_find_node_by_path("/cpus"); + /* Fail here to avoid kexec/kdump kernel boot hung */ + if (!cpus_node) { + pr_err("No /cpus node found\n"); return -EINVAL; } - /* Add cpus node properties */ - cpus_node = of_find_node_by_path("/cpus"); - ret = add_node_props(fdt, cpus_offset, cpus_node); - of_node_put(cpus_node); - if (ret < 0) - return ret; + /* Add all /cpus sub-nodes of device_type == "cpu" to FDT */ + for_each_child_of_node(cpus_node, dn) { + /* Ignore device nodes that do not have a device_type property + * or device_type != "cpu". + */ + device_type = of_get_property(dn, "device_type", NULL); + if (!device_type || strcmp(device_type, "cpu")) + continue; - /* Loop through all subnodes of cpus and add them to fdt */ - for_each_node_by_type(dn, "cpu") { cpus_subnode_offset = fdt_add_subnode(fdt, cpus_offset, dn->full_name); if (cpus_subnode_offset < 0) { pr_err("Unable to add %s subnode: %s\n", dn->full_name, @@ -517,6 +537,7 @@ int update_cpus_node(void *fdt) goto out; } out: + of_node_put(cpus_node); of_node_put(dn); return ret; } diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index daaf7faf21a5..d8352e4d9cdc 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -2305,7 +2305,7 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, *val = get_reg_val(id, kvmppc_get_siar_hv(vcpu)); break; case KVM_REG_PPC_SDAR: - *val = get_reg_val(id, kvmppc_get_siar_hv(vcpu)); + *val = get_reg_val(id, kvmppc_get_sdar_hv(vcpu)); break; case KVM_REG_PPC_SIER: *val = get_reg_val(id, kvmppc_get_sier_hv(vcpu, 0)); @@ -2540,7 +2540,7 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, vcpu->arch.mmcrs = set_reg_val(id, *val); break; case KVM_REG_PPC_MMCR3: - *val = get_reg_val(id, vcpu->arch.mmcr[3]); + kvmppc_set_mmcr_hv(vcpu, 3, set_reg_val(id, *val)); break; case KVM_REG_PPC_PMC1 ... 
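
The rewritten update_cpus_node() above deletes only the device_type == "cpu" children of /cpus, and it has to work around fdt_del_node() shifting the offsets of everything after the deleted node: each deletion restarts from the last offset still known to be valid. A condensed, generic sketch of that iteration pattern (not the powerpc code itself):

#include <libfdt.h>
#include <string.h>

/*
 * Delete every subnode of `parent` whose device_type equals "cpu".
 * fdt_del_node() invalidates later offsets, so after a deletion we re-fetch
 * either the first subnode or the successor of the last node we kept.
 */
static int delete_cpu_subnodes(void *fdt, int parent)
{
	const struct fdt_property *prop;
	int node, ret, prev = parent;

	node = fdt_first_subnode(fdt, parent);
	while (node >= 0) {
		prop = fdt_get_property(fdt, node, "device_type", NULL);
		if (prop && !strcmp(prop->data, "cpu")) {
			ret = fdt_del_node(fdt, node);
			if (ret < 0)
				return ret;
			/* offsets after `prev` have shifted; restart from there */
		} else {
			prev = node;	/* kept node: safe to continue from it */
		}
		node = (prev == parent) ? fdt_first_subnode(fdt, parent)
					: fdt_next_subnode(fdt, prev);
	}
	return 0;
}
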
KVM_REG_PPC_PMC8: i = id - KVM_REG_PPC_PMC1; @@ -4116,6 +4116,11 @@ static int kvmhv_vcpu_entry_nestedv2(struct kvm_vcpu *vcpu, u64 time_limit, int trap; long rc; + if (vcpu->arch.doorbell_request) { + vcpu->arch.doorbell_request = 0; + kvmppc_set_dpdes(vcpu, 1); + } + io = &vcpu->arch.nestedv2_io; msr = mfmsr(); diff --git a/arch/powerpc/kvm/book3s_hv_nestedv2.c b/arch/powerpc/kvm/book3s_hv_nestedv2.c index 1091f7a83b25..342f58314770 100644 --- a/arch/powerpc/kvm/book3s_hv_nestedv2.c +++ b/arch/powerpc/kvm/book3s_hv_nestedv2.c @@ -311,6 +311,10 @@ static int gs_msg_ops_vcpu_fill_info(struct kvmppc_gs_buff *gsb, rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.vcore->vtb); break; + case KVMPPC_GSID_DPDES: + rc = kvmppc_gse_put_u64(gsb, iden, + vcpu->arch.vcore->dpdes); + break; case KVMPPC_GSID_LPCR: rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.vcore->lpcr); @@ -543,6 +547,9 @@ static int gs_msg_ops_vcpu_refresh_info(struct kvmppc_gs_msg *gsm, case KVMPPC_GSID_VTB: vcpu->arch.vcore->vtb = kvmppc_gse_get_u64(gse); break; + case KVMPPC_GSID_DPDES: + vcpu->arch.vcore->dpdes = kvmppc_gse_get_u64(gse); + break; case KVMPPC_GSID_LPCR: vcpu->arch.vcore->lpcr = kvmppc_gse_get_u64(gse); break; diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index d32abe7fe6ab..d11767208bfc 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c @@ -1984,8 +1984,10 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, break; r = -ENXIO; - if (!xive_enabled()) + if (!xive_enabled()) { + fdput(f); break; + } r = -EPERM; dev = kvm_device_from_filp(f.file); diff --git a/arch/powerpc/kvm/test-guest-state-buffer.c b/arch/powerpc/kvm/test-guest-state-buffer.c index 4720b8dc8837..2571ccc618c9 100644 --- a/arch/powerpc/kvm/test-guest-state-buffer.c +++ b/arch/powerpc/kvm/test-guest-state-buffer.c @@ -151,7 +151,7 @@ static void test_gs_bitmap(struct kunit *test) i++; } - for (u16 iden = KVMPPC_GSID_GPR(0); iden <= KVMPPC_GSID_CTRL; iden++) { + for (u16 iden = KVMPPC_GSID_GPR(0); iden <= KVMPPC_GSE_DW_REGS_END; iden++) { kvmppc_gsbm_set(&gsbm, iden); kvmppc_gsbm_set(&gsbm1, iden); KUNIT_EXPECT_TRUE(test, kvmppc_gsbm_test(&gsbm, iden)); diff --git a/arch/powerpc/mm/nohash/8xx.c b/arch/powerpc/mm/nohash/8xx.c index 43d4842bb1c7..d93433e26ded 100644 --- a/arch/powerpc/mm/nohash/8xx.c +++ b/arch/powerpc/mm/nohash/8xx.c @@ -94,7 +94,8 @@ static int __ref __early_map_kernel_hugepage(unsigned long va, phys_addr_t pa, return -EINVAL; set_huge_pte_at(&init_mm, va, ptep, - pte_mkhuge(pfn_pte(pa >> PAGE_SHIFT, prot)), psize); + pte_mkhuge(pfn_pte(pa >> PAGE_SHIFT, prot)), + 1UL << mmu_psize_to_shift(psize)); return 0; } diff --git a/arch/powerpc/xmon/ppc-dis.c b/arch/powerpc/xmon/ppc-dis.c index 75fa98221d48..af105e1bc3fc 100644 --- a/arch/powerpc/xmon/ppc-dis.c +++ b/arch/powerpc/xmon/ppc-dis.c @@ -122,32 +122,21 @@ int print_insn_powerpc (unsigned long insn, unsigned long memaddr) bool insn_is_short; ppc_cpu_t dialect; - dialect = PPC_OPCODE_PPC | PPC_OPCODE_COMMON - | PPC_OPCODE_64 | PPC_OPCODE_POWER4 | PPC_OPCODE_ALTIVEC; + dialect = PPC_OPCODE_PPC | PPC_OPCODE_COMMON; - if (cpu_has_feature(CPU_FTRS_POWER5)) - dialect |= PPC_OPCODE_POWER5; + if (IS_ENABLED(CONFIG_PPC64)) + dialect |= PPC_OPCODE_64 | PPC_OPCODE_POWER4 | PPC_OPCODE_CELL | + PPC_OPCODE_POWER5 | PPC_OPCODE_POWER6 | PPC_OPCODE_POWER7 | PPC_OPCODE_POWER8 | + PPC_OPCODE_POWER9; - if (cpu_has_feature(CPU_FTRS_CELL)) - dialect |= (PPC_OPCODE_CELL | PPC_OPCODE_ALTIVEC); + if (cpu_has_feature(CPU_FTR_TM)) + dialect |= 
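
The powerpc/kvm/powerpc.c hunk above is the usual "drop the file reference on every early exit" fix: once fdget() has succeeded, each bail-out path needs a matching fdput(). The general shape of the pattern, simplified and with hypothetical helpers (some_feature_enabled() and do_work() are stand-ins, not kernel APIs):

#include <linux/errno.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/types.h>

static bool some_feature_enabled(void) { return true; }	/* hypothetical */
static long do_work(struct file *filp) { return 0; }		/* hypothetical */

static long example_op_on_fd(unsigned int fd)
{
	struct fd f = fdget(fd);
	long r;

	if (!f.file)
		return -EBADF;

	r = -ENXIO;
	if (!some_feature_enabled())
		goto out;		/* reference is held: must still fdput() */

	r = do_work(f.file);
out:
	fdput(f);
	return r;
}
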
PPC_OPCODE_HTM; - if (cpu_has_feature(CPU_FTRS_POWER6)) - dialect |= (PPC_OPCODE_POWER5 | PPC_OPCODE_POWER6 | PPC_OPCODE_ALTIVEC); + if (cpu_has_feature(CPU_FTR_ALTIVEC)) + dialect |= PPC_OPCODE_ALTIVEC | PPC_OPCODE_ALTIVEC2; - if (cpu_has_feature(CPU_FTRS_POWER7)) - dialect |= (PPC_OPCODE_POWER5 | PPC_OPCODE_POWER6 | PPC_OPCODE_POWER7 - | PPC_OPCODE_ALTIVEC | PPC_OPCODE_VSX); - - if (cpu_has_feature(CPU_FTRS_POWER8)) - dialect |= (PPC_OPCODE_POWER5 | PPC_OPCODE_POWER6 | PPC_OPCODE_POWER7 - | PPC_OPCODE_POWER8 | PPC_OPCODE_HTM - | PPC_OPCODE_ALTIVEC | PPC_OPCODE_ALTIVEC2 | PPC_OPCODE_VSX); - - if (cpu_has_feature(CPU_FTRS_POWER9)) - dialect |= (PPC_OPCODE_POWER5 | PPC_OPCODE_POWER6 | PPC_OPCODE_POWER7 - | PPC_OPCODE_POWER8 | PPC_OPCODE_POWER9 | PPC_OPCODE_HTM - | PPC_OPCODE_ALTIVEC | PPC_OPCODE_ALTIVEC2 - | PPC_OPCODE_VSX | PPC_OPCODE_VSX3); + if (cpu_has_feature(CPU_FTR_VSX)) + dialect |= PPC_OPCODE_VSX | PPC_OPCODE_VSX3; /* Get the major opcode of the insn. */ opcode = NULL; diff --git a/arch/riscv/kernel/head.S b/arch/riscv/kernel/head.S index 4236a69c35cb..a00f7523cb91 100644 --- a/arch/riscv/kernel/head.S +++ b/arch/riscv/kernel/head.S @@ -165,9 +165,20 @@ secondary_start_sbi: #endif call .Lsetup_trap_vector scs_load_current - tail smp_callin + call smp_callin #endif /* CONFIG_SMP */ +.align 2 +.Lsecondary_park: + /* + * Park this hart if we: + * - have too many harts on CONFIG_RISCV_BOOT_SPINWAIT + * - receive an early trap, before setup_trap_vector finished + * - fail in smp_callin(), as a successful one wouldn't return + */ + wfi + j .Lsecondary_park + .align 2 .Lsetup_trap_vector: /* Set trap vector to exception handler */ @@ -181,12 +192,6 @@ secondary_start_sbi: csrw CSR_SCRATCH, zero ret -.align 2 -.Lsecondary_park: - /* We lack SMP support or have too many harts, so park this hart */ - wfi - j .Lsecondary_park - SYM_CODE_END(_start) SYM_CODE_START(_start_kernel) diff --git a/arch/riscv/kernel/smpboot.c b/arch/riscv/kernel/smpboot.c index 1319b29ce3b5..19baf0d574d3 100644 --- a/arch/riscv/kernel/smpboot.c +++ b/arch/riscv/kernel/smpboot.c @@ -214,6 +214,15 @@ asmlinkage __visible void smp_callin(void) struct mm_struct *mm = &init_mm; unsigned int curr_cpuid = smp_processor_id(); + if (has_vector()) { + /* + * Return as early as possible so the hart with a mismatching + * vlen won't boot. + */ + if (riscv_v_setup_vsize()) + return; + } + /* All kernel threads share the same mm context. */ mmgrab(mm); current->active_mm = mm; @@ -226,11 +235,6 @@ asmlinkage __visible void smp_callin(void) numa_add_cpu(curr_cpuid); set_cpu_online(curr_cpuid, true); - if (has_vector()) { - if (riscv_v_setup_vsize()) - elf_hwcap &= ~COMPAT_HWCAP_ISA_V; - } - riscv_user_isa_enable(); /* diff --git a/arch/riscv/net/bpf_jit_comp64.c b/arch/riscv/net/bpf_jit_comp64.c index 79a001d5533e..212b015e09b7 100644 --- a/arch/riscv/net/bpf_jit_comp64.c +++ b/arch/riscv/net/bpf_jit_comp64.c @@ -16,6 +16,8 @@ #include "bpf_jit.h" #define RV_FENTRY_NINSNS 2 +/* imm that allows emit_imm to emit max count insns */ +#define RV_MAX_COUNT_IMM 0x7FFF7FF7FF7FF7FF #define RV_REG_TCC RV_REG_A6 #define RV_REG_TCC_SAVED RV_REG_S6 /* Store A6 in S6 if program do calls */ @@ -915,7 +917,7 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, orig_call += RV_FENTRY_NINSNS * 4; if (flags & BPF_TRAMP_F_CALL_ORIG) { - emit_imm(RV_REG_A0, (const s64)im, ctx); + emit_imm(RV_REG_A0, ctx->insns ? 
(const s64)im : RV_MAX_COUNT_IMM, ctx); ret = emit_call((const u64)__bpf_tramp_enter, true, ctx); if (ret) return ret; @@ -976,7 +978,7 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, if (flags & BPF_TRAMP_F_CALL_ORIG) { im->ip_epilogue = ctx->insns + ctx->ninsns; - emit_imm(RV_REG_A0, (const s64)im, ctx); + emit_imm(RV_REG_A0, ctx->insns ? (const s64)im : RV_MAX_COUNT_IMM, ctx); ret = emit_call((const u64)__bpf_tramp_exit, true, ctx); if (ret) goto out; @@ -1045,6 +1047,7 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, { int ret; struct rv_jit_context ctx; + u32 size = image_end - image; ctx.ninsns = 0; /* @@ -1058,11 +1061,16 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, ctx.ro_insns = image; ret = __arch_prepare_bpf_trampoline(im, m, tlinks, func_addr, flags, &ctx); if (ret < 0) - return ret; + goto out; - bpf_flush_icache(ctx.insns, ctx.insns + ctx.ninsns); + if (WARN_ON(size < ninsns_rvoff(ctx.ninsns))) { + ret = -E2BIG; + goto out; + } - return ninsns_rvoff(ret); + bpf_flush_icache(image, image_end); +out: + return ret < 0 ? ret : size; } int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx, diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c index 1434642e9cba..6968be98af11 100644 --- a/arch/s390/kernel/perf_cpum_cf.c +++ b/arch/s390/kernel/perf_cpum_cf.c @@ -556,25 +556,31 @@ static int cfdiag_diffctr(struct cpu_cf_events *cpuhw, unsigned long auth) struct cf_trailer_entry *trailer_start, *trailer_stop; struct cf_ctrset_entry *ctrstart, *ctrstop; size_t offset = 0; + int i; - auth &= (1 << CPUMF_LCCTL_ENABLE_SHIFT) - 1; - do { + for (i = CPUMF_CTR_SET_BASIC; i < CPUMF_CTR_SET_MAX; ++i) { ctrstart = (struct cf_ctrset_entry *)(cpuhw->start + offset); ctrstop = (struct cf_ctrset_entry *)(cpuhw->stop + offset); + /* Counter set not authorized */ + if (!(auth & cpumf_ctr_ctl[i])) + continue; + /* Counter set size zero was not saved */ + if (!cpum_cf_read_setsize(i)) + continue; + if (memcmp(ctrstop, ctrstart, sizeof(*ctrstop))) { pr_err_once("cpum_cf_diag counter set compare error " "in set %i\n", ctrstart->set); return 0; } - auth &= ~cpumf_ctr_ctl[ctrstart->set]; if (ctrstart->def == CF_DIAG_CTRSET_DEF) { cfdiag_diffctrset((u64 *)(ctrstart + 1), (u64 *)(ctrstop + 1), ctrstart->ctr); offset += ctrstart->ctr * sizeof(u64) + sizeof(*ctrstart); } - } while (ctrstart->def && auth); + } /* Save time_stamp from start of event in stop's trailer */ trailer_start = (struct cf_trailer_entry *)(cpuhw->start + offset); diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index 90c2c786bb35..610e6f794511 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c @@ -149,7 +149,7 @@ unsigned long __bootdata_preserved(max_mappable); struct physmem_info __bootdata(physmem_info); struct vm_layout __bootdata_preserved(vm_layout); -EXPORT_SYMBOL_GPL(vm_layout); +EXPORT_SYMBOL(vm_layout); int __bootdata_preserved(__kaslr_enabled); unsigned int __bootdata_preserved(zlib_dfltcc_support); EXPORT_SYMBOL(zlib_dfltcc_support); diff --git a/arch/s390/kernel/uv.c b/arch/s390/kernel/uv.c index 265fea37e030..016993e9eb72 100644 --- a/arch/s390/kernel/uv.c +++ b/arch/s390/kernel/uv.c @@ -318,6 +318,13 @@ int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb) rc = make_folio_secure(folio, uvcb); folio_unlock(folio); } + + /* + * Once we drop the PTL, the folio may get unmapped and + * freed immediately. We need a temporary reference. 
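
The RV_MAX_COUNT_IMM change above exists because the trampoline is laid out in two passes: a sizing pass with ctx->insns == NULL and an emission pass into the real image. emit_imm() produces a variable number of instructions depending on the immediate, and the final im address is not known while sizing, so the sizing pass substitutes an immediate that forces the maximum expansion; the added WARN_ON then catches any remaining underestimate. A toy two-pass emitter showing the same idea (plain userspace C, not the kernel JIT):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct ctx {
	uint32_t *buf;		/* NULL during the sizing pass */
	unsigned int n;		/* instructions emitted so far */
};

static void emit(struct ctx *c, uint32_t insn)
{
	if (c->buf)
		c->buf[c->n] = insn;
	c->n++;
}

/* Pretend small immediates need one instruction and large ones need two. */
static void emit_imm(struct ctx *c, uint64_t imm)
{
	emit(c, 0 /* low half */);
	if (imm >> 32)
		emit(c, 1 /* high half */);
}

#define WORST_CASE_IMM UINT64_MAX	/* forces the longest encoding */

static uint32_t *build(uint64_t runtime_addr, unsigned int *out_n)
{
	struct ctx c = { .buf = NULL, .n = 0 };
	uint32_t *image;

	/* pass 1: size with the worst case, the real address is unknown here */
	emit_imm(&c, WORST_CASE_IMM);
	image = calloc(c.n, sizeof(*image));
	if (!image)
		return NULL;

	/* pass 2: emit for real; may use fewer instructions, never more */
	c.buf = image;
	c.n = 0;
	emit_imm(&c, runtime_addr);
	*out_n = c.n;
	return image;
}

int main(void)
{
	unsigned int n;
	uint32_t *img = build(0x1234, &n);

	printf("emitted %u instruction(s)\n", img ? n : 0);
	free(img);
	return 0;
}
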
+ */ + if (rc == -EAGAIN) + folio_get(folio); } unlock: pte_unmap_unlock(ptep, ptelock); @@ -330,6 +337,7 @@ int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb) * completion, this is just a useless check, but it is safe. */ folio_wait_writeback(folio); + folio_put(folio); } else if (rc == -EBUSY) { /* * If we have tried a local drain and the folio refcount diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 54b5b2565df8..4a74effe6870 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -5749,6 +5749,9 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, { gpa_t size; + if (kvm_is_ucontrol(kvm)) + return -EINVAL; + /* When we are protected, we should not change the memory slots */ if (kvm_s390_pv_get_handle(kvm)) return -EINVAL; diff --git a/arch/s390/pci/pci_irq.c b/arch/s390/pci/pci_irq.c index 0ef83b6ac0db..84482a921332 100644 --- a/arch/s390/pci/pci_irq.c +++ b/arch/s390/pci/pci_irq.c @@ -268,33 +268,20 @@ static void zpci_floating_irq_handler(struct airq_struct *airq, } } -int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) +static int __alloc_airq(struct zpci_dev *zdev, int msi_vecs, + unsigned long *bit) { - struct zpci_dev *zdev = to_zpci(pdev); - unsigned int hwirq, msi_vecs, cpu; - unsigned long bit; - struct msi_desc *msi; - struct msi_msg msg; - int cpu_addr; - int rc, irq; - - zdev->aisb = -1UL; - zdev->msi_first_bit = -1U; - if (type == PCI_CAP_ID_MSI && nvec > 1) - return 1; - msi_vecs = min_t(unsigned int, nvec, zdev->max_msi); - if (irq_delivery == DIRECTED) { /* Allocate cpu vector bits */ - bit = airq_iv_alloc(zpci_ibv[0], msi_vecs); - if (bit == -1UL) + *bit = airq_iv_alloc(zpci_ibv[0], msi_vecs); + if (*bit == -1UL) return -EIO; } else { /* Allocate adapter summary indicator bit */ - bit = airq_iv_alloc_bit(zpci_sbv); - if (bit == -1UL) + *bit = airq_iv_alloc_bit(zpci_sbv); + if (*bit == -1UL) return -EIO; - zdev->aisb = bit; + zdev->aisb = *bit; /* Create adapter interrupt vector */ zdev->aibv = airq_iv_create(msi_vecs, AIRQ_IV_DATA | AIRQ_IV_BITLOCK, NULL); @@ -302,27 +289,66 @@ int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) return -ENOMEM; /* Wire up shortcut pointer */ - zpci_ibv[bit] = zdev->aibv; + zpci_ibv[*bit] = zdev->aibv; /* Each function has its own interrupt vector */ - bit = 0; + *bit = 0; } + return 0; +} - /* Request MSI interrupts */ +int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) +{ + unsigned int hwirq, msi_vecs, irqs_per_msi, i, cpu; + struct zpci_dev *zdev = to_zpci(pdev); + struct msi_desc *msi; + struct msi_msg msg; + unsigned long bit; + int cpu_addr; + int rc, irq; + + zdev->aisb = -1UL; + zdev->msi_first_bit = -1U; + + msi_vecs = min_t(unsigned int, nvec, zdev->max_msi); + if (msi_vecs < nvec) { + pr_info("%s requested %d irqs, allocate system limit of %d", + pci_name(pdev), nvec, zdev->max_msi); + } + + rc = __alloc_airq(zdev, msi_vecs, &bit); + if (rc < 0) + return rc; + + /* + * Request MSI interrupts: + * When using MSI, nvec_used interrupt sources and their irq + * descriptors are controlled through one msi descriptor. + * Thus the outer loop over msi descriptors shall run only once, + * while two inner loops iterate over the interrupt vectors. + * When using MSI-X, each interrupt vector/irq descriptor + * is bound to exactly one msi descriptor (nvec_used is one). + * So the inner loops are executed once, while the outer iterates + * over the MSI-X descriptors. 
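
The s390/kernel/uv.c change above is the standard "take a temporary reference before dropping the lock" pattern: while the PTL is held the mapping keeps the folio alive, but after pte_unmap_unlock() only an explicit reference does. The pattern in isolation, as a hedged sketch rather than the uv.c code:

#include <linux/mm.h>
#include <linux/pagemap.h>

/*
 * Sketch only: pin the folio before dropping the page-table lock, so that
 * waiting for writeback afterwards cannot race with the folio being
 * unmapped and freed.
 */
static void wait_writeback_outside_ptl(struct folio *folio,
				       pte_t *ptep, spinlock_t *ptl)
{
	folio_get(folio);			/* temporary reference */
	pte_unmap_unlock(ptep, ptl);		/* folio may be unmapped now */
	folio_wait_writeback(folio);		/* safe: we still hold a ref */
	folio_put(folio);			/* drop the temporary reference */
}
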
+ */ hwirq = bit; msi_for_each_desc(msi, &pdev->dev, MSI_DESC_NOTASSOCIATED) { - rc = -EIO; if (hwirq - bit >= msi_vecs) break; - irq = __irq_alloc_descs(-1, 0, 1, 0, THIS_MODULE, - (irq_delivery == DIRECTED) ? - msi->affinity : NULL); + irqs_per_msi = min_t(unsigned int, msi_vecs, msi->nvec_used); + irq = __irq_alloc_descs(-1, 0, irqs_per_msi, 0, THIS_MODULE, + (irq_delivery == DIRECTED) ? + msi->affinity : NULL); if (irq < 0) return -ENOMEM; - rc = irq_set_msi_desc(irq, msi); - if (rc) - return rc; - irq_set_chip_and_handler(irq, &zpci_irq_chip, - handle_percpu_irq); + + for (i = 0; i < irqs_per_msi; i++) { + rc = irq_set_msi_desc_off(irq, i, msi); + if (rc) + return rc; + irq_set_chip_and_handler(irq + i, &zpci_irq_chip, + handle_percpu_irq); + } + msg.data = hwirq - bit; if (irq_delivery == DIRECTED) { if (msi->affinity) @@ -335,31 +361,35 @@ int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) msg.address_lo |= (cpu_addr << 8); for_each_possible_cpu(cpu) { - airq_iv_set_data(zpci_ibv[cpu], hwirq, irq); + for (i = 0; i < irqs_per_msi; i++) + airq_iv_set_data(zpci_ibv[cpu], + hwirq + i, irq + i); } } else { msg.address_lo = zdev->msi_addr & 0xffffffff; - airq_iv_set_data(zdev->aibv, hwirq, irq); + for (i = 0; i < irqs_per_msi; i++) + airq_iv_set_data(zdev->aibv, hwirq + i, irq + i); } msg.address_hi = zdev->msi_addr >> 32; pci_write_msi_msg(irq, &msg); - hwirq++; + hwirq += irqs_per_msi; } zdev->msi_first_bit = bit; - zdev->msi_nr_irqs = msi_vecs; + zdev->msi_nr_irqs = hwirq - bit; rc = zpci_set_irq(zdev); if (rc) return rc; - return (msi_vecs == nvec) ? 0 : msi_vecs; + return (zdev->msi_nr_irqs == nvec) ? 0 : zdev->msi_nr_irqs; } void arch_teardown_msi_irqs(struct pci_dev *pdev) { struct zpci_dev *zdev = to_zpci(pdev); struct msi_desc *msi; + unsigned int i; int rc; /* Disable interrupts */ @@ -369,8 +399,10 @@ void arch_teardown_msi_irqs(struct pci_dev *pdev) /* Release MSI interrupts */ msi_for_each_desc(msi, &pdev->dev, MSI_DESC_ASSOCIATED) { - irq_set_msi_desc(msi->irq, NULL); - irq_free_desc(msi->irq); + for (i = 0; i < msi->nvec_used; i++) { + irq_set_msi_desc(msi->irq + i, NULL); + irq_free_desc(msi->irq + i); + } msi->msg.address_lo = 0; msi->msg.address_hi = 0; msi->msg.data = 0; diff --git a/arch/sparc/include/asm/oplib_64.h b/arch/sparc/include/asm/oplib_64.h index a67abebd4359..1b86d02a8455 100644 --- a/arch/sparc/include/asm/oplib_64.h +++ b/arch/sparc/include/asm/oplib_64.h @@ -247,6 +247,7 @@ void prom_sun4v_guest_soft_state(void); int prom_ihandle2path(int handle, char *buffer, int bufsize); /* Client interface level routines. */ +void prom_cif_init(void *cif_handler); void p1275_cmd_direct(unsigned long *); #endif /* !(__SPARC64_OPLIB_H) */ diff --git a/arch/sparc/prom/init_64.c b/arch/sparc/prom/init_64.c index 103aa9104318..f7b8a1a865b8 100644 --- a/arch/sparc/prom/init_64.c +++ b/arch/sparc/prom/init_64.c @@ -26,9 +26,6 @@ phandle prom_chosen_node; * routines in the prom library. * It gets passed the pointer to the PROM vector. 
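
The s390 pci_irq.c rework above handles the fact that plain MSI packs several vectors behind a single msi_desc (nvec_used > 1), while MSI-X uses one descriptor per vector, hence the extra inner loops over irqs_per_msi. Drivers never see that distinction; they go through the generic vector-allocation API. A hedged consumer-side sketch (the function and device-name strings are illustrative):

#include <linux/interrupt.h>
#include <linux/pci.h>

/* Hypothetical driver snippet: request up to four vectors, MSI-X preferred
 * with multi-MSI as fallback, then wire a handler to each one. */
static int example_setup_irqs(struct pci_dev *pdev, irq_handler_t handler,
			      void *drvdata)
{
	int nvec, i, ret;

	nvec = pci_alloc_irq_vectors(pdev, 1, 4, PCI_IRQ_MSIX | PCI_IRQ_MSI);
	if (nvec < 0)
		return nvec;

	for (i = 0; i < nvec; i++) {
		/* pci_irq_vector() maps a vector index to a Linux irq number */
		ret = request_irq(pci_irq_vector(pdev, i), handler, 0,
				  "example-dev", drvdata);
		if (ret)
			return ret;	/* sketch: real code would unwind */
	}
	return nvec;
}
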
*/ - -extern void prom_cif_init(void *); - void __init prom_init(void *cif_handler) { phandle node; diff --git a/arch/sparc/prom/p1275.c b/arch/sparc/prom/p1275.c index 889aa602f8d8..51c3f984bbf7 100644 --- a/arch/sparc/prom/p1275.c +++ b/arch/sparc/prom/p1275.c @@ -49,7 +49,7 @@ void p1275_cmd_direct(unsigned long *args) local_irq_restore(flags); } -void prom_cif_init(void *cif_handler, void *cif_stack) +void prom_cif_init(void *cif_handler) { p1275buf.prom_cif_handler = (void (*)(long *))cif_handler; } diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c index ef805eaa9e01..093c87879d08 100644 --- a/arch/um/drivers/ubd_kern.c +++ b/arch/um/drivers/ubd_kern.c @@ -447,43 +447,31 @@ static int bulk_req_safe_read( return n; } -/* Called without dev->lock held, and only in interrupt context. */ -static void ubd_handler(void) +static void ubd_end_request(struct io_thread_req *io_req) { - int n; - int count; - - while(1){ - n = bulk_req_safe_read( - thread_fd, - irq_req_buffer, - &irq_remainder, - &irq_remainder_size, - UBD_REQ_BUFFER_SIZE - ); - if (n < 0) { - if(n == -EAGAIN) - break; - printk(KERN_ERR "spurious interrupt in ubd_handler, " - "err = %d\n", -n); - return; - } - for (count = 0; count < n/sizeof(struct io_thread_req *); count++) { - struct io_thread_req *io_req = (*irq_req_buffer)[count]; - - if ((io_req->error == BLK_STS_NOTSUPP) && (req_op(io_req->req) == REQ_OP_DISCARD)) { - blk_queue_max_discard_sectors(io_req->req->q, 0); - blk_queue_max_write_zeroes_sectors(io_req->req->q, 0); - } - blk_mq_end_request(io_req->req, io_req->error); - kfree(io_req); - } + if (io_req->error == BLK_STS_NOTSUPP) { + if (req_op(io_req->req) == REQ_OP_DISCARD) + blk_queue_max_discard_sectors(io_req->req->q, 0); + else if (req_op(io_req->req) == REQ_OP_WRITE_ZEROES) + blk_queue_max_write_zeroes_sectors(io_req->req->q, 0); } + blk_mq_end_request(io_req->req, io_req->error); + kfree(io_req); } static irqreturn_t ubd_intr(int irq, void *dev) { - ubd_handler(); + int len, i; + + while ((len = bulk_req_safe_read(thread_fd, irq_req_buffer, + &irq_remainder, &irq_remainder_size, + UBD_REQ_BUFFER_SIZE)) >= 0) { + for (i = 0; i < len / sizeof(struct io_thread_req *); i++) + ubd_end_request((*irq_req_buffer)[i]); + } + + if (len < 0 && len != -EAGAIN) + pr_err("spurious interrupt in %s, err = %d\n", __func__, len); return IRQ_HANDLED; } diff --git a/arch/um/kernel/time.c b/arch/um/kernel/time.c index a8bfe8be1526..5b5fd8f68d9c 100644 --- a/arch/um/kernel/time.c +++ b/arch/um/kernel/time.c @@ -875,9 +875,9 @@ static int setup_time_travel_start(char *str) return 1; } -__setup("time-travel-start", setup_time_travel_start); +__setup("time-travel-start=", setup_time_travel_start); __uml_help(setup_time_travel_start, -"time-travel-start=<seconds>\n" +"time-travel-start=<nanoseconds>\n" "Configure the UML instance's wall clock to start at this value rather than\n" "the host's wall clock at the time of UML boot.\n"); #endif diff --git a/arch/um/os-Linux/signal.c b/arch/um/os-Linux/signal.c index 787cfb9a0308..b11ed66c8bb0 100644 --- a/arch/um/os-Linux/signal.c +++ b/arch/um/os-Linux/signal.c @@ -8,6 +8,7 @@ #include <stdlib.h> #include <stdarg.h> +#include <stdbool.h> #include <errno.h> #include <signal.h> #include <string.h> @@ -65,9 +66,7 @@ static void sig_handler_common(int sig, struct siginfo *si, mcontext_t *mc) int signals_enabled; #ifdef UML_CONFIG_UML_TIME_TRAVEL_SUPPORT -static int signals_blocked; -#else -#define signals_blocked 0 +static int signals_blocked, signals_blocked_pending; #endif 
static unsigned int signals_pending; static unsigned int signals_active = 0; @@ -76,14 +75,27 @@ static void sig_handler(int sig, struct siginfo *si, mcontext_t *mc) { int enabled = signals_enabled; - if ((signals_blocked || !enabled) && (sig == SIGIO)) { +#ifdef UML_CONFIG_UML_TIME_TRAVEL_SUPPORT + if ((signals_blocked || + __atomic_load_n(&signals_blocked_pending, __ATOMIC_SEQ_CST)) && + (sig == SIGIO)) { + /* increment so unblock will do another round */ + __atomic_add_fetch(&signals_blocked_pending, 1, + __ATOMIC_SEQ_CST); + return; + } +#endif + + if (!enabled && (sig == SIGIO)) { /* * In TT_MODE_EXTERNAL, need to still call time-travel - * handlers unless signals are also blocked for the - * external time message processing. This will mark - * signals_pending by itself (only if necessary.) + * handlers. This will mark signals_pending by itself + * (only if necessary.) + * Note we won't get here if signals are hard-blocked + * (which is handled above), in that case the hard- + * unblock will handle things. */ - if (!signals_blocked && time_travel_mode == TT_MODE_EXTERNAL) + if (time_travel_mode == TT_MODE_EXTERNAL) sigio_run_timetravel_handlers(); else signals_pending |= SIGIO_MASK; @@ -380,33 +392,99 @@ int um_set_signals_trace(int enable) #ifdef UML_CONFIG_UML_TIME_TRAVEL_SUPPORT void mark_sigio_pending(void) { + /* + * It would seem that this should be atomic so + * it isn't a read-modify-write with a signal + * that could happen in the middle, losing the + * value set by the signal. + * + * However, this function is only called when in + * time-travel=ext simulation mode, in which case + * the only signal ever pending is SIGIO, which + * is blocked while this can be called, and the + * timer signal (SIGALRM) cannot happen. + */ signals_pending |= SIGIO_MASK; } void block_signals_hard(void) { - if (signals_blocked) - return; - signals_blocked = 1; + signals_blocked++; barrier(); } void unblock_signals_hard(void) { + static bool unblocking; + if (!signals_blocked) + panic("unblocking signals while not blocked"); + + if (--signals_blocked) return; - /* Must be set to 0 before we check the pending bits etc. */ - signals_blocked = 0; + /* + * Must be set to 0 before we check pending so the + * SIGIO handler will run as normal unless we're still + * going to process signals_blocked_pending. + */ barrier(); - if (signals_pending && signals_enabled) { - /* this is a bit inefficient, but that's not really important */ - block_signals(); - unblock_signals(); - } else if (signals_pending & SIGIO_MASK) { - /* we need to run time-travel handlers even if not enabled */ - sigio_run_timetravel_handlers(); + /* + * Note that block_signals_hard()/unblock_signals_hard() can be called + * within the unblock_signals()/sigio_run_timetravel_handlers() below. + * This would still be prone to race conditions since it's actually a + * call _within_ e.g. vu_req_read_message(), where we observed this + * issue, which loops. Thus, if the inner call handles the recorded + * pending signals, we can get out of the inner call with the real + * signal hander no longer blocked, and still have a race. Thus don't + * handle unblocking in the inner call, if it happens, but only in + * the outermost call - 'unblocking' serves as an ownership for the + * signals_blocked_pending decrement. 
+ */ + if (unblocking) + return; + unblocking = true; + + while (__atomic_load_n(&signals_blocked_pending, __ATOMIC_SEQ_CST)) { + if (signals_enabled) { + /* signals are enabled so we can touch this */ + signals_pending |= SIGIO_MASK; + /* + * this is a bit inefficient, but that's + * not really important + */ + block_signals(); + unblock_signals(); + } else { + /* + * we need to run time-travel handlers even + * if not enabled + */ + sigio_run_timetravel_handlers(); + } + + /* + * The decrement of signals_blocked_pending must be atomic so + * that the signal handler will either happen before or after + * the decrement, not during a read-modify-write: + * - If it happens before, it can increment it and we'll + * decrement it and do another round in the loop. + * - If it happens after it'll see 0 for both signals_blocked + * and signals_blocked_pending and thus run the handler as + * usual (subject to signals_enabled, but that's unrelated.) + * + * Note that a call to unblock_signals_hard() within the calls + * to unblock_signals() or sigio_run_timetravel_handlers() above + * will do nothing due to the 'unblocking' state, so this cannot + * underflow as the only one decrementing will be the outermost + * one. + */ + if (__atomic_sub_fetch(&signals_blocked_pending, 1, + __ATOMIC_SEQ_CST) < 0) + panic("signals_blocked_pending underflow"); } + + unblocking = false; } #endif diff --git a/arch/x86/Kconfig.assembler b/arch/x86/Kconfig.assembler index 59aedf32c4ea..6d20a6ce0507 100644 --- a/arch/x86/Kconfig.assembler +++ b/arch/x86/Kconfig.assembler @@ -36,6 +36,6 @@ config AS_VPCLMULQDQ Supported by binutils >= 2.30 and LLVM integrated assembler config AS_WRUSS - def_bool $(as-instr,wrussq %rax$(comma)(%rbx)) + def_bool $(as-instr64,wrussq %rax$(comma)(%rbx)) help Supported by binutils >= 2.31 and LLVM integrated assembler diff --git a/arch/x86/Makefile.um b/arch/x86/Makefile.um index 2106a2bd152b..a46b1397ad01 100644 --- a/arch/x86/Makefile.um +++ b/arch/x86/Makefile.um @@ -9,6 +9,7 @@ core-y += arch/x86/crypto/ # ifeq ($(CONFIG_CC_IS_CLANG),y) KBUILD_CFLAGS += -mno-sse -mno-mmx -mno-sse2 -mno-3dnow -mno-avx +KBUILD_RUSTFLAGS += --target=$(objtree)/scripts/target.json KBUILD_RUSTFLAGS += -Ctarget-feature=-sse,-sse2,-sse3,-ssse3,-sse4.1,-sse4.2,-avx,-avx2 endif diff --git a/arch/x86/entry/syscall_32.c b/arch/x86/entry/syscall_32.c index c2235bae17ef..8cc9950d7104 100644 --- a/arch/x86/entry/syscall_32.c +++ b/arch/x86/entry/syscall_32.c @@ -14,9 +14,12 @@ #endif #define __SYSCALL(nr, sym) extern long __ia32_##sym(const struct pt_regs *); - +#define __SYSCALL_NORETURN(nr, sym) extern long __noreturn __ia32_##sym(const struct pt_regs *); #include <asm/syscalls_32.h> -#undef __SYSCALL +#undef __SYSCALL + +#undef __SYSCALL_NORETURN +#define __SYSCALL_NORETURN __SYSCALL /* * The sys_call_table[] is no longer used for system calls, but @@ -28,11 +31,10 @@ const sys_call_ptr_t sys_call_table[] = { #include <asm/syscalls_32.h> }; -#undef __SYSCALL +#undef __SYSCALL #endif #define __SYSCALL(nr, sym) case nr: return __ia32_##sym(regs); - long ia32_sys_call(const struct pt_regs *regs, unsigned int nr) { switch (nr) { diff --git a/arch/x86/entry/syscall_64.c b/arch/x86/entry/syscall_64.c index 33b3f09e6f15..ba8354424860 100644 --- a/arch/x86/entry/syscall_64.c +++ b/arch/x86/entry/syscall_64.c @@ -8,8 +8,12 @@ #include <asm/syscall.h> #define __SYSCALL(nr, sym) extern long __x64_##sym(const struct pt_regs *); +#define __SYSCALL_NORETURN(nr, sym) extern long __noreturn __x64_##sym(const struct pt_regs *); 
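
The arch/um/os-Linux/signal.c rework above uses the compiler's __atomic_* builtins so that the SIGIO handler and the hard-unblock path never do a racy read-modify-write on signals_blocked_pending, and the 'unblocking' flag keeps nested unblocks from draining the counter twice. A small, runnable userspace demo of the same defer-and-replay counter (illustrative only, not UML code):

#include <signal.h>
#include <stdio.h>

static volatile sig_atomic_t blocked;	/* set/cleared outside the handler */
static int pending;			/* shared with the handler: atomics only */

static void on_sigio(int sig)
{
	(void)sig;
	if (blocked) {
		/* record the event; the unblock path replays it later */
		__atomic_add_fetch(&pending, 1, __ATOMIC_SEQ_CST);
		return;
	}
	/* normally the event would be handled right here */
}

static void unblock_and_replay(void)
{
	blocked = 0;
	__atomic_thread_fence(__ATOMIC_SEQ_CST);

	while (__atomic_load_n(&pending, __ATOMIC_SEQ_CST) > 0) {
		/* ... handle one deferred event ... */
		if (__atomic_sub_fetch(&pending, 1, __ATOMIC_SEQ_CST) < 0)
			break;	/* underflow would be a bug, as in the patch */
	}
}

int main(void)
{
	signal(SIGIO, on_sigio);
	blocked = 1;
	raise(SIGIO);		/* arrives while "blocked": gets deferred */
	unblock_and_replay();
	printf("pending is now %d\n",
	       __atomic_load_n(&pending, __ATOMIC_SEQ_CST));
	return 0;
}
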
#include <asm/syscalls_64.h> -#undef __SYSCALL +#undef __SYSCALL + +#undef __SYSCALL_NORETURN +#define __SYSCALL_NORETURN __SYSCALL /* * The sys_call_table[] is no longer used for system calls, but @@ -20,10 +24,9 @@ const sys_call_ptr_t sys_call_table[] = { #include <asm/syscalls_64.h> }; -#undef __SYSCALL +#undef __SYSCALL #define __SYSCALL(nr, sym) case nr: return __x64_##sym(regs); - long x64_sys_call(const struct pt_regs *regs, unsigned int nr) { switch (nr) { diff --git a/arch/x86/entry/syscall_x32.c b/arch/x86/entry/syscall_x32.c index 03de4a932131..fb77908f44f3 100644 --- a/arch/x86/entry/syscall_x32.c +++ b/arch/x86/entry/syscall_x32.c @@ -8,11 +8,14 @@ #include <asm/syscall.h> #define __SYSCALL(nr, sym) extern long __x64_##sym(const struct pt_regs *); +#define __SYSCALL_NORETURN(nr, sym) extern long __noreturn __x64_##sym(const struct pt_regs *); #include <asm/syscalls_x32.h> -#undef __SYSCALL +#undef __SYSCALL -#define __SYSCALL(nr, sym) case nr: return __x64_##sym(regs); +#undef __SYSCALL_NORETURN +#define __SYSCALL_NORETURN __SYSCALL +#define __SYSCALL(nr, sym) case nr: return __x64_##sym(regs); long x32_sys_call(const struct pt_regs *regs, unsigned int nr) { switch (nr) { diff --git a/arch/x86/entry/syscalls/syscall_32.tbl b/arch/x86/entry/syscalls/syscall_32.tbl index d6ebcab1d8b2..4b71a2607bf5 100644 --- a/arch/x86/entry/syscalls/syscall_32.tbl +++ b/arch/x86/entry/syscalls/syscall_32.tbl @@ -2,7 +2,7 @@ # 32-bit system call numbers and entry vectors # # The format is: -# <number> <abi> <name> <entry point> <compat entry point> +# <number> <abi> <name> <entry point> [<compat entry point> [noreturn]] # # The __ia32_sys and __ia32_compat_sys stubs are created on-the-fly for # sys_*() system calls and compat_sys_*() compat system calls if @@ -12,7 +12,7 @@ # The abi is always "i386" for this file. 
# 0 i386 restart_syscall sys_restart_syscall -1 i386 exit sys_exit +1 i386 exit sys_exit - noreturn 2 i386 fork sys_fork 3 i386 read sys_read 4 i386 write sys_write @@ -263,7 +263,7 @@ 249 i386 io_cancel sys_io_cancel 250 i386 fadvise64 sys_ia32_fadvise64 # 251 is available for reuse (was briefly sys_set_zone_reclaim) -252 i386 exit_group sys_exit_group +252 i386 exit_group sys_exit_group - noreturn 253 i386 lookup_dcookie 254 i386 epoll_create sys_epoll_create 255 i386 epoll_ctl sys_epoll_ctl diff --git a/arch/x86/entry/syscalls/syscall_64.tbl b/arch/x86/entry/syscalls/syscall_64.tbl index a396f6e6ab5b..a8068f937290 100644 --- a/arch/x86/entry/syscalls/syscall_64.tbl +++ b/arch/x86/entry/syscalls/syscall_64.tbl @@ -2,7 +2,7 @@ # 64-bit system call numbers and entry vectors # # The format is: -# <number> <abi> <name> <entry point> +# <number> <abi> <name> <entry point> [<compat entry point> [noreturn]] # # The __x64_sys_*() stubs are created on-the-fly for sys_*() system calls # @@ -68,7 +68,7 @@ 57 common fork sys_fork 58 common vfork sys_vfork 59 64 execve sys_execve -60 common exit sys_exit +60 common exit sys_exit - noreturn 61 common wait4 sys_wait4 62 common kill sys_kill 63 common uname sys_newuname @@ -239,7 +239,7 @@ 228 common clock_gettime sys_clock_gettime 229 common clock_getres sys_clock_getres 230 common clock_nanosleep sys_clock_nanosleep -231 common exit_group sys_exit_group +231 common exit_group sys_exit_group - noreturn 232 common epoll_wait sys_epoll_wait 233 common epoll_ctl sys_epoll_ctl 234 common tgkill sys_tgkill diff --git a/arch/x86/events/amd/uncore.c b/arch/x86/events/amd/uncore.c index 4ccb8fa483e6..5a4bfe9aea23 100644 --- a/arch/x86/events/amd/uncore.c +++ b/arch/x86/events/amd/uncore.c @@ -639,7 +639,7 @@ void amd_uncore_df_ctx_scan(struct amd_uncore *uncore, unsigned int cpu) info.split.aux_data = 0; info.split.num_pmcs = NUM_COUNTERS_NB; info.split.gid = 0; - info.split.cid = topology_die_id(cpu); + info.split.cid = topology_logical_package_id(cpu); if (pmu_version >= 2) { ebx.full = cpuid_ebx(EXT_PERFMON_DEBUG_FEATURES); @@ -654,17 +654,20 @@ int amd_uncore_df_ctx_init(struct amd_uncore *uncore, unsigned int cpu) { struct attribute **df_attr = amd_uncore_df_format_attr; struct amd_uncore_pmu *pmu; + int num_counters; /* Run just once */ if (uncore->init_done) return amd_uncore_ctx_init(uncore, cpu); + num_counters = amd_uncore_ctx_num_pmcs(uncore, cpu); + if (!num_counters) + goto done; + /* No grouping, single instance for a system */ uncore->pmus = kzalloc(sizeof(*uncore->pmus), GFP_KERNEL); - if (!uncore->pmus) { - uncore->num_pmus = 0; + if (!uncore->pmus) goto done; - } /* * For Family 17h and above, the Northbridge counters are repurposed @@ -674,7 +677,7 @@ int amd_uncore_df_ctx_init(struct amd_uncore *uncore, unsigned int cpu) pmu = &uncore->pmus[0]; strscpy(pmu->name, boot_cpu_data.x86 >= 0x17 ? 
"amd_df" : "amd_nb", sizeof(pmu->name)); - pmu->num_counters = amd_uncore_ctx_num_pmcs(uncore, cpu); + pmu->num_counters = num_counters; pmu->msr_base = MSR_F15H_NB_PERF_CTL; pmu->rdpmc_base = RDPMC_BASE_NB; pmu->group = amd_uncore_ctx_gid(uncore, cpu); @@ -785,17 +788,20 @@ int amd_uncore_l3_ctx_init(struct amd_uncore *uncore, unsigned int cpu) { struct attribute **l3_attr = amd_uncore_l3_format_attr; struct amd_uncore_pmu *pmu; + int num_counters; /* Run just once */ if (uncore->init_done) return amd_uncore_ctx_init(uncore, cpu); + num_counters = amd_uncore_ctx_num_pmcs(uncore, cpu); + if (!num_counters) + goto done; + /* No grouping, single instance for a system */ uncore->pmus = kzalloc(sizeof(*uncore->pmus), GFP_KERNEL); - if (!uncore->pmus) { - uncore->num_pmus = 0; + if (!uncore->pmus) goto done; - } /* * For Family 17h and above, L3 cache counters are available instead @@ -805,7 +811,7 @@ int amd_uncore_l3_ctx_init(struct amd_uncore *uncore, unsigned int cpu) pmu = &uncore->pmus[0]; strscpy(pmu->name, boot_cpu_data.x86 >= 0x17 ? "amd_l3" : "amd_l2", sizeof(pmu->name)); - pmu->num_counters = amd_uncore_ctx_num_pmcs(uncore, cpu); + pmu->num_counters = num_counters; pmu->msr_base = MSR_F16H_L2I_PERF_CTL; pmu->rdpmc_base = RDPMC_BASE_LLC; pmu->group = amd_uncore_ctx_gid(uncore, cpu); @@ -893,8 +899,8 @@ void amd_uncore_umc_ctx_scan(struct amd_uncore *uncore, unsigned int cpu) cpuid(EXT_PERFMON_DEBUG_FEATURES, &eax, &ebx.full, &ecx, &edx); info.split.aux_data = ecx; /* stash active mask */ info.split.num_pmcs = ebx.split.num_umc_pmc; - info.split.gid = topology_die_id(cpu); - info.split.cid = topology_die_id(cpu); + info.split.gid = topology_logical_package_id(cpu); + info.split.cid = topology_logical_package_id(cpu); *per_cpu_ptr(uncore->info, cpu) = info; } diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c index 5b0dd07b1ef1..acd367c45334 100644 --- a/arch/x86/events/core.c +++ b/arch/x86/events/core.c @@ -2547,6 +2547,7 @@ static ssize_t set_attr_rdpmc(struct device *cdev, struct device_attribute *attr, const char *buf, size_t count) { + static DEFINE_MUTEX(rdpmc_mutex); unsigned long val; ssize_t ret; @@ -2560,6 +2561,8 @@ static ssize_t set_attr_rdpmc(struct device *cdev, if (x86_pmu.attr_rdpmc_broken) return -ENOTSUPP; + guard(mutex)(&rdpmc_mutex); + if (val != x86_pmu.attr_rdpmc) { /* * Changing into or out of never available or always available, diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c index 9d6e8f13d13a..dd1832055891 100644 --- a/arch/x86/events/intel/cstate.c +++ b/arch/x86/events/intel/cstate.c @@ -81,7 +81,7 @@ * MSR_PKG_C7_RESIDENCY: Package C7 Residency Counter. * perf code: 0x03 * Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,CNL, - * KBL,CML,ICL,TGL,RKL,ADL,RPL,MTL + * KBL,CML,ICL,TGL,RKL * Scope: Package (physical package) * MSR_PKG_C8_RESIDENCY: Package C8 Residency Counter. * perf code: 0x04 @@ -90,8 +90,7 @@ * Scope: Package (physical package) * MSR_PKG_C9_RESIDENCY: Package C9 Residency Counter. * perf code: 0x05 - * Available model: HSW ULT,KBL,CNL,CML,ICL,TGL,RKL, - * ADL,RPL,MTL + * Available model: HSW ULT,KBL,CNL,CML,ICL,TGL,RKL * Scope: Package (physical package) * MSR_PKG_C10_RESIDENCY: Package C10 Residency Counter. 
* perf code: 0x06 @@ -637,9 +636,7 @@ static const struct cstate_model adl_cstates __initconst = { .pkg_events = BIT(PERF_CSTATE_PKG_C2_RES) | BIT(PERF_CSTATE_PKG_C3_RES) | BIT(PERF_CSTATE_PKG_C6_RES) | - BIT(PERF_CSTATE_PKG_C7_RES) | BIT(PERF_CSTATE_PKG_C8_RES) | - BIT(PERF_CSTATE_PKG_C9_RES) | BIT(PERF_CSTATE_PKG_C10_RES), }; diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c index e010bfed8417..80a4f712217b 100644 --- a/arch/x86/events/intel/ds.c +++ b/arch/x86/events/intel/ds.c @@ -1831,8 +1831,12 @@ static void setup_pebs_adaptive_sample_data(struct perf_event *event, set_linear_ip(regs, basic->ip); regs->flags = PERF_EFLAGS_EXACT; - if ((sample_type & PERF_SAMPLE_WEIGHT_STRUCT) && (x86_pmu.flags & PMU_FL_RETIRE_LATENCY)) - data->weight.var3_w = format_size >> PEBS_RETIRE_LATENCY_OFFSET & PEBS_LATENCY_MASK; + if (sample_type & PERF_SAMPLE_WEIGHT_STRUCT) { + if (x86_pmu.flags & PMU_FL_RETIRE_LATENCY) + data->weight.var3_w = format_size >> PEBS_RETIRE_LATENCY_OFFSET & PEBS_LATENCY_MASK; + else + data->weight.var3_w = 0; + } /* * The record for MEMINFO is in front of GP diff --git a/arch/x86/events/intel/pt.c b/arch/x86/events/intel/pt.c index 14db6d9d318b..b4aa8daa4773 100644 --- a/arch/x86/events/intel/pt.c +++ b/arch/x86/events/intel/pt.c @@ -878,7 +878,7 @@ static void pt_update_head(struct pt *pt) */ static void *pt_buffer_region(struct pt_buffer *buf) { - return phys_to_virt(TOPA_ENTRY(buf->cur, buf->cur_idx)->base << TOPA_SHIFT); + return phys_to_virt((phys_addr_t)TOPA_ENTRY(buf->cur, buf->cur_idx)->base << TOPA_SHIFT); } /** @@ -990,7 +990,7 @@ pt_topa_entry_for_page(struct pt_buffer *buf, unsigned int pg) * order allocations, there shouldn't be many of these. */ list_for_each_entry(topa, &buf->tables, list) { - if (topa->offset + topa->size > pg << PAGE_SHIFT) + if (topa->offset + topa->size > (unsigned long)pg << PAGE_SHIFT) goto found; } diff --git a/arch/x86/events/intel/pt.h b/arch/x86/events/intel/pt.h index 96906a62aacd..f5e46c04c145 100644 --- a/arch/x86/events/intel/pt.h +++ b/arch/x86/events/intel/pt.h @@ -33,8 +33,8 @@ struct topa_entry { u64 rsvd2 : 1; u64 size : 4; u64 rsvd3 : 2; - u64 base : 36; - u64 rsvd4 : 16; + u64 base : 40; + u64 rsvd4 : 12; }; /* TSC to Core Crystal Clock Ratio */ diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c index 74b8b21e8990..1891c2c7823b 100644 --- a/arch/x86/events/intel/uncore_snbep.c +++ b/arch/x86/events/intel/uncore_snbep.c @@ -462,6 +462,7 @@ #define SPR_UBOX_DID 0x3250 /* SPR CHA */ +#define SPR_CHA_EVENT_MASK_EXT 0xffffffff #define SPR_CHA_PMON_CTL_TID_EN (1 << 16) #define SPR_CHA_PMON_EVENT_MASK (SNBEP_PMON_RAW_EVENT_MASK | \ SPR_CHA_PMON_CTL_TID_EN) @@ -478,6 +479,7 @@ DEFINE_UNCORE_FORMAT_ATTR(umask_ext, umask, "config:8-15,32-43,45-55"); DEFINE_UNCORE_FORMAT_ATTR(umask_ext2, umask, "config:8-15,32-57"); DEFINE_UNCORE_FORMAT_ATTR(umask_ext3, umask, "config:8-15,32-39"); DEFINE_UNCORE_FORMAT_ATTR(umask_ext4, umask, "config:8-15,32-55"); +DEFINE_UNCORE_FORMAT_ATTR(umask_ext5, umask, "config:8-15,32-63"); DEFINE_UNCORE_FORMAT_ATTR(qor, qor, "config:16"); DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18"); DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19"); @@ -5958,7 +5960,7 @@ static struct intel_uncore_ops spr_uncore_chabox_ops = { static struct attribute *spr_uncore_cha_formats_attr[] = { &format_attr_event.attr, - &format_attr_umask_ext4.attr, + &format_attr_umask_ext5.attr, &format_attr_tid_en2.attr, &format_attr_edge.attr, &format_attr_inv.attr, @@ -5994,7 
+5996,7 @@ ATTRIBUTE_GROUPS(uncore_alias); static struct intel_uncore_type spr_uncore_chabox = { .name = "cha", .event_mask = SPR_CHA_PMON_EVENT_MASK, - .event_mask_ext = SPR_RAW_EVENT_MASK_EXT, + .event_mask_ext = SPR_CHA_EVENT_MASK_EXT, .num_shared_regs = 1, .constraints = skx_uncore_chabox_constraints, .ops = &spr_uncore_chabox_ops, diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h index 5187fcf4b610..8d5a2b4f5f00 100644 --- a/arch/x86/include/asm/kvm-x86-ops.h +++ b/arch/x86/include/asm/kvm-x86-ops.h @@ -85,7 +85,6 @@ KVM_X86_OP_OPTIONAL(update_cr8_intercept) KVM_X86_OP(refresh_apicv_exec_ctrl) KVM_X86_OP_OPTIONAL(hwapic_irr_update) KVM_X86_OP_OPTIONAL(hwapic_isr_update) -KVM_X86_OP_OPTIONAL_RET0(guest_apic_has_interrupt) KVM_X86_OP_OPTIONAL(load_eoi_exitmap) KVM_X86_OP_OPTIONAL(set_virtual_apic_mode) KVM_X86_OP_OPTIONAL(set_apic_access_page_addr) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index f8ca74e7678f..d0274b3be2c4 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1714,7 +1714,6 @@ struct kvm_x86_ops { void (*refresh_apicv_exec_ctrl)(struct kvm_vcpu *vcpu); void (*hwapic_irr_update)(struct kvm_vcpu *vcpu, int max_irr); void (*hwapic_isr_update)(int isr); - bool (*guest_apic_has_interrupt)(struct kvm_vcpu *vcpu); void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap); void (*set_virtual_apic_mode)(struct kvm_vcpu *vcpu); void (*set_apic_access_page_addr)(struct kvm_vcpu *vcpu); @@ -1819,7 +1818,7 @@ struct kvm_x86_nested_ops { bool (*is_exception_vmexit)(struct kvm_vcpu *vcpu, u8 vector, u32 error_code); int (*check_events)(struct kvm_vcpu *vcpu); - bool (*has_events)(struct kvm_vcpu *vcpu); + bool (*has_events)(struct kvm_vcpu *vcpu, bool for_injection); void (*triple_fault)(struct kvm_vcpu *vcpu); int (*get_state)(struct kvm_vcpu *vcpu, struct kvm_nested_state __user *user_kvm_nested_state, diff --git a/arch/x86/include/asm/shstk.h b/arch/x86/include/asm/shstk.h index 42fee8959df7..896909f306e3 100644 --- a/arch/x86/include/asm/shstk.h +++ b/arch/x86/include/asm/shstk.h @@ -21,6 +21,7 @@ unsigned long shstk_alloc_thread_stack(struct task_struct *p, unsigned long clon void shstk_free(struct task_struct *p); int setup_signal_shadow_stack(struct ksignal *ksig); int restore_signal_shadow_stack(void); +int shstk_update_last_frame(unsigned long val); #else static inline long shstk_prctl(struct task_struct *task, int option, unsigned long arg2) { return -EINVAL; } @@ -31,6 +32,7 @@ static inline unsigned long shstk_alloc_thread_stack(struct task_struct *p, static inline void shstk_free(struct task_struct *p) {} static inline int setup_signal_shadow_stack(struct ksignal *ksig) { return 0; } static inline int restore_signal_shadow_stack(void) { return 0; } +static inline int shstk_update_last_frame(unsigned long val) { return 0; } #endif /* CONFIG_X86_USER_SHADOW_STACK */ #endif /* __ASSEMBLY__ */ diff --git a/arch/x86/kernel/devicetree.c b/arch/x86/kernel/devicetree.c index 8e3c53b4d070..64280879c68c 100644 --- a/arch/x86/kernel/devicetree.c +++ b/arch/x86/kernel/devicetree.c @@ -83,7 +83,7 @@ static int x86_of_pci_irq_enable(struct pci_dev *dev) ret = pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin); if (ret) - return ret; + return pcibios_err_to_errno(ret); if (!pin) return 0; diff --git a/arch/x86/kernel/shstk.c b/arch/x86/kernel/shstk.c index 6f1e9883f074..9797d4cdb78a 100644 --- a/arch/x86/kernel/shstk.c +++ b/arch/x86/kernel/shstk.c @@ -577,3 +577,14 @@ 
long shstk_prctl(struct task_struct *task, int option, unsigned long arg2) return wrss_control(true); return -EINVAL; } + +int shstk_update_last_frame(unsigned long val) +{ + unsigned long ssp; + + if (!features_enabled(ARCH_SHSTK_SHSTK)) + return 0; + + ssp = get_user_shstk_addr(); + return write_user_shstk_64((u64 __user *)ssp, (u64)val); +} diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c index 6c07f6daaa22..6402fb3089d2 100644 --- a/arch/x86/kernel/uprobes.c +++ b/arch/x86/kernel/uprobes.c @@ -1076,8 +1076,13 @@ arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs return orig_ret_vaddr; nleft = copy_to_user((void __user *)regs->sp, &trampoline_vaddr, rasize); - if (likely(!nleft)) + if (likely(!nleft)) { + if (shstk_update_last_frame(trampoline_vaddr)) { + force_sig(SIGSEGV); + return -1; + } return orig_ret_vaddr; + } if (nleft != rasize) { pr_err("return address clobbered: pid=%d, %%sp=%#lx, %%ip=%#lx\n", diff --git a/arch/x86/kvm/vmx/main.c b/arch/x86/kvm/vmx/main.c index d4ed681785fd..547fca3709fe 100644 --- a/arch/x86/kvm/vmx/main.c +++ b/arch/x86/kvm/vmx/main.c @@ -97,7 +97,6 @@ struct kvm_x86_ops vt_x86_ops __initdata = { .required_apicv_inhibits = VMX_REQUIRED_APICV_INHIBITS, .hwapic_irr_update = vmx_hwapic_irr_update, .hwapic_isr_update = vmx_hwapic_isr_update, - .guest_apic_has_interrupt = vmx_guest_apic_has_interrupt, .sync_pir_to_irr = vmx_sync_pir_to_irr, .deliver_interrupt = vmx_deliver_interrupt, .dy_apicv_has_pending_interrupt = pi_has_pending_interrupt, diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index 643935a0f70a..7c57d6524f75 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c @@ -12,6 +12,7 @@ #include "mmu.h" #include "nested.h" #include "pmu.h" +#include "posted_intr.h" #include "sgx.h" #include "trace.h" #include "vmx.h" @@ -3899,8 +3900,8 @@ static int vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu) if (!pi_test_and_clear_on(vmx->nested.pi_desc)) return 0; - max_irr = find_last_bit((unsigned long *)vmx->nested.pi_desc->pir, 256); - if (max_irr != 256) { + max_irr = pi_find_highest_vector(vmx->nested.pi_desc); + if (max_irr > 0) { vapic_page = vmx->nested.virtual_apic_map.hva; if (!vapic_page) goto mmio_needed; @@ -4031,10 +4032,46 @@ static bool nested_vmx_preemption_timer_pending(struct kvm_vcpu *vcpu) to_vmx(vcpu)->nested.preemption_timer_expired; } -static bool vmx_has_nested_events(struct kvm_vcpu *vcpu) +static bool vmx_has_nested_events(struct kvm_vcpu *vcpu, bool for_injection) { - return nested_vmx_preemption_timer_pending(vcpu) || - to_vmx(vcpu)->nested.mtf_pending; + struct vcpu_vmx *vmx = to_vmx(vcpu); + void *vapic = vmx->nested.virtual_apic_map.hva; + int max_irr, vppr; + + if (nested_vmx_preemption_timer_pending(vcpu) || + vmx->nested.mtf_pending) + return true; + + /* + * Virtual Interrupt Delivery doesn't require manual injection. Either + * the interrupt is already in GUEST_RVI and will be recognized by CPU + * at VM-Entry, or there is a KVM_REQ_EVENT pending and KVM will move + * the interrupt from the PIR to RVI prior to entering the guest. 
+ */ + if (for_injection) + return false; + + if (!nested_cpu_has_vid(get_vmcs12(vcpu)) || + __vmx_interrupt_blocked(vcpu)) + return false; + + if (!vapic) + return false; + + vppr = *((u32 *)(vapic + APIC_PROCPRI)); + + max_irr = vmx_get_rvi(); + if ((max_irr & 0xf0) > (vppr & 0xf0)) + return true; + + if (vmx->nested.pi_pending && vmx->nested.pi_desc && + pi_test_on(vmx->nested.pi_desc)) { + max_irr = pi_find_highest_vector(vmx->nested.pi_desc); + if (max_irr > 0 && (max_irr & 0xf0) > (vppr & 0xf0)) + return true; + } + + return false; } /* diff --git a/arch/x86/kvm/vmx/posted_intr.h b/arch/x86/kvm/vmx/posted_intr.h index 6b2a0226257e..1715d2ab07be 100644 --- a/arch/x86/kvm/vmx/posted_intr.h +++ b/arch/x86/kvm/vmx/posted_intr.h @@ -1,6 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __KVM_X86_VMX_POSTED_INTR_H #define __KVM_X86_VMX_POSTED_INTR_H + +#include <linux/find.h> #include <asm/posted_intr.h> void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu); @@ -12,4 +14,12 @@ int vmx_pi_update_irte(struct kvm *kvm, unsigned int host_irq, uint32_t guest_irq, bool set); void vmx_pi_start_assignment(struct kvm *kvm); +static inline int pi_find_highest_vector(struct pi_desc *pi_desc) +{ + int vec; + + vec = find_last_bit((unsigned long *)pi_desc->pir, 256); + return vec < 256 ? vec : -1; +} + #endif /* __KVM_X86_VMX_POSTED_INTR_H */ diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index b3c83c06f826..2792c5086977 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -4108,26 +4108,6 @@ void pt_update_intercept_for_msr(struct kvm_vcpu *vcpu) } } -bool vmx_guest_apic_has_interrupt(struct kvm_vcpu *vcpu) -{ - struct vcpu_vmx *vmx = to_vmx(vcpu); - void *vapic_page; - u32 vppr; - int rvi; - - if (WARN_ON_ONCE(!is_guest_mode(vcpu)) || - !nested_cpu_has_vid(get_vmcs12(vcpu)) || - WARN_ON_ONCE(!vmx->nested.virtual_apic_map.gfn)) - return false; - - rvi = vmx_get_rvi(); - - vapic_page = vmx->nested.virtual_apic_map.hva; - vppr = *((u32 *)(vapic_page + APIC_PROCPRI)); - - return ((rvi & 0xf0) > (vppr & 0xf0)); -} - void vmx_msr_filter_changed(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); @@ -5052,14 +5032,19 @@ int vmx_nmi_allowed(struct kvm_vcpu *vcpu, bool for_injection) return !vmx_nmi_blocked(vcpu); } +bool __vmx_interrupt_blocked(struct kvm_vcpu *vcpu) +{ + return !(vmx_get_rflags(vcpu) & X86_EFLAGS_IF) || + (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & + (GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS)); +} + bool vmx_interrupt_blocked(struct kvm_vcpu *vcpu) { if (is_guest_mode(vcpu) && nested_exit_on_intr(vcpu)) return false; - return !(vmx_get_rflags(vcpu) & X86_EFLAGS_IF) || - (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & - (GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS)); + return __vmx_interrupt_blocked(vcpu); } int vmx_interrupt_allowed(struct kvm_vcpu *vcpu, bool for_injection) diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h index 7b64e271a931..2e23a01fe320 100644 --- a/arch/x86/kvm/vmx/vmx.h +++ b/arch/x86/kvm/vmx/vmx.h @@ -406,6 +406,7 @@ u64 construct_eptp(struct kvm_vcpu *vcpu, hpa_t root_hpa, int root_level); bool vmx_guest_inject_ac(struct kvm_vcpu *vcpu); void vmx_update_exception_bitmap(struct kvm_vcpu *vcpu); bool vmx_nmi_blocked(struct kvm_vcpu *vcpu); +bool __vmx_interrupt_blocked(struct kvm_vcpu *vcpu); bool vmx_interrupt_blocked(struct kvm_vcpu *vcpu); bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu); void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked); diff --git a/arch/x86/kvm/vmx/x86_ops.h 
b/arch/x86/kvm/vmx/x86_ops.h index 502704596c83..d404227c164d 100644 --- a/arch/x86/kvm/vmx/x86_ops.h +++ b/arch/x86/kvm/vmx/x86_ops.h @@ -49,7 +49,6 @@ void vmx_apicv_pre_state_restore(struct kvm_vcpu *vcpu); bool vmx_check_apicv_inhibit_reasons(enum kvm_apicv_inhibit reason); void vmx_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr); void vmx_hwapic_isr_update(int max_isr); -bool vmx_guest_apic_has_interrupt(struct kvm_vcpu *vcpu); int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu); void vmx_deliver_interrupt(struct kvm_lapic *apic, int delivery_mode, int trig_mode, int vector); diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 0763a0f72a06..0b7adf3bc58a 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -10516,7 +10516,7 @@ static int kvm_check_and_inject_events(struct kvm_vcpu *vcpu, if (is_guest_mode(vcpu) && kvm_x86_ops.nested_ops->has_events && - kvm_x86_ops.nested_ops->has_events(vcpu)) + kvm_x86_ops.nested_ops->has_events(vcpu, true)) *req_immediate_exit = true; /* @@ -13100,12 +13100,6 @@ void kvm_arch_commit_memory_region(struct kvm *kvm, kvm_arch_free_memslot(kvm, old); } -static inline bool kvm_guest_apic_has_interrupt(struct kvm_vcpu *vcpu) -{ - return (is_guest_mode(vcpu) && - static_call(kvm_x86_guest_apic_has_interrupt)(vcpu)); -} - static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu) { if (!list_empty_careful(&vcpu->async_pf.done)) @@ -13136,9 +13130,7 @@ static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu) if (kvm_test_request(KVM_REQ_PMI, vcpu)) return true; - if (kvm_arch_interrupt_allowed(vcpu) && - (kvm_cpu_has_interrupt(vcpu) || - kvm_guest_apic_has_interrupt(vcpu))) + if (kvm_arch_interrupt_allowed(vcpu) && kvm_cpu_has_interrupt(vcpu)) return true; if (kvm_hv_has_stimer_pending(vcpu)) @@ -13146,7 +13138,7 @@ static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu) if (is_guest_mode(vcpu) && kvm_x86_ops.nested_ops->has_events && - kvm_x86_ops.nested_ops->has_events(vcpu)) + kvm_x86_ops.nested_ops->has_events(vcpu, false)) return true; if (kvm_xen_has_pending_events(vcpu)) diff --git a/arch/x86/pci/intel_mid_pci.c b/arch/x86/pci/intel_mid_pci.c index 8edd62206604..722a33be08a1 100644 --- a/arch/x86/pci/intel_mid_pci.c +++ b/arch/x86/pci/intel_mid_pci.c @@ -233,9 +233,9 @@ static int intel_mid_pci_irq_enable(struct pci_dev *dev) return 0; ret = pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &gsi); - if (ret < 0) { + if (ret) { dev_warn(&dev->dev, "Failed to read interrupt line: %d\n", ret); - return ret; + return pcibios_err_to_errno(ret); } id = x86_match_cpu(intel_mid_cpu_ids); diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c index 652cd53e77f6..0f2fe524f60d 100644 --- a/arch/x86/pci/xen.c +++ b/arch/x86/pci/xen.c @@ -38,10 +38,10 @@ static int xen_pcifront_enable_irq(struct pci_dev *dev) u8 gsi; rc = pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &gsi); - if (rc < 0) { + if (rc) { dev_warn(&dev->dev, "Xen PCI: failed to read interrupt line: %d\n", rc); - return rc; + return pcibios_err_to_errno(rc); } /* In PV DomU the Xen PCI backend puts the PIRQ in the interrupt line.*/ pirq = gsi; diff --git a/arch/x86/platform/intel/iosf_mbi.c b/arch/x86/platform/intel/iosf_mbi.c index fdd49d70b437..c81cea208c2c 100644 --- a/arch/x86/platform/intel/iosf_mbi.c +++ b/arch/x86/platform/intel/iosf_mbi.c @@ -62,7 +62,7 @@ static int iosf_mbi_pci_read_mdr(u32 mcrx, u32 mcr, u32 *mdr) fail_read: dev_err(&mbi_pdev->dev, "PCI config access failed with %d\n", result); - return result; + return pcibios_err_to_errno(result); } static int 
iosf_mbi_pci_write_mdr(u32 mcrx, u32 mcr, u32 mdr) @@ -91,7 +91,7 @@ static int iosf_mbi_pci_write_mdr(u32 mcrx, u32 mcr, u32 mdr) fail_write: dev_err(&mbi_pdev->dev, "PCI config access failed with %d\n", result); - return result; + return pcibios_err_to_errno(result); } int iosf_mbi_read(u8 port, u8 opcode, u32 offset, u32 *mdr) diff --git a/arch/x86/um/sys_call_table_32.c b/arch/x86/um/sys_call_table_32.c index 89df5d89d664..51655133eee3 100644 --- a/arch/x86/um/sys_call_table_32.c +++ b/arch/x86/um/sys_call_table_32.c @@ -9,6 +9,10 @@ #include <linux/cache.h> #include <asm/syscall.h> +extern asmlinkage long sys_ni_syscall(unsigned long, unsigned long, + unsigned long, unsigned long, + unsigned long, unsigned long); + /* * Below you can see, in terms of #define's, the differences between the x86-64 * and the UML syscall table. @@ -22,15 +26,13 @@ #define sys_vm86 sys_ni_syscall #define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, native) +#define __SYSCALL_NORETURN __SYSCALL #define __SYSCALL(nr, sym) extern asmlinkage long sym(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long); #include <asm/syscalls_32.h> +#undef __SYSCALL -#undef __SYSCALL #define __SYSCALL(nr, sym) sym, - -extern asmlinkage long sys_ni_syscall(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long); - const sys_call_ptr_t sys_call_table[] ____cacheline_aligned = { #include <asm/syscalls_32.h> }; diff --git a/arch/x86/um/sys_call_table_64.c b/arch/x86/um/sys_call_table_64.c index b0b4cfd2308c..943d414f2109 100644 --- a/arch/x86/um/sys_call_table_64.c +++ b/arch/x86/um/sys_call_table_64.c @@ -9,6 +9,10 @@ #include <linux/cache.h> #include <asm/syscall.h> +extern asmlinkage long sys_ni_syscall(unsigned long, unsigned long, + unsigned long, unsigned long, + unsigned long, unsigned long); + /* * Below you can see, in terms of #define's, the differences between the x86-64 * and the UML syscall table. @@ -18,14 +22,13 @@ #define sys_iopl sys_ni_syscall #define sys_ioperm sys_ni_syscall +#define __SYSCALL_NORETURN __SYSCALL + #define __SYSCALL(nr, sym) extern asmlinkage long sym(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long); #include <asm/syscalls_64.h> +#undef __SYSCALL -#undef __SYSCALL #define __SYSCALL(nr, sym) sym, - -extern asmlinkage long sys_ni_syscall(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long); - const sys_call_ptr_t sys_call_table[] ____cacheline_aligned = { #include <asm/syscalls_64.h> }; diff --git a/arch/x86/virt/svm/sev.c b/arch/x86/virt/svm/sev.c index 0ae10535c699..0ce17766c0e5 100644 --- a/arch/x86/virt/svm/sev.c +++ b/arch/x86/virt/svm/sev.c @@ -120,7 +120,7 @@ static __init void snp_enable(void *arg) bool snp_probe_rmptable_info(void) { - u64 max_rmp_pfn, calc_rmp_sz, rmp_sz, rmp_base, rmp_end; + u64 rmp_sz, rmp_base, rmp_end; rdmsrl(MSR_AMD64_RMP_BASE, rmp_base); rdmsrl(MSR_AMD64_RMP_END, rmp_end); @@ -137,28 +137,11 @@ bool snp_probe_rmptable_info(void) rmp_sz = rmp_end - rmp_base + 1; - /* - * Calculate the amount the memory that must be reserved by the BIOS to - * address the whole RAM, including the bookkeeping area. The RMP itself - * must also be covered. 
- */ - max_rmp_pfn = max_pfn; - if (PHYS_PFN(rmp_end) > max_pfn) - max_rmp_pfn = PHYS_PFN(rmp_end); - - calc_rmp_sz = (max_rmp_pfn << 4) + RMPTABLE_CPU_BOOKKEEPING_SZ; - - if (calc_rmp_sz > rmp_sz) { - pr_err("Memory reserved for the RMP table does not cover full system RAM (expected 0x%llx got 0x%llx)\n", - calc_rmp_sz, rmp_sz); - return false; - } - probed_rmp_base = rmp_base; probed_rmp_size = rmp_sz; pr_info("RMP table physical range [0x%016llx - 0x%016llx]\n", - probed_rmp_base, probed_rmp_base + probed_rmp_size - 1); + rmp_base, rmp_end); return true; } @@ -206,9 +189,8 @@ void __init snp_fixup_e820_tables(void) */ static int __init snp_rmptable_init(void) { + u64 max_rmp_pfn, calc_rmp_sz, rmptable_size, rmp_end, val; void *rmptable_start; - u64 rmptable_size; - u64 val; if (!cc_platform_has(CC_ATTR_HOST_SEV_SNP)) return 0; @@ -219,10 +201,28 @@ static int __init snp_rmptable_init(void) if (!probed_rmp_size) goto nosnp; + rmp_end = probed_rmp_base + probed_rmp_size - 1; + + /* + * Calculate the amount the memory that must be reserved by the BIOS to + * address the whole RAM, including the bookkeeping area. The RMP itself + * must also be covered. + */ + max_rmp_pfn = max_pfn; + if (PFN_UP(rmp_end) > max_pfn) + max_rmp_pfn = PFN_UP(rmp_end); + + calc_rmp_sz = (max_rmp_pfn << 4) + RMPTABLE_CPU_BOOKKEEPING_SZ; + if (calc_rmp_sz > probed_rmp_size) { + pr_err("Memory reserved for the RMP table does not cover full system RAM (expected 0x%llx got 0x%llx)\n", + calc_rmp_sz, probed_rmp_size); + goto nosnp; + } + rmptable_start = memremap(probed_rmp_base, probed_rmp_size, MEMREMAP_WB); if (!rmptable_start) { pr_err("Failed to map RMP table\n"); - return 1; + goto nosnp; } /* diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c index 99918beccd80..6bcbdf3b7999 100644 --- a/arch/x86/xen/p2m.c +++ b/arch/x86/xen/p2m.c @@ -730,7 +730,7 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops, * immediate unmapping. */ map_ops[i].status = GNTST_general_error; - unmap[0].host_addr = map_ops[i].host_addr, + unmap[0].host_addr = map_ops[i].host_addr; unmap[0].handle = map_ops[i].handle; map_ops[i].handle = INVALID_GRANT_HANDLE; if (map_ops[i].flags & GNTMAP_device_map) @@ -740,7 +740,7 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops, if (kmap_ops) { kmap_ops[i].status = GNTST_general_error; - unmap[1].host_addr = kmap_ops[i].host_addr, + unmap[1].host_addr = kmap_ops[i].host_addr; unmap[1].handle = kmap_ops[i].handle; kmap_ops[i].handle = INVALID_GRANT_HANDLE; if (kmap_ops[i].flags & GNTMAP_device_map) diff --git a/block/bio-integrity.c b/block/bio-integrity.c index 8b528e12136f..741581a752c4 100644 --- a/block/bio-integrity.c +++ b/block/bio-integrity.c @@ -454,6 +454,7 @@ bool bio_integrity_prep(struct bio *bio) unsigned long start, end; unsigned int len, nr_pages; unsigned int bytes, offset, i; + gfp_t gfp = GFP_NOIO; if (!bi) return true; @@ -476,11 +477,19 @@ bool bio_integrity_prep(struct bio *bio) if (!bi->profile->generate_fn || !(bi->flags & BLK_INTEGRITY_GENERATE)) return true; + + /* + * Zero the memory allocated to not leak uninitialized kernel + * memory to disk. For PI this only affects the app tag, but + * for non-integrity metadata it affects the entire metadata + * buffer. 
+ */ + gfp |= __GFP_ZERO; } /* Allocate kernel buffer for protection data */ len = bio_integrity_bytes(bi, bio_sectors(bio)); - buf = kmalloc(len, GFP_NOIO); + buf = kmalloc(len, gfp); if (unlikely(buf == NULL)) { printk(KERN_ERR "could not allocate integrity buffer\n"); goto err_end_io; diff --git a/block/blk-mq.c b/block/blk-mq.c index 3b4df8e5ac9e..1c85a5c79ee6 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -448,6 +448,10 @@ static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data) if (data->cmd_flags & REQ_NOWAIT) data->flags |= BLK_MQ_REQ_NOWAIT; +retry: + data->ctx = blk_mq_get_ctx(q); + data->hctx = blk_mq_map_queue(q, data->cmd_flags, data->ctx); + if (q->elevator) { /* * All requests use scheduler tags when an I/O scheduler is @@ -469,13 +473,9 @@ static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data) if (ops->limit_depth) ops->limit_depth(data->cmd_flags, data); } - } - -retry: - data->ctx = blk_mq_get_ctx(q); - data->hctx = blk_mq_map_queue(q, data->cmd_flags, data->ctx); - if (!(data->rq_flags & RQF_SCHED_TAGS)) + } else { blk_mq_tag_busy(data->hctx); + } if (data->flags & BLK_MQ_REQ_RESERVED) data->rq_flags |= RQF_RESV; @@ -2914,6 +2914,17 @@ static void blk_mq_use_cached_rq(struct request *rq, struct blk_plug *plug, INIT_LIST_HEAD(&rq->queuelist); } +static bool bio_unaligned(const struct bio *bio, struct request_queue *q) +{ + unsigned int bs_mask = queue_logical_block_size(q) - 1; + + /* .bi_sector of any zero sized bio need to be initialized */ + if ((bio->bi_iter.bi_size & bs_mask) || + ((bio->bi_iter.bi_sector << SECTOR_SHIFT) & bs_mask)) + return true; + return false; +} + /** * blk_mq_submit_bio - Create and send a request to block device. * @bio: Bio pointer. @@ -2966,6 +2977,15 @@ void blk_mq_submit_bio(struct bio *bio) return; } + /* + * Device reconfiguration may change logical block size, so alignment + * check has to be done with queue usage counter held + */ + if (unlikely(bio_unaligned(bio, q))) { + bio_io_error(bio); + goto queue_exit; + } + if (unlikely(bio_may_exceed_limits(bio, &q->limits))) { bio = __bio_split_to_limits(bio, &q->limits, &nr_segs); if (!bio) diff --git a/block/genhd.c b/block/genhd.c index 8f1f3c6b4d67..c5fca3e893a0 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -663,12 +663,12 @@ void del_gendisk(struct gendisk *disk) */ if (!test_bit(GD_DEAD, &disk->state)) blk_report_disk_dead(disk, false); - __blk_mark_disk_dead(disk); /* * Drop all partitions now that the disk is marked dead. */ mutex_lock(&disk->open_mutex); + __blk_mark_disk_dead(disk); xa_for_each_start(&disk->part_tbl, idx, part, 1) drop_partition(part); mutex_unlock(&disk->open_mutex); diff --git a/block/mq-deadline.c b/block/mq-deadline.c index 94eede4fb9eb..acdc28756d9d 100644 --- a/block/mq-deadline.c +++ b/block/mq-deadline.c @@ -487,6 +487,20 @@ static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx) return rq; } +/* + * 'depth' is a number in the range 1..INT_MAX representing a number of + * requests. Scale it with a factor (1 << bt->sb.shift) / q->nr_requests since + * 1..(1 << bt->sb.shift) is the range expected by sbitmap_get_shallow(). + * Values larger than q->nr_requests have the same effect as q->nr_requests. 
+ */ +static int dd_to_word_depth(struct blk_mq_hw_ctx *hctx, unsigned int qdepth) +{ + struct sbitmap_queue *bt = &hctx->sched_tags->bitmap_tags; + const unsigned int nrr = hctx->queue->nr_requests; + + return ((qdepth << bt->sb.shift) + nrr - 1) / nrr; +} + /* * Called by __blk_mq_alloc_request(). The shallow_depth value set by this * function is used by __blk_mq_get_tag(). @@ -503,7 +517,7 @@ static void dd_limit_depth(blk_opf_t opf, struct blk_mq_alloc_data *data) * Throttle asynchronous requests and writes such that these requests * do not block the allocation of synchronous requests. */ - data->shallow_depth = dd->async_depth; + data->shallow_depth = dd_to_word_depth(data->hctx, dd->async_depth); } /* Called by blk_mq_update_nr_requests(). */ @@ -513,9 +527,9 @@ static void dd_depth_updated(struct blk_mq_hw_ctx *hctx) struct deadline_data *dd = q->elevator->elevator_data; struct blk_mq_tags *tags = hctx->sched_tags; - dd->async_depth = max(1UL, 3 * q->nr_requests / 4); + dd->async_depth = q->nr_requests; - sbitmap_queue_min_shallow_depth(&tags->bitmap_tags, dd->async_depth); + sbitmap_queue_min_shallow_depth(&tags->bitmap_tags, 1); } /* Called by blk_mq_init_hctx() and blk_mq_init_sched(). */ diff --git a/drivers/android/binder.c b/drivers/android/binder.c index b21a7b246a0d..2d0a24a56508 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c @@ -570,9 +570,7 @@ static bool binder_has_work(struct binder_thread *thread, bool do_proc_work) static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread) { return !thread->transaction_stack && - binder_worklist_empty_ilocked(&thread->todo) && - (thread->looper & (BINDER_LOOPER_STATE_ENTERED | - BINDER_LOOPER_STATE_REGISTERED)); + binder_worklist_empty_ilocked(&thread->todo); } static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc, diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index bb4d30d377ae..076fbeadce01 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -230,6 +230,80 @@ void ata_scsi_set_sense_information(struct ata_device *dev, SCSI_SENSE_BUFFERSIZE, information); } +/** + * ata_scsi_set_passthru_sense_fields - Set ATA fields in sense buffer + * @qc: ATA PASS-THROUGH command. + * + * Populates "ATA Status Return sense data descriptor" / "Fixed format + * sense data" with ATA taskfile fields. + * + * LOCKING: + * None. + */ +static void ata_scsi_set_passthru_sense_fields(struct ata_queued_cmd *qc) +{ + struct scsi_cmnd *cmd = qc->scsicmd; + struct ata_taskfile *tf = &qc->result_tf; + unsigned char *sb = cmd->sense_buffer; + + if ((sb[0] & 0x7f) >= 0x72) { + unsigned char *desc; + u8 len; + + /* descriptor format */ + len = sb[7]; + desc = (char *)scsi_sense_desc_find(sb, len + 8, 9); + if (!desc) { + if (SCSI_SENSE_BUFFERSIZE < len + 14) + return; + sb[7] = len + 14; + desc = sb + 8 + len; + } + desc[0] = 9; + desc[1] = 12; + /* + * Copy registers into sense buffer. + */ + desc[2] = 0x00; + desc[3] = tf->error; + desc[5] = tf->nsect; + desc[7] = tf->lbal; + desc[9] = tf->lbam; + desc[11] = tf->lbah; + desc[12] = tf->device; + desc[13] = tf->status; + + /* + * Fill in Extend bit, and the high order bytes + * if applicable. 
+ */ + if (tf->flags & ATA_TFLAG_LBA48) { + desc[2] |= 0x01; + desc[4] = tf->hob_nsect; + desc[6] = tf->hob_lbal; + desc[8] = tf->hob_lbam; + desc[10] = tf->hob_lbah; + } + } else { + /* Fixed sense format */ + sb[0] |= 0x80; + sb[3] = tf->error; + sb[4] = tf->status; + sb[5] = tf->device; + sb[6] = tf->nsect; + if (tf->flags & ATA_TFLAG_LBA48) { + sb[8] |= 0x80; + if (tf->hob_nsect) + sb[8] |= 0x40; + if (tf->hob_lbal || tf->hob_lbam || tf->hob_lbah) + sb[8] |= 0x20; + } + sb[9] = tf->lbal; + sb[10] = tf->lbam; + sb[11] = tf->lbah; + } +} + static void ata_scsi_set_invalid_field(struct ata_device *dev, struct scsi_cmnd *cmd, u16 field, u8 bit) { @@ -837,10 +911,8 @@ static void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk, * ata_gen_passthru_sense - Generate check condition sense block. * @qc: Command that completed. * - * This function is specific to the ATA descriptor format sense - * block specified for the ATA pass through commands. Regardless - * of whether the command errored or not, return a sense - * block. Copy all controller registers into the sense + * This function is specific to the ATA pass through commands. + * Regardless of whether the command errored or not, return a sense * block. If there was no error, we get the request from an ATA * passthrough command, so we use the following sense data: * sk = RECOVERED ERROR @@ -855,7 +927,6 @@ static void ata_gen_passthru_sense(struct ata_queued_cmd *qc) struct scsi_cmnd *cmd = qc->scsicmd; struct ata_taskfile *tf = &qc->result_tf; unsigned char *sb = cmd->sense_buffer; - unsigned char *desc = sb + 8; u8 sense_key, asc, ascq; memset(sb, 0, SCSI_SENSE_BUFFERSIZE); @@ -870,67 +941,8 @@ static void ata_gen_passthru_sense(struct ata_queued_cmd *qc) &sense_key, &asc, &ascq); ata_scsi_set_sense(qc->dev, cmd, sense_key, asc, ascq); } else { - /* - * ATA PASS-THROUGH INFORMATION AVAILABLE - * Always in descriptor format sense. - */ - scsi_build_sense(cmd, 1, RECOVERED_ERROR, 0, 0x1D); - } - - if ((cmd->sense_buffer[0] & 0x7f) >= 0x72) { - u8 len; - - /* descriptor format */ - len = sb[7]; - desc = (char *)scsi_sense_desc_find(sb, len + 8, 9); - if (!desc) { - if (SCSI_SENSE_BUFFERSIZE < len + 14) - return; - sb[7] = len + 14; - desc = sb + 8 + len; - } - desc[0] = 9; - desc[1] = 12; - /* - * Copy registers into sense buffer. - */ - desc[2] = 0x00; - desc[3] = tf->error; - desc[5] = tf->nsect; - desc[7] = tf->lbal; - desc[9] = tf->lbam; - desc[11] = tf->lbah; - desc[12] = tf->device; - desc[13] = tf->status; - - /* - * Fill in Extend bit, and the high order bytes - * if applicable. 
- */ - if (tf->flags & ATA_TFLAG_LBA48) { - desc[2] |= 0x01; - desc[4] = tf->hob_nsect; - desc[6] = tf->hob_lbal; - desc[8] = tf->hob_lbam; - desc[10] = tf->hob_lbah; - } - } else { - /* Fixed sense format */ - desc[0] = tf->error; - desc[1] = tf->status; - desc[2] = tf->device; - desc[3] = tf->nsect; - desc[7] = 0; - if (tf->flags & ATA_TFLAG_LBA48) { - desc[8] |= 0x80; - if (tf->hob_nsect) - desc[8] |= 0x40; - if (tf->hob_lbal || tf->hob_lbam || tf->hob_lbah) - desc[8] |= 0x20; - } - desc[9] = tf->lbal; - desc[10] = tf->lbam; - desc[11] = tf->lbah; + /* ATA PASS-THROUGH INFORMATION AVAILABLE */ + ata_scsi_set_sense(qc->dev, cmd, RECOVERED_ERROR, 0, 0x1D); } } @@ -1632,26 +1644,32 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc) { struct scsi_cmnd *cmd = qc->scsicmd; u8 *cdb = cmd->cmnd; - int need_sense = (qc->err_mask != 0) && - !(qc->flags & ATA_QCFLAG_SENSE_VALID); + bool have_sense = qc->flags & ATA_QCFLAG_SENSE_VALID; + bool is_ata_passthru = cdb[0] == ATA_16 || cdb[0] == ATA_12; + bool is_ck_cond_request = cdb[2] & 0x20; + bool is_error = qc->err_mask != 0; /* For ATA pass thru (SAT) commands, generate a sense block if * user mandated it or if there's an error. Note that if we - * generate because the user forced us to [CK_COND =1], a check + * generate because the user forced us to [CK_COND=1], a check * condition is generated and the ATA register values are returned * whether the command completed successfully or not. If there - * was no error, we use the following sense data: + * was no error, and CK_COND=1, we use the following sense data: * sk = RECOVERED ERROR * asc,ascq = ATA PASS-THROUGH INFORMATION AVAILABLE */ - if (((cdb[0] == ATA_16) || (cdb[0] == ATA_12)) && - ((cdb[2] & 0x20) || need_sense)) - ata_gen_passthru_sense(qc); - else if (need_sense) + if (is_ata_passthru && (is_ck_cond_request || is_error || have_sense)) { + if (!have_sense) + ata_gen_passthru_sense(qc); + ata_scsi_set_passthru_sense_fields(qc); + if (is_ck_cond_request) + set_status_byte(qc->scsicmd, SAM_STAT_CHECK_CONDITION); + } else if (is_error && !have_sense) { ata_gen_ata_sense(qc); - else + } else { /* Keep the SCSI ML and status byte, clear host byte. */ cmd->result &= 0x0000ffff; + } ata_qc_done(qc); } @@ -2590,14 +2608,8 @@ static void atapi_qc_complete(struct ata_queued_cmd *qc) /* handle completion from EH */ if (unlikely(err_mask || qc->flags & ATA_QCFLAG_SENSE_VALID)) { - if (!(qc->flags & ATA_QCFLAG_SENSE_VALID)) { - /* FIXME: not quite right; we don't want the - * translation of taskfile registers into a - * sense descriptors, since that's only - * correct for ATA, not ATAPI - */ + if (!(qc->flags & ATA_QCFLAG_SENSE_VALID)) ata_gen_passthru_sense(qc); - } /* SCSI EH automatically locks door if sdev->locked is * set. 
Sometimes door lock request continues to diff --git a/drivers/auxdisplay/ht16k33.c b/drivers/auxdisplay/ht16k33.c index ce987944662c..8a7034b41d50 100644 --- a/drivers/auxdisplay/ht16k33.c +++ b/drivers/auxdisplay/ht16k33.c @@ -483,6 +483,7 @@ static int ht16k33_led_probe(struct device *dev, struct led_classdev *led, led->max_brightness = MAX_BRIGHTNESS; err = devm_led_classdev_register_ext(dev, led, &init_data); + fwnode_handle_put(init_data.fwnode); if (err) dev_err(dev, "Failed to register LED\n"); diff --git a/drivers/base/devres.c b/drivers/base/devres.c index 3df0025d12aa..8d709dbd4e0c 100644 --- a/drivers/base/devres.c +++ b/drivers/base/devres.c @@ -896,9 +896,12 @@ void *devm_krealloc(struct device *dev, void *ptr, size_t new_size, gfp_t gfp) /* * Otherwise: allocate new, larger chunk. We need to allocate before * taking the lock as most probably the caller uses GFP_KERNEL. + * alloc_dr() will call check_dr_size() to reserve extra memory + * for struct devres automatically, so size @new_size user request + * is delivered to it directly as devm_kmalloc() does. */ new_dr = alloc_dr(devm_kmalloc_release, - total_new_size, gfp, dev_to_node(dev)); + new_size, gfp, dev_to_node(dev)); if (!new_dr) return NULL; @@ -1222,7 +1225,11 @@ EXPORT_SYMBOL_GPL(__devm_alloc_percpu); */ void devm_free_percpu(struct device *dev, void __percpu *pdata) { - WARN_ON(devres_destroy(dev, devm_percpu_release, devm_percpu_match, + /* + * Use devres_release() to prevent memory leakage as + * devm_free_pages() does. + */ + WARN_ON(devres_release(dev, devm_percpu_release, devm_percpu_match, (__force void *)pdata)); } EXPORT_SYMBOL_GPL(devm_free_percpu); diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c index 75f189e42f88..f94058019352 100644 --- a/drivers/block/null_blk/main.c +++ b/drivers/block/null_blk/main.c @@ -227,7 +227,7 @@ MODULE_PARM_DESC(mbps, "Cache size in MiB for memory-backed device. Default: 0 ( static bool g_fua = true; module_param_named(fua, g_fua, bool, 0444); -MODULE_PARM_DESC(zoned, "Enable/disable FUA support when cache_size is used. Default: true"); +MODULE_PARM_DESC(fua, "Enable/disable FUA support when cache_size is used. 
Default: true"); static unsigned int g_mbps; module_param_named(mbps, g_mbps, uint, 0444); diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 26ff5cd2bf0a..da22ce38c039 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -362,7 +362,7 @@ enum rbd_watch_state { enum rbd_lock_state { RBD_LOCK_STATE_UNLOCKED, RBD_LOCK_STATE_LOCKED, - RBD_LOCK_STATE_RELEASING, + RBD_LOCK_STATE_QUIESCING, }; /* WatchNotify::ClientId */ @@ -422,7 +422,7 @@ struct rbd_device { struct list_head running_list; struct completion acquire_wait; int acquire_err; - struct completion releasing_wait; + struct completion quiescing_wait; spinlock_t object_map_lock; u8 *object_map; @@ -525,7 +525,7 @@ static bool __rbd_is_lock_owner(struct rbd_device *rbd_dev) lockdep_assert_held(&rbd_dev->lock_rwsem); return rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED || - rbd_dev->lock_state == RBD_LOCK_STATE_RELEASING; + rbd_dev->lock_state == RBD_LOCK_STATE_QUIESCING; } static bool rbd_is_lock_owner(struct rbd_device *rbd_dev) @@ -3457,13 +3457,14 @@ static void rbd_lock_del_request(struct rbd_img_request *img_req) lockdep_assert_held(&rbd_dev->lock_rwsem); spin_lock(&rbd_dev->lock_lists_lock); if (!list_empty(&img_req->lock_item)) { + rbd_assert(!list_empty(&rbd_dev->running_list)); list_del_init(&img_req->lock_item); - need_wakeup = (rbd_dev->lock_state == RBD_LOCK_STATE_RELEASING && + need_wakeup = (rbd_dev->lock_state == RBD_LOCK_STATE_QUIESCING && list_empty(&rbd_dev->running_list)); } spin_unlock(&rbd_dev->lock_lists_lock); if (need_wakeup) - complete(&rbd_dev->releasing_wait); + complete(&rbd_dev->quiescing_wait); } static int rbd_img_exclusive_lock(struct rbd_img_request *img_req) @@ -3476,11 +3477,6 @@ static int rbd_img_exclusive_lock(struct rbd_img_request *img_req) if (rbd_lock_add_request(img_req)) return 1; - if (rbd_dev->opts->exclusive) { - WARN_ON(1); /* lock got released? */ - return -EROFS; - } - /* * Note the use of mod_delayed_work() in rbd_acquire_lock() * and cancel_delayed_work() in wake_lock_waiters(). @@ -4181,16 +4177,16 @@ static bool rbd_quiesce_lock(struct rbd_device *rbd_dev) /* * Ensure that all in-flight IO is flushed. */ - rbd_dev->lock_state = RBD_LOCK_STATE_RELEASING; - rbd_assert(!completion_done(&rbd_dev->releasing_wait)); + rbd_dev->lock_state = RBD_LOCK_STATE_QUIESCING; + rbd_assert(!completion_done(&rbd_dev->quiescing_wait)); if (list_empty(&rbd_dev->running_list)) return true; up_write(&rbd_dev->lock_rwsem); - wait_for_completion(&rbd_dev->releasing_wait); + wait_for_completion(&rbd_dev->quiescing_wait); down_write(&rbd_dev->lock_rwsem); - if (rbd_dev->lock_state != RBD_LOCK_STATE_RELEASING) + if (rbd_dev->lock_state != RBD_LOCK_STATE_QUIESCING) return false; rbd_assert(list_empty(&rbd_dev->running_list)); @@ -4601,6 +4597,10 @@ static void rbd_reacquire_lock(struct rbd_device *rbd_dev) rbd_warn(rbd_dev, "failed to update lock cookie: %d", ret); + if (rbd_dev->opts->exclusive) + rbd_warn(rbd_dev, + "temporarily releasing lock on exclusive mapping"); + /* * Lock cookie cannot be updated on older OSDs, so do * a manual release and queue an acquire. 
@@ -5383,7 +5383,7 @@ static struct rbd_device *__rbd_dev_create(struct rbd_spec *spec) INIT_LIST_HEAD(&rbd_dev->acquiring_list); INIT_LIST_HEAD(&rbd_dev->running_list); init_completion(&rbd_dev->acquire_wait); - init_completion(&rbd_dev->releasing_wait); + init_completion(&rbd_dev->quiescing_wait); spin_lock_init(&rbd_dev->object_map_lock); @@ -6589,11 +6589,6 @@ static int rbd_add_acquire_lock(struct rbd_device *rbd_dev) if (ret) return ret; - /* - * The lock may have been released by now, unless automatic lock - * transitions are disabled. - */ - rbd_assert(!rbd_dev->opts->exclusive || rbd_is_lock_owner(rbd_dev)); return 0; } diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c index 4e159948c912..3b5883932133 100644 --- a/drivers/block/ublk_drv.c +++ b/drivers/block/ublk_drv.c @@ -48,6 +48,9 @@ #define UBLK_MINORS (1U << MINORBITS) +/* private ioctl command mirror */ +#define UBLK_CMD_DEL_DEV_ASYNC _IOC_NR(UBLK_U_CMD_DEL_DEV_ASYNC) + /* All UBLK_F_* have to be included into UBLK_F_ALL */ #define UBLK_F_ALL (UBLK_F_SUPPORT_ZERO_COPY \ | UBLK_F_URING_CMD_COMP_IN_TASK \ @@ -2904,7 +2907,7 @@ static int ublk_ctrl_uring_cmd(struct io_uring_cmd *cmd, case UBLK_CMD_DEL_DEV: ret = ublk_ctrl_del_dev(&ub, true); break; - case UBLK_U_CMD_DEL_DEV_ASYNC: + case UBLK_CMD_DEL_DEV_ASYNC: ret = ublk_ctrl_del_dev(&ub, false); break; case UBLK_CMD_GET_QUEUE_AFFINITY: diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index fd7c0ff2139c..67aa63dabcff 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -1063,8 +1063,7 @@ static char *encode_disk_name(char *ptr, unsigned int n) } static int xlvbd_alloc_gendisk(blkif_sector_t capacity, - struct blkfront_info *info, u16 sector_size, - unsigned int physical_sector_size) + struct blkfront_info *info) { struct queue_limits lim = {}; struct gendisk *gd; @@ -1159,8 +1158,6 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity, info->rq = gd->queue; info->gd = gd; - info->sector_size = sector_size; - info->physical_sector_size = physical_sector_size; xlvbd_flush(info); @@ -2315,8 +2312,6 @@ static void blkfront_gather_backend_features(struct blkfront_info *info) static void blkfront_connect(struct blkfront_info *info) { unsigned long long sectors; - unsigned long sector_size; - unsigned int physical_sector_size; int err, i; struct blkfront_ring_info *rinfo; @@ -2355,7 +2350,7 @@ static void blkfront_connect(struct blkfront_info *info) err = xenbus_gather(XBT_NIL, info->xbdev->otherend, "sectors", "%llu", §ors, "info", "%u", &info->vdisk_info, - "sector-size", "%lu", §or_size, + "sector-size", "%lu", &info->sector_size, NULL); if (err) { xenbus_dev_fatal(info->xbdev, err, @@ -2369,9 +2364,9 @@ static void blkfront_connect(struct blkfront_info *info) * provide this. Assume physical sector size to be the same as * sector_size in that case. 
*/ - physical_sector_size = xenbus_read_unsigned(info->xbdev->otherend, + info->physical_sector_size = xenbus_read_unsigned(info->xbdev->otherend, "physical-sector-size", - sector_size); + info->sector_size); blkfront_gather_backend_features(info); for_each_rinfo(info, rinfo, i) { err = blkfront_setup_indirect(rinfo); @@ -2383,8 +2378,7 @@ static void blkfront_connect(struct blkfront_info *info) } } - err = xlvbd_alloc_gendisk(sectors, info, sector_size, - physical_sector_size); + err = xlvbd_alloc_gendisk(sectors, info); if (err) { xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s", info->xbdev->otherend); diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c index 0c855c3ee1c1..7ecc67deecb0 100644 --- a/drivers/bluetooth/btintel.c +++ b/drivers/bluetooth/btintel.c @@ -26,21 +26,11 @@ #define ECDSA_OFFSET 644 #define ECDSA_HEADER_LEN 320 -#define BTINTEL_PPAG_NAME "PPAG" - enum { DSM_SET_WDISABLE2_DELAY = 1, DSM_SET_RESET_METHOD = 3, }; -/* structure to store the PPAG data read from ACPI table */ -struct btintel_ppag { - u32 domain; - u32 mode; - acpi_status status; - struct hci_dev *hdev; -}; - #define CMD_WRITE_BOOT_PARAMS 0xfc0e struct cmd_write_boot_params { __le32 boot_addr; @@ -1324,65 +1314,6 @@ static int btintel_read_debug_features(struct hci_dev *hdev, return 0; } -static acpi_status btintel_ppag_callback(acpi_handle handle, u32 lvl, void *data, - void **ret) -{ - acpi_status status; - size_t len; - struct btintel_ppag *ppag = data; - union acpi_object *p, *elements; - struct acpi_buffer string = {ACPI_ALLOCATE_BUFFER, NULL}; - struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL}; - struct hci_dev *hdev = ppag->hdev; - - status = acpi_get_name(handle, ACPI_FULL_PATHNAME, &string); - if (ACPI_FAILURE(status)) { - bt_dev_warn(hdev, "PPAG-BT: ACPI Failure: %s", acpi_format_exception(status)); - return status; - } - - len = strlen(string.pointer); - if (len < strlen(BTINTEL_PPAG_NAME)) { - kfree(string.pointer); - return AE_OK; - } - - if (strncmp((char *)string.pointer + len - 4, BTINTEL_PPAG_NAME, 4)) { - kfree(string.pointer); - return AE_OK; - } - kfree(string.pointer); - - status = acpi_evaluate_object(handle, NULL, NULL, &buffer); - if (ACPI_FAILURE(status)) { - ppag->status = status; - bt_dev_warn(hdev, "PPAG-BT: ACPI Failure: %s", acpi_format_exception(status)); - return status; - } - - p = buffer.pointer; - ppag = (struct btintel_ppag *)data; - - if (p->type != ACPI_TYPE_PACKAGE || p->package.count != 2) { - kfree(buffer.pointer); - bt_dev_warn(hdev, "PPAG-BT: Invalid object type: %d or package count: %d", - p->type, p->package.count); - ppag->status = AE_ERROR; - return AE_ERROR; - } - - elements = p->package.elements; - - /* PPAG table is located at element[1] */ - p = &elements[1]; - - ppag->domain = (u32)p->package.elements[0].integer.value; - ppag->mode = (u32)p->package.elements[1].integer.value; - ppag->status = AE_OK; - kfree(buffer.pointer); - return AE_CTRL_TERMINATE; -} - static int btintel_set_debug_features(struct hci_dev *hdev, const struct intel_debug_features *features) { @@ -2427,10 +2358,13 @@ static int btintel_configure_offload(struct hci_dev *hdev) static void btintel_set_ppag(struct hci_dev *hdev, struct intel_version_tlv *ver) { - struct btintel_ppag ppag; struct sk_buff *skb; struct hci_ppag_enable_cmd ppag_cmd; acpi_handle handle; + struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL}; + union acpi_object *p, *elements; + u32 domain, mode; + acpi_status status; /* PPAG is not supported if CRF is HrP2, Jfp2, JfP1 */ switch 
(ver->cnvr_top & 0xFFF) { @@ -2448,22 +2382,34 @@ static void btintel_set_ppag(struct hci_dev *hdev, struct intel_version_tlv *ver return; } - memset(&ppag, 0, sizeof(ppag)); - - ppag.hdev = hdev; - ppag.status = AE_NOT_FOUND; - acpi_walk_namespace(ACPI_TYPE_PACKAGE, handle, 1, NULL, - btintel_ppag_callback, &ppag, NULL); - - if (ACPI_FAILURE(ppag.status)) { - if (ppag.status == AE_NOT_FOUND) { + status = acpi_evaluate_object(handle, "PPAG", NULL, &buffer); + if (ACPI_FAILURE(status)) { + if (status == AE_NOT_FOUND) { bt_dev_dbg(hdev, "PPAG-BT: ACPI entry not found"); return; } + bt_dev_warn(hdev, "PPAG-BT: ACPI Failure: %s", acpi_format_exception(status)); + return; + } + + p = buffer.pointer; + if (p->type != ACPI_TYPE_PACKAGE || p->package.count != 2) { + bt_dev_warn(hdev, "PPAG-BT: Invalid object type: %d or package count: %d", + p->type, p->package.count); + kfree(buffer.pointer); return; } - if (ppag.domain != 0x12) { + elements = p->package.elements; + + /* PPAG table is located at element[1] */ + p = &elements[1]; + + domain = (u32)p->package.elements[0].integer.value; + mode = (u32)p->package.elements[1].integer.value; + kfree(buffer.pointer); + + if (domain != 0x12) { bt_dev_dbg(hdev, "PPAG-BT: Bluetooth domain is disabled in ACPI firmware"); return; } @@ -2474,19 +2420,22 @@ static void btintel_set_ppag(struct hci_dev *hdev, struct intel_version_tlv *ver * BIT 1 : 0 Disabled in China * 1 Enabled in China */ - if ((ppag.mode & 0x01) != BIT(0) && (ppag.mode & 0x02) != BIT(1)) { - bt_dev_dbg(hdev, "PPAG-BT: EU, China mode are disabled in CB/BIOS"); + mode &= 0x03; + + if (!mode) { + bt_dev_dbg(hdev, "PPAG-BT: EU, China mode are disabled in BIOS"); return; } - ppag_cmd.ppag_enable_flags = cpu_to_le32(ppag.mode); + ppag_cmd.ppag_enable_flags = cpu_to_le32(mode); - skb = __hci_cmd_sync(hdev, INTEL_OP_PPAG_CMD, sizeof(ppag_cmd), &ppag_cmd, HCI_CMD_TIMEOUT); + skb = __hci_cmd_sync(hdev, INTEL_OP_PPAG_CMD, sizeof(ppag_cmd), + &ppag_cmd, HCI_CMD_TIMEOUT); if (IS_ERR(skb)) { bt_dev_warn(hdev, "Failed to send PPAG Enable (%ld)", PTR_ERR(skb)); return; } - bt_dev_info(hdev, "PPAG-BT: Enabled (Mode %d)", ppag.mode); + bt_dev_info(hdev, "PPAG-BT: Enabled (Mode %d)", mode); kfree_skb(skb); } diff --git a/drivers/bluetooth/btintel_pcie.c b/drivers/bluetooth/btintel_pcie.c index dd3c0626c72d..b8120b98a239 100644 --- a/drivers/bluetooth/btintel_pcie.c +++ b/drivers/bluetooth/btintel_pcie.c @@ -1327,6 +1327,12 @@ static void btintel_pcie_remove(struct pci_dev *pdev) data = pci_get_drvdata(pdev); btintel_pcie_reset_bt(data); + for (int i = 0; i < data->alloc_vecs; i++) { + struct msix_entry *msix_entry; + + msix_entry = &data->msix_entries[i]; + free_irq(msix_entry->vector, msix_entry); + } pci_free_irq_vectors(pdev); diff --git a/drivers/bluetooth/btnxpuart.c b/drivers/bluetooth/btnxpuart.c index 9bfa9a6ad56c..6a863328b805 100644 --- a/drivers/bluetooth/btnxpuart.c +++ b/drivers/bluetooth/btnxpuart.c @@ -187,6 +187,11 @@ struct btnxpuart_dev { #define NXP_NAK_V3 0x7b #define NXP_CRC_ERROR_V3 0x7c +/* Bootloader signature error codes */ +#define NXP_ACK_RX_TIMEOUT 0x0002 /* ACK not received from host */ +#define NXP_HDR_RX_TIMEOUT 0x0003 /* FW Header chunk not received */ +#define NXP_DATA_RX_TIMEOUT 0x0004 /* FW Data chunk not received */ + #define HDR_LEN 16 #define NXP_RECV_CHIP_VER_V1 \ @@ -277,6 +282,17 @@ struct nxp_bootloader_cmd { __be32 crc; } __packed; +struct nxp_v3_rx_timeout_nak { + u8 nak; + __le32 offset; + u8 crc; +} __packed; + +union nxp_v3_rx_timeout_nak_u { + struct 
nxp_v3_rx_timeout_nak pkt; + u8 buf[6]; +}; + static u8 crc8_table[CRC8_TABLE_SIZE]; /* Default configurations */ @@ -899,6 +915,32 @@ static int nxp_recv_chip_ver_v3(struct hci_dev *hdev, struct sk_buff *skb) return 0; } +static void nxp_handle_fw_download_error(struct hci_dev *hdev, struct v3_data_req *req) +{ + struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev); + __u32 offset = __le32_to_cpu(req->offset); + __u16 err = __le16_to_cpu(req->error); + union nxp_v3_rx_timeout_nak_u nak_tx_buf; + + switch (err) { + case NXP_ACK_RX_TIMEOUT: + case NXP_HDR_RX_TIMEOUT: + case NXP_DATA_RX_TIMEOUT: + nak_tx_buf.pkt.nak = NXP_NAK_V3; + nak_tx_buf.pkt.offset = __cpu_to_le32(offset); + nak_tx_buf.pkt.crc = crc8(crc8_table, nak_tx_buf.buf, + sizeof(nak_tx_buf) - 1, 0xff); + serdev_device_write_buf(nxpdev->serdev, nak_tx_buf.buf, + sizeof(nak_tx_buf)); + break; + default: + bt_dev_dbg(hdev, "Unknown bootloader error code: %d", err); + break; + + } + +} + static int nxp_recv_fw_req_v3(struct hci_dev *hdev, struct sk_buff *skb) { struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev); @@ -913,7 +955,12 @@ static int nxp_recv_fw_req_v3(struct hci_dev *hdev, struct sk_buff *skb) if (!req || !nxpdev->fw) goto free_skb; - nxp_send_ack(NXP_ACK_V3, hdev); + if (!req->error) { + nxp_send_ack(NXP_ACK_V3, hdev); + } else { + nxp_handle_fw_download_error(hdev, req); + goto free_skb; + } len = __le16_to_cpu(req->len); @@ -940,9 +987,6 @@ static int nxp_recv_fw_req_v3(struct hci_dev *hdev, struct sk_buff *skb) wake_up_interruptible(&nxpdev->fw_dnld_done_wait_q); goto free_skb; } - if (req->error) - bt_dev_dbg(hdev, "FW Download received err 0x%02x from chip", - req->error); offset = __le32_to_cpu(req->offset); if (offset < nxpdev->fw_v3_offset_correction) { diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index e384ef6ff050..789c492df6fa 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -555,6 +555,10 @@ static const struct usb_device_id quirks_table[] = { BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x13d3, 0x3572), .driver_info = BTUSB_REALTEK | BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x13d3, 0x3591), .driver_info = BTUSB_REALTEK | + BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x0489, 0xe125), .driver_info = BTUSB_REALTEK | + BTUSB_WIDEBAND_SPEECH }, /* Realtek 8852BT/8852BE-VT Bluetooth devices */ { USB_DEVICE(0x0bda, 0x8520), .driver_info = BTUSB_REALTEK | diff --git a/drivers/bluetooth/hci_bcm4377.c b/drivers/bluetooth/hci_bcm4377.c index d90858ea2fe5..a77a30fdc630 100644 --- a/drivers/bluetooth/hci_bcm4377.c +++ b/drivers/bluetooth/hci_bcm4377.c @@ -32,7 +32,7 @@ enum bcm4377_chip { #define BCM4378_DEVICE_ID 0x5f69 #define BCM4387_DEVICE_ID 0x5f71 -#define BCM4377_TIMEOUT 1000 +#define BCM4377_TIMEOUT msecs_to_jiffies(1000) /* * These devices only support DMA transactions inside a 32bit window diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c index f8f674adf1d4..4acfac73ca9a 100644 --- a/drivers/bus/mhi/ep/main.c +++ b/drivers/bus/mhi/ep/main.c @@ -90,7 +90,7 @@ static int mhi_ep_send_completion_event(struct mhi_ep_cntrl *mhi_cntrl, struct m struct mhi_ring_element *event; int ret; - event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL | GFP_DMA); + event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL); if (!event) return -ENOMEM; @@ -109,7 +109,7 @@ int mhi_ep_send_state_change_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_stat struct mhi_ring_element *event; int ret; - event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL 
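
To make the new btnxpuart error path above easier to follow, this is the on-wire shape of the V3 NAK that nxp_handle_fw_download_error() sends back when the bootloader reports one of the RX-timeout codes (annotated sketch; the fields mirror struct nxp_v3_rx_timeout_nak, which the driver overlays with the 6-byte buf[] so the CRC can be computed over the raw bytes):

    #include <linux/types.h>

    struct nak_v3_frame {           /* 6 bytes, sent via serdev_device_write_buf() */
            u8      nak;            /* NXP_NAK_V3 (0x7b) */
            __le32  offset;         /* firmware offset to resend from */
            u8      crc;            /* CRC-8 of the preceding 5 bytes:
                                     * crc8(crc8_table, buf, 5, 0xff) */
    } __packed;
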
| GFP_DMA); + event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL); if (!event) return -ENOMEM; @@ -127,7 +127,7 @@ int mhi_ep_send_ee_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_ee_type exec_e struct mhi_ring_element *event; int ret; - event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL | GFP_DMA); + event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL); if (!event) return -ENOMEM; @@ -146,7 +146,7 @@ static int mhi_ep_send_cmd_comp_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_e struct mhi_ring_element *event; int ret; - event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL | GFP_DMA); + event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL); if (!event) return -ENOMEM; @@ -438,7 +438,7 @@ static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl, read_offset = mhi_chan->tre_size - mhi_chan->tre_bytes_left; write_offset = len - buf_left; - buf_addr = kmem_cache_zalloc(mhi_cntrl->tre_buf_cache, GFP_KERNEL | GFP_DMA); + buf_addr = kmem_cache_zalloc(mhi_cntrl->tre_buf_cache, GFP_KERNEL); if (!buf_addr) return -ENOMEM; @@ -1481,14 +1481,14 @@ int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl, mhi_cntrl->ev_ring_el_cache = kmem_cache_create("mhi_ep_event_ring_el", sizeof(struct mhi_ring_element), 0, - SLAB_CACHE_DMA, NULL); + 0, NULL); if (!mhi_cntrl->ev_ring_el_cache) { ret = -ENOMEM; goto err_free_cmd; } mhi_cntrl->tre_buf_cache = kmem_cache_create("mhi_ep_tre_buf", MHI_EP_DEFAULT_MTU, 0, - SLAB_CACHE_DMA, NULL); + 0, NULL); if (!mhi_cntrl->tre_buf_cache) { ret = -ENOMEM; goto err_destroy_ev_ring_el_cache; diff --git a/drivers/char/hw_random/amd-rng.c b/drivers/char/hw_random/amd-rng.c index 86162a13681e..9a24d19236dc 100644 --- a/drivers/char/hw_random/amd-rng.c +++ b/drivers/char/hw_random/amd-rng.c @@ -143,8 +143,10 @@ static int __init amd_rng_mod_init(void) found: err = pci_read_config_dword(pdev, 0x58, &pmbase); - if (err) + if (err) { + err = pcibios_err_to_errno(err); goto put_dev; + } pmbase &= 0x0000FF00; if (pmbase == 0) { diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c index 4084df65c9fa..f6122a03ee37 100644 --- a/drivers/char/hw_random/core.c +++ b/drivers/char/hw_random/core.c @@ -161,7 +161,6 @@ static int hwrng_init(struct hwrng *rng) reinit_completion(&rng->cleanup_done); skip_init: - rng->quality = min_t(u16, min_t(u16, default_quality, 1024), rng->quality ?: 1024); current_quality = rng->quality; /* obsolete */ return 0; @@ -545,6 +544,9 @@ int hwrng_register(struct hwrng *rng) complete(&rng->cleanup_done); init_completion(&rng->dying); + /* Adjust quality field to always have a proper value */ + rng->quality = min_t(u16, min_t(u16, default_quality, 1024), rng->quality ?: 1024); + if (!current_rng || (!cur_rng_set_by_user && rng->quality > current_rng->quality)) { /* diff --git a/drivers/char/ipmi/ssif_bmc.c b/drivers/char/ipmi/ssif_bmc.c index 56346fb32872..ab4e87a99f08 100644 --- a/drivers/char/ipmi/ssif_bmc.c +++ b/drivers/char/ipmi/ssif_bmc.c @@ -177,13 +177,15 @@ static ssize_t ssif_bmc_write(struct file *file, const char __user *buf, size_t unsigned long flags; ssize_t ret; - if (count > sizeof(struct ipmi_ssif_msg)) + if (count < sizeof(msg.len) || + count > sizeof(struct ipmi_ssif_msg)) return -EINVAL; if (copy_from_user(&msg, buf, count)) return -EFAULT; - if (!msg.len || count < sizeof_field(struct ipmi_ssif_msg, len) + msg.len) + if (!msg.len || msg.len > IPMI_SSIF_PAYLOAD_MAX || + count < sizeof_field(struct ipmi_ssif_msg, len) + msg.len) return 
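
Stepping back to the hwrng change above: moving the clamp from hwrng_init() into hwrng_register() matters because the check a few lines below it compares rng->quality against current_rng->quality, so that comparison must already see the clamped value. The clamp itself, written out as a plain function (standalone illustration, not driver code; quality is the driver's entropy estimate per 1024 bits of output, capped at 1024):

    #include <stdio.h>

    static unsigned short clamp_quality(unsigned short default_quality,
                                        unsigned short driver_quality)
    {
            unsigned short a = default_quality < 1024 ? default_quality : 1024;
            unsigned short b = driver_quality ? driver_quality : 1024;

            /* same result as min_t(u16, min_t(u16, default, 1024), quality ?: 1024) */
            return a < b ? a : b;
    }

    int main(void)
    {
            printf("%u\n", clamp_quality(1024, 0));   /* 1024: driver left quality unset */
            printf("%u\n", clamp_quality(1024, 700)); /*  700: driver's estimate kept */
            printf("%u\n", clamp_quality(512, 700));  /*  512: lowered default wins */
            return 0;
    }
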
-EINVAL; spin_lock_irqsave(&ssif_bmc->lock, flags); diff --git a/drivers/char/tpm/eventlog/common.c b/drivers/char/tpm/eventlog/common.c index 639c3f395a5a..4c0bbba64ee5 100644 --- a/drivers/char/tpm/eventlog/common.c +++ b/drivers/char/tpm/eventlog/common.c @@ -47,6 +47,8 @@ static int tpm_bios_measurements_open(struct inode *inode, if (!err) { seq = file->private_data; seq->private = chip; + } else { + put_device(&chip->dev); } return err; diff --git a/drivers/char/tpm/tpm_tis_spi_main.c b/drivers/char/tpm/tpm_tis_spi_main.c index c9eca24bbad4..61b42c83ced8 100644 --- a/drivers/char/tpm/tpm_tis_spi_main.c +++ b/drivers/char/tpm/tpm_tis_spi_main.c @@ -318,6 +318,7 @@ static void tpm_tis_spi_remove(struct spi_device *dev) } static const struct spi_device_id tpm_tis_spi_id[] = { + { "attpm20p", (unsigned long)tpm_tis_spi_probe }, { "st33htpm-spi", (unsigned long)tpm_tis_spi_probe }, { "slb9670", (unsigned long)tpm_tis_spi_probe }, { "tpm_tis_spi", (unsigned long)tpm_tis_spi_probe }, diff --git a/drivers/clk/clk-en7523.c b/drivers/clk/clk-en7523.c index ccc394692671..bdf5cbc12e23 100644 --- a/drivers/clk/clk-en7523.c +++ b/drivers/clk/clk-en7523.c @@ -57,6 +57,7 @@ struct en_clk_desc { u8 div_shift; u16 div_val0; u8 div_step; + u8 div_offset; }; struct en_clk_gate { @@ -90,6 +91,7 @@ static const struct en_clk_desc en7523_base_clks[] = { .div_bits = 3, .div_shift = 0, .div_step = 1, + .div_offset = 1, }, { .id = EN7523_CLK_EMI, .name = "emi", @@ -103,6 +105,7 @@ static const struct en_clk_desc en7523_base_clks[] = { .div_bits = 3, .div_shift = 0, .div_step = 1, + .div_offset = 1, }, { .id = EN7523_CLK_BUS, .name = "bus", @@ -116,6 +119,7 @@ static const struct en_clk_desc en7523_base_clks[] = { .div_bits = 3, .div_shift = 0, .div_step = 1, + .div_offset = 1, }, { .id = EN7523_CLK_SLIC, .name = "slic", @@ -156,13 +160,14 @@ static const struct en_clk_desc en7523_base_clks[] = { .div_bits = 3, .div_shift = 0, .div_step = 1, + .div_offset = 1, }, { .id = EN7523_CLK_CRYPTO, .name = "crypto", .base_reg = REG_CRYPTO_CLKSRC, .base_bits = 1, - .base_shift = 8, + .base_shift = 0, .base_values = emi_base, .n_base_values = ARRAY_SIZE(emi_base), } @@ -202,7 +207,7 @@ static u32 en7523_get_div(void __iomem *base, int i) if (!val && desc->div_val0) return desc->div_val0; - return (val + 1) * desc->div_step; + return (val + desc->div_offset) * desc->div_step; } static int en7523_pci_is_enabled(struct clk_hw *hw) diff --git a/drivers/clk/davinci/da8xx-cfgchip.c b/drivers/clk/davinci/da8xx-cfgchip.c index ad2d0df43dc6..ec60ecb517f1 100644 --- a/drivers/clk/davinci/da8xx-cfgchip.c +++ b/drivers/clk/davinci/da8xx-cfgchip.c @@ -508,7 +508,7 @@ da8xx_cfgchip_register_usb0_clk48(struct device *dev, const char * const parent_names[] = { "usb_refclkin", "pll0_auxclk" }; struct clk *fck_clk; struct da8xx_usb0_clk48 *usb0; - struct clk_init_data init; + struct clk_init_data init = {}; int ret; fck_clk = devm_clk_get(dev, "fck"); @@ -583,7 +583,7 @@ da8xx_cfgchip_register_usb1_clk48(struct device *dev, { const char * const parent_names[] = { "usb0_clk48", "usb_refclkin" }; struct da8xx_usb1_clk48 *usb1; - struct clk_init_data init; + struct clk_init_data init = {}; int ret; usb1 = devm_kzalloc(dev, sizeof(*usb1), GFP_KERNEL); diff --git a/drivers/clk/meson/s4-peripherals.c b/drivers/clk/meson/s4-peripherals.c index 5e17ca50ab09..73340c7e815e 100644 --- a/drivers/clk/meson/s4-peripherals.c +++ b/drivers/clk/meson/s4-peripherals.c @@ -2978,7 +2978,7 @@ static struct clk_regmap s4_pwm_j_div = { .name = "pwm_j_div", .ops 
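
A quick worked example of the en7523_get_div() change above, where the per-clock div_offset replaces the old unconditional "+ 1" (the register field values below are made up for illustration):

    #include <stdio.h>

    static unsigned int en_clk_div(unsigned int field, unsigned int div_val0,
                                   unsigned int div_step, unsigned int div_offset)
    {
            if (!field && div_val0)
                    return div_val0;                /* special-cased zero encoding */
            return (field + div_offset) * div_step;
    }

    int main(void)
    {
            /* gsw/emi/bus-style entries keep the old behaviour: div_offset = 1 */
            printf("%u\n", en_clk_div(3, 0, 1, 1)); /* 4: field + 1, as before */
            /* entries that leave div_offset at 0 now use the field value as-is */
            printf("%u\n", en_clk_div(3, 0, 1, 0)); /* 3 */
            return 0;
    }
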
= &clk_regmap_divider_ops, .parent_hws = (const struct clk_hw *[]) { - &s4_pwm_h_mux.hw + &s4_pwm_j_mux.hw }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, diff --git a/drivers/clk/meson/s4-pll.c b/drivers/clk/meson/s4-pll.c index d2650d96400c..707c107a5291 100644 --- a/drivers/clk/meson/s4-pll.c +++ b/drivers/clk/meson/s4-pll.c @@ -38,6 +38,11 @@ static struct clk_regmap s4_fixed_pll_dco = { .shift = 0, .width = 8, }, + .frac = { + .reg_off = ANACTRL_FIXPLL_CTRL1, + .shift = 0, + .width = 17, + }, .n = { .reg_off = ANACTRL_FIXPLL_CTRL0, .shift = 10, diff --git a/drivers/clk/qcom/camcc-sc7280.c b/drivers/clk/qcom/camcc-sc7280.c index d89ddb2298e3..582fb3ba9c89 100644 --- a/drivers/clk/qcom/camcc-sc7280.c +++ b/drivers/clk/qcom/camcc-sc7280.c @@ -2260,6 +2260,7 @@ static struct gdsc cam_cc_bps_gdsc = { .name = "cam_cc_bps_gdsc", }, .pwrsts = PWRSTS_OFF_ON, + .parent = &cam_cc_titan_top_gdsc.pd, .flags = HW_CTRL | RETAIN_FF_ENABLE, }; @@ -2269,6 +2270,7 @@ static struct gdsc cam_cc_ife_0_gdsc = { .name = "cam_cc_ife_0_gdsc", }, .pwrsts = PWRSTS_OFF_ON, + .parent = &cam_cc_titan_top_gdsc.pd, .flags = RETAIN_FF_ENABLE, }; @@ -2278,6 +2280,7 @@ static struct gdsc cam_cc_ife_1_gdsc = { .name = "cam_cc_ife_1_gdsc", }, .pwrsts = PWRSTS_OFF_ON, + .parent = &cam_cc_titan_top_gdsc.pd, .flags = RETAIN_FF_ENABLE, }; @@ -2287,6 +2290,7 @@ static struct gdsc cam_cc_ife_2_gdsc = { .name = "cam_cc_ife_2_gdsc", }, .pwrsts = PWRSTS_OFF_ON, + .parent = &cam_cc_titan_top_gdsc.pd, .flags = RETAIN_FF_ENABLE, }; @@ -2296,6 +2300,7 @@ static struct gdsc cam_cc_ipe_0_gdsc = { .name = "cam_cc_ipe_0_gdsc", }, .pwrsts = PWRSTS_OFF_ON, + .parent = &cam_cc_titan_top_gdsc.pd, .flags = HW_CTRL | RETAIN_FF_ENABLE, }; diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c index 9b3aaa7f20ac..30b19bd39d08 100644 --- a/drivers/clk/qcom/clk-rcg2.c +++ b/drivers/clk/qcom/clk-rcg2.c @@ -1304,7 +1304,39 @@ clk_rcg2_shared_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) return clk_rcg2_recalc_rate(hw, parent_rate); } +static int clk_rcg2_shared_init(struct clk_hw *hw) +{ + /* + * This does a few things: + * + * 1. Sets rcg->parked_cfg to reflect the value at probe so that the + * proper parent is reported from clk_rcg2_shared_get_parent(). + * + * 2. Clears the force enable bit of the RCG because we rely on child + * clks (branches) to turn the RCG on/off with a hardware feedback + * mechanism and only set the force enable bit in the RCG when we + * want to make sure the clk stays on for parent switches or + * parking. + * + * 3. Parks shared RCGs on the safe source at registration because we + * can't be certain that the parent clk will stay on during boot, + * especially if the parent is shared. If this RCG is enabled at + * boot, and the parent is turned off, the RCG will get stuck on. A + * GDSC can wedge if is turned on and the RCG is stuck on because + * the GDSC's controller will hang waiting for the clk status to + * toggle on when it never does. + * + * The safest option here is to "park" the RCG at init so that the clk + * can never get stuck on or off. This ensures the GDSC can't get + * wedged. 
+ */ + clk_rcg2_shared_disable(hw); + + return 0; +} + const struct clk_ops clk_rcg2_shared_ops = { + .init = clk_rcg2_shared_init, .enable = clk_rcg2_shared_enable, .disable = clk_rcg2_shared_disable, .get_parent = clk_rcg2_shared_get_parent, diff --git a/drivers/clk/qcom/gcc-sa8775p.c b/drivers/clk/qcom/gcc-sa8775p.c index 5bcbfbf52cb9..9bbc0836fae9 100644 --- a/drivers/clk/qcom/gcc-sa8775p.c +++ b/drivers/clk/qcom/gcc-sa8775p.c @@ -4305,74 +4305,114 @@ static struct clk_branch gcc_video_axi1_clk = { static struct gdsc pcie_0_gdsc = { .gdscr = 0xa9004, + .collapse_ctrl = 0x4b104, + .collapse_mask = BIT(0), + .en_rest_wait_val = 0x2, + .en_few_wait_val = 0x2, + .clk_dis_wait_val = 0xf, .pd = { .name = "pcie_0_gdsc", }, .pwrsts = PWRSTS_OFF_ON, + .flags = VOTABLE | RETAIN_FF_ENABLE | POLL_CFG_GDSCR, }; static struct gdsc pcie_1_gdsc = { .gdscr = 0x77004, + .collapse_ctrl = 0x4b104, + .collapse_mask = BIT(1), + .en_rest_wait_val = 0x2, + .en_few_wait_val = 0x2, + .clk_dis_wait_val = 0xf, .pd = { .name = "pcie_1_gdsc", }, .pwrsts = PWRSTS_OFF_ON, + .flags = VOTABLE | RETAIN_FF_ENABLE | POLL_CFG_GDSCR, }; static struct gdsc ufs_card_gdsc = { .gdscr = 0x81004, + .en_rest_wait_val = 0x2, + .en_few_wait_val = 0x2, + .clk_dis_wait_val = 0xf, .pd = { .name = "ufs_card_gdsc", }, .pwrsts = PWRSTS_OFF_ON, + .flags = RETAIN_FF_ENABLE | POLL_CFG_GDSCR, }; static struct gdsc ufs_phy_gdsc = { .gdscr = 0x83004, + .en_rest_wait_val = 0x2, + .en_few_wait_val = 0x2, + .clk_dis_wait_val = 0xf, .pd = { .name = "ufs_phy_gdsc", }, .pwrsts = PWRSTS_OFF_ON, + .flags = RETAIN_FF_ENABLE | POLL_CFG_GDSCR, }; static struct gdsc usb20_prim_gdsc = { .gdscr = 0x1c004, + .en_rest_wait_val = 0x2, + .en_few_wait_val = 0x2, + .clk_dis_wait_val = 0xf, .pd = { .name = "usb20_prim_gdsc", }, .pwrsts = PWRSTS_OFF_ON, + .flags = RETAIN_FF_ENABLE | POLL_CFG_GDSCR, }; static struct gdsc usb30_prim_gdsc = { .gdscr = 0x1b004, + .en_rest_wait_val = 0x2, + .en_few_wait_val = 0x2, + .clk_dis_wait_val = 0xf, .pd = { .name = "usb30_prim_gdsc", }, .pwrsts = PWRSTS_OFF_ON, + .flags = RETAIN_FF_ENABLE | POLL_CFG_GDSCR, }; static struct gdsc usb30_sec_gdsc = { .gdscr = 0x2f004, + .en_rest_wait_val = 0x2, + .en_few_wait_val = 0x2, + .clk_dis_wait_val = 0xf, .pd = { .name = "usb30_sec_gdsc", }, .pwrsts = PWRSTS_OFF_ON, + .flags = RETAIN_FF_ENABLE | POLL_CFG_GDSCR, }; static struct gdsc emac0_gdsc = { .gdscr = 0xb6004, + .en_rest_wait_val = 0x2, + .en_few_wait_val = 0x2, + .clk_dis_wait_val = 0xf, .pd = { .name = "emac0_gdsc", }, .pwrsts = PWRSTS_OFF_ON, + .flags = RETAIN_FF_ENABLE | POLL_CFG_GDSCR, }; static struct gdsc emac1_gdsc = { .gdscr = 0xb4004, + .en_rest_wait_val = 0x2, + .en_few_wait_val = 0x2, + .clk_dis_wait_val = 0xf, .pd = { .name = "emac1_gdsc", }, .pwrsts = PWRSTS_OFF_ON, + .flags = RETAIN_FF_ENABLE | POLL_CFG_GDSCR, }; static struct clk_regmap *gcc_sa8775p_clocks[] = { diff --git a/drivers/clk/qcom/gcc-sc7280.c b/drivers/clk/qcom/gcc-sc7280.c index f45a8318900c..67ea9cf5303f 100644 --- a/drivers/clk/qcom/gcc-sc7280.c +++ b/drivers/clk/qcom/gcc-sc7280.c @@ -3463,6 +3463,9 @@ static int gcc_sc7280_probe(struct platform_device *pdev) qcom_branch_set_clk_en(regmap, 0x71004);/* GCC_GPU_CFG_AHB_CLK */ regmap_update_bits(regmap, 0x7100C, BIT(13), BIT(13)); + /* FORCE_MEM_CORE_ON for ufs phy ice core clocks */ + qcom_branch_set_force_mem_core(regmap, gcc_ufs_phy_ice_core_clk, true); + ret = qcom_cc_register_rcg_dfs(regmap, gcc_dfs_clocks, ARRAY_SIZE(gcc_dfs_clocks)); if (ret) diff --git a/drivers/clk/qcom/gcc-x1e80100.c 
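
The long comment above explains why shared RCGs are parked when they are registered. For orientation, this is the general shape of a clk_ops .init hook, which the clk core invokes once from __clk_core_init() at registration time (sketch only; the park helper is a hypothetical stand-in for clk_rcg2_shared_disable()):

    #include <linux/clk-provider.h>

    static void example_park_on_safe_source(struct clk_hw *hw)
    {
            /* switch the mux to its always-on safe source, drop force-enable */
    }

    static int example_shared_init(struct clk_hw *hw)
    {
            example_park_on_safe_source(hw);
            return 0;
    }

    static const struct clk_ops example_shared_ops = {
            .init = example_shared_init,
    };
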
b/drivers/clk/qcom/gcc-x1e80100.c index 1404017be918..a263f0c412f5 100644 --- a/drivers/clk/qcom/gcc-x1e80100.c +++ b/drivers/clk/qcom/gcc-x1e80100.c @@ -2812,7 +2812,7 @@ static struct clk_branch gcc_pcie_0_mstr_axi_clk = { static struct clk_branch gcc_pcie_0_pipe_clk = { .halt_reg = 0xa0044, - .halt_check = BRANCH_HALT_VOTED, + .halt_check = BRANCH_HALT_SKIP, .clkr = { .enable_reg = 0x52010, .enable_mask = BIT(25), @@ -2901,7 +2901,7 @@ static struct clk_branch gcc_pcie_1_mstr_axi_clk = { static struct clk_branch gcc_pcie_1_pipe_clk = { .halt_reg = 0x2c044, - .halt_check = BRANCH_HALT_VOTED, + .halt_check = BRANCH_HALT_SKIP, .clkr = { .enable_reg = 0x52020, .enable_mask = BIT(30), @@ -2990,7 +2990,7 @@ static struct clk_branch gcc_pcie_2_mstr_axi_clk = { static struct clk_branch gcc_pcie_2_pipe_clk = { .halt_reg = 0x13044, - .halt_check = BRANCH_HALT_VOTED, + .halt_check = BRANCH_HALT_SKIP, .clkr = { .enable_reg = 0x52020, .enable_mask = BIT(23), @@ -3110,7 +3110,7 @@ static struct clk_branch gcc_pcie_3_phy_rchng_clk = { static struct clk_branch gcc_pcie_3_pipe_clk = { .halt_reg = 0x58050, - .halt_check = BRANCH_HALT_VOTED, + .halt_check = BRANCH_HALT_SKIP, .clkr = { .enable_reg = 0x52020, .enable_mask = BIT(3), @@ -3235,7 +3235,7 @@ static struct clk_branch gcc_pcie_4_phy_rchng_clk = { static struct clk_branch gcc_pcie_4_pipe_clk = { .halt_reg = 0x6b044, - .halt_check = BRANCH_HALT_VOTED, + .halt_check = BRANCH_HALT_SKIP, .clkr = { .enable_reg = 0x52008, .enable_mask = BIT(4), @@ -3360,7 +3360,7 @@ static struct clk_branch gcc_pcie_5_phy_rchng_clk = { static struct clk_branch gcc_pcie_5_pipe_clk = { .halt_reg = 0x2f044, - .halt_check = BRANCH_HALT_VOTED, + .halt_check = BRANCH_HALT_SKIP, .clkr = { .enable_reg = 0x52018, .enable_mask = BIT(17), @@ -3498,7 +3498,7 @@ static struct clk_branch gcc_pcie_6a_phy_rchng_clk = { static struct clk_branch gcc_pcie_6a_pipe_clk = { .halt_reg = 0x31050, - .halt_check = BRANCH_HALT_VOTED, + .halt_check = BRANCH_HALT_SKIP, .clkr = { .enable_reg = 0x52018, .enable_mask = BIT(26), @@ -3636,7 +3636,7 @@ static struct clk_branch gcc_pcie_6b_phy_rchng_clk = { static struct clk_branch gcc_pcie_6b_pipe_clk = { .halt_reg = 0x8d050, - .halt_check = BRANCH_HALT_VOTED, + .halt_check = BRANCH_HALT_SKIP, .clkr = { .enable_reg = 0x52000, .enable_mask = BIT(30), @@ -5109,7 +5109,7 @@ static struct clk_branch gcc_usb3_mp_phy_com_aux_clk = { static struct clk_branch gcc_usb3_mp_phy_pipe_0_clk = { .halt_reg = 0x17290, - .halt_check = BRANCH_HALT, + .halt_check = BRANCH_HALT_SKIP, .clkr = { .enable_reg = 0x17290, .enable_mask = BIT(0), @@ -5122,7 +5122,7 @@ static struct clk_branch gcc_usb3_mp_phy_pipe_0_clk = { static struct clk_branch gcc_usb3_mp_phy_pipe_1_clk = { .halt_reg = 0x17298, - .halt_check = BRANCH_HALT, + .halt_check = BRANCH_HALT_SKIP, .clkr = { .enable_reg = 0x17298, .enable_mask = BIT(0), @@ -5186,7 +5186,7 @@ static struct clk_regmap_mux gcc_usb3_prim_phy_pipe_clk_src = { static struct clk_branch gcc_usb3_prim_phy_pipe_clk = { .halt_reg = 0x39068, - .halt_check = BRANCH_HALT_VOTED, + .halt_check = BRANCH_HALT_SKIP, .hwcg_reg = 0x39068, .hwcg_bit = 1, .clkr = { @@ -5257,7 +5257,7 @@ static struct clk_regmap_mux gcc_usb3_sec_phy_pipe_clk_src = { static struct clk_branch gcc_usb3_sec_phy_pipe_clk = { .halt_reg = 0xa1068, - .halt_check = BRANCH_HALT_VOTED, + .halt_check = BRANCH_HALT_SKIP, .hwcg_reg = 0xa1068, .hwcg_bit = 1, .clkr = { @@ -5269,6 +5269,7 @@ static struct clk_branch gcc_usb3_sec_phy_pipe_clk = { &gcc_usb3_sec_phy_pipe_clk_src.clkr.hw, }, 
.num_parents = 1, + .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, @@ -5327,7 +5328,7 @@ static struct clk_regmap_mux gcc_usb3_tert_phy_pipe_clk_src = { static struct clk_branch gcc_usb3_tert_phy_pipe_clk = { .halt_reg = 0xa2068, - .halt_check = BRANCH_HALT_VOTED, + .halt_check = BRANCH_HALT_SKIP, .hwcg_reg = 0xa2068, .hwcg_bit = 1, .clkr = { @@ -5339,6 +5340,7 @@ static struct clk_branch gcc_usb3_tert_phy_pipe_clk = { &gcc_usb3_tert_phy_pipe_clk_src.clkr.hw, }, .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, @@ -5405,7 +5407,7 @@ static struct clk_branch gcc_usb4_0_master_clk = { static struct clk_branch gcc_usb4_0_phy_p2rr2p_pipe_clk = { .halt_reg = 0x9f0d8, - .halt_check = BRANCH_HALT, + .halt_check = BRANCH_HALT_SKIP, .clkr = { .enable_reg = 0x9f0d8, .enable_mask = BIT(0), @@ -5418,7 +5420,7 @@ static struct clk_branch gcc_usb4_0_phy_p2rr2p_pipe_clk = { static struct clk_branch gcc_usb4_0_phy_pcie_pipe_clk = { .halt_reg = 0x9f048, - .halt_check = BRANCH_HALT_VOTED, + .halt_check = BRANCH_HALT_SKIP, .clkr = { .enable_reg = 0x52010, .enable_mask = BIT(19), @@ -5457,7 +5459,7 @@ static struct clk_branch gcc_usb4_0_phy_rx1_clk = { static struct clk_branch gcc_usb4_0_phy_usb_pipe_clk = { .halt_reg = 0x9f0a4, - .halt_check = BRANCH_HALT_VOTED, + .halt_check = BRANCH_HALT_SKIP, .hwcg_reg = 0x9f0a4, .hwcg_bit = 1, .clkr = { @@ -5582,7 +5584,7 @@ static struct clk_branch gcc_usb4_1_master_clk = { static struct clk_branch gcc_usb4_1_phy_p2rr2p_pipe_clk = { .halt_reg = 0x2b0d8, - .halt_check = BRANCH_HALT, + .halt_check = BRANCH_HALT_SKIP, .clkr = { .enable_reg = 0x2b0d8, .enable_mask = BIT(0), @@ -5595,7 +5597,7 @@ static struct clk_branch gcc_usb4_1_phy_p2rr2p_pipe_clk = { static struct clk_branch gcc_usb4_1_phy_pcie_pipe_clk = { .halt_reg = 0x2b048, - .halt_check = BRANCH_HALT_VOTED, + .halt_check = BRANCH_HALT_SKIP, .clkr = { .enable_reg = 0x52028, .enable_mask = BIT(0), @@ -5634,7 +5636,7 @@ static struct clk_branch gcc_usb4_1_phy_rx1_clk = { static struct clk_branch gcc_usb4_1_phy_usb_pipe_clk = { .halt_reg = 0x2b0a4, - .halt_check = BRANCH_HALT_VOTED, + .halt_check = BRANCH_HALT_SKIP, .hwcg_reg = 0x2b0a4, .hwcg_bit = 1, .clkr = { @@ -5759,7 +5761,7 @@ static struct clk_branch gcc_usb4_2_master_clk = { static struct clk_branch gcc_usb4_2_phy_p2rr2p_pipe_clk = { .halt_reg = 0x110d8, - .halt_check = BRANCH_HALT, + .halt_check = BRANCH_HALT_SKIP, .clkr = { .enable_reg = 0x110d8, .enable_mask = BIT(0), @@ -5772,7 +5774,7 @@ static struct clk_branch gcc_usb4_2_phy_p2rr2p_pipe_clk = { static struct clk_branch gcc_usb4_2_phy_pcie_pipe_clk = { .halt_reg = 0x11048, - .halt_check = BRANCH_HALT_VOTED, + .halt_check = BRANCH_HALT_SKIP, .clkr = { .enable_reg = 0x52028, .enable_mask = BIT(1), @@ -5811,7 +5813,7 @@ static struct clk_branch gcc_usb4_2_phy_rx1_clk = { static struct clk_branch gcc_usb4_2_phy_usb_pipe_clk = { .halt_reg = 0x110a4, - .halt_check = BRANCH_HALT_VOTED, + .halt_check = BRANCH_HALT_SKIP, .hwcg_reg = 0x110a4, .hwcg_bit = 1, .clkr = { diff --git a/drivers/clk/qcom/gpucc-sa8775p.c b/drivers/clk/qcom/gpucc-sa8775p.c index 1167c42da39d..3deabf833388 100644 --- a/drivers/clk/qcom/gpucc-sa8775p.c +++ b/drivers/clk/qcom/gpucc-sa8775p.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2022, 2024, Qualcomm Innovation Center, Inc. All rights reserved. 
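
Besides switching the halt checks to BRANCH_HALT_SKIP, the two hunks above add CLK_SET_RATE_PARENT to gcc_usb3_sec/tert_phy_pipe_clk, so a rate request on the leaf branch is forwarded to the pipe clk_src mux instead of stopping at the branch. Consumer-side view (sketch; the 125 MHz value is purely illustrative):

    #include <linux/clk.h>

    static int example_set_pipe_rate(struct clk *pipe_clk)
    {
            /*
             * With CLK_SET_RATE_PARENT on the branch, this request propagates
             * up to the phy_pipe_clk_src mux; without the flag it would be
             * handled (and effectively ignored) at the branch itself.
             */
            return clk_set_rate(pipe_clk, 125000000);
    }
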
* Copyright (c) 2023, Linaro Limited */ @@ -161,7 +161,7 @@ static struct clk_rcg2 gpu_cc_ff_clk_src = { .name = "gpu_cc_ff_clk_src", .parent_data = gpu_cc_parent_data_0, .num_parents = ARRAY_SIZE(gpu_cc_parent_data_0), - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, }; @@ -181,7 +181,7 @@ static struct clk_rcg2 gpu_cc_gmu_clk_src = { .parent_data = gpu_cc_parent_data_1, .num_parents = ARRAY_SIZE(gpu_cc_parent_data_1), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, }; @@ -200,7 +200,7 @@ static struct clk_rcg2 gpu_cc_hub_clk_src = { .name = "gpu_cc_hub_clk_src", .parent_data = gpu_cc_parent_data_2, .num_parents = ARRAY_SIZE(gpu_cc_parent_data_2), - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, }; @@ -280,7 +280,7 @@ static struct clk_branch gpu_cc_ahb_clk = { &gpu_cc_hub_ahb_div_clk_src.clkr.hw, }, .num_parents = 1, - .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, + .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, @@ -294,8 +294,7 @@ static struct clk_branch gpu_cc_cb_clk = { .enable_mask = BIT(0), .hw.init = &(const struct clk_init_data){ .name = "gpu_cc_cb_clk", - .flags = CLK_IS_CRITICAL, - .ops = &clk_branch2_ops, + .ops = &clk_branch2_aon_ops, }, }, }; @@ -312,7 +311,7 @@ static struct clk_branch gpu_cc_crc_ahb_clk = { &gpu_cc_hub_ahb_div_clk_src.clkr.hw, }, .num_parents = 1, - .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, + .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, @@ -330,7 +329,7 @@ static struct clk_branch gpu_cc_cx_ff_clk = { &gpu_cc_ff_clk_src.clkr.hw, }, .num_parents = 1, - .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, + .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, @@ -348,7 +347,7 @@ static struct clk_branch gpu_cc_cx_gmu_clk = { &gpu_cc_gmu_clk_src.clkr.hw, }, .num_parents = 1, - .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, + .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_aon_ops, }, }, @@ -362,7 +361,6 @@ static struct clk_branch gpu_cc_cx_snoc_dvm_clk = { .enable_mask = BIT(0), .hw.init = &(const struct clk_init_data){ .name = "gpu_cc_cx_snoc_dvm_clk", - .flags = CLK_IS_CRITICAL, .ops = &clk_branch2_ops, }, }, @@ -380,7 +378,7 @@ static struct clk_branch gpu_cc_cxo_aon_clk = { &gpu_cc_xo_clk_src.clkr.hw, }, .num_parents = 1, - .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, + .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, @@ -398,7 +396,7 @@ static struct clk_branch gpu_cc_cxo_clk = { &gpu_cc_xo_clk_src.clkr.hw, }, .num_parents = 1, - .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, + .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, }, }, @@ -416,7 +414,7 @@ static struct clk_branch gpu_cc_demet_clk = { &gpu_cc_demet_div_clk_src.clkr.hw, }, .num_parents = 1, - .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, + .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_aon_ops, }, }, @@ -430,7 +428,6 @@ static struct clk_branch gpu_cc_hlos1_vote_gpu_smmu_clk = { .enable_mask = BIT(0), .hw.init = &(const struct clk_init_data){ .name = "gpu_cc_hlos1_vote_gpu_smmu_clk", - .flags = CLK_IS_CRITICAL, .ops = &clk_branch2_ops, }, }, @@ -448,7 +445,7 @@ static struct clk_branch gpu_cc_hub_aon_clk = { &gpu_cc_hub_clk_src.clkr.hw, }, .num_parents = 1, - .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, + .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_aon_ops, }, }, @@ -466,7 +463,7 @@ static struct clk_branch gpu_cc_hub_cx_int_clk = { &gpu_cc_hub_cx_int_div_clk_src.clkr.hw, }, .num_parents = 1, - .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, + .flags = 
CLK_SET_RATE_PARENT, .ops = &clk_branch2_aon_ops, }, }, @@ -480,7 +477,6 @@ static struct clk_branch gpu_cc_memnoc_gfx_clk = { .enable_mask = BIT(0), .hw.init = &(const struct clk_init_data){ .name = "gpu_cc_memnoc_gfx_clk", - .flags = CLK_IS_CRITICAL, .ops = &clk_branch2_ops, }, }, @@ -494,7 +490,6 @@ static struct clk_branch gpu_cc_sleep_clk = { .enable_mask = BIT(0), .hw.init = &(const struct clk_init_data){ .name = "gpu_cc_sleep_clk", - .flags = CLK_IS_CRITICAL, .ops = &clk_branch2_ops, }, }, @@ -528,16 +523,22 @@ static struct clk_regmap *gpu_cc_sa8775p_clocks[] = { static struct gdsc cx_gdsc = { .gdscr = 0x9108, + .en_rest_wait_val = 0x2, + .en_few_wait_val = 0x2, + .clk_dis_wait_val = 0xf, .gds_hw_ctrl = 0x953c, .pd = { .name = "cx_gdsc", }, .pwrsts = PWRSTS_OFF_ON, - .flags = VOTABLE | RETAIN_FF_ENABLE | ALWAYS_ON, + .flags = VOTABLE | RETAIN_FF_ENABLE, }; static struct gdsc gx_gdsc = { .gdscr = 0x905c, + .en_rest_wait_val = 0x2, + .en_few_wait_val = 0x2, + .clk_dis_wait_val = 0xf, .pd = { .name = "gx_gdsc", .power_on = gdsc_gx_do_nothing_enable, diff --git a/drivers/clk/qcom/gpucc-sm8350.c b/drivers/clk/qcom/gpucc-sm8350.c index 38505d1388b6..8d9dcff40dd0 100644 --- a/drivers/clk/qcom/gpucc-sm8350.c +++ b/drivers/clk/qcom/gpucc-sm8350.c @@ -2,6 +2,7 @@ /* * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved. * Copyright (c) 2022, Linaro Limited + * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved. */ #include <linux/clk.h> @@ -147,7 +148,7 @@ static struct clk_rcg2 gpu_cc_gmu_clk_src = { .parent_data = gpu_cc_parent_data_0, .num_parents = ARRAY_SIZE(gpu_cc_parent_data_0), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, }; @@ -169,7 +170,7 @@ static struct clk_rcg2 gpu_cc_hub_clk_src = { .parent_data = gpu_cc_parent_data_1, .num_parents = ARRAY_SIZE(gpu_cc_parent_data_1), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, }; diff --git a/drivers/clk/qcom/kpss-xcc.c b/drivers/clk/qcom/kpss-xcc.c index 23b0b11f0007..e7cfa8d22044 100644 --- a/drivers/clk/qcom/kpss-xcc.c +++ b/drivers/clk/qcom/kpss-xcc.c @@ -58,9 +58,7 @@ static int kpss_xcc_driver_probe(struct platform_device *pdev) if (IS_ERR(hw)) return PTR_ERR(hw); - of_clk_add_hw_provider(dev->of_node, of_clk_hw_simple_get, hw); - - return 0; + return of_clk_add_hw_provider(dev->of_node, of_clk_hw_simple_get, hw); } static struct platform_driver kpss_xcc_driver = { diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c index a026ccca7315..28945b6b0ee1 100644 --- a/drivers/clk/samsung/clk-exynos4.c +++ b/drivers/clk/samsung/clk-exynos4.c @@ -1040,19 +1040,20 @@ static unsigned long __init exynos4_get_xom(void) static void __init exynos4_clk_register_finpll(struct samsung_clk_provider *ctx) { struct samsung_fixed_rate_clock fclk; - struct clk *clk; - unsigned long finpll_f = 24000000; + unsigned long finpll_f; + unsigned int parent; char *parent_name; unsigned int xom = exynos4_get_xom(); parent_name = xom & 1 ? "xusbxti" : "xxti"; - clk = clk_get(NULL, parent_name); - if (IS_ERR(clk)) { + parent = xom & 1 ? 
CLK_XUSBXTI : CLK_XXTI; + + finpll_f = clk_hw_get_rate(ctx->clk_data.hws[parent]); + if (!finpll_f) { pr_err("%s: failed to lookup parent clock %s, assuming " "fin_pll clock frequency is 24MHz\n", __func__, parent_name); - } else { - finpll_f = clk_get_rate(clk); + finpll_f = 24000000; } fclk.id = CLK_FIN_PLL; diff --git a/drivers/cpufreq/amd-pstate-ut.c b/drivers/cpufreq/amd-pstate-ut.c index fc275d41d51e..66b73c308ce6 100644 --- a/drivers/cpufreq/amd-pstate-ut.c +++ b/drivers/cpufreq/amd-pstate-ut.c @@ -202,6 +202,7 @@ static void amd_pstate_ut_check_freq(u32 index) int cpu = 0; struct cpufreq_policy *policy = NULL; struct amd_cpudata *cpudata = NULL; + u32 nominal_freq_khz; for_each_possible_cpu(cpu) { policy = cpufreq_cpu_get(cpu); @@ -209,13 +210,14 @@ static void amd_pstate_ut_check_freq(u32 index) break; cpudata = policy->driver_data; - if (!((cpudata->max_freq >= cpudata->nominal_freq) && - (cpudata->nominal_freq > cpudata->lowest_nonlinear_freq) && + nominal_freq_khz = cpudata->nominal_freq*1000; + if (!((cpudata->max_freq >= nominal_freq_khz) && + (nominal_freq_khz > cpudata->lowest_nonlinear_freq) && (cpudata->lowest_nonlinear_freq > cpudata->min_freq) && (cpudata->min_freq > 0))) { amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL; pr_err("%s cpu%d max=%d >= nominal=%d > lowest_nonlinear=%d > min=%d > 0, the formula is incorrect!\n", - __func__, cpu, cpudata->max_freq, cpudata->nominal_freq, + __func__, cpu, cpudata->max_freq, nominal_freq_khz, cpudata->lowest_nonlinear_freq, cpudata->min_freq); goto skip_test; } @@ -229,13 +231,13 @@ static void amd_pstate_ut_check_freq(u32 index) if (cpudata->boost_supported) { if ((policy->max == cpudata->max_freq) || - (policy->max == cpudata->nominal_freq)) + (policy->max == nominal_freq_khz)) amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_PASS; else { amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL; pr_err("%s cpu%d policy_max=%d should be equal cpu_max=%d or cpu_nominal=%d !\n", __func__, cpu, policy->max, cpudata->max_freq, - cpudata->nominal_freq); + nominal_freq_khz); goto skip_test; } } else { diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c index 9ad62dbe8bfb..a092b13ffbc2 100644 --- a/drivers/cpufreq/amd-pstate.c +++ b/drivers/cpufreq/amd-pstate.c @@ -247,6 +247,26 @@ static int amd_pstate_get_energy_pref_index(struct amd_cpudata *cpudata) return index; } +static void pstate_update_perf(struct amd_cpudata *cpudata, u32 min_perf, + u32 des_perf, u32 max_perf, bool fast_switch) +{ + if (fast_switch) + wrmsrl(MSR_AMD_CPPC_REQ, READ_ONCE(cpudata->cppc_req_cached)); + else + wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, + READ_ONCE(cpudata->cppc_req_cached)); +} + +DEFINE_STATIC_CALL(amd_pstate_update_perf, pstate_update_perf); + +static inline void amd_pstate_update_perf(struct amd_cpudata *cpudata, + u32 min_perf, u32 des_perf, + u32 max_perf, bool fast_switch) +{ + static_call(amd_pstate_update_perf)(cpudata, min_perf, des_perf, + max_perf, fast_switch); +} + static int amd_pstate_set_epp(struct amd_cpudata *cpudata, u32 epp) { int ret; @@ -263,6 +283,9 @@ static int amd_pstate_set_epp(struct amd_cpudata *cpudata, u32 epp) if (!ret) cpudata->epp_cached = epp; } else { + amd_pstate_update_perf(cpudata, cpudata->min_limit_perf, 0U, + cpudata->max_limit_perf, false); + perf_ctrls.energy_perf = epp; ret = cppc_set_epp_perf(cpudata->cpu, &perf_ctrls, 1); if (ret) { @@ -452,16 +475,6 @@ static inline int amd_pstate_init_perf(struct amd_cpudata *cpudata) return 
static_call(amd_pstate_init_perf)(cpudata); } -static void pstate_update_perf(struct amd_cpudata *cpudata, u32 min_perf, - u32 des_perf, u32 max_perf, bool fast_switch) -{ - if (fast_switch) - wrmsrl(MSR_AMD_CPPC_REQ, READ_ONCE(cpudata->cppc_req_cached)); - else - wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, - READ_ONCE(cpudata->cppc_req_cached)); -} - static void cppc_update_perf(struct amd_cpudata *cpudata, u32 min_perf, u32 des_perf, u32 max_perf, bool fast_switch) @@ -475,16 +488,6 @@ static void cppc_update_perf(struct amd_cpudata *cpudata, cppc_set_perf(cpudata->cpu, &perf_ctrls); } -DEFINE_STATIC_CALL(amd_pstate_update_perf, pstate_update_perf); - -static inline void amd_pstate_update_perf(struct amd_cpudata *cpudata, - u32 min_perf, u32 des_perf, - u32 max_perf, bool fast_switch) -{ - static_call(amd_pstate_update_perf)(cpudata, min_perf, des_perf, - max_perf, fast_switch); -} - static inline bool amd_pstate_sample(struct amd_cpudata *cpudata) { u64 aperf, mperf, tsc; diff --git a/drivers/cpufreq/qcom-cpufreq-nvmem.c b/drivers/cpufreq/qcom-cpufreq-nvmem.c index ea05d9d67490..5004e1dbc752 100644 --- a/drivers/cpufreq/qcom-cpufreq-nvmem.c +++ b/drivers/cpufreq/qcom-cpufreq-nvmem.c @@ -480,23 +480,30 @@ static int qcom_cpufreq_probe(struct platform_device *pdev) drv = devm_kzalloc(&pdev->dev, struct_size(drv, cpus, num_possible_cpus()), GFP_KERNEL); - if (!drv) + if (!drv) { + of_node_put(np); return -ENOMEM; + } match = pdev->dev.platform_data; drv->data = match->data; - if (!drv->data) + if (!drv->data) { + of_node_put(np); return -ENODEV; + } if (drv->data->get_version) { speedbin_nvmem = of_nvmem_cell_get(np, NULL); - if (IS_ERR(speedbin_nvmem)) + if (IS_ERR(speedbin_nvmem)) { + of_node_put(np); return dev_err_probe(cpu_dev, PTR_ERR(speedbin_nvmem), "Could not get nvmem cell\n"); + } ret = drv->data->get_version(cpu_dev, speedbin_nvmem, &pvs_name, drv); if (ret) { + of_node_put(np); nvmem_cell_put(speedbin_nvmem); return ret; } diff --git a/drivers/cpufreq/sun50i-cpufreq-nvmem.c b/drivers/cpufreq/sun50i-cpufreq-nvmem.c index 0b882765cd66..ef83e4bf2639 100644 --- a/drivers/cpufreq/sun50i-cpufreq-nvmem.c +++ b/drivers/cpufreq/sun50i-cpufreq-nvmem.c @@ -131,7 +131,7 @@ static const struct of_device_id cpu_opp_match_list[] = { static bool dt_has_supported_hw(void) { bool has_opp_supported_hw = false; - struct device_node *np, *opp; + struct device_node *np; struct device *cpu_dev; cpu_dev = get_cpu_device(0); @@ -142,7 +142,7 @@ static bool dt_has_supported_hw(void) if (!np) return false; - for_each_child_of_node(np, opp) { + for_each_child_of_node_scoped(np, opp) { if (of_find_property(opp, "opp-supported-hw", NULL)) { has_opp_supported_hw = true; break; diff --git a/drivers/cpufreq/ti-cpufreq.c b/drivers/cpufreq/ti-cpufreq.c index 714ed53753fa..5af85c4cbad0 100644 --- a/drivers/cpufreq/ti-cpufreq.c +++ b/drivers/cpufreq/ti-cpufreq.c @@ -417,7 +417,7 @@ static int ti_cpufreq_probe(struct platform_device *pdev) ret = dev_pm_opp_set_config(opp_data->cpu_dev, &config); if (ret < 0) { - dev_err(opp_data->cpu_dev, "Failed to set OPP config\n"); + dev_err_probe(opp_data->cpu_dev, ret, "Failed to set OPP config\n"); goto fail_put_node; } diff --git a/drivers/crypto/atmel-sha204a.c b/drivers/crypto/atmel-sha204a.c index 24ffdf505023..2034f6031518 100644 --- a/drivers/crypto/atmel-sha204a.c +++ b/drivers/crypto/atmel-sha204a.c @@ -106,7 +106,7 @@ static int atmel_sha204a_otp_read(struct i2c_client *client, u16 addr, u8 *otp) if (cmd.data[0] == 0xff) { dev_err(&client->dev, "failed, device 
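
The amd-pstate hunks above do not change the helpers themselves; they move pstate_update_perf() together with its DEFINE_STATIC_CALL() and the amd_pstate_update_perf() wrapper above amd_pstate_set_epp(), so the wrapper is visible at its new call site in the shared-memory (CPPC) path. The static_call() mechanism they rely on looks like this in isolation (minimal sketch; all names here are illustrative, not the driver's):

    #include <linux/static_call.h>

    static void update_perf_msr(int cpu)            /* default backend */
    {
            /* e.g. wrmsrl_on_cpu(cpu, ...) */
    }

    static void update_perf_cppc(int cpu)           /* alternative backend */
    {
            /* e.g. cppc_set_perf(cpu, ...) */
    }

    DEFINE_STATIC_CALL(example_update_perf, update_perf_msr);

    static void do_update(int cpu)
    {
            static_call(example_update_perf)(cpu);  /* patched direct call */
    }

    static void use_cppc_backend(void)
    {
            static_call_update(example_update_perf, update_perf_cppc);
    }

Elsewhere in the driver, static_call_update() is how the CPPC backend replaces the MSR one on systems without the dedicated MSR interface.
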
not ready\n"); - return -ret; + return -EINVAL; } memcpy(otp, cmd.data+1, 4); diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c index 2102377f727b..1912bee22dd4 100644 --- a/drivers/crypto/ccp/sev-dev.c +++ b/drivers/crypto/ccp/sev-dev.c @@ -1642,10 +1642,16 @@ static int sev_update_firmware(struct device *dev) static int __sev_snp_shutdown_locked(int *error, bool panic) { - struct sev_device *sev = psp_master->sev_data; + struct psp_device *psp = psp_master; + struct sev_device *sev; struct sev_data_snp_shutdown_ex data; int ret; + if (!psp || !psp->sev_data) + return 0; + + sev = psp->sev_data; + if (!sev->snp_initialized) return 0; diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg.c b/drivers/crypto/intel/qat/qat_common/adf_cfg.c index 8836f015c39c..2cf102ad4ca8 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_cfg.c +++ b/drivers/crypto/intel/qat/qat_common/adf_cfg.c @@ -290,17 +290,19 @@ int adf_cfg_add_key_value_param(struct adf_accel_dev *accel_dev, * 3. if the key exists with the same value, then return without doing * anything (the newly created key_val is freed). */ + down_write(&cfg->lock); if (!adf_cfg_key_val_get(accel_dev, section_name, key, temp_val)) { if (strncmp(temp_val, key_val->val, sizeof(temp_val))) { adf_cfg_keyval_remove(key, section); } else { kfree(key_val); - return 0; + goto out; } } - down_write(&cfg->lock); adf_cfg_keyval_add(key_val, section); + +out: up_write(&cfg->lock); return 0; } diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c index 057d73c370b7..c82775dbb557 100644 --- a/drivers/crypto/mxs-dcp.c +++ b/drivers/crypto/mxs-dcp.c @@ -225,7 +225,8 @@ static int mxs_dcp_start_dma(struct dcp_async_ctx *actx) static int mxs_dcp_run_aes(struct dcp_async_ctx *actx, struct skcipher_request *req, int init) { - dma_addr_t key_phys, src_phys, dst_phys; + dma_addr_t key_phys = 0; + dma_addr_t src_phys, dst_phys; struct dcp *sdcp = global_sdcp; struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan]; struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req); diff --git a/drivers/crypto/tegra/tegra-se-main.c b/drivers/crypto/tegra/tegra-se-main.c index 9955874b3dc3..f94c0331b148 100644 --- a/drivers/crypto/tegra/tegra-se-main.c +++ b/drivers/crypto/tegra/tegra-se-main.c @@ -326,7 +326,6 @@ static void tegra_se_remove(struct platform_device *pdev) crypto_engine_stop(se->engine); crypto_engine_exit(se->engine); - iommu_fwspec_free(se->dev); host1x_client_unregister(&se->client); } diff --git a/drivers/dma/fsl-edma-common.c b/drivers/dma/fsl-edma-common.c index 3af430787315..0af934b56a6c 100644 --- a/drivers/dma/fsl-edma-common.c +++ b/drivers/dma/fsl-edma-common.c @@ -758,6 +758,8 @@ struct dma_async_tx_descriptor *fsl_edma_prep_memcpy(struct dma_chan *chan, fsl_desc->iscyclic = false; fsl_chan->is_sw = true; + if (fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_MEM_REMOTE) + fsl_chan->is_remote = true; /* To match with copy_align and max_seg_size so 1 tcd is enough */ fsl_edma_fill_tcd(fsl_chan, fsl_desc->tcd[0].vtcd, dma_src, dma_dst, @@ -837,6 +839,7 @@ void fsl_edma_free_chan_resources(struct dma_chan *chan) fsl_chan->tcd_pool = NULL; fsl_chan->is_sw = false; fsl_chan->srcid = 0; + fsl_chan->is_remote = false; if (fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_HAS_CHCLK) clk_disable_unprepare(fsl_chan->clk); } diff --git a/drivers/dma/fsl-edma-common.h b/drivers/dma/fsl-edma-common.h index ac66222c1604..268db3876787 100644 --- a/drivers/dma/fsl-edma-common.h +++ b/drivers/dma/fsl-edma-common.h @@ -194,6 +194,7 @@ struct 
fsl_edma_desc { #define FSL_EDMA_DRV_HAS_PD BIT(5) #define FSL_EDMA_DRV_HAS_CHCLK BIT(6) #define FSL_EDMA_DRV_HAS_CHMUX BIT(7) +#define FSL_EDMA_DRV_MEM_REMOTE BIT(8) /* control and status register is in tcd address space, edma3 reg layout */ #define FSL_EDMA_DRV_SPLIT_REG BIT(9) #define FSL_EDMA_DRV_BUS_8BYTE BIT(10) diff --git a/drivers/dma/fsl-edma-main.c b/drivers/dma/fsl-edma-main.c index 391e4f13dfeb..43d84cfefbe2 100644 --- a/drivers/dma/fsl-edma-main.c +++ b/drivers/dma/fsl-edma-main.c @@ -342,7 +342,7 @@ static struct fsl_edma_drvdata imx7ulp_data = { }; static struct fsl_edma_drvdata imx8qm_data = { - .flags = FSL_EDMA_DRV_HAS_PD | FSL_EDMA_DRV_EDMA3, + .flags = FSL_EDMA_DRV_HAS_PD | FSL_EDMA_DRV_EDMA3 | FSL_EDMA_DRV_MEM_REMOTE, .chreg_space_sz = 0x10000, .chreg_off = 0x10000, .setup_irq = fsl_edma3_irq_init, diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c index 6400d06588a2..df507d96660b 100644 --- a/drivers/dma/ti/k3-udma.c +++ b/drivers/dma/ti/k3-udma.c @@ -4472,7 +4472,9 @@ static int udma_get_mmrs(struct platform_device *pdev, struct udma_dev *ud) ud->rchan_cnt = UDMA_CAP2_RCHAN_CNT(cap2); break; case DMA_TYPE_BCDMA: - ud->bchan_cnt = BCDMA_CAP2_BCHAN_CNT(cap2); + ud->bchan_cnt = BCDMA_CAP2_BCHAN_CNT(cap2) + + BCDMA_CAP3_HBCHAN_CNT(cap3) + + BCDMA_CAP3_UBCHAN_CNT(cap3); ud->tchan_cnt = BCDMA_CAP2_TCHAN_CNT(cap2); ud->rchan_cnt = BCDMA_CAP2_RCHAN_CNT(cap2); ud->rflow_cnt = ud->rchan_cnt; diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile index 9c09893695b7..4edfb83ffbee 100644 --- a/drivers/edac/Makefile +++ b/drivers/edac/Makefile @@ -54,11 +54,13 @@ obj-$(CONFIG_EDAC_MPC85XX) += mpc85xx_edac_mod.o layerscape_edac_mod-y := fsl_ddr_edac.o layerscape_edac.o obj-$(CONFIG_EDAC_LAYERSCAPE) += layerscape_edac_mod.o -skx_edac-y := skx_common.o skx_base.o -obj-$(CONFIG_EDAC_SKX) += skx_edac.o +skx_edac_common-y := skx_common.o -i10nm_edac-y := skx_common.o i10nm_base.o -obj-$(CONFIG_EDAC_I10NM) += i10nm_edac.o +skx_edac-y := skx_base.o +obj-$(CONFIG_EDAC_SKX) += skx_edac.o skx_edac_common.o + +i10nm_edac-y := i10nm_base.o +obj-$(CONFIG_EDAC_I10NM) += i10nm_edac.o skx_edac_common.o obj-$(CONFIG_EDAC_CELL) += cell_edac.o obj-$(CONFIG_EDAC_PPC4XX) += ppc4xx_edac.o diff --git a/drivers/edac/skx_common.c b/drivers/edac/skx_common.c index 27996b7924c8..8d18099fd528 100644 --- a/drivers/edac/skx_common.c +++ b/drivers/edac/skx_common.c @@ -48,7 +48,7 @@ static u64 skx_tolm, skx_tohm; static LIST_HEAD(dev_edac_list); static bool skx_mem_cfg_2lm; -int __init skx_adxl_get(void) +int skx_adxl_get(void) { const char * const *names; int i, j; @@ -110,12 +110,14 @@ int __init skx_adxl_get(void) return -ENODEV; } +EXPORT_SYMBOL_GPL(skx_adxl_get); -void __exit skx_adxl_put(void) +void skx_adxl_put(void) { kfree(adxl_values); kfree(adxl_msg); } +EXPORT_SYMBOL_GPL(skx_adxl_put); static bool skx_adxl_decode(struct decoded_addr *res, bool error_in_1st_level_mem) { @@ -187,12 +189,14 @@ void skx_set_mem_cfg(bool mem_cfg_2lm) { skx_mem_cfg_2lm = mem_cfg_2lm; } +EXPORT_SYMBOL_GPL(skx_set_mem_cfg); void skx_set_decode(skx_decode_f decode, skx_show_retry_log_f show_retry_log) { driver_decode = decode; skx_show_retry_rd_err_log = show_retry_log; } +EXPORT_SYMBOL_GPL(skx_set_decode); int skx_get_src_id(struct skx_dev *d, int off, u8 *id) { @@ -206,6 +210,7 @@ int skx_get_src_id(struct skx_dev *d, int off, u8 *id) *id = GET_BITFIELD(reg, 12, 14); return 0; } +EXPORT_SYMBOL_GPL(skx_get_src_id); int skx_get_node_id(struct skx_dev *d, u8 *id) { @@ -219,6 +224,7 @@ int 
skx_get_node_id(struct skx_dev *d, u8 *id) *id = GET_BITFIELD(reg, 0, 2); return 0; } +EXPORT_SYMBOL_GPL(skx_get_node_id); static int get_width(u32 mtr) { @@ -284,6 +290,7 @@ int skx_get_all_bus_mappings(struct res_config *cfg, struct list_head **list) *list = &dev_edac_list; return ndev; } +EXPORT_SYMBOL_GPL(skx_get_all_bus_mappings); int skx_get_hi_lo(unsigned int did, int off[], u64 *tolm, u64 *tohm) { @@ -323,6 +330,7 @@ int skx_get_hi_lo(unsigned int did, int off[], u64 *tolm, u64 *tohm) pci_dev_put(pdev); return -ENODEV; } +EXPORT_SYMBOL_GPL(skx_get_hi_lo); static int skx_get_dimm_attr(u32 reg, int lobit, int hibit, int add, int minval, int maxval, const char *name) @@ -394,6 +402,7 @@ int skx_get_dimm_info(u32 mtr, u32 mcmtr, u32 amap, struct dimm_info *dimm, return 1; } +EXPORT_SYMBOL_GPL(skx_get_dimm_info); int skx_get_nvdimm_info(struct dimm_info *dimm, struct skx_imc *imc, int chan, int dimmno, const char *mod_str) @@ -442,6 +451,7 @@ int skx_get_nvdimm_info(struct dimm_info *dimm, struct skx_imc *imc, return (size == 0 || size == ~0ull) ? 0 : 1; } +EXPORT_SYMBOL_GPL(skx_get_nvdimm_info); int skx_register_mci(struct skx_imc *imc, struct pci_dev *pdev, const char *ctl_name, const char *mod_str, @@ -512,6 +522,7 @@ int skx_register_mci(struct skx_imc *imc, struct pci_dev *pdev, imc->mci = NULL; return rc; } +EXPORT_SYMBOL_GPL(skx_register_mci); static void skx_unregister_mci(struct skx_imc *imc) { @@ -688,6 +699,7 @@ int skx_mce_check_error(struct notifier_block *nb, unsigned long val, mce->kflags |= MCE_HANDLED_EDAC; return NOTIFY_DONE; } +EXPORT_SYMBOL_GPL(skx_mce_check_error); void skx_remove(void) { @@ -725,3 +737,8 @@ void skx_remove(void) kfree(d); } } +EXPORT_SYMBOL_GPL(skx_remove); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Tony Luck"); +MODULE_DESCRIPTION("MC Driver for Intel server processors"); diff --git a/drivers/edac/skx_common.h b/drivers/edac/skx_common.h index b6d3607dffe2..11faf1db4fa4 100644 --- a/drivers/edac/skx_common.h +++ b/drivers/edac/skx_common.h @@ -231,8 +231,8 @@ typedef int (*get_dimm_config_f)(struct mem_ctl_info *mci, typedef bool (*skx_decode_f)(struct decoded_addr *res); typedef void (*skx_show_retry_log_f)(struct decoded_addr *res, char *msg, int len, bool scrub_err); -int __init skx_adxl_get(void); -void __exit skx_adxl_put(void); +int skx_adxl_get(void); +void skx_adxl_put(void); void skx_set_decode(skx_decode_f decode, skx_show_retry_log_f show_retry_log); void skx_set_mem_cfg(bool mem_cfg_2lm); diff --git a/drivers/firmware/efi/libstub/screen_info.c b/drivers/firmware/efi/libstub/screen_info.c index a51ec201ca3c..5d3a1e32d177 100644 --- a/drivers/firmware/efi/libstub/screen_info.c +++ b/drivers/firmware/efi/libstub/screen_info.c @@ -32,6 +32,8 @@ struct screen_info *__alloc_screen_info(void) if (status != EFI_SUCCESS) return NULL; + memset(si, 0, sizeof(*si)); + status = efi_bs_call(install_configuration_table, &screen_info_guid, si); if (status == EFI_SUCCESS) diff --git a/drivers/firmware/efi/libstub/x86-stub.c b/drivers/firmware/efi/libstub/x86-stub.c index 1983fd3bf392..99d39eda5134 100644 --- a/drivers/firmware/efi/libstub/x86-stub.c +++ b/drivers/firmware/efi/libstub/x86-stub.c @@ -469,11 +469,12 @@ void __noreturn efi_stub_entry(efi_handle_t handle, efi_status_t __efiapi efi_pe_entry(efi_handle_t handle, efi_system_table_t *sys_table_arg) { - static struct boot_params boot_params __page_aligned_bss; - struct setup_header *hdr = &boot_params.hdr; efi_guid_t proto = LOADED_IMAGE_PROTOCOL_GUID; + struct boot_params *boot_params; + 
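
On the EDAC side, the Makefile and skx_common.c changes above turn the common code into its own module (skx_edac_common) shared by skx_edac and i10nm_edac. That is why the helpers gain EXPORT_SYMBOL_GPL() and drop their __init/__exit markers: an exported function may be called from another module's probe path long after the __init sections have been discarded. The minimal shape of such a shared-helper module (illustrative names only):

    #include <linux/module.h>

    int example_shared_helper(void)         /* deliberately not __init */
    {
            return 0;
    }
    EXPORT_SYMBOL_GPL(example_shared_helper);

    MODULE_LICENSE("GPL v2");
    MODULE_DESCRIPTION("Helpers shared by two EDAC drivers");
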
struct setup_header *hdr; int options_size = 0; efi_status_t status; + unsigned long alloc; char *cmdline_ptr; if (efi_is_native()) @@ -491,6 +492,13 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle, efi_exit(handle, status); } + status = efi_allocate_pages(PARAM_SIZE, &alloc, ULONG_MAX); + if (status != EFI_SUCCESS) + efi_exit(handle, status); + + boot_params = memset((void *)alloc, 0x0, PARAM_SIZE); + hdr = &boot_params->hdr; + /* Assign the setup_header fields that the kernel actually cares about */ hdr->root_flags = 1; hdr->vid_mode = 0xffff; @@ -500,17 +508,16 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle, /* Convert unicode cmdline to ascii */ cmdline_ptr = efi_convert_cmdline(image, &options_size); - if (!cmdline_ptr) - goto fail; + if (!cmdline_ptr) { + efi_free(PARAM_SIZE, alloc); + efi_exit(handle, EFI_OUT_OF_RESOURCES); + } efi_set_u64_split((unsigned long)cmdline_ptr, &hdr->cmd_line_ptr, - &boot_params.ext_cmd_line_ptr); + &boot_params->ext_cmd_line_ptr); - efi_stub_entry(handle, sys_table_arg, &boot_params); + efi_stub_entry(handle, sys_table_arg, boot_params); /* not reached */ - -fail: - efi_exit(handle, status); } static void add_e820ext(struct boot_params *params, diff --git a/drivers/firmware/turris-mox-rwtm.c b/drivers/firmware/turris-mox-rwtm.c index 31d962cdd6eb..3e7f186d239a 100644 --- a/drivers/firmware/turris-mox-rwtm.c +++ b/drivers/firmware/turris-mox-rwtm.c @@ -2,7 +2,7 @@ /* * Turris Mox rWTM firmware driver * - * Copyright (C) 2019 Marek Behún <kabel@xxxxxxxxxx> + * Copyright (C) 2019, 2024 Marek Behún <kabel@xxxxxxxxxx> */ #include <linux/armada-37xx-rwtm-mailbox.h> @@ -174,6 +174,9 @@ static void mox_rwtm_rx_callback(struct mbox_client *cl, void *data) struct mox_rwtm *rwtm = dev_get_drvdata(cl->dev); struct armada_37xx_rwtm_rx_msg *msg = data; + if (completion_done(&rwtm->cmd_done)) + return; + rwtm->reply = *msg; complete(&rwtm->cmd_done); } @@ -199,9 +202,8 @@ static int mox_get_board_info(struct mox_rwtm *rwtm) if (ret < 0) return ret; - ret = wait_for_completion_timeout(&rwtm->cmd_done, HZ / 2); - if (ret < 0) - return ret; + if (!wait_for_completion_timeout(&rwtm->cmd_done, HZ / 2)) + return -ETIMEDOUT; ret = mox_get_status(MBOX_CMD_BOARD_INFO, reply->retval); if (ret == -ENODATA) { @@ -235,9 +237,8 @@ static int mox_get_board_info(struct mox_rwtm *rwtm) if (ret < 0) return ret; - ret = wait_for_completion_timeout(&rwtm->cmd_done, HZ / 2); - if (ret < 0) - return ret; + if (!wait_for_completion_timeout(&rwtm->cmd_done, HZ / 2)) + return -ETIMEDOUT; ret = mox_get_status(MBOX_CMD_ECDSA_PUB_KEY, reply->retval); if (ret == -ENODATA) { @@ -274,9 +275,8 @@ static int check_get_random_support(struct mox_rwtm *rwtm) if (ret < 0) return ret; - ret = wait_for_completion_timeout(&rwtm->cmd_done, HZ / 2); - if (ret < 0) - return ret; + if (!wait_for_completion_timeout(&rwtm->cmd_done, HZ / 2)) + return -ETIMEDOUT; return mox_get_status(MBOX_CMD_GET_RANDOM, rwtm->reply.retval); } @@ -499,6 +499,7 @@ static int turris_mox_rwtm_probe(struct platform_device *pdev) platform_set_drvdata(pdev, rwtm); mutex_init(&rwtm->busy); + init_completion(&rwtm->cmd_done); rwtm->mbox_client.dev = dev; rwtm->mbox_client.rx_callback = mox_rwtm_rx_callback; @@ -512,8 +513,6 @@ static int turris_mox_rwtm_probe(struct platform_device *pdev) goto remove_files; } - init_completion(&rwtm->cmd_done); - ret = mox_get_board_info(rwtm); if (ret < 0) dev_warn(dev, "Cannot read board information: %i\n", ret); diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index 
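
The turris-mox-rwtm fixes above rest on two properties of the completion API: wait_for_completion_timeout() returns an unsigned remaining-jiffies count (0 on timeout, never a negative errno, so the old "ret < 0" checks could never fire), and the completion must be initialised before the mailbox client is set up, since the rx callback may run as soon as a reply arrives. The corrected wait pattern in isolation (illustrative name):

    #include <linux/completion.h>
    #include <linux/errno.h>
    #include <linux/jiffies.h>

    static int example_wait_for_reply(struct completion *cmd_done)
    {
            /* 0 means the half-second timeout expired; > 0 is the jiffies left */
            if (!wait_for_completion_timeout(cmd_done, HZ / 2))
                    return -ETIMEDOUT;
            return 0;
    }
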
d0aa277fc3bf..359b68adafc1 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -106,8 +106,7 @@ config DRM_KMS_HELPER config DRM_PANIC bool "Display a user-friendly message when a kernel panic occurs" - depends on DRM && !FRAMEBUFFER_CONSOLE - select DRM_KMS_HELPER + depends on DRM && !(FRAMEBUFFER_CONSOLE && VT_CONSOLE) select FONT_SUPPORT help Enable a drm panic handler, which will display a user-friendly message diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 33f791d92ddf..ee7df1d84e02 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -6173,7 +6173,7 @@ int amdgpu_device_baco_exit(struct drm_device *dev) adev->nbio.funcs->enable_doorbell_interrupt) adev->nbio.funcs->enable_doorbell_interrupt(adev, true); - if (amdgpu_passthrough(adev) && + if (amdgpu_passthrough(adev) && adev->nbio.funcs && adev->nbio.funcs->clear_doorbell_interrupt) adev->nbio.funcs->clear_doorbell_interrupt(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index 1d955652f3ba..e92bdc9a39d3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -329,8 +329,9 @@ int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev, int xcc_id) ring->eop_gpu_addr = kiq->eop_gpu_addr; ring->no_scheduler = true; - snprintf(ring->name, sizeof(ring->name), "kiq_%d.%d.%d.%d", - xcc_id, ring->me, ring->pipe, ring->queue); + snprintf(ring->name, sizeof(ring->name), "kiq_%hhu.%hhu.%hhu.%hhu", + (unsigned char)xcc_id, (unsigned char)ring->me, + (unsigned char)ring->pipe, (unsigned char)ring->queue); r = amdgpu_ring_init(adev, ring, 1024, irq, AMDGPU_CP_KIQ_IRQ_DRIVER0, AMDGPU_RING_PRIO_DEFAULT, NULL); if (r) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c index 08b9dfb65335..86b096ad0319 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c @@ -878,7 +878,6 @@ void amdgpu_gmc_noretry_set(struct amdgpu_device *adev) struct amdgpu_gmc *gmc = &adev->gmc; uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0); bool noretry_default = (gc_ver == IP_VERSION(9, 0, 1) || - gc_ver == IP_VERSION(9, 3, 0) || gc_ver == IP_VERSION(9, 4, 0) || gc_ver == IP_VERSION(9, 4, 1) || gc_ver == IP_VERSION(9, 4, 2) || diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 4e2391c83d7c..0f7106066480 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -434,7 +434,7 @@ uint64_t amdgpu_vm_generation(struct amdgpu_device *adev, struct amdgpu_vm *vm) if (!vm) return result; - result += vm->generation; + result += lower_32_bits(vm->generation); /* Add one if the page tables will be re-generated on next CS */ if (drm_sched_entity_error(&vm->delayed)) ++result; @@ -463,13 +463,14 @@ int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm, int (*validate)(void *p, struct amdgpu_bo *bo), void *param) { + uint64_t new_vm_generation = amdgpu_vm_generation(adev, vm); struct amdgpu_vm_bo_base *bo_base; struct amdgpu_bo *shadow; struct amdgpu_bo *bo; int r; - if (drm_sched_entity_error(&vm->delayed)) { - ++vm->generation; + if (vm->generation != new_vm_generation) { + vm->generation = new_vm_generation; amdgpu_vm_bo_reset_state_machine(vm); amdgpu_vm_fini_entities(vm); r = amdgpu_vm_init_entities(adev, vm); @@ -2441,7 +2442,7 @@ int amdgpu_vm_init(struct 
amdgpu_device *adev, struct amdgpu_vm *vm, vm->last_update = dma_fence_get_stub(); vm->last_unlocked = dma_fence_get_stub(); vm->last_tlb_flush = dma_fence_get_stub(); - vm->generation = 0; + vm->generation = amdgpu_vm_generation(adev, NULL); mutex_init(&vm->eviction_lock); vm->evicting = false; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index c4ec1358f3aa..f7f492475102 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -1910,7 +1910,7 @@ gmc_v9_0_init_sw_mem_ranges(struct amdgpu_device *adev, break; } - size = adev->gmc.real_vram_size >> AMDGPU_GPU_PAGE_SHIFT; + size = (adev->gmc.real_vram_size + SZ_16M) >> AMDGPU_GPU_PAGE_SHIFT; size /= adev->gmc.num_mem_partitions; for (i = 0; i < adev->gmc.num_mem_partitions; ++i) { diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c index cc9e961f0078..af1e90159ce3 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c @@ -176,6 +176,14 @@ static void sdma_v5_2_ring_set_wptr(struct amdgpu_ring *ring) DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n", ring->doorbell_index, ring->wptr << 2); WDOORBELL64(ring->doorbell_index, ring->wptr << 2); + /* SDMA seems to miss doorbells sometimes when powergating kicks in. + * Updating the wptr directly will wake it. This is only safe because + * we disallow gfxoff in begin_use() and then allow it again in end_use(). + */ + WREG32(sdma_v5_2_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR), + lower_32_bits(ring->wptr << 2)); + WREG32(sdma_v5_2_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR_HI), + upper_32_bits(ring->wptr << 2)); } else { DRM_DEBUG("Not using doorbell -- " "mmSDMA%i_GFX_RB_WPTR == 0x%08x " @@ -1647,6 +1655,10 @@ static void sdma_v5_2_ring_begin_use(struct amdgpu_ring *ring) * but it shouldn't hurt for other parts since * this GFXOFF will be disallowed anyway when SDMA is * active, this just makes it explicit. + * sdma_v5_2_ring_set_wptr() takes advantage of this + * to update the wptr because sometimes SDMA seems to miss + * doorbells when entering PG. If you remove this, update + * sdma_v5_2_ring_set_wptr() as well! 
*/ amdgpu_gfx_off_ctrl(adev, false); } diff --git a/drivers/gpu/drm/amd/amdgpu/smu_v13_0_10.c b/drivers/gpu/drm/amd/amdgpu/smu_v13_0_10.c index 04c797d54511..0af648931df5 100644 --- a/drivers/gpu/drm/amd/amdgpu/smu_v13_0_10.c +++ b/drivers/gpu/drm/amd/amdgpu/smu_v13_0_10.c @@ -91,7 +91,7 @@ static int smu_v13_0_10_mode2_suspend_ip(struct amdgpu_device *adev) adev->ip_blocks[i].status.hw = false; } - return r; + return 0; } static int diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c index ac1b8ead03b3..9a33d3d00065 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c @@ -1053,6 +1053,9 @@ static int vcn_v4_0_start(struct amdgpu_device *adev) amdgpu_dpm_enable_uvd(adev, true); for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + if (adev->vcn.harvest_config & (1 << i)) + continue; + fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr; if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) { @@ -1506,6 +1509,9 @@ static int vcn_v4_0_stop(struct amdgpu_device *adev) int i, r = 0; for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + if (adev->vcn.harvest_config & (1 << i)) + continue; + fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr; fw_shared->sq.queue_mode |= FW_QUEUE_DPG_HOLD_OFF; diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c index 81fb99729f37..30e80c6f11ed 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c @@ -964,6 +964,9 @@ static int vcn_v4_0_5_start(struct amdgpu_device *adev) amdgpu_dpm_enable_uvd(adev, true); for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + if (adev->vcn.harvest_config & (1 << i)) + continue; + fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr; if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) { @@ -1168,6 +1171,9 @@ static int vcn_v4_0_5_stop(struct amdgpu_device *adev) int i, r = 0; for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + if (adev->vcn.harvest_config & (1 << i)) + continue; + fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr; fw_shared->sq.queue_mode |= FW_QUEUE_DPG_HOLD_OFF; diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c index 851975b5ce29..fbd3f7a582c1 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c @@ -722,6 +722,9 @@ static int vcn_v5_0_0_start(struct amdgpu_device *adev) amdgpu_dpm_enable_uvd(adev, true); for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + if (adev->vcn.harvest_config & (1 << i)) + continue; + fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr; if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) { @@ -899,6 +902,9 @@ static int vcn_v5_0_0_stop(struct amdgpu_device *adev) int i, r = 0; for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + if (adev->vcn.harvest_config & (1 << i)) + continue; + fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr; fw_shared->sq.queue_mode |= FW_QUEUE_DPG_HOLD_OFF; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c index 6bddc16808d7..8ec136eba54a 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c @@ -713,7 +713,7 @@ static void update_mqd_v9_4_3(struct mqd_manager *mm, void *mqd, m = get_mqd(mqd + size * xcc); update_mqd(mm, m, q, minfo); - update_cu_mask(mm, mqd, minfo, xcc); + update_cu_mask(mm, m, minfo, xcc); if (q->format == KFD_QUEUE_FORMAT_AQL) { switch (xcc) { diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c 
b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c index 067f6555cfdf..ccbb15f1638c 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c @@ -143,7 +143,8 @@ const struct dc_plane_status *dc_plane_get_status( if (pipe_ctx->plane_state != plane_state) continue; - pipe_ctx->plane_state->status.is_flip_pending = false; + if (pipe_ctx->plane_state) + pipe_ctx->plane_state->status.is_flip_pending = false; break; } diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 3c33c3bcbe2c..4362fca1f15a 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -1392,6 +1392,7 @@ struct dc { } scratch; struct dml2_configuration_options dml2_options; + struct dml2_configuration_options dml2_tmp; enum dc_acpi_cm_power_state power_state; }; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c index 282d70e2b18a..3d29169dd6bb 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c @@ -750,6 +750,8 @@ static void enable_phantom_plane(struct dml2_context *ctx, ctx->config.svp_pstate.callbacks.dc, state, curr_pipe->plane_state); + if (!phantom_plane) + return; } memcpy(&phantom_plane->address, &curr_pipe->plane_state->address, sizeof(phantom_plane->address)); diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c index 8ecc972dbffd..edff6b447680 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c @@ -804,7 +804,7 @@ static void populate_dml_surface_cfg_from_plane_state(enum dml_project_id dml2_p } } -static struct scaler_data get_scaler_data_for_plane(const struct dc_plane_state *in, struct dc_state *context) +static void get_scaler_data_for_plane(const struct dc_plane_state *in, struct dc_state *context, struct scaler_data *out) { int i; struct pipe_ctx *temp_pipe = &context->res_ctx.temp_pipe; @@ -825,7 +825,7 @@ static struct scaler_data get_scaler_data_for_plane(const struct dc_plane_state } ASSERT(i < MAX_PIPES); - return temp_pipe->plane_res.scl_data; + memcpy(out, &temp_pipe->plane_res.scl_data, sizeof(*out)); } static void populate_dummy_dml_plane_cfg(struct dml_plane_cfg_st *out, unsigned int location, const struct dc_stream_state *in) @@ -884,27 +884,31 @@ static void populate_dummy_dml_plane_cfg(struct dml_plane_cfg_st *out, unsigned static void populate_dml_plane_cfg_from_plane_state(struct dml_plane_cfg_st *out, unsigned int location, const struct dc_plane_state *in, struct dc_state *context) { - const struct scaler_data scaler_data = get_scaler_data_for_plane(in, context); + struct scaler_data *scaler_data = kzalloc(sizeof(*scaler_data), GFP_KERNEL); + if (!scaler_data) + return; + + get_scaler_data_for_plane(in, context, scaler_data); out->CursorBPP[location] = dml_cur_32bit; out->CursorWidth[location] = 256; out->GPUVMMinPageSizeKBytes[location] = 256; - out->ViewportWidth[location] = scaler_data.viewport.width; - out->ViewportHeight[location] = scaler_data.viewport.height; - out->ViewportWidthChroma[location] = scaler_data.viewport_c.width; - out->ViewportHeightChroma[location] = scaler_data.viewport_c.height; - out->ViewportXStart[location] = scaler_data.viewport.x; - out->ViewportYStart[location] = scaler_data.viewport.y; - 
out->ViewportXStartC[location] = scaler_data.viewport_c.x; - out->ViewportYStartC[location] = scaler_data.viewport_c.y; + out->ViewportWidth[location] = scaler_data->viewport.width; + out->ViewportHeight[location] = scaler_data->viewport.height; + out->ViewportWidthChroma[location] = scaler_data->viewport_c.width; + out->ViewportHeightChroma[location] = scaler_data->viewport_c.height; + out->ViewportXStart[location] = scaler_data->viewport.x; + out->ViewportYStart[location] = scaler_data->viewport.y; + out->ViewportXStartC[location] = scaler_data->viewport_c.x; + out->ViewportYStartC[location] = scaler_data->viewport_c.y; out->ViewportStationary[location] = false; - out->ScalerEnabled[location] = scaler_data.ratios.horz.value != dc_fixpt_one.value || - scaler_data.ratios.horz_c.value != dc_fixpt_one.value || - scaler_data.ratios.vert.value != dc_fixpt_one.value || - scaler_data.ratios.vert_c.value != dc_fixpt_one.value; + out->ScalerEnabled[location] = scaler_data->ratios.horz.value != dc_fixpt_one.value || + scaler_data->ratios.horz_c.value != dc_fixpt_one.value || + scaler_data->ratios.vert.value != dc_fixpt_one.value || + scaler_data->ratios.vert_c.value != dc_fixpt_one.value; /* Current driver code base uses LBBitPerPixel as 57. There is a discrepancy * from the HW/DML teams about this value. Initialize LBBitPerPixel with the @@ -920,25 +924,25 @@ static void populate_dml_plane_cfg_from_plane_state(struct dml_plane_cfg_st *out out->VRatioChroma[location] = 1; } else { /* Follow the original dml_wrapper.c code direction to fix scaling issues */ - out->HRatio[location] = (dml_float_t)scaler_data.ratios.horz.value / (1ULL << 32); - out->HRatioChroma[location] = (dml_float_t)scaler_data.ratios.horz_c.value / (1ULL << 32); - out->VRatio[location] = (dml_float_t)scaler_data.ratios.vert.value / (1ULL << 32); - out->VRatioChroma[location] = (dml_float_t)scaler_data.ratios.vert_c.value / (1ULL << 32); + out->HRatio[location] = (dml_float_t)scaler_data->ratios.horz.value / (1ULL << 32); + out->HRatioChroma[location] = (dml_float_t)scaler_data->ratios.horz_c.value / (1ULL << 32); + out->VRatio[location] = (dml_float_t)scaler_data->ratios.vert.value / (1ULL << 32); + out->VRatioChroma[location] = (dml_float_t)scaler_data->ratios.vert_c.value / (1ULL << 32); } - if (!scaler_data.taps.h_taps) { + if (!scaler_data->taps.h_taps) { out->HTaps[location] = 1; out->HTapsChroma[location] = 1; } else { - out->HTaps[location] = scaler_data.taps.h_taps; - out->HTapsChroma[location] = scaler_data.taps.h_taps_c; + out->HTaps[location] = scaler_data->taps.h_taps; + out->HTapsChroma[location] = scaler_data->taps.h_taps_c; } - if (!scaler_data.taps.v_taps) { + if (!scaler_data->taps.v_taps) { out->VTaps[location] = 1; out->VTapsChroma[location] = 1; } else { - out->VTaps[location] = scaler_data.taps.v_taps; - out->VTapsChroma[location] = scaler_data.taps.v_taps_c; + out->VTaps[location] = scaler_data->taps.v_taps; + out->VTapsChroma[location] = scaler_data->taps.v_taps_c; } out->SourceScan[location] = (enum dml_rotation_angle)in->rotation; @@ -949,6 +953,8 @@ static void populate_dml_plane_cfg_from_plane_state(struct dml_plane_cfg_st *out out->DynamicMetadataTransmittedBytes[location] = 0; out->NumberOfCursors[location] = 1; + + kfree(scaler_data); } static unsigned int map_stream_to_dml_display_cfg(const struct dml2_context *dml2, diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.c index 5574bc628053..f109a101d84f 100644 --- 
a/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.c +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.c @@ -945,19 +945,10 @@ void optc1_set_drr( OTG_FORCE_LOCK_ON_EVENT, 0, OTG_SET_V_TOTAL_MIN_MASK_EN, 0, OTG_SET_V_TOTAL_MIN_MASK, 0); - - // Setup manual flow control for EOF via TRIG_A - optc->funcs->setup_manual_trigger(optc); - - } else { - REG_UPDATE_4(OTG_V_TOTAL_CONTROL, - OTG_SET_V_TOTAL_MIN_MASK, 0, - OTG_V_TOTAL_MIN_SEL, 0, - OTG_V_TOTAL_MAX_SEL, 0, - OTG_FORCE_LOCK_ON_EVENT, 0); - - optc->funcs->set_vtotal_min_max(optc, 0, 0); } + + // Setup manual flow control for EOF via TRIG_A + optc->funcs->setup_manual_trigger(optc); } void optc1_set_vtotal_min_max(struct timing_generator *optc, int vtotal_min, int vtotal_max) diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.c index d6f095b4555d..58bdbd859bf9 100644 --- a/drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.c +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.c @@ -462,6 +462,16 @@ void optc2_setup_manual_trigger(struct timing_generator *optc) { struct optc *optc1 = DCN10TG_FROM_TG(optc); + /* Set the min/max selectors unconditionally so that + * DMCUB fw may change OTG timings when necessary + * TODO: Remove the w/a after fixing the issue in DMCUB firmware + */ + REG_UPDATE_4(OTG_V_TOTAL_CONTROL, + OTG_V_TOTAL_MIN_SEL, 1, + OTG_V_TOTAL_MAX_SEL, 1, + OTG_FORCE_LOCK_ON_EVENT, 0, + OTG_SET_V_TOTAL_MIN_MASK, (1 << 1)); /* TRIGA */ + REG_SET_8(OTG_TRIGA_CNTL, 0, OTG_TRIGA_SOURCE_SELECT, 21, OTG_TRIGA_SOURCE_PIPE_SELECT, optc->inst, diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c index abd76345d1e4..d84c8e0e5c2f 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c @@ -2006,19 +2006,21 @@ void dcn32_calculate_wm_and_dlg(struct dc *dc, struct dc_state *context, static void dcn32_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params) { - struct dml2_configuration_options dml2_opt = dc->dml2_options; + struct dml2_configuration_options *dml2_opt = &dc->dml2_tmp; + + memcpy(dml2_opt, &dc->dml2_options, sizeof(dc->dml2_options)); DC_FP_START(); dcn32_update_bw_bounding_box_fpu(dc, bw_params); - dml2_opt.use_clock_dc_limits = false; + dml2_opt->use_clock_dc_limits = false; if (dc->debug.using_dml2 && dc->current_state && dc->current_state->bw_ctx.dml2) - dml2_reinit(dc, &dml2_opt, &dc->current_state->bw_ctx.dml2); + dml2_reinit(dc, dml2_opt, &dc->current_state->bw_ctx.dml2); - dml2_opt.use_clock_dc_limits = true; + dml2_opt->use_clock_dc_limits = true; if (dc->debug.using_dml2 && dc->current_state && dc->current_state->bw_ctx.dml2_dc_power_source) - dml2_reinit(dc, &dml2_opt, &dc->current_state->bw_ctx.dml2_dc_power_source); + dml2_reinit(dc, dml2_opt, &dc->current_state->bw_ctx.dml2_dc_power_source); DC_FP_END(); } diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c index e4b360d89b3b..9a3cc0514a36 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c @@ -1581,19 +1581,21 @@ static struct dc_cap_funcs cap_funcs = { static void dcn321_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params) { - struct dml2_configuration_options dml2_opt = 
dc->dml2_options; + struct dml2_configuration_options *dml2_opt = &dc->dml2_tmp; + + memcpy(dml2_opt, &dc->dml2_options, sizeof(dc->dml2_options)); DC_FP_START(); dcn321_update_bw_bounding_box_fpu(dc, bw_params); - dml2_opt.use_clock_dc_limits = false; + dml2_opt->use_clock_dc_limits = false; if (dc->debug.using_dml2 && dc->current_state && dc->current_state->bw_ctx.dml2) - dml2_reinit(dc, &dml2_opt, &dc->current_state->bw_ctx.dml2); + dml2_reinit(dc, dml2_opt, &dc->current_state->bw_ctx.dml2); - dml2_opt.use_clock_dc_limits = true; + dml2_opt->use_clock_dc_limits = true; if (dc->debug.using_dml2 && dc->current_state && dc->current_state->bw_ctx.dml2_dc_power_source) - dml2_reinit(dc, &dml2_opt, &dc->current_state->bw_ctx.dml2_dc_power_source); + dml2_reinit(dc, dml2_opt, &dc->current_state->bw_ctx.dml2_dc_power_source); DC_FP_END(); } diff --git a/drivers/gpu/drm/amd/display/include/grph_object_id.h b/drivers/gpu/drm/amd/display/include/grph_object_id.h index 08ee0350b31f..54e33062b3c0 100644 --- a/drivers/gpu/drm/amd/display/include/grph_object_id.h +++ b/drivers/gpu/drm/amd/display/include/grph_object_id.h @@ -226,8 +226,8 @@ enum dp_alt_mode { struct graphics_object_id { uint32_t id:8; - enum object_enum_id enum_id; - enum object_type type; + enum object_enum_id enum_id :4; + enum object_type type :4; uint32_t reserved:16; /* for padding. total size should be u32 */ }; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c index a8d34adc7d3f..b63ad9cb24bf 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c @@ -79,8 +79,8 @@ MODULE_FIRMWARE("amdgpu/smu_13_0_10.bin"); #define PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK 0x00000070L #define PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT 0x4 #define smnPCIE_LC_SPEED_CNTL 0x11140290 -#define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK 0xC000 -#define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT 0xE +#define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK 0xE0 +#define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT 0x5 #define ENABLE_IMU_ARG_GFXOFF_ENABLE 1 diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c index 2c661f28410e..b645c5998230 100644 --- a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c +++ b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c @@ -5,6 +5,7 @@ * */ #include <linux/clk.h> +#include <linux/of.h> #include <linux/pm_runtime.h> #include <linux/spinlock.h> @@ -610,12 +611,34 @@ get_crtc_primary(struct komeda_kms_dev *kms, struct komeda_crtc *crtc) return NULL; } +static int komeda_attach_bridge(struct device *dev, + struct komeda_pipeline *pipe, + struct drm_encoder *encoder) +{ + struct drm_bridge *bridge; + int err; + + bridge = devm_drm_of_get_bridge(dev, pipe->of_node, + KOMEDA_OF_PORT_OUTPUT, 0); + if (IS_ERR(bridge)) + return dev_err_probe(dev, PTR_ERR(bridge), "remote bridge not found for pipe: %s\n", + of_node_full_name(pipe->of_node)); + + err = drm_bridge_attach(encoder, bridge, NULL, 0); + if (err) + dev_err(dev, "bridge_attach() failed for pipe: %s\n", + of_node_full_name(pipe->of_node)); + + return err; +} + static int komeda_crtc_add(struct komeda_kms_dev *kms, struct komeda_crtc *kcrtc) { struct drm_crtc *crtc = &kcrtc->base; struct drm_device *base = &kms->base; - struct drm_bridge *bridge; + struct komeda_pipeline *pipe = kcrtc->master; + struct drm_encoder *encoder = &kcrtc->encoder; int err; err = 
drm_crtc_init_with_planes(base, crtc, @@ -626,27 +649,25 @@ static int komeda_crtc_add(struct komeda_kms_dev *kms, drm_crtc_helper_add(crtc, &komeda_crtc_helper_funcs); - crtc->port = kcrtc->master->of_output_port; + crtc->port = pipe->of_output_port; /* Construct an encoder for each pipeline and attach it to the remote * bridge */ kcrtc->encoder.possible_crtcs = drm_crtc_mask(crtc); - err = drm_simple_encoder_init(base, &kcrtc->encoder, - DRM_MODE_ENCODER_TMDS); + err = drm_simple_encoder_init(base, encoder, DRM_MODE_ENCODER_TMDS); if (err) return err; - bridge = devm_drm_of_get_bridge(base->dev, kcrtc->master->of_node, - KOMEDA_OF_PORT_OUTPUT, 0); - if (IS_ERR(bridge)) - return PTR_ERR(bridge); - - err = drm_bridge_attach(&kcrtc->encoder, bridge, NULL, 0); + if (pipe->of_output_links[0]) { + err = komeda_attach_bridge(base->dev, pipe, encoder); + if (err) + return err; + } drm_crtc_enable_color_mgmt(crtc, 0, true, KOMEDA_COLOR_LUT_SIZE); - return err; + return 0; } int komeda_kms_add_crtcs(struct komeda_kms_dev *kms, struct komeda_dev *mdev) diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511.h b/drivers/gpu/drm/bridge/adv7511/adv7511.h index ea271f62b214..ec0b7f3d889c 100644 --- a/drivers/gpu/drm/bridge/adv7511/adv7511.h +++ b/drivers/gpu/drm/bridge/adv7511/adv7511.h @@ -401,7 +401,7 @@ struct adv7511 { #ifdef CONFIG_DRM_I2C_ADV7511_CEC int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511); -void adv7511_cec_irq_process(struct adv7511 *adv7511, unsigned int irq1); +int adv7511_cec_irq_process(struct adv7511 *adv7511, unsigned int irq1); #else static inline int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511) { diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c b/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c index 44451a9658a3..2e9c88a2b5ed 100644 --- a/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c +++ b/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c @@ -119,7 +119,7 @@ static void adv7511_cec_rx(struct adv7511 *adv7511, int rx_buf) cec_received_msg(adv7511->cec_adap, &msg); } -void adv7511_cec_irq_process(struct adv7511 *adv7511, unsigned int irq1) +int adv7511_cec_irq_process(struct adv7511 *adv7511, unsigned int irq1) { unsigned int offset = adv7511->info->reg_cec_offset; const u32 irq_tx_mask = ADV7511_INT1_CEC_TX_READY | @@ -131,16 +131,19 @@ void adv7511_cec_irq_process(struct adv7511 *adv7511, unsigned int irq1) unsigned int rx_status; int rx_order[3] = { -1, -1, -1 }; int i; + int irq_status = IRQ_NONE; - if (irq1 & irq_tx_mask) + if (irq1 & irq_tx_mask) { adv_cec_tx_raw_status(adv7511, irq1); + irq_status = IRQ_HANDLED; + } if (!(irq1 & irq_rx_mask)) - return; + return irq_status; if (regmap_read(adv7511->regmap_cec, ADV7511_REG_CEC_RX_STATUS + offset, &rx_status)) - return; + return irq_status; /* * ADV7511_REG_CEC_RX_STATUS[5:0] contains the reception order of RX @@ -172,6 +175,8 @@ void adv7511_cec_irq_process(struct adv7511 *adv7511, unsigned int irq1) adv7511_cec_rx(adv7511, rx_buf); } + + return IRQ_HANDLED; } static int adv7511_cec_adap_enable(struct cec_adapter *adap, bool enable) diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c index 66ccb61e2a66..c8d2c4a157b2 100644 --- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c +++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c @@ -469,6 +469,8 @@ static int adv7511_irq_process(struct adv7511 *adv7511, bool process_hpd) { unsigned int irq0, irq1; int ret; + int cec_status = IRQ_NONE; + int irq_status = IRQ_NONE; ret = regmap_read(adv7511->regmap, 
ADV7511_REG_INT(0), &irq0); if (ret < 0) @@ -478,29 +480,31 @@ static int adv7511_irq_process(struct adv7511 *adv7511, bool process_hpd) if (ret < 0) return ret; - /* If there is no IRQ to handle, exit indicating no IRQ data */ - if (!(irq0 & (ADV7511_INT0_HPD | ADV7511_INT0_EDID_READY)) && - !(irq1 & ADV7511_INT1_DDC_ERROR)) - return -ENODATA; - regmap_write(adv7511->regmap, ADV7511_REG_INT(0), irq0); regmap_write(adv7511->regmap, ADV7511_REG_INT(1), irq1); - if (process_hpd && irq0 & ADV7511_INT0_HPD && adv7511->bridge.encoder) + if (process_hpd && irq0 & ADV7511_INT0_HPD && adv7511->bridge.encoder) { schedule_work(&adv7511->hpd_work); + irq_status = IRQ_HANDLED; + } if (irq0 & ADV7511_INT0_EDID_READY || irq1 & ADV7511_INT1_DDC_ERROR) { adv7511->edid_read = true; if (adv7511->i2c_main->irq) wake_up_all(&adv7511->wq); + irq_status = IRQ_HANDLED; } #ifdef CONFIG_DRM_I2C_ADV7511_CEC - adv7511_cec_irq_process(adv7511, irq1); + cec_status = adv7511_cec_irq_process(adv7511, irq1); #endif - return 0; + /* If there is no IRQ to handle, exit indicating no IRQ data */ + if (irq_status == IRQ_HANDLED || cec_status == IRQ_HANDLED) + return IRQ_HANDLED; + + return IRQ_NONE; } static irqreturn_t adv7511_irq_handler(int irq, void *devid) @@ -509,7 +513,7 @@ static irqreturn_t adv7511_irq_handler(int irq, void *devid) int ret; ret = adv7511_irq_process(adv7511, true); - return ret < 0 ? IRQ_NONE : IRQ_HANDLED; + return ret < 0 ? IRQ_NONE : ret; } /* ----------------------------------------------------------------------------- diff --git a/drivers/gpu/drm/bridge/ite-it6505.c b/drivers/gpu/drm/bridge/ite-it6505.c index 3f68c82888c2..cf59347d3d60 100644 --- a/drivers/gpu/drm/bridge/ite-it6505.c +++ b/drivers/gpu/drm/bridge/ite-it6505.c @@ -1307,9 +1307,15 @@ static void it6505_video_reset(struct it6505 *it6505) it6505_link_reset_step_train(it6505); it6505_set_bits(it6505, REG_DATA_MUTE_CTRL, EN_VID_MUTE, EN_VID_MUTE); it6505_set_bits(it6505, REG_INFOFRAME_CTRL, EN_VID_CTRL_PKT, 0x00); - it6505_set_bits(it6505, REG_RESET_CTRL, VIDEO_RESET, VIDEO_RESET); + + it6505_set_bits(it6505, REG_VID_BUS_CTRL1, TX_FIFO_RESET, TX_FIFO_RESET); + it6505_set_bits(it6505, REG_VID_BUS_CTRL1, TX_FIFO_RESET, 0x00); + it6505_set_bits(it6505, REG_501_FIFO_CTRL, RST_501_FIFO, RST_501_FIFO); it6505_set_bits(it6505, REG_501_FIFO_CTRL, RST_501_FIFO, 0x00); + + it6505_set_bits(it6505, REG_RESET_CTRL, VIDEO_RESET, VIDEO_RESET); + usleep_range(1000, 2000); it6505_set_bits(it6505, REG_RESET_CTRL, VIDEO_RESET, 0x00); } @@ -2245,12 +2251,11 @@ static void it6505_link_training_work(struct work_struct *work) if (ret) { it6505->auto_train_retry = AUTO_TRAIN_RETRY; it6505_link_train_ok(it6505); - return; } else { it6505->auto_train_retry--; + it6505_dump(it6505); } - it6505_dump(it6505); } static void it6505_plugged_status_to_codec(struct it6505 *it6505) @@ -2471,31 +2476,53 @@ static void it6505_irq_link_train_fail(struct it6505 *it6505) schedule_work(&it6505->link_works); } -static void it6505_irq_video_fifo_error(struct it6505 *it6505) +static bool it6505_test_bit(unsigned int bit, const unsigned int *addr) { - struct device *dev = it6505->dev; - - DRM_DEV_DEBUG_DRIVER(dev, "video fifo overflow interrupt"); - it6505->auto_train_retry = AUTO_TRAIN_RETRY; - flush_work(&it6505->link_works); - it6505_stop_hdcp(it6505); - it6505_video_reset(it6505); + return 1 & (addr[bit / BITS_PER_BYTE] >> (bit % BITS_PER_BYTE)); } -static void it6505_irq_io_latch_fifo_overflow(struct it6505 *it6505) +static void it6505_irq_video_handler(struct it6505 
*it6505, const int *int_status) { struct device *dev = it6505->dev; + int reg_0d, reg_int03; - DRM_DEV_DEBUG_DRIVER(dev, "IO latch fifo overflow interrupt"); - it6505->auto_train_retry = AUTO_TRAIN_RETRY; - flush_work(&it6505->link_works); - it6505_stop_hdcp(it6505); - it6505_video_reset(it6505); -} + /* + * When video SCDT change with video not stable, + * Or video FIFO error, need video reset + */ -static bool it6505_test_bit(unsigned int bit, const unsigned int *addr) -{ - return 1 & (addr[bit / BITS_PER_BYTE] >> (bit % BITS_PER_BYTE)); + if ((!it6505_get_video_status(it6505) && + (it6505_test_bit(INT_SCDT_CHANGE, (unsigned int *)int_status))) || + (it6505_test_bit(BIT_INT_IO_FIFO_OVERFLOW, + (unsigned int *)int_status)) || + (it6505_test_bit(BIT_INT_VID_FIFO_ERROR, + (unsigned int *)int_status))) { + it6505->auto_train_retry = AUTO_TRAIN_RETRY; + flush_work(&it6505->link_works); + it6505_stop_hdcp(it6505); + it6505_video_reset(it6505); + + usleep_range(10000, 11000); + + /* + * Clear FIFO error IRQ to prevent fifo error -> reset loop + * HW will trigger SCDT change IRQ again when video stable + */ + + reg_int03 = it6505_read(it6505, INT_STATUS_03); + reg_0d = it6505_read(it6505, REG_SYSTEM_STS); + + reg_int03 &= (BIT(INT_VID_FIFO_ERROR) | BIT(INT_IO_LATCH_FIFO_OVERFLOW)); + it6505_write(it6505, INT_STATUS_03, reg_int03); + + DRM_DEV_DEBUG_DRIVER(dev, "reg08 = 0x%02x", reg_int03); + DRM_DEV_DEBUG_DRIVER(dev, "reg0D = 0x%02x", reg_0d); + + return; + } + + if (it6505_test_bit(INT_SCDT_CHANGE, (unsigned int *)int_status)) + it6505_irq_scdt(it6505); } static irqreturn_t it6505_int_threaded_handler(int unused, void *data) @@ -2508,15 +2535,12 @@ static irqreturn_t it6505_int_threaded_handler(int unused, void *data) } irq_vec[] = { { BIT_INT_HPD, it6505_irq_hpd }, { BIT_INT_HPD_IRQ, it6505_irq_hpd_irq }, - { BIT_INT_SCDT, it6505_irq_scdt }, { BIT_INT_HDCP_FAIL, it6505_irq_hdcp_fail }, { BIT_INT_HDCP_DONE, it6505_irq_hdcp_done }, { BIT_INT_AUX_CMD_FAIL, it6505_irq_aux_cmd_fail }, { BIT_INT_HDCP_KSV_CHECK, it6505_irq_hdcp_ksv_check }, { BIT_INT_AUDIO_FIFO_ERROR, it6505_irq_audio_fifo_error }, { BIT_INT_LINK_TRAIN_FAIL, it6505_irq_link_train_fail }, - { BIT_INT_VID_FIFO_ERROR, it6505_irq_video_fifo_error }, - { BIT_INT_IO_FIFO_OVERFLOW, it6505_irq_io_latch_fifo_overflow }, }; int int_status[3], i; @@ -2546,6 +2570,7 @@ static irqreturn_t it6505_int_threaded_handler(int unused, void *data) if (it6505_test_bit(irq_vec[i].bit, (unsigned int *)int_status)) irq_vec[i].handler(it6505); } + it6505_irq_video_handler(it6505, (unsigned int *)int_status); } pm_runtime_put_sync(dev); diff --git a/drivers/gpu/drm/bridge/samsung-dsim.c b/drivers/gpu/drm/bridge/samsung-dsim.c index 95fedc68b0ae..8476650c477c 100644 --- a/drivers/gpu/drm/bridge/samsung-dsim.c +++ b/drivers/gpu/drm/bridge/samsung-dsim.c @@ -574,8 +574,8 @@ static unsigned long samsung_dsim_pll_find_pms(struct samsung_dsim *dsi, u16 _m, best_m; u8 _s, best_s; - p_min = DIV_ROUND_UP(fin, (12 * MHZ)); - p_max = fin / (6 * MHZ); + p_min = DIV_ROUND_UP(fin, (driver_data->pll_fin_max * MHZ)); + p_max = fin / (driver_data->pll_fin_min * MHZ); for (_p = p_min; _p <= p_max; ++_p) { for (_s = 0; _s <= 5; ++_s) { diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology.c b/drivers/gpu/drm/display/drm_dp_mst_topology.c index 7f8e1cfbe19d..68831f4e502a 100644 --- a/drivers/gpu/drm/display/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c @@ -2929,7 +2929,7 @@ static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, /* 
FIXME: Actually do some real error handling here */ ret = drm_dp_mst_wait_tx_reply(mstb, txmsg); - if (ret <= 0) { + if (ret < 0) { drm_err(mgr->dev, "Sending link address failed with %d\n", ret); goto out; } @@ -2981,7 +2981,7 @@ static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, mutex_unlock(&mgr->lock); out: - if (ret <= 0) + if (ret < 0) mstb->link_address_sent = false; kfree(txmsg); return ret < 0 ? ret : changed; diff --git a/drivers/gpu/drm/drm_fbdev_dma.c b/drivers/gpu/drm/drm_fbdev_dma.c index 13cd754af311..77695339e4d4 100644 --- a/drivers/gpu/drm/drm_fbdev_dma.c +++ b/drivers/gpu/drm/drm_fbdev_dma.c @@ -90,7 +90,8 @@ static int drm_fbdev_dma_helper_fb_probe(struct drm_fb_helper *fb_helper, sizes->surface_width, sizes->surface_height, sizes->surface_bpp); - format = drm_mode_legacy_fb_format(sizes->surface_bpp, sizes->surface_depth); + format = drm_driver_legacy_fb_format(dev, sizes->surface_bpp, + sizes->surface_depth); buffer = drm_client_framebuffer_create(client, sizes->surface_width, sizes->surface_height, format); if (IS_ERR(buffer)) diff --git a/drivers/gpu/drm/drm_panic.c b/drivers/gpu/drm/drm_panic.c index 7ece67086cec..831b214975a5 100644 --- a/drivers/gpu/drm/drm_panic.c +++ b/drivers/gpu/drm/drm_panic.c @@ -15,7 +15,6 @@ #include <linux/types.h> #include <drm/drm_drv.h> -#include <drm/drm_format_helper.h> #include <drm/drm_fourcc.h> #include <drm/drm_framebuffer.h> #include <drm/drm_modeset_helper_vtables.h> @@ -194,40 +193,42 @@ static u32 convert_from_xrgb8888(u32 color, u32 format) /* * Blit & Fill */ +/* check if the pixel at coord x,y is 1 (foreground) or 0 (background) */ +static bool drm_panic_is_pixel_fg(const u8 *sbuf8, unsigned int spitch, int x, int y) +{ + return (sbuf8[(y * spitch) + x / 8] & (0x80 >> (x % 8))) != 0; +} + static void drm_panic_blit16(struct iosys_map *dmap, unsigned int dpitch, const u8 *sbuf8, unsigned int spitch, unsigned int height, unsigned int width, - u16 fg16, u16 bg16) + u16 fg16) { unsigned int y, x; - u16 val16; - for (y = 0; y < height; y++) { - for (x = 0; x < width; x++) { - val16 = (sbuf8[(y * spitch) + x / 8] & (0x80 >> (x % 8))) ? fg16 : bg16; - iosys_map_wr(dmap, y * dpitch + x * sizeof(u16), u16, val16); - } - } + for (y = 0; y < height; y++) + for (x = 0; x < width; x++) + if (drm_panic_is_pixel_fg(sbuf8, spitch, x, y)) + iosys_map_wr(dmap, y * dpitch + x * sizeof(u16), u16, fg16); } static void drm_panic_blit24(struct iosys_map *dmap, unsigned int dpitch, const u8 *sbuf8, unsigned int spitch, unsigned int height, unsigned int width, - u32 fg32, u32 bg32) + u32 fg32) { unsigned int y, x; - u32 val32; for (y = 0; y < height; y++) { for (x = 0; x < width; x++) { u32 off = y * dpitch + x * 3; - val32 = (sbuf8[(y * spitch) + x / 8] & (0x80 >> (x % 8))) ? 
fg32 : bg32; - - /* write blue-green-red to output in little endianness */ - iosys_map_wr(dmap, off, u8, (val32 & 0x000000FF) >> 0); - iosys_map_wr(dmap, off + 1, u8, (val32 & 0x0000FF00) >> 8); - iosys_map_wr(dmap, off + 2, u8, (val32 & 0x00FF0000) >> 16); + if (drm_panic_is_pixel_fg(sbuf8, spitch, x, y)) { + /* write blue-green-red to output in little endianness */ + iosys_map_wr(dmap, off, u8, (fg32 & 0x000000FF) >> 0); + iosys_map_wr(dmap, off + 1, u8, (fg32 & 0x0000FF00) >> 8); + iosys_map_wr(dmap, off + 2, u8, (fg32 & 0x00FF0000) >> 16); + } } } } @@ -235,17 +236,14 @@ static void drm_panic_blit24(struct iosys_map *dmap, unsigned int dpitch, static void drm_panic_blit32(struct iosys_map *dmap, unsigned int dpitch, const u8 *sbuf8, unsigned int spitch, unsigned int height, unsigned int width, - u32 fg32, u32 bg32) + u32 fg32) { unsigned int y, x; - u32 val32; - for (y = 0; y < height; y++) { - for (x = 0; x < width; x++) { - val32 = (sbuf8[(y * spitch) + x / 8] & (0x80 >> (x % 8))) ? fg32 : bg32; - iosys_map_wr(dmap, y * dpitch + x * sizeof(u32), u32, val32); - } - } + for (y = 0; y < height; y++) + for (x = 0; x < width; x++) + if (drm_panic_is_pixel_fg(sbuf8, spitch, x, y)) + iosys_map_wr(dmap, y * dpitch + x * sizeof(u32), u32, fg32); } /* @@ -257,7 +255,6 @@ static void drm_panic_blit32(struct iosys_map *dmap, unsigned int dpitch, * @height: height of the image to copy, in pixels * @width: width of the image to copy, in pixels * @fg_color: foreground color, in destination format - * @bg_color: background color, in destination format * @pixel_width: pixel width in bytes. * * This can be used to draw a font character, which is a monochrome image, to a @@ -266,21 +263,20 @@ static void drm_panic_blit32(struct iosys_map *dmap, unsigned int dpitch, static void drm_panic_blit(struct iosys_map *dmap, unsigned int dpitch, const u8 *sbuf8, unsigned int spitch, unsigned int height, unsigned int width, - u32 fg_color, u32 bg_color, - unsigned int pixel_width) + u32 fg_color, unsigned int pixel_width) { switch (pixel_width) { case 2: drm_panic_blit16(dmap, dpitch, sbuf8, spitch, - height, width, fg_color, bg_color); + height, width, fg_color); break; case 3: drm_panic_blit24(dmap, dpitch, sbuf8, spitch, - height, width, fg_color, bg_color); + height, width, fg_color); break; case 4: drm_panic_blit32(dmap, dpitch, sbuf8, spitch, - height, width, fg_color, bg_color); + height, width, fg_color); break; default: WARN_ONCE(1, "Can't blit with pixel width %d\n", pixel_width); @@ -381,8 +377,7 @@ static void draw_txt_rectangle(struct drm_scanout_buffer *sb, unsigned int msg_lines, bool centered, struct drm_rect *clip, - u32 fg_color, - u32 bg_color) + u32 color) { int i, j; const u8 *src; @@ -404,8 +399,7 @@ static void draw_txt_rectangle(struct drm_scanout_buffer *sb, for (j = 0; j < line_len; j++) { src = get_char_bitmap(font, msg[i].txt[j], font_pitch); drm_panic_blit(&dst, sb->pitch[0], src, font_pitch, - font->height, font->width, - fg_color, bg_color, px_width); + font->height, font->width, color, px_width); iosys_map_incr(&dst, font->width * px_width); } } @@ -444,10 +438,10 @@ static void draw_panic_static(struct drm_scanout_buffer *sb) bg_color, sb->format->cpp[0]); if ((r_msg.x1 >= drm_rect_width(&r_logo) || r_msg.y1 >= drm_rect_height(&r_logo)) && - drm_rect_width(&r_logo) < sb->width && drm_rect_height(&r_logo) < sb->height) { - draw_txt_rectangle(sb, font, logo, logo_lines, false, &r_logo, fg_color, bg_color); + drm_rect_width(&r_logo) <= sb->width && drm_rect_height(&r_logo) <= 
sb->height) { + draw_txt_rectangle(sb, font, logo, logo_lines, false, &r_logo, fg_color); } - draw_txt_rectangle(sb, font, panic_msg, msg_lines, true, &r_msg, fg_color, bg_color); + draw_txt_rectangle(sb, font, panic_msg, msg_lines, true, &r_msg, fg_color); } /* diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c index 71a6d2b1c80f..5c0c9d4e3be1 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c @@ -355,9 +355,11 @@ static void *etnaviv_gem_vmap_impl(struct etnaviv_gem_object *obj) static inline enum dma_data_direction etnaviv_op_to_dma_dir(u32 op) { - if (op & ETNA_PREP_READ) + op &= ETNA_PREP_READ | ETNA_PREP_WRITE; + + if (op == ETNA_PREP_READ) return DMA_FROM_DEVICE; - else if (op & ETNA_PREP_WRITE) + else if (op == ETNA_PREP_WRITE) return DMA_TO_DEVICE; else return DMA_BIDIRECTIONAL; diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c index c4b04b0dee16..62dcfdc7894d 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c @@ -38,9 +38,6 @@ static enum drm_gpu_sched_stat etnaviv_sched_timedout_job(struct drm_sched_job u32 dma_addr; int change; - /* block scheduler */ - drm_sched_stop(&gpu->sched, sched_job); - /* * If the GPU managed to complete this jobs fence, the timout is * spurious. Bail out. @@ -63,6 +60,9 @@ static enum drm_gpu_sched_stat etnaviv_sched_timedout_job(struct drm_sched_job goto out_no_timeout; } + /* block scheduler */ + drm_sched_stop(&gpu->sched, sched_job); + if(sched_job) drm_sched_increase_karma(sched_job); @@ -76,8 +76,7 @@ static enum drm_gpu_sched_stat etnaviv_sched_timedout_job(struct drm_sched_job return DRM_GPU_SCHED_STAT_NOMINAL; out_no_timeout: - /* restart scheduler after GPU is usable again */ - drm_sched_start(&gpu->sched, true); + list_add(&sched_job->list, &sched_job->sched->pending_list); return DRM_GPU_SCHED_STAT_NOMINAL; } diff --git a/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/drivers/gpu/drm/gma500/cdv_intel_lvds.c index f08a6803dc18..3adc2c9ab72d 100644 --- a/drivers/gpu/drm/gma500/cdv_intel_lvds.c +++ b/drivers/gpu/drm/gma500/cdv_intel_lvds.c @@ -311,6 +311,9 @@ static int cdv_intel_lvds_get_modes(struct drm_connector *connector) if (mode_dev->panel_fixed_mode != NULL) { struct drm_display_mode *mode = drm_mode_duplicate(dev, mode_dev->panel_fixed_mode); + if (!mode) + return 0; + drm_mode_probed_add(connector, mode); return 1; } diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c index 8486de230ec9..8d1be94a443b 100644 --- a/drivers/gpu/drm/gma500/psb_intel_lvds.c +++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c @@ -504,6 +504,9 @@ static int psb_intel_lvds_get_modes(struct drm_connector *connector) if (mode_dev->panel_fixed_mode != NULL) { struct drm_display_mode *mode = drm_mode_duplicate(dev, mode_dev->panel_fixed_mode); + if (!mode) + return 0; + drm_mode_probed_add(connector, mode); return 1; } diff --git a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c index ccaa4cb2809b..bddcc9edeab4 100644 --- a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c +++ b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c @@ -251,9 +251,10 @@ void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config, drm_printf(&p, "sdp split: %s\n", str_enabled_disabled(pipe_config->sdp_split_enable)); - drm_printf(&p, "psr: %s, psr2: %s, panel replay: %s, selective fetch: %s\n", - 
str_enabled_disabled(pipe_config->has_psr), - str_enabled_disabled(pipe_config->has_psr2), + drm_printf(&p, "psr: %s, selective update: %s, panel replay: %s, selective fetch: %s\n", + str_enabled_disabled(pipe_config->has_psr && + !pipe_config->has_panel_replay), + str_enabled_disabled(pipe_config->has_sel_update), str_enabled_disabled(pipe_config->has_panel_replay), str_enabled_disabled(pipe_config->enable_psr2_sel_fetch)); } diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 273323f30ae2..e53d3e900b3e 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -5318,9 +5318,11 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, * Panel replay has to be enabled before link training. PSR doesn't have * this requirement -> check these only if using panel replay */ - if (current_config->has_panel_replay || pipe_config->has_panel_replay) { + if (current_config->active_planes && + (current_config->has_panel_replay || + pipe_config->has_panel_replay)) { PIPE_CONF_CHECK_BOOL(has_psr); - PIPE_CONF_CHECK_BOOL(has_psr2); + PIPE_CONF_CHECK_BOOL(has_sel_update); PIPE_CONF_CHECK_BOOL(enable_psr2_sel_fetch); PIPE_CONF_CHECK_BOOL(enable_psr2_su_region_et); PIPE_CONF_CHECK_BOOL(has_panel_replay); diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h index 62f7a30c37dc..6747c10da298 100644 --- a/drivers/gpu/drm/i915/display/intel_display_types.h +++ b/drivers/gpu/drm/i915/display/intel_display_types.h @@ -1189,7 +1189,7 @@ struct intel_crtc_state { /* PSR is supported but might not be enabled due the lack of enabled planes */ bool has_psr; - bool has_psr2; + bool has_sel_update; bool enable_psr2_sel_fetch; bool enable_psr2_su_region_et; bool req_psr2_sdp_prior_scanline; diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index 5b3b6ae1e3d7..9c9e060476c7 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -2664,7 +2664,7 @@ static void intel_dp_compute_vsc_sdp(struct intel_dp *intel_dp, if (intel_dp_needs_vsc_sdp(crtc_state, conn_state)) { intel_dp_compute_vsc_colorimetry(crtc_state, conn_state, vsc); - } else if (crtc_state->has_psr2) { + } else if (crtc_state->has_sel_update) { /* * [PSR2 without colorimetry] * Prepare VSC Header for SU as per eDP 1.4 spec, Table 6-11 @@ -5267,6 +5267,8 @@ int intel_dp_retrain_link(struct intel_encoder *encoder, !intel_dp_mst_is_master_trans(crtc_state)) continue; + intel_dp->link_trained = false; + intel_dp_check_frl_training(intel_dp); intel_dp_pcon_dsc_configure(intel_dp, crtc_state); intel_dp_start_link_train(intel_dp, crtc_state); diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c index 947575140059..8cfc55f3d98e 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c +++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c @@ -114,10 +114,24 @@ intel_dp_set_lttpr_transparent_mode(struct intel_dp *intel_dp, bool enable) return drm_dp_dpcd_write(&intel_dp->aux, DP_PHY_REPEATER_MODE, &val, 1) == 1; } -static int intel_dp_init_lttpr(struct intel_dp *intel_dp, const u8 dpcd[DP_RECEIVER_CAP_SIZE]) +static bool intel_dp_lttpr_transparent_mode_enabled(struct intel_dp *intel_dp) +{ + return intel_dp->lttpr_common_caps[DP_PHY_REPEATER_MODE - + DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV] == + 
DP_PHY_REPEATER_MODE_TRANSPARENT; +} + +/* + * Read the LTTPR common capabilities and switch the LTTPR PHYs to + * non-transparent mode if this is supported. Preserve the + * transparent/non-transparent mode on an active link. + * + * Return the number of detected LTTPRs in non-transparent mode or 0 if the + * LTTPRs are in transparent mode or the detection failed. + */ +static int intel_dp_init_lttpr_phys(struct intel_dp *intel_dp, const u8 dpcd[DP_RECEIVER_CAP_SIZE]) { int lttpr_count; - int i; if (!intel_dp_read_lttpr_common_caps(intel_dp, dpcd)) return 0; @@ -131,6 +145,19 @@ static int intel_dp_init_lttpr(struct intel_dp *intel_dp, const u8 dpcd[DP_RECEI if (lttpr_count == 0) return 0; + /* + * Don't change the mode on an active link, to prevent a loss of link + * synchronization. See DP Standard v2.0 3.6.7. about the LTTPR + * resetting its internal state when the mode is changed from + * non-transparent to transparent. + */ + if (intel_dp->link_trained) { + if (lttpr_count < 0 || intel_dp_lttpr_transparent_mode_enabled(intel_dp)) + goto out_reset_lttpr_count; + + return lttpr_count; + } + /* * See DP Standard v2.0 3.6.6.1. about the explicit disabling of * non-transparent mode and the disable->enable non-transparent mode @@ -151,11 +178,25 @@ static int intel_dp_init_lttpr(struct intel_dp *intel_dp, const u8 dpcd[DP_RECEI "Switching to LTTPR non-transparent LT mode failed, fall-back to transparent mode\n"); intel_dp_set_lttpr_transparent_mode(intel_dp, true); - intel_dp_reset_lttpr_count(intel_dp); - return 0; + goto out_reset_lttpr_count; } + return lttpr_count; + +out_reset_lttpr_count: + intel_dp_reset_lttpr_count(intel_dp); + + return 0; +} + +static int intel_dp_init_lttpr(struct intel_dp *intel_dp, const u8 dpcd[DP_RECEIVER_CAP_SIZE]) +{ + int lttpr_count; + int i; + + lttpr_count = intel_dp_init_lttpr_phys(intel_dp, dpcd); + for (i = 0; i < lttpr_count; i++) intel_dp_read_lttpr_phy_caps(intel_dp, dpcd, DP_PHY_LTTPR(i)); @@ -1372,10 +1413,10 @@ void intel_dp_start_link_train(struct intel_dp *intel_dp, { struct drm_i915_private *i915 = dp_to_i915(intel_dp); bool passed; - /* - * TODO: Reiniting LTTPRs here won't be needed once proper connector - * HW state readout is added. + * Reinit the LTTPRs here to ensure that they are switched to + * non-transparent mode. During an earlier LTTPR detection this + * could've been prevented by an active link. 
*/ int lttpr_count = intel_dp_init_lttpr_and_dprx_caps(intel_dp); diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c index 151dcd0c45b6..984f13d8c0c8 100644 --- a/drivers/gpu/drm/i915/display/intel_fbc.c +++ b/drivers/gpu/drm/i915/display/intel_fbc.c @@ -1251,7 +1251,7 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state, * Recommendation is to keep this combination disabled * Bspec: 50422 HSD: 14010260002 */ - if (IS_DISPLAY_VER(i915, 12, 14) && crtc_state->has_psr2) { + if (IS_DISPLAY_VER(i915, 12, 14) && crtc_state->has_sel_update) { plane_state->no_fbc_reason = "PSR2 enabled"; return 0; } diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c index f5b33335a9ae..3c7da862222b 100644 --- a/drivers/gpu/drm/i915/display/intel_psr.c +++ b/drivers/gpu/drm/i915/display/intel_psr.c @@ -651,7 +651,7 @@ void intel_psr_enable_sink(struct intel_dp *intel_dp, struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); u8 dpcd_val = DP_PSR_ENABLE; - if (crtc_state->has_psr2) { + if (crtc_state->has_sel_update) { /* Enable ALPM at sink for psr2 */ if (!crtc_state->has_panel_replay) { drm_dp_dpcd_writeb(&intel_dp->aux, @@ -659,7 +659,7 @@ void intel_psr_enable_sink(struct intel_dp *intel_dp, DP_ALPM_ENABLE | DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE); - if (psr2_su_region_et_valid(intel_dp)) + if (crtc_state->enable_psr2_su_region_et) dpcd_val |= DP_PSR_ENABLE_SU_REGION_ET; } @@ -1639,7 +1639,7 @@ void intel_psr_compute_config(struct intel_dp *intel_dp, if (!crtc_state->has_psr) return; - crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state); + crtc_state->has_sel_update = intel_psr2_config_valid(intel_dp, crtc_state); } void intel_psr_get_config(struct intel_encoder *encoder, @@ -1672,7 +1672,7 @@ void intel_psr_get_config(struct intel_encoder *encoder, pipe_config->has_psr = true; } - pipe_config->has_psr2 = intel_dp->psr.psr2_enabled; + pipe_config->has_sel_update = intel_dp->psr.psr2_enabled; pipe_config->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC); if (!intel_dp->psr.psr2_enabled) @@ -1960,7 +1960,7 @@ static void intel_psr_enable_locked(struct intel_dp *intel_dp, drm_WARN_ON(&dev_priv->drm, intel_dp->psr.enabled); - intel_dp->psr.psr2_enabled = crtc_state->has_psr2; + intel_dp->psr.psr2_enabled = crtc_state->has_sel_update; intel_dp->psr.panel_replay_enabled = crtc_state->has_panel_replay; intel_dp->psr.busy_frontbuffer_bits = 0; intel_dp->psr.pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe; @@ -2484,7 +2484,7 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state, crtc_state->psr2_su_area.x1 = 0; crtc_state->psr2_su_area.y1 = -1; - crtc_state->psr2_su_area.x2 = INT_MAX; + crtc_state->psr2_su_area.x2 = drm_rect_width(&crtc_state->pipe_src); crtc_state->psr2_su_area.y2 = -1; /* @@ -2688,7 +2688,7 @@ void intel_psr_pre_plane_update(struct intel_atomic_state *state, needs_to_disable |= intel_crtc_needs_modeset(new_crtc_state); needs_to_disable |= !new_crtc_state->has_psr; needs_to_disable |= !new_crtc_state->active_planes; - needs_to_disable |= new_crtc_state->has_psr2 != psr->psr2_enabled; + needs_to_disable |= new_crtc_state->has_sel_update != psr->psr2_enabled; needs_to_disable |= DISPLAY_VER(i915) < 11 && new_crtc_state->wm_level_disabled; @@ -3694,16 +3694,9 @@ static int i915_psr_sink_status_show(struct seq_file *m, void *data) "reserved", "sink internal error", }; - static const char * const panel_replay_status[] = { - "Sink device frame is locked to 
the Source device", - "Sink device is coasting, using the VTotal target", - "Sink device is governing the frame rate (frame rate unlock is granted)", - "Sink device in the process of re-locking with the Source device", - }; const char *str; int ret; u8 status, error_status; - u32 idx; if (!(CAN_PSR(intel_dp) || CAN_PANEL_REPLAY(intel_dp))) { seq_puts(m, "PSR/Panel-Replay Unsupported\n"); @@ -3717,16 +3710,11 @@ static int i915_psr_sink_status_show(struct seq_file *m, void *data) if (ret) return ret; - str = "unknown"; - if (intel_dp->psr.panel_replay_enabled) { - idx = (status & DP_SINK_FRAME_LOCKED_MASK) >> DP_SINK_FRAME_LOCKED_SHIFT; - if (idx < ARRAY_SIZE(panel_replay_status)) - str = panel_replay_status[idx]; - } else if (intel_dp->psr.enabled) { - idx = status & DP_PSR_SINK_STATE_MASK; - if (idx < ARRAY_SIZE(sink_status)) - str = sink_status[idx]; - } + status &= DP_PSR_SINK_STATE_MASK; + if (status < ARRAY_SIZE(sink_status)) + str = sink_status[status]; + else + str = "unknown"; seq_printf(m, "Sink %s status: 0x%x [%s]\n", psr_mode_str(intel_dp), status, str); diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c index 21829439e686..72090f52fb85 100644 --- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c +++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c @@ -3315,11 +3315,7 @@ static void remove_from_engine(struct i915_request *rq) static bool can_preempt(struct intel_engine_cs *engine) { - if (GRAPHICS_VER(engine->i915) > 8) - return true; - - /* GPGPU on bdw requires extra w/a; not implemented */ - return engine->class != RENDER_CLASS; + return GRAPHICS_VER(engine->i915) > 8; } static void kick_execlists(const struct i915_request *rq, int prio) diff --git a/drivers/gpu/drm/mediatek/mtk_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_ddp_comp.c index 17b036411292..be66d94be361 100644 --- a/drivers/gpu/drm/mediatek/mtk_ddp_comp.c +++ b/drivers/gpu/drm/mediatek/mtk_ddp_comp.c @@ -514,29 +514,42 @@ static bool mtk_ddp_comp_find(struct device *dev, return false; } -static unsigned int mtk_ddp_comp_find_in_route(struct device *dev, - const struct mtk_drm_route *routes, - unsigned int num_routes, - struct mtk_ddp_comp *ddp_comp) +static int mtk_ddp_comp_find_in_route(struct device *dev, + const struct mtk_drm_route *routes, + unsigned int num_routes, + struct mtk_ddp_comp *ddp_comp) { - int ret; unsigned int i; - if (!routes) { - ret = -EINVAL; - goto err; - } + if (!routes) + return -EINVAL; for (i = 0; i < num_routes; i++) if (dev == ddp_comp[routes[i].route_ddp].dev) return BIT(routes[i].crtc_id); - ret = -ENODEV; -err: + return -ENODEV; +} - DRM_INFO("Failed to find comp in ddp table, ret = %d\n", ret); +static bool mtk_ddp_path_available(const unsigned int *path, + unsigned int path_len, + struct device_node **comp_node) +{ + unsigned int i; - return 0; + if (!path || !path_len) + return false; + + for (i = 0U; i < path_len; i++) { + /* OVL_ADAPTOR doesn't have a device node */ + if (path[i] == DDP_COMPONENT_DRM_OVL_ADAPTOR) + continue; + + if (!comp_node[path[i]]) + return false; + } + + return true; } int mtk_ddp_comp_get_id(struct device_node *node, @@ -554,31 +567,53 @@ int mtk_ddp_comp_get_id(struct device_node *node, return -EINVAL; } -unsigned int mtk_find_possible_crtcs(struct drm_device *drm, struct device *dev) +int mtk_find_possible_crtcs(struct drm_device *drm, struct device *dev) { struct mtk_drm_private *private = drm->dev_private; - unsigned int ret = 0; - - if (mtk_ddp_comp_find(dev, - 
private->data->main_path, - private->data->main_len, - private->ddp_comp)) - ret = BIT(0); - else if (mtk_ddp_comp_find(dev, - private->data->ext_path, - private->data->ext_len, - private->ddp_comp)) - ret = BIT(1); - else if (mtk_ddp_comp_find(dev, - private->data->third_path, - private->data->third_len, - private->ddp_comp)) - ret = BIT(2); - else - ret = mtk_ddp_comp_find_in_route(dev, - private->data->conn_routes, - private->data->num_conn_routes, - private->ddp_comp); + const struct mtk_mmsys_driver_data *data; + struct mtk_drm_private *priv_n; + int i = 0, j; + int ret; + + for (j = 0; j < private->data->mmsys_dev_num; j++) { + priv_n = private->all_drm_private[j]; + data = priv_n->data; + + if (mtk_ddp_path_available(data->main_path, data->main_len, + priv_n->comp_node)) { + if (mtk_ddp_comp_find(dev, data->main_path, + data->main_len, + priv_n->ddp_comp)) + return BIT(i); + i++; + } + + if (mtk_ddp_path_available(data->ext_path, data->ext_len, + priv_n->comp_node)) { + if (mtk_ddp_comp_find(dev, data->ext_path, + data->ext_len, + priv_n->ddp_comp)) + return BIT(i); + i++; + } + + if (mtk_ddp_path_available(data->third_path, data->third_len, + priv_n->comp_node)) { + if (mtk_ddp_comp_find(dev, data->third_path, + data->third_len, + priv_n->ddp_comp)) + return BIT(i); + i++; + } + } + + ret = mtk_ddp_comp_find_in_route(dev, + private->data->conn_routes, + private->data->num_conn_routes, + private->ddp_comp); + + if (ret < 0) + DRM_INFO("Failed to find comp in ddp table, ret = %d\n", ret); return ret; } @@ -593,7 +628,7 @@ int mtk_ddp_comp_init(struct device_node *node, struct mtk_ddp_comp *comp, int ret; #endif - if (comp_id < 0 || comp_id >= DDP_COMPONENT_DRM_ID_MAX) + if (comp_id >= DDP_COMPONENT_DRM_ID_MAX) return -EINVAL; type = mtk_ddp_matches[comp_id].type; diff --git a/drivers/gpu/drm/mediatek/mtk_ddp_comp.h b/drivers/gpu/drm/mediatek/mtk_ddp_comp.h index 26236691ce4c..ecf6dc283cd7 100644 --- a/drivers/gpu/drm/mediatek/mtk_ddp_comp.h +++ b/drivers/gpu/drm/mediatek/mtk_ddp_comp.h @@ -192,7 +192,11 @@ unsigned int mtk_ddp_comp_supported_rotations(struct mtk_ddp_comp *comp) if (comp->funcs && comp->funcs->supported_rotations) return comp->funcs->supported_rotations(comp->dev); - return 0; + /* + * In order to pass IGT tests, DRM_MODE_ROTATE_0 is required when + * rotation is not supported. 
+ */ + return DRM_MODE_ROTATE_0; } static inline unsigned int mtk_ddp_comp_layer_nr(struct mtk_ddp_comp *comp) @@ -326,7 +330,7 @@ static inline void mtk_ddp_comp_encoder_index_set(struct mtk_ddp_comp *comp) int mtk_ddp_comp_get_id(struct device_node *node, enum mtk_ddp_comp_type comp_type); -unsigned int mtk_find_possible_crtcs(struct drm_device *drm, struct device *dev); +int mtk_find_possible_crtcs(struct drm_device *drm, struct device *dev); int mtk_ddp_comp_init(struct device_node *comp_node, struct mtk_ddp_comp *comp, unsigned int comp_id); enum mtk_ddp_comp_type mtk_ddp_comp_get_type(unsigned int comp_id); diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c index b552a02d7eae..26b598b9f71f 100644 --- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c +++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c @@ -38,6 +38,7 @@ #define DISP_REG_OVL_PITCH_MSB(n) (0x0040 + 0x20 * (n)) #define OVL_PITCH_MSB_2ND_SUBBUF BIT(16) #define DISP_REG_OVL_PITCH(n) (0x0044 + 0x20 * (n)) +#define OVL_CONST_BLEND BIT(28) #define DISP_REG_OVL_RDMA_CTRL(n) (0x00c0 + 0x20 * (n)) #define DISP_REG_OVL_RDMA_GMC(n) (0x00c8 + 0x20 * (n)) #define DISP_REG_OVL_ADDR_MT2701 0x0040 @@ -71,6 +72,8 @@ #define OVL_CON_VIRT_FLIP BIT(9) #define OVL_CON_HORZ_FLIP BIT(10) +#define OVL_COLOR_ALPHA GENMASK(31, 24) + static const u32 mt8173_formats[] = { DRM_FORMAT_XRGB8888, DRM_FORMAT_ARGB8888, @@ -273,7 +276,13 @@ void mtk_ovl_config(struct device *dev, unsigned int w, if (w != 0 && h != 0) mtk_ddp_write_relaxed(cmdq_pkt, h << 16 | w, &ovl->cmdq_reg, ovl->regs, DISP_REG_OVL_ROI_SIZE); - mtk_ddp_write_relaxed(cmdq_pkt, 0x0, &ovl->cmdq_reg, ovl->regs, DISP_REG_OVL_ROI_BGCLR); + + /* + * The background color must be opaque black (ARGB), + * otherwise the alpha blending will have no effect + */ + mtk_ddp_write_relaxed(cmdq_pkt, OVL_COLOR_ALPHA, &ovl->cmdq_reg, + ovl->regs, DISP_REG_OVL_ROI_BGCLR); mtk_ddp_write(cmdq_pkt, 0x1, &ovl->cmdq_reg, ovl->regs, DISP_REG_OVL_RST); mtk_ddp_write(cmdq_pkt, 0x0, &ovl->cmdq_reg, ovl->regs, DISP_REG_OVL_RST); @@ -296,27 +305,20 @@ int mtk_ovl_layer_check(struct device *dev, unsigned int idx, struct mtk_plane_state *mtk_state) { struct drm_plane_state *state = &mtk_state->base; - unsigned int rotation = 0; - rotation = drm_rotation_simplify(state->rotation, - DRM_MODE_ROTATE_0 | - DRM_MODE_REFLECT_X | - DRM_MODE_REFLECT_Y); - rotation &= ~DRM_MODE_ROTATE_0; - - /* We can only do reflection, not rotation */ - if ((rotation & DRM_MODE_ROTATE_MASK) != 0) + /* check if any unsupported rotation is set */ + if (state->rotation & ~mtk_ovl_supported_rotations(dev)) return -EINVAL; /* * TODO: Rotating/reflecting YUV buffers is not supported at this time. * Only RGB[AX] variants are supported. + * Since DRM_MODE_ROTATE_0 means "no rotation", we should not + * reject layers with this property. 
*/ - if (state->fb->format->is_yuv && rotation != 0) + if (state->fb->format->is_yuv && (state->rotation & ~DRM_MODE_ROTATE_0)) return -EINVAL; - state->rotation = rotation; - return 0; } @@ -407,6 +409,7 @@ void mtk_ovl_layer_config(struct device *dev, unsigned int idx, unsigned int fmt = pending->format; unsigned int offset = (pending->y << 16) | pending->x; unsigned int src_size = (pending->height << 16) | pending->width; + unsigned int ignore_pixel_alpha = 0; unsigned int con; bool is_afbc = pending->modifier != DRM_FORMAT_MOD_LINEAR; union overlay_pitch { @@ -428,6 +431,14 @@ void mtk_ovl_layer_config(struct device *dev, unsigned int idx, if (state->base.fb && state->base.fb->format->has_alpha) con |= OVL_CON_AEN | OVL_CON_ALPHA; + /* CONST_BLD must be enabled for XRGB formats although the alpha channel + * can be ignored, or OVL will still read the value from memory. + * For RGB888 related formats, whether CONST_BLD is enabled or not won't + * affect the result. Therefore we use !has_alpha as the condition. + */ + if (state->base.fb && !state->base.fb->format->has_alpha) + ignore_pixel_alpha = OVL_CONST_BLEND; + if (pending->rotation & DRM_MODE_REFLECT_Y) { con |= OVL_CON_VIRT_FLIP; addr += (pending->height - 1) * pending->pitch; @@ -443,8 +454,8 @@ void mtk_ovl_layer_config(struct device *dev, unsigned int idx, mtk_ddp_write_relaxed(cmdq_pkt, con, &ovl->cmdq_reg, ovl->regs, DISP_REG_OVL_CON(idx)); - mtk_ddp_write_relaxed(cmdq_pkt, overlay_pitch.split_pitch.lsb, &ovl->cmdq_reg, ovl->regs, - DISP_REG_OVL_PITCH(idx)); + mtk_ddp_write_relaxed(cmdq_pkt, overlay_pitch.split_pitch.lsb | ignore_pixel_alpha, + &ovl->cmdq_reg, ovl->regs, DISP_REG_OVL_PITCH(idx)); mtk_ddp_write_relaxed(cmdq_pkt, src_size, &ovl->cmdq_reg, ovl->regs, DISP_REG_OVL_SRC_SIZE(idx)); mtk_ddp_write_relaxed(cmdq_pkt, offset, &ovl->cmdq_reg, ovl->regs, diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c index 02dd7dcdfedb..2b62d6475918 100644 --- a/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c +++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c @@ -158,7 +158,7 @@ void mtk_ovl_adaptor_layer_config(struct device *dev, unsigned int idx, merge = ovl_adaptor->ovl_adaptor_comp[OVL_ADAPTOR_MERGE0 + idx]; ethdr = ovl_adaptor->ovl_adaptor_comp[OVL_ADAPTOR_ETHDR0]; - if (!pending->enable) { + if (!pending->enable || !pending->width || !pending->height) { mtk_merge_stop_cmdq(merge, cmdq_pkt); mtk_mdp_rdma_stop(rdma_l, cmdq_pkt); mtk_mdp_rdma_stop(rdma_r, cmdq_pkt); diff --git a/drivers/gpu/drm/mediatek/mtk_dp.c b/drivers/gpu/drm/mediatek/mtk_dp.c index 536366956447..ada12927bbac 100644 --- a/drivers/gpu/drm/mediatek/mtk_dp.c +++ b/drivers/gpu/drm/mediatek/mtk_dp.c @@ -2073,9 +2073,15 @@ static const struct drm_edid *mtk_dp_edid_read(struct drm_bridge *bridge, */ const struct edid *edid = drm_edid_raw(drm_edid); struct cea_sad *sads; + int ret; - audio_caps->sad_count = drm_edid_to_sad(edid, &sads); - kfree(sads); + ret = drm_edid_to_sad(edid, &sads); + /* Ignore any errors */ + if (ret < 0) + ret = 0; + if (ret) + kfree(sads); + audio_caps->sad_count = ret; /* * FIXME: This should use connector->display_info.has_audio from diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c index bfe8653005db..a08d20654954 100644 --- a/drivers/gpu/drm/mediatek/mtk_dpi.c +++ b/drivers/gpu/drm/mediatek/mtk_dpi.c @@ -805,7 +805,10 @@ static int mtk_dpi_bind(struct device *dev, struct device *master, void *data) return ret; } - dpi->encoder.possible_crtcs 
= mtk_find_possible_crtcs(drm_dev, dpi->dev); + ret = mtk_find_possible_crtcs(drm_dev, dpi->dev); + if (ret < 0) + goto err_cleanup; + dpi->encoder.possible_crtcs = ret; ret = drm_bridge_attach(&dpi->encoder, &dpi->bridge, NULL, DRM_BRIDGE_ATTACH_NO_CONNECTOR); diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c index de811e2265da..56f409ad7f39 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c @@ -294,6 +294,9 @@ static const struct mtk_mmsys_driver_data mt8188_vdosys0_driver_data = { .conn_routes = mt8188_mtk_ddp_main_routes, .num_conn_routes = ARRAY_SIZE(mt8188_mtk_ddp_main_routes), .mmsys_dev_num = 2, + .max_width = 8191, + .min_width = 1, + .min_height = 1, }; static const struct mtk_mmsys_driver_data mt8192_mmsys_driver_data = { @@ -308,6 +311,9 @@ static const struct mtk_mmsys_driver_data mt8195_vdosys0_driver_data = { .main_path = mt8195_mtk_ddp_main, .main_len = ARRAY_SIZE(mt8195_mtk_ddp_main), .mmsys_dev_num = 2, + .max_width = 8191, + .min_width = 1, + .min_height = 1, }; static const struct mtk_mmsys_driver_data mt8195_vdosys1_driver_data = { @@ -315,6 +321,9 @@ static const struct mtk_mmsys_driver_data mt8195_vdosys1_driver_data = { .ext_len = ARRAY_SIZE(mt8195_mtk_ddp_ext), .mmsys_id = 1, .mmsys_dev_num = 2, + .max_width = 8191, + .min_width = 2, /* 2-pixel align when ethdr is bypassed */ + .min_height = 1, }; static const struct of_device_id mtk_drm_of_ids[] = { @@ -493,6 +502,15 @@ static int mtk_drm_kms_init(struct drm_device *drm) for (j = 0; j < private->data->mmsys_dev_num; j++) { priv_n = private->all_drm_private[j]; + if (priv_n->data->max_width) + drm->mode_config.max_width = priv_n->data->max_width; + + if (priv_n->data->min_width) + drm->mode_config.min_width = priv_n->data->min_width; + + if (priv_n->data->min_height) + drm->mode_config.min_height = priv_n->data->min_height; + if (i == CRTC_MAIN && priv_n->data->main_len) { ret = mtk_crtc_create(drm, priv_n->data->main_path, priv_n->data->main_len, j, @@ -520,6 +538,10 @@ static int mtk_drm_kms_init(struct drm_device *drm) } } + /* IGT will check if the cursor size is configured */ + drm->mode_config.cursor_width = drm->mode_config.max_width; + drm->mode_config.cursor_height = drm->mode_config.max_height; + /* Use OVL device for all DMA memory allocations */ crtc = drm_crtc_from_index(drm, 0); if (crtc) @@ -743,6 +765,8 @@ static const struct of_device_id mtk_ddp_comp_dt_ids[] = { .data = (void *)MTK_DISP_OVL }, { .compatible = "mediatek,mt8192-disp-ovl", .data = (void *)MTK_DISP_OVL }, + { .compatible = "mediatek,mt8195-disp-ovl", + .data = (void *)MTK_DISP_OVL }, { .compatible = "mediatek,mt8183-disp-ovl-2l", .data = (void *)MTK_DISP_OVL_2L }, { .compatible = "mediatek,mt8192-disp-ovl-2l", diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.h b/drivers/gpu/drm/mediatek/mtk_drm_drv.h index 78d698ede1bf..ce897984de51 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_drv.h +++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.h @@ -46,6 +46,10 @@ struct mtk_mmsys_driver_data { bool shadow_register; unsigned int mmsys_id; unsigned int mmsys_dev_num; + + u16 max_width; + u16 min_width; + u16 min_height; }; struct mtk_drm_private { diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c index c255559cc56e..b6e3c011a12d 100644 --- a/drivers/gpu/drm/mediatek/mtk_dsi.c +++ b/drivers/gpu/drm/mediatek/mtk_dsi.c @@ -837,7 +837,10 @@ static int mtk_dsi_encoder_init(struct drm_device *drm, struct mtk_dsi *dsi) return ret; } - 
dsi->encoder.possible_crtcs = mtk_find_possible_crtcs(drm, dsi->host.dev); + ret = mtk_find_possible_crtcs(drm, dsi->host.dev); + if (ret < 0) + goto err_cleanup_encoder; + dsi->encoder.possible_crtcs = ret; ret = drm_bridge_attach(&dsi->encoder, &dsi->bridge, NULL, DRM_BRIDGE_ATTACH_NO_CONNECTOR); diff --git a/drivers/gpu/drm/mediatek/mtk_ethdr.c b/drivers/gpu/drm/mediatek/mtk_ethdr.c index 156c6ff547e8..bf5826b7e776 100644 --- a/drivers/gpu/drm/mediatek/mtk_ethdr.c +++ b/drivers/gpu/drm/mediatek/mtk_ethdr.c @@ -50,7 +50,6 @@ #define MIXER_INX_MODE_BYPASS 0 #define MIXER_INX_MODE_EVEN_EXTEND 1 -#define DEFAULT_9BIT_ALPHA 0x100 #define MIXER_ALPHA_AEN BIT(8) #define MIXER_ALPHA 0xff #define ETHDR_CLK_NUM 13 @@ -154,13 +153,19 @@ void mtk_ethdr_layer_config(struct device *dev, unsigned int idx, unsigned int offset = (pending->x & 1) << 31 | pending->y << 16 | pending->x; unsigned int align_width = ALIGN_DOWN(pending->width, 2); unsigned int alpha_con = 0; + bool replace_src_a = false; dev_dbg(dev, "%s+ idx:%d", __func__, idx); if (idx >= 4) return; - if (!pending->enable) { + if (!pending->enable || !pending->width || !pending->height) { + /* + * instead of disabling layer with MIX_SRC_CON directly + * set the size to 0 to avoid screen shift due to mixer + * mode switch (hardware behavior) + */ mtk_ddp_write(cmdq_pkt, 0, &mixer->cmdq_base, mixer->regs, MIX_L_SRC_SIZE(idx)); return; } @@ -168,8 +173,16 @@ void mtk_ethdr_layer_config(struct device *dev, unsigned int idx, if (state->base.fb && state->base.fb->format->has_alpha) alpha_con = MIXER_ALPHA_AEN | MIXER_ALPHA; - mtk_mmsys_mixer_in_config(priv->mmsys_dev, idx + 1, alpha_con ? false : true, - DEFAULT_9BIT_ALPHA, + if (state->base.fb && !state->base.fb->format->has_alpha) { + /* + * Mixer doesn't support CONST_BLD mode, + * use a trick to make the output equivalent + */ + replace_src_a = true; + } + + mtk_mmsys_mixer_in_config(priv->mmsys_dev, idx + 1, replace_src_a, + MIXER_ALPHA, pending->x & 1 ? 
MIXER_INX_MODE_EVEN_EXTEND : MIXER_INX_MODE_BYPASS, align_width / 2 - 1, cmdq_pkt); diff --git a/drivers/gpu/drm/mediatek/mtk_plane.c b/drivers/gpu/drm/mediatek/mtk_plane.c index 4625deb21d40..1723d4333f37 100644 --- a/drivers/gpu/drm/mediatek/mtk_plane.c +++ b/drivers/gpu/drm/mediatek/mtk_plane.c @@ -227,6 +227,8 @@ static void mtk_plane_atomic_async_update(struct drm_plane *plane, plane->state->src_y = new_state->src_y; plane->state->src_h = new_state->src_h; plane->state->src_w = new_state->src_w; + plane->state->dst.x1 = new_state->dst.x1; + plane->state->dst.y1 = new_state->dst.y1; mtk_plane_update_new_state(new_state, new_plane_state); swap(plane->state->fb, new_state->fb); @@ -336,7 +338,7 @@ int mtk_plane_init(struct drm_device *dev, struct drm_plane *plane, return err; } - if (supported_rotations & ~DRM_MODE_ROTATE_0) { + if (supported_rotations) { err = drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0, supported_rotations); diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c index 17a5cca007e2..4bd0baa2a4f5 100644 --- a/drivers/gpu/drm/meson/meson_drv.c +++ b/drivers/gpu/drm/meson/meson_drv.c @@ -250,29 +250,20 @@ static int meson_drv_bind_master(struct device *dev, bool has_components) if (ret) goto free_drm; ret = meson_canvas_alloc(priv->canvas, &priv->canvas_id_vd1_0); - if (ret) { - meson_canvas_free(priv->canvas, priv->canvas_id_osd1); - goto free_drm; - } + if (ret) + goto free_canvas_osd1; ret = meson_canvas_alloc(priv->canvas, &priv->canvas_id_vd1_1); - if (ret) { - meson_canvas_free(priv->canvas, priv->canvas_id_osd1); - meson_canvas_free(priv->canvas, priv->canvas_id_vd1_0); - goto free_drm; - } + if (ret) + goto free_canvas_vd1_0; ret = meson_canvas_alloc(priv->canvas, &priv->canvas_id_vd1_2); - if (ret) { - meson_canvas_free(priv->canvas, priv->canvas_id_osd1); - meson_canvas_free(priv->canvas, priv->canvas_id_vd1_0); - meson_canvas_free(priv->canvas, priv->canvas_id_vd1_1); - goto free_drm; - } + if (ret) + goto free_canvas_vd1_1; priv->vsync_irq = platform_get_irq(pdev, 0); ret = drm_vblank_init(drm, 1); if (ret) - goto free_drm; + goto free_canvas_vd1_2; /* Assign limits per soc revision/package */ for (i = 0 ; i < ARRAY_SIZE(meson_drm_soc_attrs) ; ++i) { @@ -288,11 +279,11 @@ static int meson_drv_bind_master(struct device *dev, bool has_components) */ ret = drm_aperture_remove_framebuffers(&meson_driver); if (ret) - goto free_drm; + goto free_canvas_vd1_2; ret = drmm_mode_config_init(drm); if (ret) - goto free_drm; + goto free_canvas_vd1_2; drm->mode_config.max_width = 3840; drm->mode_config.max_height = 2160; drm->mode_config.funcs = &meson_mode_config_funcs; @@ -307,7 +298,7 @@ static int meson_drv_bind_master(struct device *dev, bool has_components) if (priv->afbcd.ops) { ret = priv->afbcd.ops->init(priv); if (ret) - goto free_drm; + goto free_canvas_vd1_2; } /* Encoder Initialization */ @@ -371,6 +362,14 @@ static int meson_drv_bind_master(struct device *dev, bool has_components) exit_afbcd: if (priv->afbcd.ops) priv->afbcd.ops->exit(priv); +free_canvas_vd1_2: + meson_canvas_free(priv->canvas, priv->canvas_id_vd1_2); +free_canvas_vd1_1: + meson_canvas_free(priv->canvas, priv->canvas_id_vd1_1); +free_canvas_vd1_0: + meson_canvas_free(priv->canvas, priv->canvas_id_vd1_0); +free_canvas_osd1: + meson_canvas_free(priv->canvas, priv->canvas_id_osd1); free_drm: drm_dev_put(drm); diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c index 973872ad0474..5383aff84830 100644 --- 
a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c @@ -1409,7 +1409,7 @@ static void a6xx_calc_ubwc_config(struct adreno_gpu *gpu) if (adreno_is_a702(gpu)) { gpu->ubwc_config.highest_bank_bit = 14; gpu->ubwc_config.min_acc_len = 1; - gpu->ubwc_config.ubwc_mode = 2; + gpu->ubwc_config.ubwc_mode = 0; } } diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c index 0a7717a4fc2f..789a11416f7a 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c @@ -8,19 +8,16 @@ #include "a6xx_gpu_state.h" #include "a6xx_gmu.xml.h" -/* Ignore diagnostics about register tables that we aren't using yet. We don't - * want to modify these headers too much from their original source. - */ -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wunused-variable" -#pragma GCC diagnostic ignored "-Wunused-const-variable" +static const unsigned int *gen7_0_0_external_core_regs[] __always_unused; +static const unsigned int *gen7_2_0_external_core_regs[] __always_unused; +static const unsigned int *gen7_9_0_external_core_regs[] __always_unused; +static struct gen7_sptp_cluster_registers gen7_9_0_sptp_clusters[] __always_unused; +static const u32 gen7_9_0_cx_debugbus_blocks[] __always_unused; #include "adreno_gen7_0_0_snapshot.h" #include "adreno_gen7_2_0_snapshot.h" #include "adreno_gen7_9_0_snapshot.h" -#pragma GCC diagnostic pop - struct a6xx_gpu_state_obj { const void *handle; u32 *data; diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c index 119f3ea50a7c..697ad4a64051 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c @@ -428,7 +428,7 @@ int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc, return -EWOULDBLOCK; } - if (irq_idx < 0) { + if (irq_idx == 0) { DRM_DEBUG_KMS("skip irq wait id=%u, callback=%ps\n", DRMID(phys_enc->parent), func); return 0; @@ -1200,6 +1200,8 @@ static void dpu_encoder_virt_atomic_mode_set(struct drm_encoder *drm_enc, phys->hw_ctl = to_dpu_hw_ctl(hw_ctl[i]); phys->cached_mode = crtc_state->adjusted_mode; + if (phys->ops.atomic_mode_set) + phys->ops.atomic_mode_set(phys, crtc_state, conn_state); } } @@ -1741,8 +1743,7 @@ void dpu_encoder_trigger_kickoff_pending(struct drm_encoder *drm_enc) phys = dpu_enc->phys_encs[i]; ctl = phys->hw_ctl; - if (ctl->ops.clear_pending_flush) - ctl->ops.clear_pending_flush(ctl); + ctl->ops.clear_pending_flush(ctl); /* update only for command mode primary ctl */ if ((phys == dpu_enc->cur_master) && diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h index 002e89cc1705..30470cd15a48 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h @@ -69,6 +69,8 @@ struct dpu_encoder_phys; * @is_master: Whether this phys_enc is the current master * encoder. Can be switched at enable time. Based * on split_role and current mode (CMD/VID). + * @atomic_mode_set: DRM Call. Set a DRM mode. + * This likely caches the mode, for use at enable. * @enable: DRM Call. Enable a DRM mode. * @disable: DRM Call. Disable mode. 
* @control_vblank_irq Register/Deregister for VBLANK IRQ @@ -93,6 +95,9 @@ struct dpu_encoder_phys; struct dpu_encoder_phys_ops { void (*prepare_commit)(struct dpu_encoder_phys *encoder); bool (*is_master)(struct dpu_encoder_phys *encoder); + void (*atomic_mode_set)(struct dpu_encoder_phys *encoder, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state); void (*enable)(struct dpu_encoder_phys *encoder); void (*disable)(struct dpu_encoder_phys *encoder); int (*control_vblank_irq)(struct dpu_encoder_phys *enc, bool enable); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c index 489be1c0c704..95cd39b49668 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c @@ -142,6 +142,23 @@ static void dpu_encoder_phys_cmd_underrun_irq(void *arg) dpu_encoder_underrun_callback(phys_enc->parent, phys_enc); } +static void dpu_encoder_phys_cmd_atomic_mode_set( + struct dpu_encoder_phys *phys_enc, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state) +{ + phys_enc->irq[INTR_IDX_CTL_START] = phys_enc->hw_ctl->caps->intr_start; + + phys_enc->irq[INTR_IDX_PINGPONG] = phys_enc->hw_pp->caps->intr_done; + + if (phys_enc->has_intf_te) + phys_enc->irq[INTR_IDX_RDPTR] = phys_enc->hw_intf->cap->intr_tear_rd_ptr; + else + phys_enc->irq[INTR_IDX_RDPTR] = phys_enc->hw_pp->caps->intr_rdptr; + + phys_enc->irq[INTR_IDX_UNDERRUN] = phys_enc->hw_intf->cap->intr_underrun; +} + static int _dpu_encoder_phys_cmd_handle_ppdone_timeout( struct dpu_encoder_phys *phys_enc) { @@ -280,14 +297,6 @@ static void dpu_encoder_phys_cmd_irq_enable(struct dpu_encoder_phys *phys_enc) phys_enc->hw_pp->idx - PINGPONG_0, phys_enc->vblank_refcount); - phys_enc->irq[INTR_IDX_CTL_START] = phys_enc->hw_ctl->caps->intr_start; - phys_enc->irq[INTR_IDX_PINGPONG] = phys_enc->hw_pp->caps->intr_done; - - if (phys_enc->has_intf_te) - phys_enc->irq[INTR_IDX_RDPTR] = phys_enc->hw_intf->cap->intr_tear_rd_ptr; - else - phys_enc->irq[INTR_IDX_RDPTR] = phys_enc->hw_pp->caps->intr_rdptr; - dpu_core_irq_register_callback(phys_enc->dpu_kms, phys_enc->irq[INTR_IDX_PINGPONG], dpu_encoder_phys_cmd_pp_tx_done_irq, @@ -318,10 +327,6 @@ static void dpu_encoder_phys_cmd_irq_disable(struct dpu_encoder_phys *phys_enc) dpu_core_irq_unregister_callback(phys_enc->dpu_kms, phys_enc->irq[INTR_IDX_UNDERRUN]); dpu_encoder_phys_cmd_control_vblank_irq(phys_enc, false); dpu_core_irq_unregister_callback(phys_enc->dpu_kms, phys_enc->irq[INTR_IDX_PINGPONG]); - - phys_enc->irq[INTR_IDX_CTL_START] = 0; - phys_enc->irq[INTR_IDX_PINGPONG] = 0; - phys_enc->irq[INTR_IDX_RDPTR] = 0; } static void dpu_encoder_phys_cmd_tearcheck_config( @@ -698,6 +703,7 @@ static void dpu_encoder_phys_cmd_init_ops( struct dpu_encoder_phys_ops *ops) { ops->is_master = dpu_encoder_phys_cmd_is_master; + ops->atomic_mode_set = dpu_encoder_phys_cmd_atomic_mode_set; ops->enable = dpu_encoder_phys_cmd_enable; ops->disable = dpu_encoder_phys_cmd_disable; ops->control_vblank_irq = dpu_encoder_phys_cmd_control_vblank_irq; @@ -736,8 +742,6 @@ struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(struct drm_device *dev, dpu_encoder_phys_cmd_init_ops(&phys_enc->ops); phys_enc->intf_mode = INTF_MODE_CMD; - phys_enc->irq[INTR_IDX_UNDERRUN] = phys_enc->hw_intf->cap->intr_underrun; - cmd_enc->stream_sel = 0; if (!phys_enc->hw_intf) { diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c 
b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c index ef69c2f408c3..636a97432d51 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c @@ -356,6 +356,16 @@ static bool dpu_encoder_phys_vid_needs_single_flush( return phys_enc->split_role != ENC_ROLE_SOLO; } +static void dpu_encoder_phys_vid_atomic_mode_set( + struct dpu_encoder_phys *phys_enc, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state) +{ + phys_enc->irq[INTR_IDX_VSYNC] = phys_enc->hw_intf->cap->intr_vsync; + + phys_enc->irq[INTR_IDX_UNDERRUN] = phys_enc->hw_intf->cap->intr_underrun; +} + static int dpu_encoder_phys_vid_control_vblank_irq( struct dpu_encoder_phys *phys_enc, bool enable) @@ -699,6 +709,7 @@ static int dpu_encoder_phys_vid_get_frame_count( static void dpu_encoder_phys_vid_init_ops(struct dpu_encoder_phys_ops *ops) { ops->is_master = dpu_encoder_phys_vid_is_master; + ops->atomic_mode_set = dpu_encoder_phys_vid_atomic_mode_set; ops->enable = dpu_encoder_phys_vid_enable; ops->disable = dpu_encoder_phys_vid_disable; ops->control_vblank_irq = dpu_encoder_phys_vid_control_vblank_irq; @@ -737,8 +748,6 @@ struct dpu_encoder_phys *dpu_encoder_phys_vid_init(struct drm_device *dev, dpu_encoder_phys_vid_init_ops(&phys_enc->ops); phys_enc->intf_mode = INTF_MODE_VIDEO; - phys_enc->irq[INTR_IDX_VSYNC] = phys_enc->hw_intf->cap->intr_vsync; - phys_enc->irq[INTR_IDX_UNDERRUN] = phys_enc->hw_intf->cap->intr_underrun; DPU_DEBUG_VIDENC(phys_enc, "created intf idx:%d\n", p->hw_intf->idx); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c index d3ea91c1d7d2..882c717859ce 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c @@ -404,6 +404,15 @@ static void dpu_encoder_phys_wb_irq_disable(struct dpu_encoder_phys *phys) dpu_core_irq_unregister_callback(phys->dpu_kms, phys->irq[INTR_IDX_WB_DONE]); } +static void dpu_encoder_phys_wb_atomic_mode_set( + struct dpu_encoder_phys *phys_enc, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state) +{ + + phys_enc->irq[INTR_IDX_WB_DONE] = phys_enc->hw_wb->caps->intr_wb_done; +} + static void _dpu_encoder_phys_wb_handle_wbdone_timeout( struct dpu_encoder_phys *phys_enc) { @@ -529,8 +538,7 @@ static void dpu_encoder_phys_wb_disable(struct dpu_encoder_phys *phys_enc) } /* reset h/w before final flush */ - if (phys_enc->hw_ctl->ops.clear_pending_flush) - phys_enc->hw_ctl->ops.clear_pending_flush(phys_enc->hw_ctl); + phys_enc->hw_ctl->ops.clear_pending_flush(phys_enc->hw_ctl); /* * New CTL reset sequence from 5.0 MDP onwards. 
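Taken together, the dpu_encoder changes here (the virt encoder plus the CMD, VID and WB physical encoders) move IRQ index selection out of the init and irq-enable paths and into a new atomic_mode_set callback: dpu_encoder_virt_atomic_mode_set() assigns hw_ctl and caches the adjusted mode, then invokes the callback, so indexes derived from hw_ctl/hw_pp/hw_intf/hw_wb are refreshed on every modeset, and clear_pending_flush() becomes a required ctl op that is now called without a NULL check. A rough sketch of the callback shape, simplified from the CMD variant shown earlier (illustrative only, not the exact driver code):

static void sketch_cmd_atomic_mode_set(struct dpu_encoder_phys *phys_enc,
				       struct drm_crtc_state *crtc_state,
				       struct drm_connector_state *conn_state)
{
	/*
	 * hw_ctl and hw_pp were just (re)assigned for this mode by the
	 * virt encoder, so latching the IRQ indexes here keeps them in
	 * sync across modesets instead of fixing them at init time.
	 */
	phys_enc->irq[INTR_IDX_CTL_START] = phys_enc->hw_ctl->caps->intr_start;
	phys_enc->irq[INTR_IDX_PINGPONG] = phys_enc->hw_pp->caps->intr_done;
	phys_enc->irq[INTR_IDX_UNDERRUN] = phys_enc->hw_intf->cap->intr_underrun;
}
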
@@ -640,6 +648,7 @@ static bool dpu_encoder_phys_wb_is_valid_for_commit(struct dpu_encoder_phys *phy static void dpu_encoder_phys_wb_init_ops(struct dpu_encoder_phys_ops *ops) { ops->is_master = dpu_encoder_phys_wb_is_master; + ops->atomic_mode_set = dpu_encoder_phys_wb_atomic_mode_set; ops->enable = dpu_encoder_phys_wb_enable; ops->disable = dpu_encoder_phys_wb_disable; ops->wait_for_commit_done = dpu_encoder_phys_wb_wait_for_commit_done; @@ -685,7 +694,6 @@ struct dpu_encoder_phys *dpu_encoder_phys_wb_init(struct drm_device *dev, dpu_encoder_phys_wb_init_ops(&phys_enc->ops); phys_enc->intf_mode = INTF_MODE_WB_LINE; - phys_enc->irq[INTR_IDX_WB_DONE] = phys_enc->hw_wb->caps->intr_wb_done; atomic_set(&wb_enc->wbirq_refcount, 0); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c index f2b6eac7601d..9b72977feafa 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c @@ -220,12 +220,9 @@ static const u32 wb2_formats_rgb[] = { DRM_FORMAT_RGBA4444, DRM_FORMAT_RGBX4444, DRM_FORMAT_XRGB4444, - DRM_FORMAT_BGR565, DRM_FORMAT_BGR888, - DRM_FORMAT_ABGR8888, DRM_FORMAT_BGRA8888, DRM_FORMAT_BGRX8888, - DRM_FORMAT_XBGR8888, DRM_FORMAT_ABGR1555, DRM_FORMAT_BGRA5551, DRM_FORMAT_XBGR1555, @@ -254,12 +251,9 @@ static const u32 wb2_formats_rgb_yuv[] = { DRM_FORMAT_RGBA4444, DRM_FORMAT_RGBX4444, DRM_FORMAT_XRGB4444, - DRM_FORMAT_BGR565, DRM_FORMAT_BGR888, - DRM_FORMAT_ABGR8888, DRM_FORMAT_BGRA8888, DRM_FORMAT_BGRX8888, - DRM_FORMAT_XBGR8888, DRM_FORMAT_ABGR1555, DRM_FORMAT_BGRA5551, DRM_FORMAT_XBGR1555, diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h index ef56280bea93..4401fdc0f3e4 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h @@ -83,7 +83,8 @@ struct dpu_hw_ctl_ops { /** * Clear the value of the cached pending_flush_mask - * No effect on hardware + * No effect on hardware. + * Required to be implemented. 
* @ctx : ctl path ctx pointer */ void (*clear_pending_flush)(struct dpu_hw_ctl *ctx); diff --git a/drivers/gpu/drm/msm/dp/dp_aux.c b/drivers/gpu/drm/msm/dp/dp_aux.c index da46a433bf74..00dfafbebe0e 100644 --- a/drivers/gpu/drm/msm/dp/dp_aux.c +++ b/drivers/gpu/drm/msm/dp/dp_aux.c @@ -513,7 +513,10 @@ static int dp_wait_hpd_asserted(struct drm_dp_aux *dp_aux, aux = container_of(dp_aux, struct dp_aux_private, dp_aux); - pm_runtime_get_sync(aux->dev); + ret = pm_runtime_resume_and_get(aux->dev); + if (ret) + return ret; + ret = dp_catalog_aux_wait_for_hpd_connect_state(aux->catalog, wait_us); pm_runtime_put_sync(aux->dev); diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c index a50f4dda5941..7252d36687e6 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_host.c +++ b/drivers/gpu/drm/msm/dsi/dsi_host.c @@ -754,6 +754,8 @@ static void dsi_ctrl_enable(struct msm_dsi_host *msm_host, data |= DSI_VID_CFG0_TRAFFIC_MODE(dsi_get_traffic_mode(flags)); data |= DSI_VID_CFG0_DST_FORMAT(dsi_get_vid_fmt(mipi_fmt)); data |= DSI_VID_CFG0_VIRT_CHANNEL(msm_host->channel); + if (msm_dsi_host_is_wide_bus_enabled(&msm_host->base)) + data |= DSI_VID_CFG0_DATABUS_WIDEN; dsi_write(msm_host, REG_DSI_VID_CFG0, data); /* Do not swap RGB colors */ @@ -778,7 +780,6 @@ static void dsi_ctrl_enable(struct msm_dsi_host *msm_host, if (cfg_hnd->minor >= MSM_DSI_6G_VER_MINOR_V1_3) data |= DSI_CMD_MODE_MDP_CTRL2_BURST_MODE; - /* TODO: Allow for video-mode support once tested/fixed */ if (msm_dsi_host_is_wide_bus_enabled(&msm_host->base)) data |= DSI_CMD_MODE_MDP_CTRL2_DATABUS_WIDEN; @@ -856,6 +857,7 @@ static void dsi_update_dsc_timing(struct msm_dsi_host *msm_host, bool is_cmd_mod u32 slice_per_intf, total_bytes_per_intf; u32 pkt_per_line; u32 eol_byte_num; + u32 bytes_per_pkt; /* first calculate dsc parameters and then program * compress mode registers @@ -863,6 +865,7 @@ static void dsi_update_dsc_timing(struct msm_dsi_host *msm_host, bool is_cmd_mod slice_per_intf = msm_dsc_get_slices_per_intf(dsc, hdisplay); total_bytes_per_intf = dsc->slice_chunk_size * slice_per_intf; + bytes_per_pkt = dsc->slice_chunk_size; /* * slice_per_pkt; */ eol_byte_num = total_bytes_per_intf % 3; @@ -900,6 +903,7 @@ static void dsi_update_dsc_timing(struct msm_dsi_host *msm_host, bool is_cmd_mod dsi_write(msm_host, REG_DSI_COMMAND_COMPRESSION_MODE_CTRL, reg_ctrl); dsi_write(msm_host, REG_DSI_COMMAND_COMPRESSION_MODE_CTRL2, reg_ctrl2); } else { + reg |= DSI_VIDEO_COMPRESSION_MODE_CTRL_WC(bytes_per_pkt); dsi_write(msm_host, REG_DSI_VIDEO_COMPRESSION_MODE_CTRL, reg); } } diff --git a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c index 0ffe8f8c01de..83c604ba3ee1 100644 --- a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c +++ b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c @@ -1507,7 +1507,11 @@ static int boe_panel_prepare(struct drm_panel *panel) usleep_range(10000, 11000); if (boe->desc->lp11_before_reset) { - mipi_dsi_dcs_nop(boe->dsi); + ret = mipi_dsi_dcs_nop(boe->dsi); + if (ret < 0) { + dev_err(&boe->dsi->dev, "Failed to send NOP: %d\n", ret); + goto poweroff; + } usleep_range(1000, 2000); } gpiod_set_value(boe->enable_gpio, 1); @@ -1528,13 +1532,13 @@ static int boe_panel_prepare(struct drm_panel *panel) return 0; poweroff: + gpiod_set_value(boe->enable_gpio, 0); regulator_disable(boe->avee); poweroffavdd: regulator_disable(boe->avdd); poweroff1v8: usleep_range(5000, 7000); regulator_disable(boe->pp1800); - gpiod_set_value(boe->enable_gpio, 0); return ret; } diff --git 
a/drivers/gpu/drm/panel/panel-himax-hx8394.c b/drivers/gpu/drm/panel/panel-himax-hx8394.c index ff0dc08b9829..cb9f46e853de 100644 --- a/drivers/gpu/drm/panel/panel-himax-hx8394.c +++ b/drivers/gpu/drm/panel/panel-himax-hx8394.c @@ -370,8 +370,7 @@ static int hx8394_enable(struct drm_panel *panel) sleep_in: /* This will probably fail, but let's try orderly power off anyway. */ - ret = mipi_dsi_dcs_enter_sleep_mode(dsi); - if (!ret) + if (!mipi_dsi_dcs_enter_sleep_mode(dsi)) msleep(50); return ret; diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9882t.c b/drivers/gpu/drm/panel/panel-ilitek-ili9882t.c index 267a5307041c..35ea5494e0eb 100644 --- a/drivers/gpu/drm/panel/panel-ilitek-ili9882t.c +++ b/drivers/gpu/drm/panel/panel-ilitek-ili9882t.c @@ -560,7 +560,11 @@ static int ili9882t_prepare(struct drm_panel *panel) usleep_range(10000, 11000); // MIPI needs to keep the LP11 state before the lcm_reset pin is pulled high - mipi_dsi_dcs_nop(ili->dsi); + ret = mipi_dsi_dcs_nop(ili->dsi); + if (ret < 0) { + dev_err(&ili->dsi->dev, "Failed to send NOP: %d\n", ret); + goto poweroff; + } usleep_range(1000, 2000); gpiod_set_value(ili->enable_gpio, 1); @@ -579,13 +583,13 @@ static int ili9882t_prepare(struct drm_panel *panel) return 0; poweroff: + gpiod_set_value(ili->enable_gpio, 0); regulator_disable(ili->avee); poweroffavdd: regulator_disable(ili->avdd); poweroff1v8: usleep_range(5000, 7000); regulator_disable(ili->pp1800); - gpiod_set_value(ili->enable_gpio, 0); return ret; } diff --git a/drivers/gpu/drm/panel/panel-lg-sw43408.c b/drivers/gpu/drm/panel/panel-lg-sw43408.c index 2b3a73696dce..67a98ac508f8 100644 --- a/drivers/gpu/drm/panel/panel-lg-sw43408.c +++ b/drivers/gpu/drm/panel/panel-lg-sw43408.c @@ -62,16 +62,25 @@ static int sw43408_program(struct drm_panel *panel) { struct sw43408_panel *ctx = to_panel_info(panel); struct drm_dsc_picture_parameter_set pps; + int ret; mipi_dsi_dcs_write_seq(ctx->link, MIPI_DCS_SET_GAMMA_CURVE, 0x02); - mipi_dsi_dcs_set_tear_on(ctx->link, MIPI_DSI_DCS_TEAR_MODE_VBLANK); + ret = mipi_dsi_dcs_set_tear_on(ctx->link, MIPI_DSI_DCS_TEAR_MODE_VBLANK); + if (ret < 0) { + dev_err(panel->dev, "Failed to set tearing: %d\n", ret); + return ret; + } mipi_dsi_dcs_write_seq(ctx->link, 0x53, 0x0c, 0x30); mipi_dsi_dcs_write_seq(ctx->link, 0x55, 0x00, 0x70, 0xdf, 0x00, 0x70, 0xdf); mipi_dsi_dcs_write_seq(ctx->link, 0xf7, 0x01, 0x49, 0x0c); - mipi_dsi_dcs_exit_sleep_mode(ctx->link); + ret = mipi_dsi_dcs_exit_sleep_mode(ctx->link); + if (ret < 0) { + dev_err(panel->dev, "Failed to exit sleep mode: %d\n", ret); + return ret; + } msleep(135); @@ -97,14 +106,22 @@ static int sw43408_program(struct drm_panel *panel) mipi_dsi_dcs_write_seq(ctx->link, 0x55, 0x04, 0x61, 0xdb, 0x04, 0x70, 0xdb); mipi_dsi_dcs_write_seq(ctx->link, 0xb0, 0xca); - mipi_dsi_dcs_set_display_on(ctx->link); + ret = mipi_dsi_dcs_set_display_on(ctx->link); + if (ret < 0) { + dev_err(panel->dev, "Failed to set display on: %d\n", ret); + return ret; + } msleep(50); ctx->link->mode_flags &= ~MIPI_DSI_MODE_LPM; drm_dsc_pps_payload_pack(&pps, ctx->link->dsc); - mipi_dsi_picture_parameter_set(ctx->link, &pps); + ret = mipi_dsi_picture_parameter_set(ctx->link, &pps); + if (ret < 0) { + dev_err(panel->dev, "Failed to set PPS: %d\n", ret); + return ret; + } ctx->link->mode_flags |= MIPI_DSI_MODE_LPM; @@ -113,8 +130,12 @@ static int sw43408_program(struct drm_panel *panel) * PPS 1 if pps_identifier is 0 * PPS 2 if pps_identifier is 1 */ - mipi_dsi_compression_mode_ext(ctx->link, true, - MIPI_DSI_COMPRESSION_DSC, 1); + 
ret = mipi_dsi_compression_mode_ext(ctx->link, true, + MIPI_DSI_COMPRESSION_DSC, 1); + if (ret < 0) { + dev_err(panel->dev, "Failed to set compression mode: %d\n", ret); + return ret; + } return 0; } diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c index ef9f6c0716d5..149737d7a07e 100644 --- a/drivers/gpu/drm/panfrost/panfrost_drv.c +++ b/drivers/gpu/drm/panfrost/panfrost_drv.c @@ -828,3 +828,4 @@ module_platform_driver(panfrost_driver); MODULE_AUTHOR("Panfrost Project Developers"); MODULE_DESCRIPTION("Panfrost DRM Driver"); MODULE_LICENSE("GPL v2"); +MODULE_SOFTDEP("pre: governor_simpleondemand"); diff --git a/drivers/gpu/drm/panthor/panthor_sched.c b/drivers/gpu/drm/panthor/panthor_sched.c index 9a0ff48f7061..463bcd3cf00f 100644 --- a/drivers/gpu/drm/panthor/panthor_sched.c +++ b/drivers/gpu/drm/panthor/panthor_sched.c @@ -2939,6 +2939,7 @@ queue_run_job(struct drm_sched_job *sched_job) pm_runtime_get(ptdev->base.dev); sched->pm.has_ref = true; } + panthor_devfreq_record_busy(sched->ptdev); } /* Update the last fence. */ diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c index c6d35c33d5d6..bc24af08dfcd 100644 --- a/drivers/gpu/drm/qxl/qxl_display.c +++ b/drivers/gpu/drm/qxl/qxl_display.c @@ -236,6 +236,9 @@ static int qxl_add_mode(struct drm_connector *connector, return 0; mode = drm_cvt_mode(dev, width, height, 60, false, false, false); + if (!mode) + return 0; + if (preferred) mode->type |= DRM_MODE_TYPE_PREFERRED; mode->hdisplay = width; @@ -581,11 +584,11 @@ static struct qxl_bo *qxl_create_cursor(struct qxl_device *qdev, if (ret) goto err; - ret = qxl_bo_vmap(cursor_bo, &cursor_map); + ret = qxl_bo_pin_and_vmap(cursor_bo, &cursor_map); if (ret) goto err_unref; - ret = qxl_bo_vmap(user_bo, &user_map); + ret = qxl_bo_pin_and_vmap(user_bo, &user_map); if (ret) goto err_unmap; @@ -611,12 +614,12 @@ static struct qxl_bo *qxl_create_cursor(struct qxl_device *qdev, user_map.vaddr, size); } - qxl_bo_vunmap(user_bo); - qxl_bo_vunmap(cursor_bo); + qxl_bo_vunmap_and_unpin(user_bo); + qxl_bo_vunmap_and_unpin(cursor_bo); return cursor_bo; err_unmap: - qxl_bo_vunmap(cursor_bo); + qxl_bo_vunmap_and_unpin(cursor_bo); err_unref: qxl_bo_unpin(cursor_bo); qxl_bo_unref(&cursor_bo); @@ -1202,7 +1205,7 @@ int qxl_create_monitors_object(struct qxl_device *qdev) } qdev->monitors_config_bo = gem_to_qxl_bo(gobj); - ret = qxl_bo_vmap(qdev->monitors_config_bo, &map); + ret = qxl_bo_pin_and_vmap(qdev->monitors_config_bo, &map); if (ret) return ret; @@ -1233,7 +1236,7 @@ int qxl_destroy_monitors_object(struct qxl_device *qdev) qdev->monitors_config = NULL; qdev->ram_header->monitors_config = 0; - ret = qxl_bo_vunmap(qdev->monitors_config_bo); + ret = qxl_bo_vunmap_and_unpin(qdev->monitors_config_bo); if (ret) return ret; diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c index 5893e27a7ae5..66635c55cf85 100644 --- a/drivers/gpu/drm/qxl/qxl_object.c +++ b/drivers/gpu/drm/qxl/qxl_object.c @@ -182,7 +182,7 @@ int qxl_bo_vmap_locked(struct qxl_bo *bo, struct iosys_map *map) return 0; } -int qxl_bo_vmap(struct qxl_bo *bo, struct iosys_map *map) +int qxl_bo_pin_and_vmap(struct qxl_bo *bo, struct iosys_map *map) { int r; @@ -190,7 +190,15 @@ int qxl_bo_vmap(struct qxl_bo *bo, struct iosys_map *map) if (r) return r; + r = qxl_bo_pin_locked(bo); + if (r) { + qxl_bo_unreserve(bo); + return r; + } + r = qxl_bo_vmap_locked(bo, map); + if (r) + qxl_bo_unpin_locked(bo); qxl_bo_unreserve(bo); return r; } @@ -241,7 
+249,7 @@ void qxl_bo_vunmap_locked(struct qxl_bo *bo) ttm_bo_vunmap(&bo->tbo, &bo->map); } -int qxl_bo_vunmap(struct qxl_bo *bo) +int qxl_bo_vunmap_and_unpin(struct qxl_bo *bo) { int r; @@ -250,6 +258,7 @@ int qxl_bo_vunmap(struct qxl_bo *bo) return r; qxl_bo_vunmap_locked(bo); + qxl_bo_unpin_locked(bo); qxl_bo_unreserve(bo); return 0; } diff --git a/drivers/gpu/drm/qxl/qxl_object.h b/drivers/gpu/drm/qxl/qxl_object.h index 1cf5bc759101..875f63221074 100644 --- a/drivers/gpu/drm/qxl/qxl_object.h +++ b/drivers/gpu/drm/qxl/qxl_object.h @@ -59,9 +59,9 @@ extern int qxl_bo_create(struct qxl_device *qdev, u32 priority, struct qxl_surface *surf, struct qxl_bo **bo_ptr); -int qxl_bo_vmap(struct qxl_bo *bo, struct iosys_map *map); +int qxl_bo_pin_and_vmap(struct qxl_bo *bo, struct iosys_map *map); int qxl_bo_vmap_locked(struct qxl_bo *bo, struct iosys_map *map); -int qxl_bo_vunmap(struct qxl_bo *bo); +int qxl_bo_vunmap_and_unpin(struct qxl_bo *bo); void qxl_bo_vunmap_locked(struct qxl_bo *bo); void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev, struct qxl_bo *bo, int page_offset); void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev, struct qxl_bo *bo, void *map); diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c index 62ebbdb16253..9873172e3fd3 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c @@ -2344,7 +2344,7 @@ static void vop2_setup_layer_mixer(struct vop2_video_port *vp) port_sel |= FIELD_PREP(RK3568_OVL_PORT_SET__PORT2_MUX, (vp2->nlayers + vp1->nlayers + vp0->nlayers - 1)); else - port_sel |= FIELD_PREP(RK3568_OVL_PORT_SET__PORT1_MUX, 8); + port_sel |= FIELD_PREP(RK3568_OVL_PORT_SET__PORT2_MUX, 8); layer_sel = vop2_readl(vop2, RK3568_OVL_LAYER_SEL); diff --git a/drivers/gpu/drm/ttm/tests/ttm_bo_test.c b/drivers/gpu/drm/ttm/tests/ttm_bo_test.c index 1f8a4f8adc92..801bb139075f 100644 --- a/drivers/gpu/drm/ttm/tests/ttm_bo_test.c +++ b/drivers/gpu/drm/ttm/tests/ttm_bo_test.c @@ -18,6 +18,12 @@ #define BO_SIZE SZ_8K +#ifdef CONFIG_PREEMPT_RT +#define ww_mutex_base_lock(b) rt_mutex_lock(b) +#else +#define ww_mutex_base_lock(b) mutex_lock(b) +#endif + struct ttm_bo_test_case { const char *description; bool interruptible; @@ -56,7 +62,7 @@ static void ttm_bo_reserve_optimistic_no_ticket(struct kunit *test) struct ttm_buffer_object *bo; int err; - bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE); + bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL); err = ttm_bo_reserve(bo, params->interruptible, params->no_wait, NULL); KUNIT_ASSERT_EQ(test, err, 0); @@ -71,7 +77,7 @@ static void ttm_bo_reserve_locked_no_sleep(struct kunit *test) bool no_wait = true; int err; - bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE); + bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL); /* Let's lock it beforehand */ dma_resv_lock(bo->base.resv, NULL); @@ -92,7 +98,7 @@ static void ttm_bo_reserve_no_wait_ticket(struct kunit *test) ww_acquire_init(&ctx, &reservation_ww_class); - bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE); + bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL); err = ttm_bo_reserve(bo, interruptible, no_wait, &ctx); KUNIT_ASSERT_EQ(test, err, -EBUSY); @@ -110,7 +116,7 @@ static void ttm_bo_reserve_double_resv(struct kunit *test) ww_acquire_init(&ctx, &reservation_ww_class); - bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE); + bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL); err = ttm_bo_reserve(bo, interruptible, no_wait, &ctx); KUNIT_ASSERT_EQ(test, err, 
0); @@ -138,11 +144,11 @@ static void ttm_bo_reserve_deadlock(struct kunit *test) bool no_wait = false; int err; - bo1 = ttm_bo_kunit_init(test, test->priv, BO_SIZE); - bo2 = ttm_bo_kunit_init(test, test->priv, BO_SIZE); + bo1 = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL); + bo2 = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL); ww_acquire_init(&ctx1, &reservation_ww_class); - mutex_lock(&bo2->base.resv->lock.base); + ww_mutex_base_lock(&bo2->base.resv->lock.base); /* The deadlock will be caught by WW mutex, don't warn about it */ lock_release(&bo2->base.resv->lock.base.dep_map, 1); @@ -208,7 +214,7 @@ static void ttm_bo_reserve_interrupted(struct kunit *test) struct task_struct *task; int err; - bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE); + bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL); task = kthread_create(threaded_ttm_bo_reserve, bo, "ttm-bo-reserve"); @@ -249,7 +255,7 @@ static void ttm_bo_unreserve_basic(struct kunit *test) KUNIT_ASSERT_EQ(test, err, 0); priv->ttm_dev = ttm_dev; - bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE); + bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL); bo->priority = bo_prio; err = ttm_resource_alloc(bo, place, &res1); @@ -288,7 +294,7 @@ static void ttm_bo_unreserve_pinned(struct kunit *test) KUNIT_ASSERT_EQ(test, err, 0); priv->ttm_dev = ttm_dev; - bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE); + bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL); place = ttm_place_kunit_init(test, mem_type, 0); dma_resv_lock(bo->base.resv, NULL); @@ -321,6 +327,7 @@ static void ttm_bo_unreserve_bulk(struct kunit *test) struct ttm_resource *res1, *res2; struct ttm_device *ttm_dev; struct ttm_place *place; + struct dma_resv *resv; uint32_t mem_type = TTM_PL_SYSTEM; unsigned int bo_priority = 0; int err; @@ -332,12 +339,17 @@ static void ttm_bo_unreserve_bulk(struct kunit *test) ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL); KUNIT_ASSERT_NOT_NULL(test, ttm_dev); + resv = kunit_kzalloc(test, sizeof(*resv), GFP_KERNEL); + KUNIT_ASSERT_NOT_NULL(test, ttm_dev); + err = ttm_device_kunit_init(priv, ttm_dev, false, false); KUNIT_ASSERT_EQ(test, err, 0); priv->ttm_dev = ttm_dev; - bo1 = ttm_bo_kunit_init(test, test->priv, BO_SIZE); - bo2 = ttm_bo_kunit_init(test, test->priv, BO_SIZE); + dma_resv_init(resv); + + bo1 = ttm_bo_kunit_init(test, test->priv, BO_SIZE, resv); + bo2 = ttm_bo_kunit_init(test, test->priv, BO_SIZE, resv); dma_resv_lock(bo1->base.resv, NULL); ttm_bo_set_bulk_move(bo1, &lru_bulk_move); @@ -363,6 +375,8 @@ static void ttm_bo_unreserve_bulk(struct kunit *test) ttm_resource_free(bo1, &res1); ttm_resource_free(bo2, &res2); + + dma_resv_fini(resv); } static void ttm_bo_put_basic(struct kunit *test) @@ -384,7 +398,7 @@ static void ttm_bo_put_basic(struct kunit *test) KUNIT_ASSERT_EQ(test, err, 0); priv->ttm_dev = ttm_dev; - bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE); + bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL); bo->type = ttm_bo_type_device; err = ttm_resource_alloc(bo, place, &res); @@ -445,7 +459,7 @@ static void ttm_bo_put_shared_resv(struct kunit *test) dma_fence_signal(fence); - bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE); + bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL); bo->type = ttm_bo_type_device; bo->base.resv = external_resv; @@ -467,7 +481,7 @@ static void ttm_bo_pin_basic(struct kunit *test) KUNIT_ASSERT_EQ(test, err, 0); priv->ttm_dev = ttm_dev; - bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE); + bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL); for 
(int i = 0; i < no_pins; i++) { dma_resv_lock(bo->base.resv, NULL); @@ -502,7 +516,7 @@ static void ttm_bo_pin_unpin_resource(struct kunit *test) KUNIT_ASSERT_EQ(test, err, 0); priv->ttm_dev = ttm_dev; - bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE); + bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL); err = ttm_resource_alloc(bo, place, &res); KUNIT_ASSERT_EQ(test, err, 0); @@ -553,7 +567,7 @@ static void ttm_bo_multiple_pin_one_unpin(struct kunit *test) KUNIT_ASSERT_EQ(test, err, 0); priv->ttm_dev = ttm_dev; - bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE); + bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL); err = ttm_resource_alloc(bo, place, &res); KUNIT_ASSERT_EQ(test, err, 0); diff --git a/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.c b/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.c index 7b7c1fa805fc..5be317a0af56 100644 --- a/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.c +++ b/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.c @@ -51,7 +51,8 @@ EXPORT_SYMBOL_GPL(ttm_device_kunit_init); struct ttm_buffer_object *ttm_bo_kunit_init(struct kunit *test, struct ttm_test_devices *devs, - size_t size) + size_t size, + struct dma_resv *obj) { struct drm_gem_object gem_obj = { }; struct ttm_buffer_object *bo; @@ -61,6 +62,10 @@ struct ttm_buffer_object *ttm_bo_kunit_init(struct kunit *test, KUNIT_ASSERT_NOT_NULL(test, bo); bo->base = gem_obj; + + if (obj) + bo->base.resv = obj; + err = drm_gem_object_init(devs->drm, &bo->base, size); KUNIT_ASSERT_EQ(test, err, 0); diff --git a/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.h b/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.h index 2f51c833a536..c83d31b23c9a 100644 --- a/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.h +++ b/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.h @@ -28,7 +28,8 @@ int ttm_device_kunit_init(struct ttm_test_devices *priv, bool use_dma32); struct ttm_buffer_object *ttm_bo_kunit_init(struct kunit *test, struct ttm_test_devices *devs, - size_t size); + size_t size, + struct dma_resv *obj); struct ttm_place *ttm_place_kunit_init(struct kunit *test, uint32_t mem_type, uint32_t flags); diff --git a/drivers/gpu/drm/ttm/tests/ttm_pool_test.c b/drivers/gpu/drm/ttm/tests/ttm_pool_test.c index 0a3fede84da9..4643f91c6bd5 100644 --- a/drivers/gpu/drm/ttm/tests/ttm_pool_test.c +++ b/drivers/gpu/drm/ttm/tests/ttm_pool_test.c @@ -57,7 +57,7 @@ static struct ttm_tt *ttm_tt_kunit_init(struct kunit *test, struct ttm_tt *tt; int err; - bo = ttm_bo_kunit_init(test, priv->devs, size); + bo = ttm_bo_kunit_init(test, priv->devs, size, NULL); KUNIT_ASSERT_NOT_NULL(test, bo); priv->mock_bo = bo; @@ -209,7 +209,7 @@ static void ttm_pool_alloc_basic_dma_addr(struct kunit *test) tt = kunit_kzalloc(test, sizeof(*tt), GFP_KERNEL); KUNIT_ASSERT_NOT_NULL(test, tt); - bo = ttm_bo_kunit_init(test, devs, size); + bo = ttm_bo_kunit_init(test, devs, size, NULL); KUNIT_ASSERT_NOT_NULL(test, bo); err = ttm_sg_tt_init(tt, bo, 0, caching); diff --git a/drivers/gpu/drm/ttm/tests/ttm_resource_test.c b/drivers/gpu/drm/ttm/tests/ttm_resource_test.c index 029e1f094bb0..67584058dadb 100644 --- a/drivers/gpu/drm/ttm/tests/ttm_resource_test.c +++ b/drivers/gpu/drm/ttm/tests/ttm_resource_test.c @@ -54,7 +54,7 @@ static void ttm_init_test_mocks(struct kunit *test, /* Make sure we have what we need for a good BO mock */ KUNIT_ASSERT_NOT_NULL(test, priv->devs->ttm_dev); - priv->bo = ttm_bo_kunit_init(test, priv->devs, size); + priv->bo = ttm_bo_kunit_init(test, priv->devs, size, NULL); priv->place = ttm_place_kunit_init(test, mem_type, flags); } diff --git 
a/drivers/gpu/drm/ttm/tests/ttm_tt_test.c b/drivers/gpu/drm/ttm/tests/ttm_tt_test.c index fd4502c18de6..67bf51723c92 100644 --- a/drivers/gpu/drm/ttm/tests/ttm_tt_test.c +++ b/drivers/gpu/drm/ttm/tests/ttm_tt_test.c @@ -63,7 +63,7 @@ static void ttm_tt_init_basic(struct kunit *test) tt = kunit_kzalloc(test, sizeof(*tt), GFP_KERNEL); KUNIT_ASSERT_NOT_NULL(test, tt); - bo = ttm_bo_kunit_init(test, test->priv, params->size); + bo = ttm_bo_kunit_init(test, test->priv, params->size, NULL); err = ttm_tt_init(tt, bo, page_flags, caching, extra_pages); KUNIT_ASSERT_EQ(test, err, 0); @@ -89,7 +89,7 @@ static void ttm_tt_init_misaligned(struct kunit *test) tt = kunit_kzalloc(test, sizeof(*tt), GFP_KERNEL); KUNIT_ASSERT_NOT_NULL(test, tt); - bo = ttm_bo_kunit_init(test, test->priv, size); + bo = ttm_bo_kunit_init(test, test->priv, size, NULL); /* Make the object size misaligned */ bo->base.size += 1; @@ -110,7 +110,7 @@ static void ttm_tt_fini_basic(struct kunit *test) tt = kunit_kzalloc(test, sizeof(*tt), GFP_KERNEL); KUNIT_ASSERT_NOT_NULL(test, tt); - bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE); + bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL); err = ttm_tt_init(tt, bo, 0, caching, 0); KUNIT_ASSERT_EQ(test, err, 0); @@ -130,7 +130,7 @@ static void ttm_tt_fini_sg(struct kunit *test) tt = kunit_kzalloc(test, sizeof(*tt), GFP_KERNEL); KUNIT_ASSERT_NOT_NULL(test, tt); - bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE); + bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL); err = ttm_sg_tt_init(tt, bo, 0, caching); KUNIT_ASSERT_EQ(test, err, 0); @@ -151,7 +151,7 @@ static void ttm_tt_fini_shmem(struct kunit *test) tt = kunit_kzalloc(test, sizeof(*tt), GFP_KERNEL); KUNIT_ASSERT_NOT_NULL(test, tt); - bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE); + bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL); err = ttm_tt_init(tt, bo, 0, caching, 0); KUNIT_ASSERT_EQ(test, err, 0); @@ -168,7 +168,7 @@ static void ttm_tt_create_basic(struct kunit *test) struct ttm_buffer_object *bo; int err; - bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE); + bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL); bo->type = ttm_bo_type_device; dma_resv_lock(bo->base.resv, NULL); @@ -187,7 +187,7 @@ static void ttm_tt_create_invalid_bo_type(struct kunit *test) struct ttm_buffer_object *bo; int err; - bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE); + bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL); bo->type = ttm_bo_type_sg + 1; dma_resv_lock(bo->base.resv, NULL); @@ -208,7 +208,7 @@ static void ttm_tt_create_ttm_exists(struct kunit *test) tt = kunit_kzalloc(test, sizeof(*tt), GFP_KERNEL); KUNIT_ASSERT_NOT_NULL(test, tt); - bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE); + bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL); err = ttm_tt_init(tt, bo, 0, caching, 0); KUNIT_ASSERT_EQ(test, err, 0); @@ -239,7 +239,7 @@ static void ttm_tt_create_failed(struct kunit *test) struct ttm_buffer_object *bo; int err; - bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE); + bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL); /* Update ttm_device_funcs so we don't alloc ttm_tt */ devs->ttm_dev->funcs = &ttm_dev_empty_funcs; @@ -257,7 +257,7 @@ static void ttm_tt_destroy_basic(struct kunit *test) struct ttm_buffer_object *bo; int err; - bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE); + bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL); dma_resv_lock(bo->base.resv, NULL); err = ttm_tt_create(bo, false); diff --git a/drivers/gpu/drm/udl/udl_modeset.c b/drivers/gpu/drm/udl/udl_modeset.c 
index 7702359c90c2..751da3a294c4 100644 --- a/drivers/gpu/drm/udl/udl_modeset.c +++ b/drivers/gpu/drm/udl/udl_modeset.c @@ -527,8 +527,7 @@ struct drm_connector *udl_connector_init(struct drm_device *dev) drm_connector_helper_add(connector, &udl_connector_helper_funcs); - connector->polled = DRM_CONNECTOR_POLL_HPD | - DRM_CONNECTOR_POLL_CONNECT | + connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; return connector; diff --git a/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c b/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c index d46f87a039f2..b3d3c065dd9d 100644 --- a/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c +++ b/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c @@ -159,12 +159,16 @@ void intel_hdcp_gsc_fini(struct xe_device *xe) { struct intel_hdcp_gsc_message *hdcp_message = xe->display.hdcp.hdcp_message; + struct i915_hdcp_arbiter *arb = xe->display.hdcp.arbiter; - if (!hdcp_message) - return; + if (hdcp_message) { + xe_bo_unpin_map_no_vm(hdcp_message->hdcp_bo); + kfree(hdcp_message); + xe->display.hdcp.hdcp_message = NULL; + } - xe_bo_unpin_map_no_vm(hdcp_message->hdcp_bo); - kfree(hdcp_message); + kfree(arb); + xe->display.hdcp.arbiter = NULL; } static int xe_gsc_send_sync(struct xe_device *xe, diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c index bc1f794e3e61..b6f3a43d637f 100644 --- a/drivers/gpu/drm/xe/xe_bo.c +++ b/drivers/gpu/drm/xe/xe_bo.c @@ -317,7 +317,7 @@ static struct ttm_tt *xe_ttm_tt_create(struct ttm_buffer_object *ttm_bo, struct xe_device *xe = xe_bo_device(bo); struct xe_ttm_tt *tt; unsigned long extra_pages; - enum ttm_caching caching; + enum ttm_caching caching = ttm_cached; int err; tt = kzalloc(sizeof(*tt), GFP_KERNEL); @@ -331,26 +331,35 @@ static struct ttm_tt *xe_ttm_tt_create(struct ttm_buffer_object *ttm_bo, extra_pages = DIV_ROUND_UP(xe_device_ccs_bytes(xe, bo->size), PAGE_SIZE); - switch (bo->cpu_caching) { - case DRM_XE_GEM_CPU_CACHING_WC: - caching = ttm_write_combined; - break; - default: - caching = ttm_cached; - break; - } - - WARN_ON((bo->flags & XE_BO_FLAG_USER) && !bo->cpu_caching); - /* - * Display scanout is always non-coherent with the CPU cache. - * - * For Xe_LPG and beyond, PPGTT PTE lookups are also non-coherent and - * require a CPU:WC mapping. + * DGFX system memory is always WB / ttm_cached, since + * other caching modes are only supported on x86. DGFX + * GPU system memory accesses are always coherent with the + * CPU. */ - if ((!bo->cpu_caching && bo->flags & XE_BO_FLAG_SCANOUT) || - (xe->info.graphics_verx100 >= 1270 && bo->flags & XE_BO_FLAG_PAGETABLE)) - caching = ttm_write_combined; + if (!IS_DGFX(xe)) { + switch (bo->cpu_caching) { + case DRM_XE_GEM_CPU_CACHING_WC: + caching = ttm_write_combined; + break; + default: + caching = ttm_cached; + break; + } + + WARN_ON((bo->flags & XE_BO_FLAG_USER) && !bo->cpu_caching); + + /* + * Display scanout is always non-coherent with the CPU cache. + * + * For Xe_LPG and beyond, PPGTT PTE lookups are also + * non-coherent and require a CPU:WC mapping. 
+ */ + if ((!bo->cpu_caching && bo->flags & XE_BO_FLAG_SCANOUT) || + (xe->info.graphics_verx100 >= 1270 && + bo->flags & XE_BO_FLAG_PAGETABLE)) + caching = ttm_write_combined; + } err = ttm_tt_init(&tt->ttm, &bo->ttm, page_flags, caching, extra_pages); if (err) { diff --git a/drivers/gpu/drm/xe/xe_bo_types.h b/drivers/gpu/drm/xe/xe_bo_types.h index 86422e113d39..10450f1fbbde 100644 --- a/drivers/gpu/drm/xe/xe_bo_types.h +++ b/drivers/gpu/drm/xe/xe_bo_types.h @@ -66,7 +66,8 @@ struct xe_bo { /** * @cpu_caching: CPU caching mode. Currently only used for userspace - * objects. + * objects. Exceptions are system memory on DGFX, which is always + * WB. */ u16 cpu_caching; diff --git a/drivers/gpu/drm/xe/xe_exec.c b/drivers/gpu/drm/xe/xe_exec.c index 97eeb973e897..074344c739ab 100644 --- a/drivers/gpu/drm/xe/xe_exec.c +++ b/drivers/gpu/drm/xe/xe_exec.c @@ -118,7 +118,7 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file) u64 addresses[XE_HW_ENGINE_MAX_INSTANCE]; struct drm_gpuvm_exec vm_exec = {.extra.fn = xe_exec_fn}; struct drm_exec *exec = &vm_exec.exec; - u32 i, num_syncs = 0, num_ufence = 0; + u32 i, num_syncs, num_ufence = 0; struct xe_sched_job *job; struct xe_vm *vm; bool write_locked, skip_retry = false; @@ -156,15 +156,15 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file) vm = q->vm; - for (i = 0; i < args->num_syncs; i++) { - err = xe_sync_entry_parse(xe, xef, &syncs[num_syncs++], - &syncs_user[i], SYNC_PARSE_FLAG_EXEC | + for (num_syncs = 0; num_syncs < args->num_syncs; num_syncs++) { + err = xe_sync_entry_parse(xe, xef, &syncs[num_syncs], + &syncs_user[num_syncs], SYNC_PARSE_FLAG_EXEC | (xe_vm_in_lr_mode(vm) ? SYNC_PARSE_FLAG_LR_MODE : 0)); if (err) goto err_syncs; - if (xe_sync_is_ufence(&syncs[i])) + if (xe_sync_is_ufence(&syncs[num_syncs])) num_ufence++; } @@ -325,8 +325,8 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file) if (err == -EAGAIN && !skip_retry) goto retry; err_syncs: - for (i = 0; i < num_syncs; i++) - xe_sync_entry_cleanup(&syncs[i]); + while (num_syncs--) + xe_sync_entry_cleanup(&syncs[num_syncs]); kfree(syncs); err_exec_queue: xe_exec_queue_put(q); diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c index 6c2cfc54442c..4f40fe24c649 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c @@ -1527,6 +1527,7 @@ static u64 pf_estimate_fair_lmem(struct xe_gt *gt, unsigned int num_vfs) u64 fair; fair = div_u64(available, num_vfs); + fair = rounddown_pow_of_two(fair); /* XXX: ttm_vram_mgr & drm_buddy limitation */ fair = ALIGN_DOWN(fair, alignment); #ifdef MAX_FAIR_LMEM fair = min_t(u64, MAX_FAIR_LMEM, fair); diff --git a/drivers/gpu/drm/xlnx/zynqmp_dpsub.c b/drivers/gpu/drm/xlnx/zynqmp_dpsub.c index face8d6b2a6f..f5781939de9c 100644 --- a/drivers/gpu/drm/xlnx/zynqmp_dpsub.c +++ b/drivers/gpu/drm/xlnx/zynqmp_dpsub.c @@ -269,6 +269,7 @@ static int zynqmp_dpsub_probe(struct platform_device *pdev) return 0; err_disp: + drm_bridge_remove(dpsub->bridge); zynqmp_disp_remove(dpsub); err_dp: zynqmp_dp_remove(dpsub); diff --git a/drivers/gpu/drm/xlnx/zynqmp_kms.c b/drivers/gpu/drm/xlnx/zynqmp_kms.c index 43bf416b33d5..f25583ce92e6 100644 --- a/drivers/gpu/drm/xlnx/zynqmp_kms.c +++ b/drivers/gpu/drm/xlnx/zynqmp_kms.c @@ -433,23 +433,28 @@ static int zynqmp_dpsub_kms_init(struct zynqmp_dpsub *dpsub) DRM_BRIDGE_ATTACH_NO_CONNECTOR); if (ret) { dev_err(dpsub->dev, "failed to attach bridge to encoder\n"); 
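The zynqmp_dpsub_kms_init() hunk that follows converts the early returns after the bridge-attach and connector failures into a single unwind label: an encoder that was successfully initialized stays on the device's encoder list until drm_encoder_cleanup() is called, so the failure paths (and zynqmp_dpsub_drm_cleanup()) now release it explicitly. The resulting error path has the usual kernel unwind shape, roughly as in this sketch (names and surrounding code are illustrative, only the pattern matters):

	ret = drm_bridge_attach(encoder, bridge, NULL,
				DRM_BRIDGE_ATTACH_NO_CONNECTOR);
	if (ret) {
		dev_err(dev, "failed to attach bridge to encoder\n");
		goto err_encoder;
	}

	return 0;

err_encoder:
	drm_encoder_cleanup(encoder);	/* drop it from the device's encoder list */
	return ret;
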
- return ret; + goto err_encoder; } /* Create the connector for the chain of bridges. */ connector = drm_bridge_connector_init(&dpsub->drm->dev, encoder); if (IS_ERR(connector)) { dev_err(dpsub->dev, "failed to created connector\n"); - return PTR_ERR(connector); + ret = PTR_ERR(connector); + goto err_encoder; } ret = drm_connector_attach_encoder(connector, encoder); if (ret < 0) { dev_err(dpsub->dev, "failed to attach connector to encoder\n"); - return ret; + goto err_encoder; } return 0; + +err_encoder: + drm_encoder_cleanup(encoder); + return ret; } static void zynqmp_dpsub_drm_release(struct drm_device *drm, void *res) @@ -529,5 +534,6 @@ void zynqmp_dpsub_drm_cleanup(struct zynqmp_dpsub *dpsub) drm_dev_unregister(drm); drm_atomic_helper_shutdown(drm); + drm_encoder_cleanup(&dpsub->drm->encoder); drm_kms_helper_poll_fini(drm); } diff --git a/drivers/hwmon/adt7475.c b/drivers/hwmon/adt7475.c index 4224ffb30483..ec3336804720 100644 --- a/drivers/hwmon/adt7475.c +++ b/drivers/hwmon/adt7475.c @@ -1900,7 +1900,7 @@ static void adt7475_read_pwm(struct i2c_client *client, int index) data->pwm[CONTROL][index] &= ~0xE0; data->pwm[CONTROL][index] |= (7 << 5); - i2c_smbus_write_byte_data(client, PWM_CONFIG_REG(index), + i2c_smbus_write_byte_data(client, PWM_REG(index), data->pwm[INPUT][index]); i2c_smbus_write_byte_data(client, PWM_CONFIG_REG(index), diff --git a/drivers/hwmon/ltc2991.c b/drivers/hwmon/ltc2991.c index 06750bb93c23..f74ce9c25bf7 100644 --- a/drivers/hwmon/ltc2991.c +++ b/drivers/hwmon/ltc2991.c @@ -225,8 +225,8 @@ static umode_t ltc2991_is_visible(const void *data, case hwmon_temp: switch (attr) { case hwmon_temp_input: - if (st->temp_en[channel] || - channel == LTC2991_T_INT_CH_NR) + if (channel == LTC2991_T_INT_CH_NR || + st->temp_en[channel]) return 0444; break; } diff --git a/drivers/hwmon/max6697.c b/drivers/hwmon/max6697.c index d161ba0e7813..8aac8278193f 100644 --- a/drivers/hwmon/max6697.c +++ b/drivers/hwmon/max6697.c @@ -311,6 +311,7 @@ static ssize_t temp_store(struct device *dev, return ret; mutex_lock(&data->update_lock); + temp = clamp_val(temp, -1000000, 1000000); /* prevent underflow */ temp = DIV_ROUND_CLOSEST(temp, 1000) + data->temp_offset; temp = clamp_val(temp, 0, data->type == max6581 ? 
255 : 127); data->temp[nr][index] = temp; @@ -428,14 +429,14 @@ static SENSOR_DEVICE_ATTR_RO(temp6_max_alarm, alarm, 20); static SENSOR_DEVICE_ATTR_RO(temp7_max_alarm, alarm, 21); static SENSOR_DEVICE_ATTR_RO(temp8_max_alarm, alarm, 23); -static SENSOR_DEVICE_ATTR_RO(temp1_crit_alarm, alarm, 14); +static SENSOR_DEVICE_ATTR_RO(temp1_crit_alarm, alarm, 15); static SENSOR_DEVICE_ATTR_RO(temp2_crit_alarm, alarm, 8); static SENSOR_DEVICE_ATTR_RO(temp3_crit_alarm, alarm, 9); static SENSOR_DEVICE_ATTR_RO(temp4_crit_alarm, alarm, 10); static SENSOR_DEVICE_ATTR_RO(temp5_crit_alarm, alarm, 11); static SENSOR_DEVICE_ATTR_RO(temp6_crit_alarm, alarm, 12); static SENSOR_DEVICE_ATTR_RO(temp7_crit_alarm, alarm, 13); -static SENSOR_DEVICE_ATTR_RO(temp8_crit_alarm, alarm, 15); +static SENSOR_DEVICE_ATTR_RO(temp8_crit_alarm, alarm, 14); static SENSOR_DEVICE_ATTR_RO(temp2_fault, alarm, 1); static SENSOR_DEVICE_ATTR_RO(temp3_fault, alarm, 2); diff --git a/drivers/hwtracing/coresight/coresight-platform.c b/drivers/hwtracing/coresight/coresight-platform.c index 9d550f5697fa..57a009552cc5 100644 --- a/drivers/hwtracing/coresight/coresight-platform.c +++ b/drivers/hwtracing/coresight/coresight-platform.c @@ -297,8 +297,10 @@ static int of_get_coresight_platform_data(struct device *dev, continue; ret = of_coresight_parse_endpoint(dev, ep, pdata); - if (ret) + if (ret) { + of_node_put(ep); return ret; + } } return 0; diff --git a/drivers/i3c/master/mipi-i3c-hci/core.c b/drivers/i3c/master/mipi-i3c-hci/core.c index d7e966a25583..4e7d6a43ee9b 100644 --- a/drivers/i3c/master/mipi-i3c-hci/core.c +++ b/drivers/i3c/master/mipi-i3c-hci/core.c @@ -631,6 +631,7 @@ static irqreturn_t i3c_hci_irq_handler(int irq, void *dev_id) static int i3c_hci_init(struct i3c_hci *hci) { u32 regval, offset; + bool size_in_dwords; int ret; /* Validate HCI hardware version */ @@ -654,11 +655,16 @@ static int i3c_hci_init(struct i3c_hci *hci) hci->caps = reg_read(HC_CAPABILITIES); DBG("caps = %#x", hci->caps); + size_in_dwords = hci->version_major < 1 || + (hci->version_major == 1 && hci->version_minor < 1); + regval = reg_read(DAT_SECTION); offset = FIELD_GET(DAT_TABLE_OFFSET, regval); hci->DAT_regs = offset ? hci->base_regs + offset : NULL; hci->DAT_entries = FIELD_GET(DAT_TABLE_SIZE, regval); hci->DAT_entry_size = FIELD_GET(DAT_ENTRY_SIZE, regval) ? 0 : 8; + if (size_in_dwords) + hci->DAT_entries = 4 * hci->DAT_entries / hci->DAT_entry_size; dev_info(&hci->master.dev, "DAT: %u %u-bytes entries at offset %#x\n", hci->DAT_entries, hci->DAT_entry_size, offset); @@ -667,6 +673,8 @@ static int i3c_hci_init(struct i3c_hci *hci) hci->DCT_regs = offset ? hci->base_regs + offset : NULL; hci->DCT_entries = FIELD_GET(DCT_TABLE_SIZE, regval); hci->DCT_entry_size = FIELD_GET(DCT_ENTRY_SIZE, regval) ? 
0 : 16; + if (size_in_dwords) + hci->DCT_entries = 4 * hci->DCT_entries / hci->DCT_entry_size; dev_info(&hci->master.dev, "DCT: %u %u-bytes entries at offset %#x\n", hci->DCT_entries, hci->DCT_entry_size, offset); diff --git a/drivers/iio/adc/ad9467.c b/drivers/iio/adc/ad9467.c index 8f5b9c3f6e3d..1fd2211e2964 100644 --- a/drivers/iio/adc/ad9467.c +++ b/drivers/iio/adc/ad9467.c @@ -141,9 +141,10 @@ struct ad9467_state { struct gpio_desc *pwrdown_gpio; /* ensure consistent state obtained on multiple related accesses */ struct mutex lock; + u8 buf[3] __aligned(IIO_DMA_MINALIGN); }; -static int ad9467_spi_read(struct spi_device *spi, unsigned int reg) +static int ad9467_spi_read(struct ad9467_state *st, unsigned int reg) { unsigned char tbuf[2], rbuf[1]; int ret; @@ -151,7 +152,7 @@ static int ad9467_spi_read(struct spi_device *spi, unsigned int reg) tbuf[0] = 0x80 | (reg >> 8); tbuf[1] = reg & 0xFF; - ret = spi_write_then_read(spi, + ret = spi_write_then_read(st->spi, tbuf, ARRAY_SIZE(tbuf), rbuf, ARRAY_SIZE(rbuf)); @@ -161,35 +162,32 @@ static int ad9467_spi_read(struct spi_device *spi, unsigned int reg) return rbuf[0]; } -static int ad9467_spi_write(struct spi_device *spi, unsigned int reg, +static int ad9467_spi_write(struct ad9467_state *st, unsigned int reg, unsigned int val) { - unsigned char buf[3]; + st->buf[0] = reg >> 8; + st->buf[1] = reg & 0xFF; + st->buf[2] = val; - buf[0] = reg >> 8; - buf[1] = reg & 0xFF; - buf[2] = val; - - return spi_write(spi, buf, ARRAY_SIZE(buf)); + return spi_write(st->spi, st->buf, ARRAY_SIZE(st->buf)); } static int ad9467_reg_access(struct iio_dev *indio_dev, unsigned int reg, unsigned int writeval, unsigned int *readval) { struct ad9467_state *st = iio_priv(indio_dev); - struct spi_device *spi = st->spi; int ret; if (!readval) { guard(mutex)(&st->lock); - ret = ad9467_spi_write(spi, reg, writeval); + ret = ad9467_spi_write(st, reg, writeval); if (ret) return ret; - return ad9467_spi_write(spi, AN877_ADC_REG_TRANSFER, + return ad9467_spi_write(st, AN877_ADC_REG_TRANSFER, AN877_ADC_TRANSFER_SYNC); } - ret = ad9467_spi_read(spi, reg); + ret = ad9467_spi_read(st, reg); if (ret < 0) return ret; *readval = ret; @@ -295,7 +293,7 @@ static int ad9467_get_scale(struct ad9467_state *st, int *val, int *val2) unsigned int i, vref_val; int ret; - ret = ad9467_spi_read(st->spi, AN877_ADC_REG_VREF); + ret = ad9467_spi_read(st, AN877_ADC_REG_VREF); if (ret < 0) return ret; @@ -330,31 +328,31 @@ static int ad9467_set_scale(struct ad9467_state *st, int val, int val2) continue; guard(mutex)(&st->lock); - ret = ad9467_spi_write(st->spi, AN877_ADC_REG_VREF, + ret = ad9467_spi_write(st, AN877_ADC_REG_VREF, info->scale_table[i][1]); if (ret < 0) return ret; - return ad9467_spi_write(st->spi, AN877_ADC_REG_TRANSFER, + return ad9467_spi_write(st, AN877_ADC_REG_TRANSFER, AN877_ADC_TRANSFER_SYNC); } return -EINVAL; } -static int ad9467_outputmode_set(struct spi_device *spi, unsigned int mode) +static int ad9467_outputmode_set(struct ad9467_state *st, unsigned int mode) { int ret; - ret = ad9467_spi_write(spi, AN877_ADC_REG_OUTPUT_MODE, mode); + ret = ad9467_spi_write(st, AN877_ADC_REG_OUTPUT_MODE, mode); if (ret < 0) return ret; - return ad9467_spi_write(spi, AN877_ADC_REG_TRANSFER, + return ad9467_spi_write(st, AN877_ADC_REG_TRANSFER, AN877_ADC_TRANSFER_SYNC); } -static int ad9647_calibrate_prepare(const struct ad9467_state *st) +static int ad9647_calibrate_prepare(struct ad9467_state *st) { struct iio_backend_data_fmt data = { .enable = false, @@ -362,17 +360,17 @@ static int 
ad9647_calibrate_prepare(const struct ad9467_state *st) unsigned int c; int ret; - ret = ad9467_spi_write(st->spi, AN877_ADC_REG_TEST_IO, + ret = ad9467_spi_write(st, AN877_ADC_REG_TEST_IO, AN877_ADC_TESTMODE_PN9_SEQ); if (ret) return ret; - ret = ad9467_spi_write(st->spi, AN877_ADC_REG_TRANSFER, + ret = ad9467_spi_write(st, AN877_ADC_REG_TRANSFER, AN877_ADC_TRANSFER_SYNC); if (ret) return ret; - ret = ad9467_outputmode_set(st->spi, st->info->default_output_mode); + ret = ad9467_outputmode_set(st, st->info->default_output_mode); if (ret) return ret; @@ -390,7 +388,7 @@ static int ad9647_calibrate_prepare(const struct ad9467_state *st) return iio_backend_chan_enable(st->back, 0); } -static int ad9647_calibrate_polarity_set(const struct ad9467_state *st, +static int ad9647_calibrate_polarity_set(struct ad9467_state *st, bool invert) { enum iio_backend_sample_trigger trigger; @@ -401,7 +399,7 @@ static int ad9647_calibrate_polarity_set(const struct ad9467_state *st, if (invert) phase |= AN877_ADC_INVERT_DCO_CLK; - return ad9467_spi_write(st->spi, AN877_ADC_REG_OUTPUT_PHASE, + return ad9467_spi_write(st, AN877_ADC_REG_OUTPUT_PHASE, phase); } @@ -437,19 +435,18 @@ static unsigned int ad9467_find_optimal_point(const unsigned long *calib_map, return cnt; } -static int ad9467_calibrate_apply(const struct ad9467_state *st, - unsigned int val) +static int ad9467_calibrate_apply(struct ad9467_state *st, unsigned int val) { unsigned int lane; int ret; if (st->info->has_dco) { - ret = ad9467_spi_write(st->spi, AN877_ADC_REG_OUTPUT_DELAY, + ret = ad9467_spi_write(st, AN877_ADC_REG_OUTPUT_DELAY, val); if (ret) return ret; - return ad9467_spi_write(st->spi, AN877_ADC_REG_TRANSFER, + return ad9467_spi_write(st, AN877_ADC_REG_TRANSFER, AN877_ADC_TRANSFER_SYNC); } @@ -462,7 +459,7 @@ static int ad9467_calibrate_apply(const struct ad9467_state *st, return 0; } -static int ad9647_calibrate_stop(const struct ad9467_state *st) +static int ad9647_calibrate_stop(struct ad9467_state *st) { struct iio_backend_data_fmt data = { .sign_extend = true, @@ -487,16 +484,16 @@ static int ad9647_calibrate_stop(const struct ad9467_state *st) } mode = st->info->default_output_mode | AN877_ADC_OUTPUT_MODE_TWOS_COMPLEMENT; - ret = ad9467_outputmode_set(st->spi, mode); + ret = ad9467_outputmode_set(st, mode); if (ret) return ret; - ret = ad9467_spi_write(st->spi, AN877_ADC_REG_TEST_IO, + ret = ad9467_spi_write(st, AN877_ADC_REG_TEST_IO, AN877_ADC_TESTMODE_OFF); if (ret) return ret; - return ad9467_spi_write(st->spi, AN877_ADC_REG_TRANSFER, + return ad9467_spi_write(st, AN877_ADC_REG_TRANSFER, AN877_ADC_TRANSFER_SYNC); } @@ -846,7 +843,7 @@ static int ad9467_probe(struct spi_device *spi) if (ret) return ret; - id = ad9467_spi_read(spi, AN877_ADC_REG_CHIP_ID); + id = ad9467_spi_read(st, AN877_ADC_REG_CHIP_ID); if (id != st->info->id) { dev_err(&spi->dev, "Mismatch CHIP_ID, got 0x%X, expected 0x%X\n", id, st->info->id); diff --git a/drivers/iio/adc/adi-axi-adc.c b/drivers/iio/adc/adi-axi-adc.c index 0cf0d81358fd..bf51d619ebbc 100644 --- a/drivers/iio/adc/adi-axi-adc.c +++ b/drivers/iio/adc/adi-axi-adc.c @@ -85,6 +85,7 @@ static int axi_adc_enable(struct iio_backend *back) struct adi_axi_adc_state *st = iio_backend_get_priv(back); int ret; + guard(mutex)(&st->lock); ret = regmap_set_bits(st->regmap, ADI_AXI_REG_RSTN, ADI_AXI_REG_RSTN_MMCM_RSTN); if (ret) @@ -99,6 +100,7 @@ static void axi_adc_disable(struct iio_backend *back) { struct adi_axi_adc_state *st = iio_backend_get_priv(back); + guard(mutex)(&st->lock); 
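The adi-axi-adc hunks above wrap the ADI_AXI_REG_RSTN accesses in axi_adc_enable() and axi_adc_disable() with the scoped guard(mutex)() helper from <linux/cleanup.h>. A minimal sketch of how that pattern behaves, using hypothetical foo_* names rather than the driver's own code:

	#include <linux/cleanup.h>
	#include <linux/errno.h>
	#include <linux/mutex.h>

	struct foo_state {
		struct mutex lock;
		int counter;
	};

	/* guard(mutex) acquires the lock and registers a scope-exit cleanup,
	 * so the mutex is released on every return path below without a
	 * goto-unlock label. */
	static int foo_bump(struct foo_state *st)
	{
		guard(mutex)(&st->lock);

		if (st->counter < 0)
			return -EINVAL;	/* lock dropped automatically here */

		st->counter++;
		return 0;		/* ...and here */
	}

The critical section becomes the lexical scope of the guard, which is why the enable/disable paths above need no explicit unlock in their error branches.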
regmap_write(st->regmap, ADI_AXI_REG_RSTN, 0); } diff --git a/drivers/iio/frequency/adrf6780.c b/drivers/iio/frequency/adrf6780.c index b4defb82f37e..3f46032c9275 100644 --- a/drivers/iio/frequency/adrf6780.c +++ b/drivers/iio/frequency/adrf6780.c @@ -9,7 +9,6 @@ #include <linux/bits.h> #include <linux/clk.h> #include <linux/clkdev.h> -#include <linux/clk-provider.h> #include <linux/delay.h> #include <linux/device.h> #include <linux/iio/iio.h> diff --git a/drivers/iio/industrialio-gts-helper.c b/drivers/iio/industrialio-gts-helper.c index b51eb6cb766f..59d7615c0f56 100644 --- a/drivers/iio/industrialio-gts-helper.c +++ b/drivers/iio/industrialio-gts-helper.c @@ -362,17 +362,20 @@ static int iio_gts_build_avail_time_table(struct iio_gts *gts) for (i = gts->num_itime - 1; i >= 0; i--) { int new = gts->itime_table[i].time_us; - if (times[idx] < new) { + if (idx == 0 || times[idx - 1] < new) { times[idx++] = new; continue; } - for (j = 0; j <= idx; j++) { + for (j = 0; j < idx; j++) { + if (times[j] == new) + break; if (times[j] > new) { memmove(&times[j + 1], &times[j], (idx - j) * sizeof(int)); times[j] = new; idx++; + break; } } } diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c index c02a96d3572a..6791df64a5fe 100644 --- a/drivers/infiniband/core/cache.c +++ b/drivers/infiniband/core/cache.c @@ -794,7 +794,6 @@ static struct ib_gid_table *alloc_gid_table(int sz) static void release_gid_table(struct ib_device *device, struct ib_gid_table *table) { - bool leak = false; int i; if (!table) @@ -803,15 +802,12 @@ static void release_gid_table(struct ib_device *device, for (i = 0; i < table->sz; i++) { if (is_gid_entry_free(table->data_vec[i])) continue; - if (kref_read(&table->data_vec[i]->kref) > 1) { - dev_err(&device->dev, - "GID entry ref leak for index %d ref=%u\n", i, - kref_read(&table->data_vec[i]->kref)); - leak = true; - } + + WARN_ONCE(true, + "GID entry ref leak for dev %s index %d ref=%u\n", + dev_name(&device->dev), i, + kref_read(&table->data_vec[i]->kref)); } - if (leak) - return; mutex_destroy(&table->lock); kfree(table->data_vec); diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c index 55aa7aa32d4a..46d1c2c32d71 100644 --- a/drivers/infiniband/core/device.c +++ b/drivers/infiniband/core/device.c @@ -2146,6 +2146,9 @@ int ib_device_set_netdev(struct ib_device *ib_dev, struct net_device *ndev, unsigned long flags; int ret; + if (!rdma_is_port_valid(ib_dev, port)) + return -EINVAL; + /* * Drivers wish to call this before ib_register_driver, so we have to * setup the port data early.
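The industrialio-gts-helper hunk a few hunks above reworks iio_gts_build_avail_time_table() so each integration time is inserted into an ascending array without reading past the filled entries and with duplicates skipped. A standalone sketch of that insertion logic, using a hypothetical insert_sorted_unique() rather than the kernel function:

	#include <stddef.h>
	#include <string.h>

	/* Insert val into arr[0..len), which is sorted ascending, keeping it
	 * sorted and duplicate-free.  Returns the new element count; arr must
	 * have room for one extra entry. */
	static size_t insert_sorted_unique(int *arr, size_t len, int val)
	{
		size_t j;

		if (len == 0 || arr[len - 1] < val) {
			arr[len] = val;		/* larger than everything so far */
			return len + 1;
		}

		for (j = 0; j < len; j++) {
			if (arr[j] == val)	/* duplicate: leave the table as is */
				return len;
			if (arr[j] > val) {	/* shift the tail and insert here */
				memmove(&arr[j + 1], &arr[j],
					(len - j) * sizeof(*arr));
				arr[j] = val;
				return len + 1;
			}
		}
		return len;	/* unreachable when arr is sorted */
	}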
@@ -2154,9 +2157,6 @@ int ib_device_set_netdev(struct ib_device *ib_dev, struct net_device *ndev, if (ret) return ret; - if (!rdma_is_port_valid(ib_dev, port)) - return -EINVAL; - pdata = &ib_dev->port_data[port]; spin_lock_irqsave(&pdata->netdev_lock, flags); old_ndev = rcu_dereference_protected( @@ -2166,16 +2166,12 @@ int ib_device_set_netdev(struct ib_device *ib_dev, struct net_device *ndev, return 0; } - if (old_ndev) - netdev_tracker_free(ndev, &pdata->netdev_tracker); - if (ndev) - netdev_hold(ndev, &pdata->netdev_tracker, GFP_ATOMIC); rcu_assign_pointer(pdata->netdev, ndev); + netdev_put(old_ndev, &pdata->netdev_tracker); + netdev_hold(ndev, &pdata->netdev_tracker, GFP_ATOMIC); spin_unlock_irqrestore(&pdata->netdev_lock, flags); add_ndev_hash(pdata); - __dev_put(old_ndev); - return 0; } EXPORT_SYMBOL(ib_device_set_netdev); diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c index 0301fcad4b48..bf3265e67865 100644 --- a/drivers/infiniband/core/iwcm.c +++ b/drivers/infiniband/core/iwcm.c @@ -368,8 +368,10 @@ EXPORT_SYMBOL(iw_cm_disconnect); * * Clean up all resources associated with the connection and release * the initial reference taken by iw_create_cm_id. + * + * Returns true if and only if the last cm_id_priv reference has been dropped. */ -static void destroy_cm_id(struct iw_cm_id *cm_id) +static bool destroy_cm_id(struct iw_cm_id *cm_id) { struct iwcm_id_private *cm_id_priv; struct ib_qp *qp; @@ -439,7 +441,7 @@ static void destroy_cm_id(struct iw_cm_id *cm_id) iwpm_remove_mapping(&cm_id->local_addr, RDMA_NL_IWCM); } - (void)iwcm_deref_id(cm_id_priv); + return iwcm_deref_id(cm_id_priv); } /* @@ -450,7 +452,8 @@ static void destroy_cm_id(struct iw_cm_id *cm_id) */ void iw_destroy_cm_id(struct iw_cm_id *cm_id) { - destroy_cm_id(cm_id); + if (!destroy_cm_id(cm_id)) + flush_workqueue(iwcm_wq); } EXPORT_SYMBOL(iw_destroy_cm_id); @@ -1034,7 +1037,7 @@ static void cm_work_handler(struct work_struct *_work) if (!test_bit(IWCM_F_DROP_EVENTS, &cm_id_priv->flags)) { ret = process_event(cm_id_priv, &levent); if (ret) - destroy_cm_id(&cm_id_priv->id); + WARN_ON_ONCE(destroy_cm_id(&cm_id_priv->id)); } else pr_debug("dropping event %d\n", levent.event); if (iwcm_deref_id(cm_id_priv)) diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c index ce9c5bae83bf..582e83a36ccb 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c @@ -2479,7 +2479,7 @@ static int bnxt_re_build_send_wqe(struct bnxt_re_qp *qp, break; case IB_WR_SEND_WITH_IMM: wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM; - wqe->send.imm_data = wr->ex.imm_data; + wqe->send.imm_data = be32_to_cpu(wr->ex.imm_data); break; case IB_WR_SEND_WITH_INV: wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV; @@ -2509,7 +2509,7 @@ static int bnxt_re_build_rdma_wqe(const struct ib_send_wr *wr, break; case IB_WR_RDMA_WRITE_WITH_IMM: wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM; - wqe->rdma.imm_data = wr->ex.imm_data; + wqe->rdma.imm_data = be32_to_cpu(wr->ex.imm_data); break; case IB_WR_RDMA_READ: wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_READ; @@ -3581,7 +3581,7 @@ static void bnxt_re_process_res_shadow_qp_wc(struct bnxt_re_qp *gsi_sqp, wc->byte_len = orig_cqe->length; wc->qp = &gsi_qp->ib_qp; - wc->ex.imm_data = orig_cqe->immdata; + wc->ex.imm_data = cpu_to_be32(le32_to_cpu(orig_cqe->immdata)); wc->src_qp = orig_cqe->src_qp; memcpy(wc->smac, orig_cqe->smac, ETH_ALEN); if (bnxt_re_is_vlan_pkt(orig_cqe, &vlan_id, &sl)) { @@ -3726,7 
+3726,7 @@ int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc) (unsigned long)(cqe->qp_handle), struct bnxt_re_qp, qplib_qp); wc->qp = &qp->ib_qp; - wc->ex.imm_data = cqe->immdata; + wc->ex.imm_data = cpu_to_be32(le32_to_cpu(cqe->immdata)); wc->src_qp = cqe->src_qp; memcpy(wc->smac, cqe->smac, ETH_ALEN); wc->port_num = 1; diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.h b/drivers/infiniband/hw/bnxt_re/qplib_fp.h index 7fd4506b3584..244da20d1181 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_fp.h +++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h @@ -164,7 +164,7 @@ struct bnxt_qplib_swqe { /* Send, with imm, inval key */ struct { union { - __be32 imm_data; + u32 imm_data; u32 inv_key; }; u32 q_key; @@ -182,7 +182,7 @@ struct bnxt_qplib_swqe { /* RDMA write, with imm, read */ struct { union { - __be32 imm_data; + u32 imm_data; u32 inv_key; }; u64 remote_va; @@ -389,7 +389,7 @@ struct bnxt_qplib_cqe { u16 cfa_meta; u64 wr_id; union { - __be32 immdata; + __le32 immdata; u32 invrkey; }; u64 qp_handle; diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h index ff0b3f68ee3a..7d5931872f8a 100644 --- a/drivers/infiniband/hw/hns/hns_roce_device.h +++ b/drivers/infiniband/hw/hns/hns_roce_device.h @@ -83,6 +83,7 @@ #define MR_TYPE_DMA 0x03 #define HNS_ROCE_FRMR_MAX_PA 512 +#define HNS_ROCE_FRMR_ALIGN_SIZE 128 #define PKEY_ID 0xffff #define NODE_DESC_SIZE 64 @@ -91,6 +92,8 @@ /* Configure to HW for PAGE_SIZE larger than 4KB */ #define PG_SHIFT_OFFSET (PAGE_SHIFT - 12) +#define ATOMIC_WR_LEN 8 + #define HNS_ROCE_IDX_QUE_ENTRY_SZ 4 #define SRQ_DB_REG 0x230 @@ -187,6 +190,9 @@ enum { #define HNS_HW_PAGE_SHIFT 12 #define HNS_HW_PAGE_SIZE (1 << HNS_HW_PAGE_SHIFT) +#define HNS_HW_MAX_PAGE_SHIFT 27 +#define HNS_HW_MAX_PAGE_SIZE (1 << HNS_HW_MAX_PAGE_SHIFT) + struct hns_roce_uar { u64 pfn; unsigned long index; @@ -715,6 +721,7 @@ struct hns_roce_eq { int shift; int event_type; int sub_type; + struct work_struct work; }; struct hns_roce_eq_table { diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index 4287818a737f..621b057fb9da 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c @@ -36,6 +36,7 @@ #include <linux/iopoll.h> #include <linux/kernel.h> #include <linux/types.h> +#include <linux/workqueue.h> #include <net/addrconf.h> #include <rdma/ib_addr.h> #include <rdma/ib_cache.h> @@ -591,11 +592,16 @@ static inline int set_rc_wqe(struct hns_roce_qp *qp, (wr->send_flags & IB_SEND_SIGNALED) ? 
1 : 0); if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP || - wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) + wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) { + if (msg_len != ATOMIC_WR_LEN) + return -EINVAL; set_atomic_seg(wr, rc_sq_wqe, valid_num_sge); - else if (wr->opcode != IB_WR_REG_MR) + } else if (wr->opcode != IB_WR_REG_MR) { ret = set_rwqe_data_seg(&qp->ibqp, wr, rc_sq_wqe, &curr_idx, valid_num_sge); + if (ret) + return ret; + } /* * The pipeline can sequentially post all valid WQEs into WQ buffer, @@ -1269,12 +1275,38 @@ static int hns_roce_cmd_err_convert_errno(u16 desc_ret) return -EIO; } +static u32 hns_roce_cmdq_tx_timeout(u16 opcode, u32 tx_timeout) +{ + static const struct hns_roce_cmdq_tx_timeout_map cmdq_tx_timeout[] = { + {HNS_ROCE_OPC_POST_MB, HNS_ROCE_OPC_POST_MB_TIMEOUT}, + }; + int i; + + for (i = 0; i < ARRAY_SIZE(cmdq_tx_timeout); i++) + if (cmdq_tx_timeout[i].opcode == opcode) + return cmdq_tx_timeout[i].tx_timeout; + + return tx_timeout; +} + +static void hns_roce_wait_csq_done(struct hns_roce_dev *hr_dev, u16 opcode) +{ + struct hns_roce_v2_priv *priv = hr_dev->priv; + u32 tx_timeout = hns_roce_cmdq_tx_timeout(opcode, priv->cmq.tx_timeout); + u32 timeout = 0; + + do { + if (hns_roce_cmq_csq_done(hr_dev)) + break; + udelay(1); + } while (++timeout < tx_timeout); +} + static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev, struct hns_roce_cmq_desc *desc, int num) { struct hns_roce_v2_priv *priv = hr_dev->priv; struct hns_roce_v2_cmq_ring *csq = &priv->cmq.csq; - u32 timeout = 0; u16 desc_ret; u32 tail; int ret; @@ -1295,12 +1327,7 @@ static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev, atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_CMDS_CNT]); - do { - if (hns_roce_cmq_csq_done(hr_dev)) - break; - udelay(1); - } while (++timeout < priv->cmq.tx_timeout); - + hns_roce_wait_csq_done(hr_dev, le16_to_cpu(desc->opcode)); if (hns_roce_cmq_csq_done(hr_dev)) { ret = 0; for (i = 0; i < num; i++) { @@ -2457,14 +2484,16 @@ static int set_llm_cfg_to_hw(struct hns_roce_dev *hr_dev, static struct hns_roce_link_table * alloc_link_table_buf(struct hns_roce_dev *hr_dev) { + u16 total_sl = hr_dev->caps.sl_num * hr_dev->func_num; struct hns_roce_v2_priv *priv = hr_dev->priv; struct hns_roce_link_table *link_tbl; u32 pg_shift, size, min_size; link_tbl = &priv->ext_llm; pg_shift = hr_dev->caps.llm_buf_pg_sz + PAGE_SHIFT; - size = hr_dev->caps.num_qps * HNS_ROCE_V2_EXT_LLM_ENTRY_SZ; - min_size = HNS_ROCE_EXT_LLM_MIN_PAGES(hr_dev->caps.sl_num) << pg_shift; + size = hr_dev->caps.num_qps * hr_dev->func_num * + HNS_ROCE_V2_EXT_LLM_ENTRY_SZ; + min_size = HNS_ROCE_EXT_LLM_MIN_PAGES(total_sl) << pg_shift; /* Alloc data table */ size = max(size, min_size); @@ -6135,33 +6164,11 @@ static struct hns_roce_ceqe *next_ceqe_sw_v2(struct hns_roce_eq *eq) !!(eq->cons_index & eq->entries)) ? 
ceqe : NULL; } -static irqreturn_t hns_roce_v2_ceq_int(struct hns_roce_dev *hr_dev, - struct hns_roce_eq *eq) +static irqreturn_t hns_roce_v2_ceq_int(struct hns_roce_eq *eq) { - struct hns_roce_ceqe *ceqe = next_ceqe_sw_v2(eq); - irqreturn_t ceqe_found = IRQ_NONE; - u32 cqn; - - while (ceqe) { - /* Make sure we read CEQ entry after we have checked the - * ownership bit - */ - dma_rmb(); - - cqn = hr_reg_read(ceqe, CEQE_CQN); - - hns_roce_cq_completion(hr_dev, cqn); - - ++eq->cons_index; - ceqe_found = IRQ_HANDLED; - atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_CEQE_CNT]); + queue_work(system_bh_wq, &eq->work); - ceqe = next_ceqe_sw_v2(eq); - } - - update_eq_db(eq); - - return IRQ_RETVAL(ceqe_found); + return IRQ_HANDLED; } static irqreturn_t hns_roce_v2_msix_interrupt_eq(int irq, void *eq_ptr) @@ -6172,7 +6179,7 @@ static irqreturn_t hns_roce_v2_msix_interrupt_eq(int irq, void *eq_ptr) if (eq->type_flag == HNS_ROCE_CEQ) /* Completion event interrupt */ - int_work = hns_roce_v2_ceq_int(hr_dev, eq); + int_work = hns_roce_v2_ceq_int(eq); else /* Asynchronous event interrupt */ int_work = hns_roce_v2_aeq_int(hr_dev, eq); @@ -6384,9 +6391,16 @@ static void hns_roce_v2_int_mask_enable(struct hns_roce_dev *hr_dev, roce_write(hr_dev, ROCEE_VF_ABN_INT_CFG_REG, enable_flag); } -static void hns_roce_v2_destroy_eqc(struct hns_roce_dev *hr_dev, u32 eqn) +static void free_eq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq) +{ + hns_roce_mtr_destroy(hr_dev, &eq->mtr); +} + +static void hns_roce_v2_destroy_eqc(struct hns_roce_dev *hr_dev, + struct hns_roce_eq *eq) { struct device *dev = hr_dev->dev; + int eqn = eq->eqn; int ret; u8 cmd; @@ -6397,12 +6411,9 @@ static void hns_roce_v2_destroy_eqc(struct hns_roce_dev *hr_dev, u32 eqn) ret = hns_roce_destroy_hw_ctx(hr_dev, cmd, eqn & HNS_ROCE_V2_EQN_M); if (ret) - dev_err(dev, "[mailbox cmd] destroy eqc(%u) failed.\n", eqn); -} + dev_err(dev, "[mailbox cmd] destroy eqc(%d) failed.\n", eqn); -static void free_eq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq) -{ - hns_roce_mtr_destroy(hr_dev, &eq->mtr); + free_eq_buf(hr_dev, eq); } static void init_eq_config(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq) @@ -6540,6 +6551,34 @@ static int hns_roce_v2_create_eq(struct hns_roce_dev *hr_dev, return ret; } +static void hns_roce_ceq_work(struct work_struct *work) +{ + struct hns_roce_eq *eq = from_work(eq, work, work); + struct hns_roce_ceqe *ceqe = next_ceqe_sw_v2(eq); + struct hns_roce_dev *hr_dev = eq->hr_dev; + int ceqe_num = 0; + u32 cqn; + + while (ceqe && ceqe_num < hr_dev->caps.ceqe_depth) { + /* Make sure we read CEQ entry after we have checked the + * ownership bit + */ + dma_rmb(); + + cqn = hr_reg_read(ceqe, CEQE_CQN); + + hns_roce_cq_completion(hr_dev, cqn); + + ++eq->cons_index; + ++ceqe_num; + atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_CEQE_CNT]); + + ceqe = next_ceqe_sw_v2(eq); + } + + update_eq_db(eq); +} + static int __hns_roce_request_irq(struct hns_roce_dev *hr_dev, int irq_num, int comp_num, int aeq_num, int other_num) { @@ -6571,21 +6610,24 @@ static int __hns_roce_request_irq(struct hns_roce_dev *hr_dev, int irq_num, j - other_num - aeq_num); for (j = 0; j < irq_num; j++) { - if (j < other_num) + if (j < other_num) { ret = request_irq(hr_dev->irq[j], hns_roce_v2_msix_interrupt_abn, 0, hr_dev->irq_names[j], hr_dev); - - else if (j < (other_num + comp_num)) + } else if (j < (other_num + comp_num)) { + INIT_WORK(&eq_table->eq[j - other_num].work, + hns_roce_ceq_work); ret = request_irq(eq_table->eq[j - other_num].irq, 
hns_roce_v2_msix_interrupt_eq, 0, hr_dev->irq_names[j + aeq_num], &eq_table->eq[j - other_num]); - else + } else { ret = request_irq(eq_table->eq[j - other_num].irq, hns_roce_v2_msix_interrupt_eq, 0, hr_dev->irq_names[j - comp_num], &eq_table->eq[j - other_num]); + } + if (ret) { dev_err(hr_dev->dev, "request irq error!\n"); goto err_request_failed; @@ -6595,12 +6637,16 @@ static int __hns_roce_request_irq(struct hns_roce_dev *hr_dev, int irq_num, return 0; err_request_failed: - for (j -= 1; j >= 0; j--) - if (j < other_num) + for (j -= 1; j >= 0; j--) { + if (j < other_num) { free_irq(hr_dev->irq[j], hr_dev); - else - free_irq(eq_table->eq[j - other_num].irq, - &eq_table->eq[j - other_num]); + continue; + } + free_irq(eq_table->eq[j - other_num].irq, + &eq_table->eq[j - other_num]); + if (j < other_num + comp_num) + cancel_work_sync(&eq_table->eq[j - other_num].work); + } err_kzalloc_failed: for (i -= 1; i >= 0; i--) @@ -6621,8 +6667,11 @@ static void __hns_roce_free_irq(struct hns_roce_dev *hr_dev) for (i = 0; i < hr_dev->caps.num_other_vectors; i++) free_irq(hr_dev->irq[i], hr_dev); - for (i = 0; i < eq_num; i++) + for (i = 0; i < eq_num; i++) { free_irq(hr_dev->eq_table.eq[i].irq, &hr_dev->eq_table.eq[i]); + if (i < hr_dev->caps.num_comp_vectors) + cancel_work_sync(&hr_dev->eq_table.eq[i].work); + } for (i = 0; i < irq_num; i++) kfree(hr_dev->irq_names[i]); @@ -6711,7 +6760,7 @@ static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev) err_create_eq_fail: for (i -= 1; i >= 0; i--) - free_eq_buf(hr_dev, &eq_table->eq[i]); + hns_roce_v2_destroy_eqc(hr_dev, &eq_table->eq[i]); kfree(eq_table->eq); return ret; @@ -6731,11 +6780,8 @@ static void hns_roce_v2_cleanup_eq_table(struct hns_roce_dev *hr_dev) __hns_roce_free_irq(hr_dev); destroy_workqueue(hr_dev->irq_workq); - for (i = 0; i < eq_num; i++) { - hns_roce_v2_destroy_eqc(hr_dev, i); - - free_eq_buf(hr_dev, &eq_table->eq[i]); - } + for (i = 0; i < eq_num; i++) + hns_roce_v2_destroy_eqc(hr_dev, &eq_table->eq[i]); kfree(eq_table->eq); } diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h index def1d15a03c7..c65f68a14a26 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h @@ -224,6 +224,12 @@ enum hns_roce_opcode_type { HNS_SWITCH_PARAMETER_CFG = 0x1033, }; +#define HNS_ROCE_OPC_POST_MB_TIMEOUT 35000 +struct hns_roce_cmdq_tx_timeout_map { + u16 opcode; + u32 tx_timeout; +}; + enum { TYPE_CRQ, TYPE_CSQ, diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c index 1a61dceb3319..846da8c78b8b 100644 --- a/drivers/infiniband/hw/hns/hns_roce_mr.c +++ b/drivers/infiniband/hw/hns/hns_roce_mr.c @@ -443,6 +443,11 @@ int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents, struct hns_roce_mtr *mtr = &mr->pbl_mtr; int ret, sg_num = 0; + if (!IS_ALIGNED(*sg_offset, HNS_ROCE_FRMR_ALIGN_SIZE) || + ibmr->page_size < HNS_HW_PAGE_SIZE || + ibmr->page_size > HNS_HW_MAX_PAGE_SIZE) + return sg_num; + mr->npages = 0; mr->page_list = kvcalloc(mr->pbl_mtr.hem_cfg.buf_pg_count, sizeof(dma_addr_t), GFP_KERNEL); diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c index db34665d1dfb..1de384ce4d0e 100644 --- a/drivers/infiniband/hw/hns/hns_roce_qp.c +++ b/drivers/infiniband/hw/hns/hns_roce_qp.c @@ -532,13 +532,15 @@ static unsigned int get_sge_num_from_max_inl_data(bool is_ud_or_gsi, { unsigned int inline_sge; - inline_sge = 
roundup_pow_of_two(max_inline_data) / HNS_ROCE_SGE_SIZE; + if (!max_inline_data) + return 0; /* * if max_inline_data less than * HNS_ROCE_SGE_IN_WQE * HNS_ROCE_SGE_SIZE, * In addition to ud's mode, no need to extend sge. */ + inline_sge = roundup_pow_of_two(max_inline_data) / HNS_ROCE_SGE_SIZE; if (!is_ud_or_gsi && inline_sge <= HNS_ROCE_SGE_IN_WQE) inline_sge = 0; diff --git a/drivers/infiniband/hw/hns/hns_roce_srq.c b/drivers/infiniband/hw/hns/hns_roce_srq.c index f1997abc97ca..c9b8233f4b05 100644 --- a/drivers/infiniband/hw/hns/hns_roce_srq.c +++ b/drivers/infiniband/hw/hns/hns_roce_srq.c @@ -297,7 +297,7 @@ static int set_srq_basic_param(struct hns_roce_srq *srq, max_sge = proc_srq_sge(hr_dev, srq, !!udata); if (attr->max_wr > hr_dev->caps.max_srq_wrs || - attr->max_sge > max_sge) { + attr->max_sge > max_sge || !attr->max_sge) { ibdev_err(&hr_dev->ib_dev, "invalid SRQ attr, depth = %u, sge = %u.\n", attr->max_wr, attr->max_sge); diff --git a/drivers/infiniband/hw/mana/device.c b/drivers/infiniband/hw/mana/device.c index 7e09ceb3da53..7bb7e0639200 100644 --- a/drivers/infiniband/hw/mana/device.c +++ b/drivers/infiniband/hw/mana/device.c @@ -5,6 +5,7 @@ #include "mana_ib.h" #include <net/mana/mana_auxiliary.h> +#include <net/addrconf.h> MODULE_DESCRIPTION("Microsoft Azure Network Adapter IB driver"); MODULE_LICENSE("GPL"); @@ -55,7 +56,7 @@ static int mana_ib_probe(struct auxiliary_device *adev, { struct mana_adev *madev = container_of(adev, struct mana_adev, adev); struct gdma_dev *mdev = madev->mdev; - struct net_device *upper_ndev; + struct net_device *ndev; struct mana_context *mc; struct mana_ib_dev *dev; u8 mac_addr[ETH_ALEN]; @@ -83,16 +84,17 @@ static int mana_ib_probe(struct auxiliary_device *adev, dev->ib_dev.num_comp_vectors = mdev->gdma_context->max_num_queues; dev->ib_dev.dev.parent = mdev->gdma_context->dev; - rcu_read_lock(); /* required to get upper dev */ - upper_ndev = netdev_master_upper_dev_get_rcu(mc->ports[0]); - if (!upper_ndev) { + rcu_read_lock(); /* required to get primary netdev */ + ndev = mana_get_primary_netdev_rcu(mc, 0); + if (!ndev) { rcu_read_unlock(); ret = -ENODEV; - ibdev_err(&dev->ib_dev, "Failed to get master netdev"); + ibdev_err(&dev->ib_dev, "Failed to get netdev for IB port 1"); goto free_ib_device; } - ether_addr_copy(mac_addr, upper_ndev->dev_addr); - ret = ib_device_set_netdev(&dev->ib_dev, upper_ndev, 1); + ether_addr_copy(mac_addr, ndev->dev_addr); + addrconf_addr_eui48((u8 *)&dev->ib_dev.node_guid, ndev->dev_addr); + ret = ib_device_set_netdev(&dev->ib_dev, ndev, 1); rcu_read_unlock(); if (ret) { ibdev_err(&dev->ib_dev, "Failed to set ib netdev, ret %d", ret); diff --git a/drivers/infiniband/hw/mlx4/alias_GUID.c b/drivers/infiniband/hw/mlx4/alias_GUID.c index 111fa88a3be4..9a439569ffcf 100644 --- a/drivers/infiniband/hw/mlx4/alias_GUID.c +++ b/drivers/infiniband/hw/mlx4/alias_GUID.c @@ -829,7 +829,7 @@ void mlx4_ib_destroy_alias_guid_service(struct mlx4_ib_dev *dev) int mlx4_ib_init_alias_guid_service(struct mlx4_ib_dev *dev) { - char alias_wq_name[15]; + char alias_wq_name[22]; int ret = 0; int i, j; union ib_gid gid; diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c index a37cfac5e23f..dc9cf45d2d32 100644 --- a/drivers/infiniband/hw/mlx4/mad.c +++ b/drivers/infiniband/hw/mlx4/mad.c @@ -2158,7 +2158,7 @@ static int mlx4_ib_alloc_demux_ctx(struct mlx4_ib_dev *dev, struct mlx4_ib_demux_ctx *ctx, int port) { - char name[12]; + char name[21]; int ret = 0; int i; diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h 
b/drivers/infiniband/hw/mlx5/mlx5_ib.h index f255a12e26a0..f9abdca3493a 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h @@ -115,6 +115,19 @@ unsigned long __mlx5_umem_find_best_quantized_pgoff( __mlx5_bit_sz(typ, page_offset_fld), 0, scale, \ page_offset_quantized) +static inline unsigned long +mlx5_umem_dmabuf_find_best_pgsz(struct ib_umem_dmabuf *umem_dmabuf) +{ + /* + * mkeys used for dmabuf are fixed at PAGE_SIZE because we must be able + * to hold any sgl after a move operation. Ideally the mkc page size + * could be changed at runtime to be optimal, but right now the driver + * cannot do that. + */ + return ib_umem_find_best_pgsz(&umem_dmabuf->umem, PAGE_SIZE, + umem_dmabuf->umem.iova); +} + enum { MLX5_IB_MMAP_OFFSET_START = 9, MLX5_IB_MMAP_OFFSET_END = 255, diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c index 4a04cbc5b78a..a524181f34df 100644 --- a/drivers/infiniband/hw/mlx5/odp.c +++ b/drivers/infiniband/hw/mlx5/odp.c @@ -705,10 +705,8 @@ static int pagefault_dmabuf_mr(struct mlx5_ib_mr *mr, size_t bcnt, return err; } - page_size = mlx5_umem_find_best_pgsz(&umem_dmabuf->umem, mkc, - log_page_size, 0, - umem_dmabuf->umem.iova); - if (unlikely(page_size < PAGE_SIZE)) { + page_size = mlx5_umem_dmabuf_find_best_pgsz(umem_dmabuf); + if (!page_size) { ib_umem_dmabuf_unmap_pages(umem_dmabuf); err = -EINVAL; } else { diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c index cd14c4c2dff9..479c07e6e4ed 100644 --- a/drivers/infiniband/sw/rxe/rxe_req.c +++ b/drivers/infiniband/sw/rxe/rxe_req.c @@ -424,7 +424,7 @@ static struct sk_buff *init_req_packet(struct rxe_qp *qp, int paylen; int solicited; u32 qp_num; - int ack_req; + int ack_req = 0; /* length from start of bth to end of icrc */ paylen = rxe_opcode[opcode].length + payload + pad + RXE_ICRC_SIZE; @@ -445,8 +445,9 @@ static struct sk_buff *init_req_packet(struct rxe_qp *qp, qp_num = (pkt->mask & RXE_DETH_MASK) ? 
ibwr->wr.ud.remote_qpn : qp->attr.dest_qp_num; - ack_req = ((pkt->mask & RXE_END_MASK) || - (qp->req.noack_pkts++ > RXE_MAX_PKT_PER_ACK)); + if (qp_type(qp) != IB_QPT_UD && qp_type(qp) != IB_QPT_UC) + ack_req = ((pkt->mask & RXE_END_MASK) || + (qp->req.noack_pkts++ > RXE_MAX_PKT_PER_ACK)); if (ack_req) qp->req.noack_pkts = 0; diff --git a/drivers/input/keyboard/qt1050.c b/drivers/input/keyboard/qt1050.c index b51dfcd76038..056e9bc26026 100644 --- a/drivers/input/keyboard/qt1050.c +++ b/drivers/input/keyboard/qt1050.c @@ -226,7 +226,12 @@ static bool qt1050_identify(struct qt1050_priv *ts) int err; /* Read Chip ID */ - regmap_read(ts->regmap, QT1050_CHIP_ID, &val); + err = regmap_read(ts->regmap, QT1050_CHIP_ID, &val); + if (err) { + dev_err(&ts->client->dev, "Failed to read chip ID: %d\n", err); + return false; + } + if (val != QT1050_CHIP_ID_VER) { dev_err(&ts->client->dev, "ID %d not supported\n", val); return false; diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c index c2aec5c360b3..ce96513b34f6 100644 --- a/drivers/input/mouse/elan_i2c_core.c +++ b/drivers/input/mouse/elan_i2c_core.c @@ -1356,6 +1356,8 @@ static int elan_suspend(struct device *dev) } err: + if (ret) + enable_irq(client->irq); mutex_unlock(&data->sysfs_mutex); return ret; } diff --git a/drivers/interconnect/qcom/qcm2290.c b/drivers/interconnect/qcom/qcm2290.c index ba4cc08684d6..ccbdc6202c07 100644 --- a/drivers/interconnect/qcom/qcm2290.c +++ b/drivers/interconnect/qcom/qcm2290.c @@ -166,7 +166,7 @@ static struct qcom_icc_node mas_snoc_bimc = { .qos.ap_owned = true, .qos.qos_port = 6, .qos.qos_mode = NOC_QOS_MODE_BYPASS, - .mas_rpm_id = 164, + .mas_rpm_id = 3, .slv_rpm_id = -1, .num_links = ARRAY_SIZE(mas_snoc_bimc_links), .links = mas_snoc_bimc_links, diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c index ab415e107054..f456bcf1890b 100644 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c @@ -2302,7 +2302,7 @@ static int arm_smmu_domain_finalise_s1(struct arm_smmu_device *smmu, struct arm_smmu_domain *smmu_domain) { int ret; - u32 asid; + u32 asid = 0; struct arm_smmu_ctx_desc *cd = &smmu_domain->cd; refcount_set(&cd->refs, 1); diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c index 552199cbd9e2..482c40aa029b 100644 --- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c +++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c @@ -488,7 +488,7 @@ irqreturn_t qcom_smmu_context_fault(int irq, void *dev) return ret; } -static int qcom_tbu_probe(struct platform_device *pdev) +int qcom_tbu_probe(struct platform_device *pdev) { struct of_phandle_args args = { .args_count = 2 }; struct device_node *np = pdev->dev.of_node; @@ -530,18 +530,3 @@ static int qcom_tbu_probe(struct platform_device *pdev) return 0; } - -static const struct of_device_id qcom_tbu_of_match[] = { - { .compatible = "qcom,sc7280-tbu" }, - { .compatible = "qcom,sdm845-tbu" }, - { } -}; - -static struct platform_driver qcom_tbu_driver = { - .driver = { - .name = "qcom_tbu", - .of_match_table = qcom_tbu_of_match, - }, - .probe = qcom_tbu_probe, -}; -builtin_platform_driver(qcom_tbu_driver); diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c index 25f034677f56..13f3e2efb2cc 100644 --- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c +++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c @@ -8,6 +8,8 @@ #include 
<linux/delay.h> #include <linux/of_device.h> #include <linux/firmware/qcom/qcom_scm.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> #include "arm-smmu.h" #include "arm-smmu-qcom.h" @@ -561,10 +563,47 @@ static struct acpi_platform_list qcom_acpi_platlist[] = { }; #endif +static int qcom_smmu_tbu_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + int ret; + + if (IS_ENABLED(CONFIG_ARM_SMMU_QCOM_DEBUG)) { + ret = qcom_tbu_probe(pdev); + if (ret) + return ret; + } + + if (dev->pm_domain) { + pm_runtime_set_active(dev); + pm_runtime_enable(dev); + } + + return 0; +} + +static const struct of_device_id qcom_smmu_tbu_of_match[] = { + { .compatible = "qcom,sc7280-tbu" }, + { .compatible = "qcom,sdm845-tbu" }, + { } +}; + +static struct platform_driver qcom_smmu_tbu_driver = { + .driver = { + .name = "qcom_tbu", + .of_match_table = qcom_smmu_tbu_of_match, + }, + .probe = qcom_smmu_tbu_probe, +}; + struct arm_smmu_device *qcom_smmu_impl_init(struct arm_smmu_device *smmu) { const struct device_node *np = smmu->dev->of_node; const struct of_device_id *match; + static u8 tbu_registered; + + if (!tbu_registered++) + platform_driver_register(&qcom_smmu_tbu_driver); #ifdef CONFIG_ACPI if (np == NULL) { diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.h b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.h index 9bb3ae7d62da..3c134d1a6277 100644 --- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.h +++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.h @@ -34,8 +34,10 @@ irqreturn_t qcom_smmu_context_fault(int irq, void *dev); #ifdef CONFIG_ARM_SMMU_QCOM_DEBUG void qcom_smmu_tlb_sync_debug(struct arm_smmu_device *smmu); +int qcom_tbu_probe(struct platform_device *pdev); #else static inline void qcom_smmu_tlb_sync_debug(struct arm_smmu_device *smmu) { } +static inline int qcom_tbu_probe(struct platform_device *pdev) { return -EINVAL; } #endif #endif /* _ARM_SMMU_QCOM_H */ diff --git a/drivers/iommu/intel/cache.c b/drivers/iommu/intel/cache.c index e8418cdd8331..44e92638c0cd 100644 --- a/drivers/iommu/intel/cache.c +++ b/drivers/iommu/intel/cache.c @@ -245,7 +245,8 @@ static unsigned long calculate_psi_aligned_address(unsigned long start, * shared_bits are all equal in both pfn and end_pfn. */ shared_bits = ~(pfn ^ end_pfn) & ~bitmask; - mask = shared_bits ? __ffs(shared_bits) : BITS_PER_LONG; + mask = shared_bits ? 
__ffs(shared_bits) : MAX_AGAW_PFN_WIDTH; + aligned_pages = 1UL << mask; } *_pages = aligned_pages; diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index fd11a080380c..f55ec1fd7942 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -2071,7 +2071,7 @@ static int __init si_domain_init(int hw) for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) { ret = iommu_domain_identity_map(si_domain, mm_to_dma_pfn_start(start_pfn), - mm_to_dma_pfn_end(end_pfn)); + mm_to_dma_pfn_end(end_pfn-1)); if (ret) return ret; } diff --git a/drivers/iommu/iommufd/iova_bitmap.c b/drivers/iommu/iommufd/iova_bitmap.c index db8c46bee155..e33ddfc239b5 100644 --- a/drivers/iommu/iommufd/iova_bitmap.c +++ b/drivers/iommu/iommufd/iova_bitmap.c @@ -384,8 +384,6 @@ static int iova_bitmap_advance(struct iova_bitmap *bitmap) bitmap->mapped_base_index += count; iova_bitmap_put(bitmap); - if (iova_bitmap_done(bitmap)) - return 0; /* Iterate, set and skip any bits requested for next iteration */ if (bitmap->set_ahead_length) { @@ -396,6 +394,9 @@ static int iova_bitmap_advance(struct iova_bitmap *bitmap) return ret; } + if (iova_bitmap_done(bitmap)) + return 0; + /* When advancing the index we pin the next set of bitmap pages */ return iova_bitmap_get(bitmap); } diff --git a/drivers/iommu/iommufd/selftest.c b/drivers/iommu/iommufd/selftest.c index 7a2199470f31..654ed3339095 100644 --- a/drivers/iommu/iommufd/selftest.c +++ b/drivers/iommu/iommufd/selftest.c @@ -1334,7 +1334,7 @@ static int iommufd_test_dirty(struct iommufd_ucmd *ucmd, unsigned int mockpt_id, } max = length / page_size; - bitmap_size = max / BITS_PER_BYTE; + bitmap_size = DIV_ROUND_UP(max, BITS_PER_BYTE); tmp = kvzalloc(bitmap_size, GFP_KERNEL_ACCOUNT); if (!tmp) { diff --git a/drivers/iommu/sprd-iommu.c b/drivers/iommu/sprd-iommu.c index ba53571a8239..a2f4ffe6d949 100644 --- a/drivers/iommu/sprd-iommu.c +++ b/drivers/iommu/sprd-iommu.c @@ -232,8 +232,8 @@ static void sprd_iommu_cleanup(struct sprd_iommu_domain *dom) pgt_size = sprd_iommu_pgt_size(&dom->domain); dma_free_coherent(dom->sdev->dev, pgt_size, dom->pgt_va, dom->pgt_pa); - dom->sdev = NULL; sprd_iommu_hw_en(dom->sdev, false); + dom->sdev = NULL; } static void sprd_iommu_domain_free(struct iommu_domain *domain) diff --git a/drivers/irqchip/irq-imx-irqsteer.c b/drivers/irqchip/irq-imx-irqsteer.c index 20cf7a9e9ece..75a0e980ff35 100644 --- a/drivers/irqchip/irq-imx-irqsteer.c +++ b/drivers/irqchip/irq-imx-irqsteer.c @@ -36,6 +36,7 @@ struct irqsteer_data { int channel; struct irq_domain *domain; u32 *saved_reg; + struct device *dev; }; static int imx_irqsteer_get_reg_index(struct irqsteer_data *data, @@ -72,10 +73,26 @@ static void imx_irqsteer_irq_mask(struct irq_data *d) raw_spin_unlock_irqrestore(&data->lock, flags); } +static void imx_irqsteer_irq_bus_lock(struct irq_data *d) +{ + struct irqsteer_data *data = d->chip_data; + + pm_runtime_get_sync(data->dev); +} + +static void imx_irqsteer_irq_bus_sync_unlock(struct irq_data *d) +{ + struct irqsteer_data *data = d->chip_data; + + pm_runtime_put_autosuspend(data->dev); +} + static const struct irq_chip imx_irqsteer_irq_chip = { - .name = "irqsteer", - .irq_mask = imx_irqsteer_irq_mask, - .irq_unmask = imx_irqsteer_irq_unmask, + .name = "irqsteer", + .irq_mask = imx_irqsteer_irq_mask, + .irq_unmask = imx_irqsteer_irq_unmask, + .irq_bus_lock = imx_irqsteer_irq_bus_lock, + .irq_bus_sync_unlock = imx_irqsteer_irq_bus_sync_unlock, }; static int imx_irqsteer_irq_map(struct irq_domain *h, unsigned int irq, 
@@ -150,6 +167,7 @@ static int imx_irqsteer_probe(struct platform_device *pdev) if (!data) return -ENOMEM; + data->dev = &pdev->dev; data->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(data->regs)) { dev_err(&pdev->dev, "failed to initialize reg\n"); diff --git a/drivers/isdn/hardware/mISDN/hfcmulti.c b/drivers/isdn/hardware/mISDN/hfcmulti.c index 2e5cb9dde3ec..44383cec1f47 100644 --- a/drivers/isdn/hardware/mISDN/hfcmulti.c +++ b/drivers/isdn/hardware/mISDN/hfcmulti.c @@ -1900,7 +1900,7 @@ hfcmulti_dtmf(struct hfc_multi *hc) static void hfcmulti_tx(struct hfc_multi *hc, int ch) { - int i, ii, temp, len = 0; + int i, ii, temp, tmp_len, len = 0; int Zspace, z1, z2; /* must be int for calculation */ int Fspace, f1, f2; u_char *d; @@ -2121,14 +2121,15 @@ hfcmulti_tx(struct hfc_multi *hc, int ch) HFC_wait_nodebug(hc); } + tmp_len = (*sp)->len; dev_kfree_skb(*sp); /* check for next frame */ if (bch && get_next_bframe(bch)) { - len = (*sp)->len; + len = tmp_len; goto next_frame; } if (dch && get_next_dframe(dch)) { - len = (*sp)->len; + len = tmp_len; goto next_frame; } diff --git a/drivers/leds/flash/leds-mt6360.c b/drivers/leds/flash/leds-mt6360.c index 1b75b4d36834..4c74f1cf01f0 100644 --- a/drivers/leds/flash/leds-mt6360.c +++ b/drivers/leds/flash/leds-mt6360.c @@ -643,14 +643,17 @@ static int mt6360_init_isnk_properties(struct mt6360_led *led, ret = fwnode_property_read_u32(child, "reg", &reg); if (ret || reg > MT6360_LED_ISNK3 || - priv->leds_active & BIT(reg)) + priv->leds_active & BIT(reg)) { + fwnode_handle_put(child); return -EINVAL; + } ret = fwnode_property_read_u32(child, "color", &color); if (ret) { dev_err(priv->dev, "led %d, no color specified\n", led->led_no); + fwnode_handle_put(child); return ret; } diff --git a/drivers/leds/flash/leds-qcom-flash.c b/drivers/leds/flash/leds-qcom-flash.c index 7c99a3039171..bf70bf6fb0d5 100644 --- a/drivers/leds/flash/leds-qcom-flash.c +++ b/drivers/leds/flash/leds-qcom-flash.c @@ -505,6 +505,7 @@ qcom_flash_v4l2_init(struct device *dev, struct qcom_flash_led *led, struct fwno struct qcom_flash_data *flash_data = led->flash_data; struct v4l2_flash_config v4l2_cfg = { 0 }; struct led_flash_setting *intensity = &v4l2_cfg.intensity; + struct v4l2_flash *v4l2_flash; if (!(led->flash.led_cdev.flags & LED_DEV_CAP_FLASH)) return 0; @@ -523,9 +524,12 @@ qcom_flash_v4l2_init(struct device *dev, struct qcom_flash_led *led, struct fwno LED_FAULT_OVER_TEMPERATURE | LED_FAULT_TIMEOUT; - flash_data->v4l2_flash[flash_data->leds_count] = - v4l2_flash_init(dev, fwnode, &led->flash, &qcom_v4l2_flash_ops, &v4l2_cfg); - return PTR_ERR_OR_ZERO(flash_data->v4l2_flash); + v4l2_flash = v4l2_flash_init(dev, fwnode, &led->flash, &qcom_v4l2_flash_ops, &v4l2_cfg); + if (IS_ERR(v4l2_flash)) + return PTR_ERR(v4l2_flash); + + flash_data->v4l2_flash[flash_data->leds_count] = v4l2_flash; + return 0; } # else static int diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c index ba1be15cfd8e..c66d1bead0a4 100644 --- a/drivers/leds/led-class.c +++ b/drivers/leds/led-class.c @@ -258,7 +258,6 @@ struct led_classdev *of_led_get(struct device_node *np, int index) led_dev = class_find_device_by_of_node(&leds_class, led_node); of_node_put(led_node); - put_device(led_dev); return led_module_get(led_dev); } diff --git a/drivers/leds/led-triggers.c b/drivers/leds/led-triggers.c index b1b323b19301..6345c52afb56 100644 --- a/drivers/leds/led-triggers.c +++ b/drivers/leds/led-triggers.c @@ -179,9 +179,9 @@ int led_trigger_set(struct led_classdev *led_cdev, struct
led_trigger *trig) cancel_work_sync(&led_cdev->set_brightness_work); led_stop_software_blink(led_cdev); + device_remove_groups(led_cdev->dev, led_cdev->trigger->groups); if (led_cdev->trigger->deactivate) led_cdev->trigger->deactivate(led_cdev); - device_remove_groups(led_cdev->dev, led_cdev->trigger->groups); led_cdev->trigger = NULL; led_cdev->trigger_data = NULL; led_cdev->activated = false; @@ -194,6 +194,12 @@ int led_trigger_set(struct led_classdev *led_cdev, struct led_trigger *trig) spin_unlock(&trig->leddev_list_lock); led_cdev->trigger = trig; + /* + * If "set brightness to 0" is pending in workqueue, + * we don't want that to be reordered after ->activate() + */ + flush_work(&led_cdev->set_brightness_work); + ret = 0; if (trig->activate) ret = trig->activate(led_cdev); diff --git a/drivers/leds/leds-ss4200.c b/drivers/leds/leds-ss4200.c index fcaa34706b6c..2ef9fc7371bd 100644 --- a/drivers/leds/leds-ss4200.c +++ b/drivers/leds/leds-ss4200.c @@ -356,8 +356,10 @@ static int ich7_lpc_probe(struct pci_dev *dev, nas_gpio_pci_dev = dev; status = pci_read_config_dword(dev, PMBASE, &g_pm_io_base); - if (status) + if (status) { + status = pcibios_err_to_errno(status); goto out; + } g_pm_io_base &= 0x00000ff80; status = pci_read_config_dword(dev, GPIO_CTRL, &gc); @@ -369,8 +371,9 @@ static int ich7_lpc_probe(struct pci_dev *dev, } status = pci_read_config_dword(dev, GPIO_BASE, &nas_gpio_io_base); - if (0 > status) { + if (status) { dev_info(&dev->dev, "Unable to read GPIOBASE.\n"); + status = pcibios_err_to_errno(status); goto out; } dev_dbg(&dev->dev, ": GPIOBASE = 0x%08x\n", nas_gpio_io_base); diff --git a/drivers/leds/rgb/leds-qcom-lpg.c b/drivers/leds/rgb/leds-qcom-lpg.c index 9467c796bd04..e74b2ceed1c2 100644 --- a/drivers/leds/rgb/leds-qcom-lpg.c +++ b/drivers/leds/rgb/leds-qcom-lpg.c @@ -2,7 +2,7 @@ /* * Copyright (c) 2017-2022 Linaro Ltd * Copyright (c) 2010-2012, The Linux Foundation. All rights reserved. - * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2023-2024, Qualcomm Innovation Center, Inc. All rights reserved. 
*/ #include <linux/bits.h> #include <linux/bitfield.h> @@ -254,6 +254,9 @@ static int lpg_clear_pbs_trigger(struct lpg *lpg, unsigned int lut_mask) u8 val = 0; int rc; + if (!lpg->lpg_chan_sdam) + return 0; + lpg->pbs_en_bitmap &= (~lut_mask); if (!lpg->pbs_en_bitmap) { rc = nvmem_device_write(lpg->lpg_chan_sdam, SDAM_REG_PBS_SEQ_EN, 1, &val); @@ -276,6 +279,9 @@ static int lpg_set_pbs_trigger(struct lpg *lpg, unsigned int lut_mask) u8 val = PBS_SW_TRIG_BIT; int rc; + if (!lpg->lpg_chan_sdam) + return 0; + if (!lpg->pbs_en_bitmap) { rc = nvmem_device_write(lpg->lpg_chan_sdam, SDAM_REG_PBS_SEQ_EN, 1, &val); if (rc < 0) diff --git a/drivers/leds/trigger/ledtrig-timer.c b/drivers/leds/trigger/ledtrig-timer.c index b4688d1d9d2b..1d213c999d40 100644 --- a/drivers/leds/trigger/ledtrig-timer.c +++ b/drivers/leds/trigger/ledtrig-timer.c @@ -110,11 +110,6 @@ static int timer_trig_activate(struct led_classdev *led_cdev) led_cdev->flags &= ~LED_INIT_DEFAULT_TRIGGER; } - /* - * If "set brightness to 0" is pending in workqueue, we don't - * want that to be reordered after blink_set() - */ - flush_work(&led_cdev->set_brightness_work); led_blink_set(led_cdev, &led_cdev->blink_delay_on, &led_cdev->blink_delay_off); diff --git a/drivers/macintosh/therm_windtunnel.c b/drivers/macintosh/therm_windtunnel.c index 37cdc6931f6d..2576a53f247e 100644 --- a/drivers/macintosh/therm_windtunnel.c +++ b/drivers/macintosh/therm_windtunnel.c @@ -549,7 +549,7 @@ g4fan_exit( void ) platform_driver_unregister( &therm_of_driver ); if( x.of_dev ) - of_device_unregister( x.of_dev ); + of_platform_device_destroy(&x.of_dev->dev, NULL); } module_init(g4fan_init); diff --git a/drivers/mailbox/imx-mailbox.c b/drivers/mailbox/imx-mailbox.c index 933727f89431..d17efb1dd0cb 100644 --- a/drivers/mailbox/imx-mailbox.c +++ b/drivers/mailbox/imx-mailbox.c @@ -225,6 +225,8 @@ static int imx_mu_generic_tx(struct imx_mu_priv *priv, void *data) { u32 *arg = data; + u32 val; + int ret; switch (cp->type) { case IMX_MU_TYPE_TX: @@ -236,7 +238,13 @@ static int imx_mu_generic_tx(struct imx_mu_priv *priv, queue_work(system_bh_wq, &cp->txdb_work); break; case IMX_MU_TYPE_TXDB_V2: - imx_mu_xcr_rmw(priv, IMX_MU_GCR, IMX_MU_xCR_GIRn(priv->dcfg->type, cp->idx), 0); + imx_mu_write(priv, IMX_MU_xCR_GIRn(priv->dcfg->type, cp->idx), + priv->dcfg->xCR[IMX_MU_GCR]); + ret = readl_poll_timeout(priv->base + priv->dcfg->xCR[IMX_MU_GCR], val, + !(val & IMX_MU_xCR_GIRn(priv->dcfg->type, cp->idx)), + 0, 1000); + if (ret) + dev_warn_ratelimited(priv->dev, "channel type: %d failure\n", cp->type); break; default: dev_warn_ratelimited(priv->dev, "Send data on wrong channel type: %d\n", cp->type); diff --git a/drivers/mailbox/mtk-cmdq-mailbox.c b/drivers/mailbox/mtk-cmdq-mailbox.c index 4aa394e91109..63b5e3fe7528 100644 --- a/drivers/mailbox/mtk-cmdq-mailbox.c +++ b/drivers/mailbox/mtk-cmdq-mailbox.c @@ -662,12 +662,6 @@ static int cmdq_probe(struct platform_device *pdev) cmdq->mbox.chans[i].con_priv = (void *)&cmdq->thread[i]; } - err = devm_mbox_controller_register(dev, &cmdq->mbox); - if (err < 0) { - dev_err(dev, "failed to register mailbox: %d\n", err); - return err; - } - platform_set_drvdata(pdev, cmdq); WARN_ON(clk_bulk_prepare(cmdq->pdata->gce_num, cmdq->clocks)); @@ -695,6 +689,12 @@ static int cmdq_probe(struct platform_device *pdev) pm_runtime_set_autosuspend_delay(dev, CMDQ_MBOX_AUTOSUSPEND_DELAY_MS); pm_runtime_use_autosuspend(dev); + err = devm_mbox_controller_register(dev, &cmdq->mbox); + if (err < 0) { + dev_err(dev, "failed to register mailbox: %d\n", 
err); + return err; + } + return 0; } diff --git a/drivers/mailbox/omap-mailbox.c b/drivers/mailbox/omap-mailbox.c index 46747559b438..7a87424657a1 100644 --- a/drivers/mailbox/omap-mailbox.c +++ b/drivers/mailbox/omap-mailbox.c @@ -230,7 +230,8 @@ static int omap_mbox_startup(struct omap_mbox *mbox) int ret = 0; ret = request_threaded_irq(mbox->irq, NULL, mbox_interrupt, - IRQF_ONESHOT, mbox->name, mbox); + IRQF_SHARED | IRQF_ONESHOT, mbox->name, + mbox); if (unlikely(ret)) { pr_err("failed to register mailbox interrupt:%d\n", ret); return ret; diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index abe88d1e6735..b149ac46a990 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c @@ -4101,10 +4101,11 @@ static void raid_resume(struct dm_target *ti) if (mddev->delta_disks < 0) rs_set_capacity(rs); + mddev_lock_nointr(mddev); WARN_ON_ONCE(!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)); - WARN_ON_ONCE(test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)); + WARN_ON_ONCE(rcu_dereference_protected(mddev->sync_thread, + lockdep_is_held(&mddev->reconfig_mutex))); clear_bit(RT_FLAG_RS_FROZEN, &rs->runtime_flags); - mddev_lock_nointr(mddev); mddev->ro = 0; mddev->in_sync = 0; md_unfrozen_sync_thread(mddev); diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index b2d5246cff21..2fc847af254d 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -2028,10 +2028,7 @@ int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, dm_table_any_dev_attr(t, device_is_not_random, NULL)) blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q); - /* - * For a zoned target, setup the zones related queue attributes - * and resources necessary for zone append emulation if necessary. - */ + /* For a zoned table, setup the zone related queue attributes. */ if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) && limits->zoned) { r = dm_set_zones_restrictions(t, q, limits); if (r) @@ -2042,6 +2039,16 @@ int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, if (r) return r; + /* + * Now that the limits are set, check the zones mapped by the table + * and setup the resources for zone append emulation if necessary. + */ + if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) && limits->zoned) { + r = dm_revalidate_zones(t, q); + if (r) + return r; + } + dm_update_crypto_profile(q, t); /* diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c index bb5da66da4c1..431a61e1a8b7 100644 --- a/drivers/md/dm-verity-target.c +++ b/drivers/md/dm-verity-target.c @@ -1538,14 +1538,6 @@ static int verity_ctr(struct dm_target *ti, unsigned int argc, char **argv) return r; } -/* - * Check whether a DM target is a verity target. - */ -bool dm_is_verity_target(struct dm_target *ti) -{ - return ti->type->module == THIS_MODULE; -} - /* * Get the verity mode (error behavior) of a verity target. * @@ -1599,6 +1591,14 @@ static struct target_type verity_target = { }; module_dm(verity); +/* + * Check whether a DM target is a verity target. 
+ */ +bool dm_is_verity_target(struct dm_target *ti) +{ + return ti->type == &verity_target; +} + MODULE_AUTHOR("Mikulas Patocka <mpatocka@xxxxxxxxxx>"); MODULE_AUTHOR("Mandeep Baines <msb@xxxxxxxxxxxx>"); MODULE_AUTHOR("Will Drewry <wad@xxxxxxxxxxxx>"); diff --git a/drivers/md/dm-zone.c b/drivers/md/dm-zone.c index 5d66d916730e..75d0019a0649 100644 --- a/drivers/md/dm-zone.c +++ b/drivers/md/dm-zone.c @@ -166,14 +166,22 @@ static int dm_check_zoned_cb(struct blk_zone *zone, unsigned int idx, * blk_revalidate_disk_zones() function here as the mapped device is suspended * (this is called from __bind() context). */ -static int dm_revalidate_zones(struct mapped_device *md, struct dm_table *t) +int dm_revalidate_zones(struct dm_table *t, struct request_queue *q) { + struct mapped_device *md = t->md; struct gendisk *disk = md->disk; int ret; + if (!get_capacity(disk)) + return 0; + /* Revalidate only if something changed. */ - if (!disk->nr_zones || disk->nr_zones != md->nr_zones) + if (!disk->nr_zones || disk->nr_zones != md->nr_zones) { + DMINFO("%s using %s zone append", + disk->disk_name, + queue_emulates_zone_append(q) ? "emulated" : "native"); md->nr_zones = 0; + } if (md->nr_zones) return 0; @@ -240,9 +248,6 @@ int dm_set_zones_restrictions(struct dm_table *t, struct request_queue *q, lim->max_zone_append_sectors = 0; } - if (!get_capacity(md->disk)) - return 0; - /* * Count conventional zones to check that the mapped device will indeed * have sequential write required zones. @@ -269,16 +274,6 @@ int dm_set_zones_restrictions(struct dm_table *t, struct request_queue *q, return 0; } - if (!md->disk->nr_zones) { - DMINFO("%s using %s zone append", - md->disk->disk_name, - queue_emulates_zone_append(q) ? "emulated" : "native"); - } - - ret = dm_revalidate_zones(md, t); - if (ret < 0) - return ret; - if (!static_key_enabled(&zoned_enabled.key)) static_branch_enable(&zoned_enabled); return 0; diff --git a/drivers/md/dm.h b/drivers/md/dm.h index 53ef8207fe2c..c984ecb64b1e 100644 --- a/drivers/md/dm.h +++ b/drivers/md/dm.h @@ -103,6 +103,7 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t); */ int dm_set_zones_restrictions(struct dm_table *t, struct request_queue *q, struct queue_limits *lim); +int dm_revalidate_zones(struct dm_table *t, struct request_queue *q); void dm_zone_endio(struct dm_io *io, struct bio *clone); #ifdef CONFIG_BLK_DEV_ZONED int dm_blk_report_zones(struct gendisk *disk, sector_t sector, diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c index 0a2d37eb38ef..08232d8dc815 100644 --- a/drivers/md/md-bitmap.c +++ b/drivers/md/md-bitmap.c @@ -227,6 +227,8 @@ static int __write_sb_page(struct md_rdev *rdev, struct bitmap *bitmap, struct block_device *bdev; struct mddev *mddev = bitmap->mddev; struct bitmap_storage *store = &bitmap->storage; + unsigned int bitmap_limit = (bitmap->storage.file_pages - pg_index) << + PAGE_SHIFT; loff_t sboff, offset = mddev->bitmap_info.offset; sector_t ps = pg_index * PAGE_SIZE / SECTOR_SIZE; unsigned int size = PAGE_SIZE; @@ -269,11 +271,9 @@ static int __write_sb_page(struct md_rdev *rdev, struct bitmap *bitmap, if (size == 0) /* bitmap runs in to data */ return -EINVAL; - } else { - /* DATA METADATA BITMAP - no problems */ } - md_super_write(mddev, rdev, sboff + ps, (int) size, page); + md_super_write(mddev, rdev, sboff + ps, (int)min(size, bitmap_limit), page); return 0; } diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c index 8e36a0feec09..b5a802ae17bb 100644 --- a/drivers/md/md-cluster.c +++ 
b/drivers/md/md-cluster.c @@ -15,6 +15,7 @@ #define LVB_SIZE 64 #define NEW_DEV_TIMEOUT 5000 +#define WAIT_DLM_LOCK_TIMEOUT (30 * HZ) struct dlm_lock_resource { dlm_lockspace_t *ls; @@ -130,8 +131,13 @@ static int dlm_lock_sync(struct dlm_lock_resource *res, int mode) 0, sync_ast, res, res->bast); if (ret) return ret; - wait_event(res->sync_locking, res->sync_locking_done); + ret = wait_event_timeout(res->sync_locking, res->sync_locking_done, + WAIT_DLM_LOCK_TIMEOUT); res->sync_locking_done = false; + if (!ret) { + pr_err("locking DLM '%s' timeout!\n", res->name); + return -EBUSY; + } if (res->lksb.sb_status == 0) res->mode = mode; return res->lksb.sb_status; @@ -743,7 +749,7 @@ static void unlock_comm(struct md_cluster_info *cinfo) */ static int __sendmsg(struct md_cluster_info *cinfo, struct cluster_msg *cmsg) { - int error; + int error, unlock_error; int slot = cinfo->slot_number - 1; cmsg->slot = cpu_to_le32(slot); @@ -751,7 +757,7 @@ static int __sendmsg(struct md_cluster_info *cinfo, struct cluster_msg *cmsg) error = dlm_lock_sync(cinfo->message_lockres, DLM_LOCK_EX); if (error) { pr_err("md-cluster: failed to get EX on MESSAGE (%d)\n", error); - goto failed_message; + return error; } memcpy(cinfo->message_lockres->lksb.sb_lvbptr, (void *)cmsg, @@ -781,14 +787,10 @@ static int __sendmsg(struct md_cluster_info *cinfo, struct cluster_msg *cmsg) } failed_ack: - error = dlm_unlock_sync(cinfo->message_lockres); - if (unlikely(error != 0)) { + while ((unlock_error = dlm_unlock_sync(cinfo->message_lockres))) pr_err("md-cluster: failed convert to NL on MESSAGE(%d)\n", - error); - /* in case the message can't be released due to some reason */ - goto failed_ack; - } -failed_message: + unlock_error); + return error; } diff --git a/drivers/md/md.c b/drivers/md/md.c index aff9118ff697..9c5be016e507 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -550,13 +550,9 @@ static void md_end_flush(struct bio *bio) rdev_dec_pending(rdev, mddev); - if (atomic_dec_and_test(&mddev->flush_pending)) { - /* The pair is percpu_ref_get() from md_flush_request() */ - percpu_ref_put(&mddev->active_io); - + if (atomic_dec_and_test(&mddev->flush_pending)) /* The pre-request flush has finished */ queue_work(md_wq, &mddev->flush_work); - } } static void md_submit_flush_data(struct work_struct *ws); @@ -587,12 +583,8 @@ static void submit_flushes(struct work_struct *ws) rcu_read_lock(); } rcu_read_unlock(); - if (atomic_dec_and_test(&mddev->flush_pending)) { - /* The pair is percpu_ref_get() from md_flush_request() */ - percpu_ref_put(&mddev->active_io); - + if (atomic_dec_and_test(&mddev->flush_pending)) queue_work(md_wq, &mddev->flush_work); - } } static void md_submit_flush_data(struct work_struct *ws) @@ -617,8 +609,20 @@ static void md_submit_flush_data(struct work_struct *ws) bio_endio(bio); } else { bio->bi_opf &= ~REQ_PREFLUSH; - md_handle_request(mddev, bio); + + /* + * make_requst() will never return error here, it only + * returns error in raid5_make_request() by dm-raid. + * Since dm always splits data and flush operation into + * two separate io, io size of flush submitted by dm + * always is 0, make_request() will not be called here. 
+ */ + if (WARN_ON_ONCE(!mddev->pers->make_request(mddev, bio))) + bio_io_error(bio); } + + /* The pair is percpu_ref_get() from md_flush_request() */ + percpu_ref_put(&mddev->active_io); } /* @@ -7742,12 +7746,6 @@ static int md_ioctl(struct block_device *bdev, blk_mode_t mode, return get_bitmap_file(mddev, argp); } - if (cmd == HOT_REMOVE_DISK) - /* need to ensure recovery thread has run */ - wait_event_interruptible_timeout(mddev->sb_wait, - !test_bit(MD_RECOVERY_NEEDED, - &mddev->recovery), - msecs_to_jiffies(5000)); if (cmd == STOP_ARRAY || cmd == STOP_ARRAY_RO) { /* Need to flush page cache, and ensure no-one else opens * and writes diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c index c5d4aeb68404..81c01347cd24 100644 --- a/drivers/md/raid0.c +++ b/drivers/md/raid0.c @@ -365,18 +365,13 @@ static sector_t raid0_size(struct mddev *mddev, sector_t sectors, int raid_disks return array_sectors; } -static void free_conf(struct mddev *mddev, struct r0conf *conf) -{ - kfree(conf->strip_zone); - kfree(conf->devlist); - kfree(conf); -} - static void raid0_free(struct mddev *mddev, void *priv) { struct r0conf *conf = priv; - free_conf(mddev, conf); + kfree(conf->strip_zone); + kfree(conf->devlist); + kfree(conf); } static int raid0_set_limits(struct mddev *mddev) @@ -415,7 +410,7 @@ static int raid0_run(struct mddev *mddev) if (!mddev_is_dm(mddev)) { ret = raid0_set_limits(mddev); if (ret) - goto out_free_conf; + return ret; } /* calculate array device size */ @@ -427,13 +422,7 @@ static int raid0_run(struct mddev *mddev) dump_zones(mddev); - ret = md_integrity_register(mddev); - if (ret) - goto out_free_conf; - return 0; -out_free_conf: - free_conf(mddev, conf); - return ret; + return md_integrity_register(mddev); } /* diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 7b8a71ca66dd..22bbd06ba6a2 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -680,6 +680,7 @@ static int choose_slow_rdev(struct r1conf *conf, struct r1bio *r1_bio, len = r1_bio->sectors; read_len = raid1_check_read_range(rdev, this_sector, &len); if (read_len == r1_bio->sectors) { + *max_sectors = read_len; update_read_sectors(conf, disk, this_sector, read_len); return disk; } @@ -3204,7 +3205,6 @@ static int raid1_set_limits(struct mddev *mddev) return queue_limits_set(mddev->gendisk->queue, &lim); } -static void raid1_free(struct mddev *mddev, void *priv); static int raid1_run(struct mddev *mddev) { struct r1conf *conf; @@ -3238,7 +3238,7 @@ static int raid1_run(struct mddev *mddev) if (!mddev_is_dm(mddev)) { ret = raid1_set_limits(mddev); if (ret) - goto abort; + return ret; } mddev->degraded = 0; @@ -3252,8 +3252,7 @@ static int raid1_run(struct mddev *mddev) */ if (conf->raid_disks - mddev->degraded < 1) { md_unregister_thread(mddev, &conf->thread); - ret = -EINVAL; - goto abort; + return -EINVAL; } if (conf->raid_disks - mddev->degraded == 1) @@ -3277,14 +3276,8 @@ static int raid1_run(struct mddev *mddev) md_set_array_sectors(mddev, raid1_size(mddev, 0, 0)); ret = md_integrity_register(mddev); - if (ret) { + if (ret) md_unregister_thread(mddev, &mddev->thread); - goto abort; - } - return 0; - -abort: - raid1_free(mddev, conf); return ret; } diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 2bd1ce9b3922..1c6b58adec13 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -155,7 +155,7 @@ static int raid6_idx_to_slot(int idx, struct stripe_head *sh, return slot; } -static void print_raid5_conf (struct r5conf *conf); +static void print_raid5_conf(struct r5conf *conf); static int
stripe_operations_active(struct stripe_head *sh) { @@ -5899,6 +5899,39 @@ static int add_all_stripe_bios(struct r5conf *conf, return ret; } +enum reshape_loc { + LOC_NO_RESHAPE, + LOC_AHEAD_OF_RESHAPE, + LOC_INSIDE_RESHAPE, + LOC_BEHIND_RESHAPE, +}; + +static enum reshape_loc get_reshape_loc(struct mddev *mddev, + struct r5conf *conf, sector_t logical_sector) +{ + sector_t reshape_progress, reshape_safe; + /* + * Spinlock is needed as reshape_progress may be + * 64bit on a 32bit platform, and so it might be + * possible to see a half-updated value + * Of course reshape_progress could change after + * the lock is dropped, so once we get a reference + * to the stripe that we think it is, we will have + * to check again. + */ + spin_lock_irq(&conf->device_lock); + reshape_progress = conf->reshape_progress; + reshape_safe = conf->reshape_safe; + spin_unlock_irq(&conf->device_lock); + if (reshape_progress == MaxSector) + return LOC_NO_RESHAPE; + if (ahead_of_reshape(mddev, logical_sector, reshape_progress)) + return LOC_AHEAD_OF_RESHAPE; + if (ahead_of_reshape(mddev, logical_sector, reshape_safe)) + return LOC_INSIDE_RESHAPE; + return LOC_BEHIND_RESHAPE; +} + static enum stripe_result make_stripe_request(struct mddev *mddev, struct r5conf *conf, struct stripe_request_ctx *ctx, sector_t logical_sector, struct bio *bi) @@ -5913,28 +5946,14 @@ static enum stripe_result make_stripe_request(struct mddev *mddev, seq = read_seqcount_begin(&conf->gen_lock); if (unlikely(conf->reshape_progress != MaxSector)) { - /* - * Spinlock is needed as reshape_progress may be - * 64bit on a 32bit platform, and so it might be - * possible to see a half-updated value - * Of course reshape_progress could change after - * the lock is dropped, so once we get a reference - * to the stripe that we think it is, we will have - * to check again. 
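/*
 * Illustrative sketch, not part of the patch: how the new get_reshape_loc()
 * helper is consumed in the hunks that follow.  Callers no longer take
 * conf->device_lock themselves; they classify the logical sector once and map
 * the answer onto their existing control flow (the REQ_NOWAIT check in
 * raid5_make_request() below uses the same classification).  The helper name
 * is hypothetical.
 */
static enum stripe_result handle_reshape_loc_sketch(struct mddev *mddev,
						    struct r5conf *conf,
						    sector_t logical_sector,
						    int *previous)
{
	switch (get_reshape_loc(mddev, conf, logical_sector)) {
	case LOC_INSIDE_RESHAPE:
		/* This region is being reshaped right now: retry later. */
		return STRIPE_SCHEDULE_AND_RETRY;
	case LOC_AHEAD_OF_RESHAPE:
		/* Not reached by the reshape yet: use the old geometry. */
		*previous = 1;
		break;
	case LOC_NO_RESHAPE:
	case LOC_BEHIND_RESHAPE:
		/* No reshape, or already reshaped: use the new geometry. */
		break;
	}
	return STRIPE_SUCCESS;
}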
- */ - spin_lock_irq(&conf->device_lock); - if (ahead_of_reshape(mddev, logical_sector, - conf->reshape_progress)) { - previous = 1; - } else { - if (ahead_of_reshape(mddev, logical_sector, - conf->reshape_safe)) { - spin_unlock_irq(&conf->device_lock); - ret = STRIPE_SCHEDULE_AND_RETRY; - goto out; - } + enum reshape_loc loc = get_reshape_loc(mddev, conf, + logical_sector); + if (loc == LOC_INSIDE_RESHAPE) { + ret = STRIPE_SCHEDULE_AND_RETRY; + goto out; } - spin_unlock_irq(&conf->device_lock); + if (loc == LOC_AHEAD_OF_RESHAPE) + previous = 1; } new_sector = raid5_compute_sector(conf, logical_sector, previous, @@ -6113,8 +6132,7 @@ static bool raid5_make_request(struct mddev *mddev, struct bio * bi) /* Bail out if conflicts with reshape and REQ_NOWAIT is set */ if ((bi->bi_opf & REQ_NOWAIT) && (conf->reshape_progress != MaxSector) && - !ahead_of_reshape(mddev, logical_sector, conf->reshape_progress) && - ahead_of_reshape(mddev, logical_sector, conf->reshape_safe)) { + get_reshape_loc(mddev, conf, logical_sector) == LOC_INSIDE_RESHAPE) { bio_wouldblock_error(bi); if (rw == WRITE) md_write_end(mddev); @@ -7562,11 +7580,11 @@ static struct r5conf *setup_conf(struct mddev *mddev) if (test_bit(Replacement, &rdev->flags)) { if (disk->replacement) goto abort; - RCU_INIT_POINTER(disk->replacement, rdev); + disk->replacement = rdev; } else { if (disk->rdev) goto abort; - RCU_INIT_POINTER(disk->rdev, rdev); + disk->rdev = rdev; } if (test_bit(In_sync, &rdev->flags)) { @@ -8048,7 +8066,7 @@ static void raid5_status(struct seq_file *seq, struct mddev *mddev) seq_printf (seq, "]"); } -static void print_raid5_conf (struct r5conf *conf) +static void print_raid5_conf(struct r5conf *conf) { struct md_rdev *rdev; int i; @@ -8062,15 +8080,13 @@ static void print_raid5_conf (struct r5conf *conf) conf->raid_disks, conf->raid_disks - conf->mddev->degraded); - rcu_read_lock(); for (i = 0; i < conf->raid_disks; i++) { - rdev = rcu_dereference(conf->disks[i].rdev); + rdev = conf->disks[i].rdev; if (rdev) pr_debug(" disk %d, o:%d, dev:%pg\n", i, !test_bit(Faulty, &rdev->flags), rdev->bdev); } - rcu_read_unlock(); } static int raid5_spare_active(struct mddev *mddev) diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig index c6d3ee472d81..742bc665602e 100644 --- a/drivers/media/i2c/Kconfig +++ b/drivers/media/i2c/Kconfig @@ -679,6 +679,7 @@ config VIDEO_THP7312 tristate "THine THP7312 support" depends on I2C select FW_LOADER + select FW_UPLOAD select MEDIA_CONTROLLER select V4L2_CCI_I2C select V4L2_FWNODE diff --git a/drivers/media/i2c/alvium-csi2.c b/drivers/media/i2c/alvium-csi2.c index e65702e3f73e..272d892da56e 100644 --- a/drivers/media/i2c/alvium-csi2.c +++ b/drivers/media/i2c/alvium-csi2.c @@ -1962,7 +1962,7 @@ static int alvium_g_volatile_ctrl(struct v4l2_ctrl *ctrl) int val; switch (ctrl->id) { - case V4L2_CID_GAIN: + case V4L2_CID_ANALOGUE_GAIN: val = alvium_get_gain(alvium); if (val < 0) return val; @@ -1994,7 +1994,7 @@ static int alvium_s_ctrl(struct v4l2_ctrl *ctrl) return 0; switch (ctrl->id) { - case V4L2_CID_GAIN: + case V4L2_CID_ANALOGUE_GAIN: ret = alvium_set_ctrl_gain(alvium, ctrl->val); break; case V4L2_CID_AUTOGAIN: @@ -2123,7 +2123,7 @@ static int alvium_ctrl_init(struct alvium_dev *alvium) if (alvium->avail_ft.gain) { ctrls->gain = v4l2_ctrl_new_std(hdl, ops, - V4L2_CID_GAIN, + V4L2_CID_ANALOGUE_GAIN, alvium->min_gain, alvium->max_gain, alvium->inc_gain, diff --git a/drivers/media/i2c/hi846.c b/drivers/media/i2c/hi846.c index 9c565ec033d4..52d9ca68a86c 100644 --- 
a/drivers/media/i2c/hi846.c +++ b/drivers/media/i2c/hi846.c @@ -1851,7 +1851,7 @@ static int hi846_get_selection(struct v4l2_subdev *sd, mutex_lock(&hi846->mutex); switch (sel->which) { case V4L2_SUBDEV_FORMAT_TRY: - v4l2_subdev_state_get_crop(sd_state, sel->pad); + sel->r = *v4l2_subdev_state_get_crop(sd_state, sel->pad); break; case V4L2_SUBDEV_FORMAT_ACTIVE: sel->r = hi846->cur_mode->crop; diff --git a/drivers/media/i2c/imx219.c b/drivers/media/i2c/imx219.c index 51ebf5453fce..e78a80b2bb2e 100644 --- a/drivers/media/i2c/imx219.c +++ b/drivers/media/i2c/imx219.c @@ -162,8 +162,8 @@ static const struct cci_reg_sequence imx219_common_regs[] = { { IMX219_REG_MODE_SELECT, 0x00 }, /* Mode Select */ /* To Access Addresses 3000-5fff, send the following commands */ - { CCI_REG8(0x30eb), 0x0c }, { CCI_REG8(0x30eb), 0x05 }, + { CCI_REG8(0x30eb), 0x0c }, { CCI_REG8(0x300a), 0xff }, { CCI_REG8(0x300b), 0xff }, { CCI_REG8(0x30eb), 0x05 }, diff --git a/drivers/media/i2c/imx412.c b/drivers/media/i2c/imx412.c index 0efce329525e..7d1f7af0a9df 100644 --- a/drivers/media/i2c/imx412.c +++ b/drivers/media/i2c/imx412.c @@ -542,14 +542,13 @@ static int imx412_update_controls(struct imx412 *imx412, */ static int imx412_update_exp_gain(struct imx412 *imx412, u32 exposure, u32 gain) { - u32 lpfr, shutter; + u32 lpfr; int ret; lpfr = imx412->vblank + imx412->cur_mode->height; - shutter = lpfr - exposure; - dev_dbg(imx412->dev, "Set exp %u, analog gain %u, shutter %u, lpfr %u", - exposure, gain, shutter, lpfr); + dev_dbg(imx412->dev, "Set exp %u, analog gain %u, lpfr %u", + exposure, gain, lpfr); ret = imx412_write_reg(imx412, IMX412_REG_HOLD, 1, 1); if (ret) @@ -559,7 +558,7 @@ static int imx412_update_exp_gain(struct imx412 *imx412, u32 exposure, u32 gain) if (ret) goto error_release_group_hold; - ret = imx412_write_reg(imx412, IMX412_REG_EXPOSURE_CIT, 2, shutter); + ret = imx412_write_reg(imx412, IMX412_REG_EXPOSURE_CIT, 2, exposure); if (ret) goto error_release_group_hold; diff --git a/drivers/media/pci/intel/ivsc/mei_csi.c b/drivers/media/pci/intel/ivsc/mei_csi.c index f04a89584334..16791a7f4f15 100644 --- a/drivers/media/pci/intel/ivsc/mei_csi.c +++ b/drivers/media/pci/intel/ivsc/mei_csi.c @@ -126,6 +126,8 @@ struct mei_csi { struct v4l2_ctrl_handler ctrl_handler; struct v4l2_ctrl *freq_ctrl; struct v4l2_ctrl *privacy_ctrl; + /* lock for v4l2 controls */ + struct mutex ctrl_lock; unsigned int remote_pad; /* start streaming or not */ int streaming; @@ -190,7 +192,11 @@ static int mei_csi_send(struct mei_csi *csi, u8 *buf, size_t len) /* command response status */ ret = csi->cmd_response.status; - if (ret) { + if (ret == -1) { + /* notify privacy on instead of reporting error */ + ret = 0; + v4l2_ctrl_s_ctrl(csi->privacy_ctrl, 1); + } else if (ret) { ret = -EINVAL; goto out; } @@ -559,11 +565,13 @@ static int mei_csi_init_controls(struct mei_csi *csi) u32 max; int ret; + mutex_init(&csi->ctrl_lock); + ret = v4l2_ctrl_handler_init(&csi->ctrl_handler, 2); if (ret) return ret; - csi->ctrl_handler.lock = &csi->lock; + csi->ctrl_handler.lock = &csi->ctrl_lock; max = ARRAY_SIZE(link_freq_menu_items) - 1; csi->freq_ctrl = v4l2_ctrl_new_int_menu(&csi->ctrl_handler, @@ -755,6 +763,7 @@ static int mei_csi_probe(struct mei_cl_device *cldev, err_ctrl_handler: v4l2_ctrl_handler_free(&csi->ctrl_handler); + mutex_destroy(&csi->ctrl_lock); v4l2_async_nf_unregister(&csi->notifier); v4l2_async_nf_cleanup(&csi->notifier); @@ -774,6 +783,7 @@ static void mei_csi_remove(struct mei_cl_device *cldev) 
v4l2_async_nf_unregister(&csi->notifier); v4l2_async_nf_cleanup(&csi->notifier); v4l2_ctrl_handler_free(&csi->ctrl_handler); + mutex_destroy(&csi->ctrl_lock); v4l2_async_unregister_subdev(&csi->subdev); v4l2_subdev_cleanup(&csi->subdev); media_entity_cleanup(&csi->subdev.entity); diff --git a/drivers/media/pci/ivtv/ivtv-udma.c b/drivers/media/pci/ivtv/ivtv-udma.c index 99b9f55ca829..f467a00492f4 100644 --- a/drivers/media/pci/ivtv/ivtv-udma.c +++ b/drivers/media/pci/ivtv/ivtv-udma.c @@ -131,6 +131,8 @@ int ivtv_udma_setup(struct ivtv *itv, unsigned long ivtv_dest_addr, /* Fill SG List with new values */ if (ivtv_udma_fill_sg_list(dma, &user_dma, 0) < 0) { + IVTV_DEBUG_WARN("%s: could not allocate bounce buffers for highmem userspace buffers\n", + __func__); unpin_user_pages(dma->map, dma->page_count); dma->page_count = 0; return -ENOMEM; @@ -139,6 +141,12 @@ int ivtv_udma_setup(struct ivtv *itv, unsigned long ivtv_dest_addr, /* Map SG List */ dma->SG_length = dma_map_sg(&itv->pdev->dev, dma->SGlist, dma->page_count, DMA_TO_DEVICE); + if (!dma->SG_length) { + IVTV_DEBUG_WARN("%s: DMA map error, SG_length is 0\n", __func__); + unpin_user_pages(dma->map, dma->page_count); + dma->page_count = 0; + return -EINVAL; + } /* Fill SG Array with new values */ ivtv_udma_fill_sg_array (dma, ivtv_dest_addr, 0, -1); diff --git a/drivers/media/pci/ivtv/ivtv-yuv.c b/drivers/media/pci/ivtv/ivtv-yuv.c index 582146f8d70d..2d9274537725 100644 --- a/drivers/media/pci/ivtv/ivtv-yuv.c +++ b/drivers/media/pci/ivtv/ivtv-yuv.c @@ -114,6 +114,12 @@ static int ivtv_yuv_prep_user_dma(struct ivtv *itv, struct ivtv_user_dma *dma, } dma->SG_length = dma_map_sg(&itv->pdev->dev, dma->SGlist, dma->page_count, DMA_TO_DEVICE); + if (!dma->SG_length) { + IVTV_DEBUG_WARN("%s: DMA map error, SG_length is 0\n", __func__); + unpin_user_pages(dma->map, dma->page_count); + dma->page_count = 0; + return -EINVAL; + } /* Fill SG Array with new values */ ivtv_udma_fill_sg_array(dma, y_buffer_offset, uv_buffer_offset, y_size); diff --git a/drivers/media/pci/ivtv/ivtvfb.c b/drivers/media/pci/ivtv/ivtvfb.c index 410477e3e621..d1ab7fee0d05 100644 --- a/drivers/media/pci/ivtv/ivtvfb.c +++ b/drivers/media/pci/ivtv/ivtvfb.c @@ -281,10 +281,10 @@ static int ivtvfb_prep_dec_dma_to_device(struct ivtv *itv, /* Map User DMA */ if (ivtv_udma_setup(itv, ivtv_dest_addr, userbuf, size_in_bytes) <= 0) { mutex_unlock(&itv->udma.lock); - IVTVFB_WARN("ivtvfb_prep_dec_dma_to_device, Error with pin_user_pages: %d bytes, %d pages returned\n", - size_in_bytes, itv->udma.page_count); + IVTVFB_WARN("%s, Error in ivtv_udma_setup: %d bytes, %d pages returned\n", + __func__, size_in_bytes, itv->udma.page_count); - /* pin_user_pages must have failed completely */ + /* pin_user_pages or DMA must have failed completely */ return -EIO; } diff --git a/drivers/media/pci/saa7134/saa7134-dvb.c b/drivers/media/pci/saa7134/saa7134-dvb.c index 9c6cfef03331..a66df6adfaad 100644 --- a/drivers/media/pci/saa7134/saa7134-dvb.c +++ b/drivers/media/pci/saa7134/saa7134-dvb.c @@ -466,7 +466,9 @@ static int philips_europa_tuner_sleep(struct dvb_frontend *fe) /* switch the board to analog mode */ if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 1); - i2c_transfer(&dev->i2c_adap, &analog_msg, 1); + if (i2c_transfer(&dev->i2c_adap, &analog_msg, 1) != 1) + return -EIO; + return 0; } @@ -1018,7 +1020,9 @@ static int md8800_set_voltage2(struct dvb_frontend *fe, else wbuf[1] = rbuf & 0xef; msg[0].len = 2; - i2c_transfer(&dev->i2c_adap, msg, 1); + if (i2c_transfer(&dev->i2c_adap, msg, 1) != 
1) + return -EIO; + return 0; } diff --git a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp8_if.c b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp8_if.c index 4bc89c8644fe..5f848691cea4 100644 --- a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp8_if.c +++ b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp8_if.c @@ -449,7 +449,7 @@ static int vdec_vp8_decode(void *h_vdec, struct mtk_vcodec_mem *bs, inst->frm_cnt, y_fb_dma, c_fb_dma, fb); inst->cur_fb = fb; - dec->bs_dma = (uint64_t)bs->dma_addr; + dec->bs_dma = bs->dma_addr; dec->bs_sz = bs->size; dec->cur_y_fb_dma = y_fb_dma; dec->cur_c_fb_dma = c_fb_dma; diff --git a/drivers/media/platform/mediatek/vcodec/decoder/vdec_vpu_if.c b/drivers/media/platform/mediatek/vcodec/decoder/vdec_vpu_if.c index da6be556727b..145958206e38 100644 --- a/drivers/media/platform/mediatek/vcodec/decoder/vdec_vpu_if.c +++ b/drivers/media/platform/mediatek/vcodec/decoder/vdec_vpu_if.c @@ -233,6 +233,12 @@ int vpu_dec_init(struct vdec_vpu_inst *vpu) mtk_vdec_debug(vpu->ctx, "vdec_inst=%p", vpu); err = vcodec_vpu_send_msg(vpu, (void *)&msg, sizeof(msg)); + + if (IS_ERR_OR_NULL(vpu->vsi)) { + mtk_vdec_err(vpu->ctx, "invalid vdec vsi, status=%d", err); + return -EINVAL; + } + mtk_vdec_debug(vpu->ctx, "- ret=%d", err); return err; } diff --git a/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c b/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c index cc97790ed30f..b1300f15e502 100644 --- a/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c +++ b/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c @@ -1634,6 +1634,9 @@ static int mxc_jpeg_start_streaming(struct vb2_queue *q, unsigned int count) dev_dbg(ctx->mxc_jpeg->dev, "Start streaming ctx=%p", ctx); q_data->sequence = 0; + if (V4L2_TYPE_IS_CAPTURE(q->type)) + ctx->need_initial_source_change_evt = false; + ret = pm_runtime_resume_and_get(ctx->mxc_jpeg->dev); if (ret < 0) { dev_err(ctx->mxc_jpeg->dev, "Failed to power up jpeg\n"); diff --git a/drivers/media/platform/nxp/imx-pxp.c b/drivers/media/platform/nxp/imx-pxp.c index e62dc5c1a4ae..e4427e6487fb 100644 --- a/drivers/media/platform/nxp/imx-pxp.c +++ b/drivers/media/platform/nxp/imx-pxp.c @@ -1805,6 +1805,9 @@ static int pxp_probe(struct platform_device *pdev) return PTR_ERR(mmio); dev->regmap = devm_regmap_init_mmio(&pdev->dev, mmio, &pxp_regmap_config); + if (IS_ERR(dev->regmap)) + return dev_err_probe(&pdev->dev, PTR_ERR(dev->regmap), + "Failed to init regmap\n"); irq = platform_get_irq(pdev, 0); if (irq < 0) diff --git a/drivers/media/platform/qcom/venus/vdec.c b/drivers/media/platform/qcom/venus/vdec.c index 29130a9441e7..d12089370d91 100644 --- a/drivers/media/platform/qcom/venus/vdec.c +++ b/drivers/media/platform/qcom/venus/vdec.c @@ -1255,7 +1255,7 @@ static int vdec_stop_output(struct venus_inst *inst) break; case VENUS_DEC_STATE_INIT: case VENUS_DEC_STATE_CAPTURE_SETUP: - ret = hfi_session_flush(inst, HFI_FLUSH_INPUT, true); + ret = hfi_session_flush(inst, HFI_FLUSH_ALL, true); break; default: break; @@ -1747,6 +1747,7 @@ static int vdec_close(struct file *file) vdec_pm_get(inst); + cancel_work_sync(&inst->delayed_process_work); v4l2_m2m_ctx_release(inst->m2m_ctx); v4l2_m2m_release(inst->m2m_dev); vdec_ctrl_deinit(inst); diff --git a/drivers/media/platform/renesas/rcar-csi2.c b/drivers/media/platform/renesas/rcar-csi2.c index 582d5e35db0e..2d464e43a5be 100644 --- a/drivers/media/platform/renesas/rcar-csi2.c +++ b/drivers/media/platform/renesas/rcar-csi2.c @@ -1914,12 +1914,14 @@ static int rcsi2_probe(struct 
platform_device *pdev) ret = v4l2_async_register_subdev(&priv->subdev); if (ret < 0) - goto error_async; + goto error_pm_runtime; dev_info(priv->dev, "%d lanes found\n", priv->lanes); return 0; +error_pm_runtime: + pm_runtime_disable(&pdev->dev); error_async: v4l2_async_nf_unregister(&priv->notifier); v4l2_async_nf_cleanup(&priv->notifier); @@ -1936,6 +1938,7 @@ static void rcsi2_remove(struct platform_device *pdev) v4l2_async_nf_unregister(&priv->notifier); v4l2_async_nf_cleanup(&priv->notifier); v4l2_async_unregister_subdev(&priv->subdev); + v4l2_subdev_cleanup(&priv->subdev); pm_runtime_disable(&pdev->dev); diff --git a/drivers/media/platform/renesas/rcar-vin/rcar-dma.c b/drivers/media/platform/renesas/rcar-vin/rcar-dma.c index e2c40abc6d3d..21d5b2815e86 100644 --- a/drivers/media/platform/renesas/rcar-vin/rcar-dma.c +++ b/drivers/media/platform/renesas/rcar-vin/rcar-dma.c @@ -742,12 +742,22 @@ static int rvin_setup(struct rvin_dev *vin) */ switch (vin->mbus_code) { case MEDIA_BUS_FMT_YUYV8_1X16: - /* BT.601/BT.1358 16bit YCbCr422 */ - vnmc |= VNMC_INF_YUV16; + if (vin->is_csi) + /* YCbCr422 8-bit */ + vnmc |= VNMC_INF_YUV8_BT601; + else + /* BT.601/BT.1358 16bit YCbCr422 */ + vnmc |= VNMC_INF_YUV16; input_is_yuv = true; break; case MEDIA_BUS_FMT_UYVY8_1X16: - vnmc |= VNMC_INF_YUV16 | VNMC_YCAL; + if (vin->is_csi) + /* YCbCr422 8-bit */ + vnmc |= VNMC_INF_YUV8_BT601; + else + /* BT.601/BT.1358 16bit YCbCr422 */ + vnmc |= VNMC_INF_YUV16; + vnmc |= VNMC_YCAL; input_is_yuv = true; break; case MEDIA_BUS_FMT_UYVY8_2X8: diff --git a/drivers/media/platform/renesas/vsp1/vsp1_histo.c b/drivers/media/platform/renesas/vsp1/vsp1_histo.c index 71155282ca11..cd1c8778662e 100644 --- a/drivers/media/platform/renesas/vsp1/vsp1_histo.c +++ b/drivers/media/platform/renesas/vsp1/vsp1_histo.c @@ -36,9 +36,8 @@ struct vsp1_histogram_buffer * vsp1_histogram_buffer_get(struct vsp1_histogram *histo) { struct vsp1_histogram_buffer *buf = NULL; - unsigned long flags; - spin_lock_irqsave(&histo->irqlock, flags); + spin_lock(&histo->irqlock); if (list_empty(&histo->irqqueue)) goto done; @@ -49,7 +48,7 @@ vsp1_histogram_buffer_get(struct vsp1_histogram *histo) histo->readout = true; done: - spin_unlock_irqrestore(&histo->irqlock, flags); + spin_unlock(&histo->irqlock); return buf; } @@ -58,7 +57,6 @@ void vsp1_histogram_buffer_complete(struct vsp1_histogram *histo, size_t size) { struct vsp1_pipeline *pipe = histo->entity.pipe; - unsigned long flags; /* * The pipeline pointer is guaranteed to be valid as this function is @@ -70,10 +68,10 @@ void vsp1_histogram_buffer_complete(struct vsp1_histogram *histo, vb2_set_plane_payload(&buf->buf.vb2_buf, 0, size); vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_DONE); - spin_lock_irqsave(&histo->irqlock, flags); + spin_lock(&histo->irqlock); histo->readout = false; wake_up(&histo->wait_queue); - spin_unlock_irqrestore(&histo->irqlock, flags); + spin_unlock(&histo->irqlock); } /* ----------------------------------------------------------------------------- @@ -124,11 +122,10 @@ static void histo_buffer_queue(struct vb2_buffer *vb) struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); struct vsp1_histogram *histo = vb2_get_drv_priv(vb->vb2_queue); struct vsp1_histogram_buffer *buf = to_vsp1_histogram_buffer(vbuf); - unsigned long flags; - spin_lock_irqsave(&histo->irqlock, flags); + spin_lock_irq(&histo->irqlock); list_add_tail(&buf->queue, &histo->irqqueue); - spin_unlock_irqrestore(&histo->irqlock, flags); + spin_unlock_irq(&histo->irqlock); } static int 
histo_start_streaming(struct vb2_queue *vq, unsigned int count) @@ -140,9 +137,8 @@ static void histo_stop_streaming(struct vb2_queue *vq) { struct vsp1_histogram *histo = vb2_get_drv_priv(vq); struct vsp1_histogram_buffer *buffer; - unsigned long flags; - spin_lock_irqsave(&histo->irqlock, flags); + spin_lock_irq(&histo->irqlock); /* Remove all buffers from the IRQ queue. */ list_for_each_entry(buffer, &histo->irqqueue, queue) @@ -152,7 +148,7 @@ static void histo_stop_streaming(struct vb2_queue *vq) /* Wait for the buffer being read out (if any) to complete. */ wait_event_lock_irq(histo->wait_queue, !histo->readout, histo->irqlock); - spin_unlock_irqrestore(&histo->irqlock, flags); + spin_unlock_irq(&histo->irqlock); } static const struct vb2_ops histo_video_queue_qops = { diff --git a/drivers/media/platform/renesas/vsp1/vsp1_pipe.h b/drivers/media/platform/renesas/vsp1/vsp1_pipe.h index 674b5748d929..85ecd53cda49 100644 --- a/drivers/media/platform/renesas/vsp1/vsp1_pipe.h +++ b/drivers/media/platform/renesas/vsp1/vsp1_pipe.h @@ -73,7 +73,7 @@ struct vsp1_partition_window { * @wpf: The WPF partition window configuration */ struct vsp1_partition { - struct vsp1_partition_window rpf; + struct vsp1_partition_window rpf[VSP1_MAX_RPF]; struct vsp1_partition_window uds_sink; struct vsp1_partition_window uds_source; struct vsp1_partition_window sru; diff --git a/drivers/media/platform/renesas/vsp1/vsp1_rpf.c b/drivers/media/platform/renesas/vsp1/vsp1_rpf.c index c47579efc65f..6055554fb071 100644 --- a/drivers/media/platform/renesas/vsp1/vsp1_rpf.c +++ b/drivers/media/platform/renesas/vsp1/vsp1_rpf.c @@ -315,8 +315,8 @@ static void rpf_configure_partition(struct vsp1_entity *entity, * 'width' need to be adjusted. */ if (pipe->partitions > 1) { - crop.width = pipe->partition->rpf.width; - crop.left += pipe->partition->rpf.left; + crop.width = pipe->partition->rpf[rpf->entity.index].width; + crop.left += pipe->partition->rpf[rpf->entity.index].left; } if (pipe->interlaced) { @@ -371,7 +371,9 @@ static void rpf_partition(struct vsp1_entity *entity, unsigned int partition_idx, struct vsp1_partition_window *window) { - partition->rpf = *window; + struct vsp1_rwpf *rpf = to_rwpf(&entity->subdev); + + partition->rpf[rpf->entity.index] = *window; } static const struct vsp1_entity_operations rpf_entity_ops = { diff --git a/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-debugfs.h b/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-debugfs.h index 8e1bfd860524..3fe177b59b16 100644 --- a/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-debugfs.h +++ b/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-debugfs.h @@ -16,8 +16,8 @@ void c8sectpfe_debugfs_init(struct c8sectpfei *); void c8sectpfe_debugfs_exit(struct c8sectpfei *); #else -static inline void c8sectpfe_debugfs_init(struct c8sectpfei *) {}; -static inline void c8sectpfe_debugfs_exit(struct c8sectpfei *) {}; +static inline void c8sectpfe_debugfs_init(struct c8sectpfei *fei) {}; +static inline void c8sectpfe_debugfs_exit(struct c8sectpfei *fei) {}; #endif #endif /* __C8SECTPFE_DEBUG_H */ diff --git a/drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-core.c b/drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-core.c index 4acc3b90d03a..7f771ea49b78 100644 --- a/drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-core.c +++ b/drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-core.c @@ -202,8 +202,8 @@ static int dcmipp_create_subdevs(struct dcmipp_device *dcmipp) return 0; err_init_entity: - while (i > 0) - dcmipp->pipe_cfg->ents[i - 
1].release(dcmipp->entity[i - 1]); + while (i-- > 0) + dcmipp->pipe_cfg->ents[i].release(dcmipp->entity[i]); return ret; } diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c index 0b55314a8082..8f1361bcce3a 100644 --- a/drivers/media/rc/imon.c +++ b/drivers/media/rc/imon.c @@ -1148,10 +1148,7 @@ static int imon_ir_change_protocol(struct rc_dev *rc, u64 *rc_proto) memcpy(ictx->usb_tx_buf, &ir_proto_packet, sizeof(ir_proto_packet)); - if (!mutex_is_locked(&ictx->lock)) { - unlock = true; - mutex_lock(&ictx->lock); - } + unlock = mutex_trylock(&ictx->lock); retval = send_packet(ictx); if (retval) diff --git a/drivers/media/rc/lirc_dev.c b/drivers/media/rc/lirc_dev.c index 52aea4167718..717c441b4a86 100644 --- a/drivers/media/rc/lirc_dev.c +++ b/drivers/media/rc/lirc_dev.c @@ -828,8 +828,10 @@ struct rc_dev *rc_dev_get_from_fd(int fd, bool write) return ERR_PTR(-EINVAL); } - if (write && !(f.file->f_mode & FMODE_WRITE)) + if (write && !(f.file->f_mode & FMODE_WRITE)) { + fdput(f); return ERR_PTR(-EPERM); + } fh = f.file->private_data; dev = fh->rc; diff --git a/drivers/media/usb/dvb-usb/dvb-usb-init.c b/drivers/media/usb/dvb-usb/dvb-usb-init.c index fbf58012becd..22d83ac18eb7 100644 --- a/drivers/media/usb/dvb-usb/dvb-usb-init.c +++ b/drivers/media/usb/dvb-usb/dvb-usb-init.c @@ -23,11 +23,40 @@ static int dvb_usb_force_pid_filter_usage; module_param_named(force_pid_filter_usage, dvb_usb_force_pid_filter_usage, int, 0444); MODULE_PARM_DESC(force_pid_filter_usage, "force all dvb-usb-devices to use a PID filter, if any (default: 0)."); +static int dvb_usb_check_bulk_endpoint(struct dvb_usb_device *d, u8 endpoint) +{ + if (endpoint) { + int ret; + + ret = usb_pipe_type_check(d->udev, usb_sndbulkpipe(d->udev, endpoint)); + if (ret) + return ret; + ret = usb_pipe_type_check(d->udev, usb_rcvbulkpipe(d->udev, endpoint)); + if (ret) + return ret; + } + return 0; +} + +static void dvb_usb_clear_halt(struct dvb_usb_device *d, u8 endpoint) +{ + if (endpoint) { + usb_clear_halt(d->udev, usb_sndbulkpipe(d->udev, endpoint)); + usb_clear_halt(d->udev, usb_rcvbulkpipe(d->udev, endpoint)); + } +} + static int dvb_usb_adapter_init(struct dvb_usb_device *d, short *adapter_nrs) { struct dvb_usb_adapter *adap; int ret, n, o; + ret = dvb_usb_check_bulk_endpoint(d, d->props.generic_bulk_ctrl_endpoint); + if (ret) + return ret; + ret = dvb_usb_check_bulk_endpoint(d, d->props.generic_bulk_ctrl_endpoint_response); + if (ret) + return ret; for (n = 0; n < d->props.num_adapters; n++) { adap = &d->adapter[n]; adap->dev = d; @@ -103,10 +132,8 @@ static int dvb_usb_adapter_init(struct dvb_usb_device *d, short *adapter_nrs) * when reloading the driver w/o replugging the device * sometimes a timeout occurs, this helps */ - if (d->props.generic_bulk_ctrl_endpoint != 0) { - usb_clear_halt(d->udev, usb_sndbulkpipe(d->udev, d->props.generic_bulk_ctrl_endpoint)); - usb_clear_halt(d->udev, usb_rcvbulkpipe(d->udev, d->props.generic_bulk_ctrl_endpoint)); - } + dvb_usb_clear_halt(d, d->props.generic_bulk_ctrl_endpoint); + dvb_usb_clear_halt(d, d->props.generic_bulk_ctrl_endpoint_response); return 0; diff --git a/drivers/media/usb/uvc/uvc_ctrl.c b/drivers/media/usb/uvc/uvc_ctrl.c index 4b685f883e4d..a7d0ec22d95c 100644 --- a/drivers/media/usb/uvc/uvc_ctrl.c +++ b/drivers/media/usb/uvc/uvc_ctrl.c @@ -2031,7 +2031,13 @@ static int uvc_ctrl_get_flags(struct uvc_device *dev, else ret = uvc_query_ctrl(dev, UVC_GET_INFO, ctrl->entity->id, dev->intfnum, info->selector, data, 1); - if (!ret) + + if (!ret) { + info->flags &= 
~(UVC_CTRL_FLAG_GET_CUR | + UVC_CTRL_FLAG_SET_CUR | + UVC_CTRL_FLAG_AUTO_UPDATE | + UVC_CTRL_FLAG_ASYNCHRONOUS); + info->flags |= (data[0] & UVC_CONTROL_CAP_GET ? UVC_CTRL_FLAG_GET_CUR : 0) | (data[0] & UVC_CONTROL_CAP_SET ? @@ -2040,6 +2046,7 @@ static int uvc_ctrl_get_flags(struct uvc_device *dev, UVC_CTRL_FLAG_AUTO_UPDATE : 0) | (data[0] & UVC_CONTROL_CAP_ASYNCHRONOUS ? UVC_CTRL_FLAG_ASYNCHRONOUS : 0); + } kfree(data); return ret; diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c index 8fe24c98087e..d435b6a6c295 100644 --- a/drivers/media/usb/uvc/uvc_driver.c +++ b/drivers/media/usb/uvc/uvc_driver.c @@ -2580,7 +2580,17 @@ static const struct usb_device_id uvc_ids[] = { .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, - .driver_info = UVC_INFO_QUIRK(UVC_QUIRK_RESTORE_CTRLS_ON_INIT) }, + .driver_info = UVC_INFO_QUIRK(UVC_QUIRK_RESTORE_CTRLS_ON_INIT + | UVC_QUIRK_INVALID_DEVICE_SOF) }, + /* Logitech HD Pro Webcam C922 */ + { .match_flags = USB_DEVICE_ID_MATCH_DEVICE + | USB_DEVICE_ID_MATCH_INT_INFO, + .idVendor = 0x046d, + .idProduct = 0x085c, + .bInterfaceClass = USB_CLASS_VIDEO, + .bInterfaceSubClass = 1, + .bInterfaceProtocol = 0, + .driver_info = UVC_INFO_QUIRK(UVC_QUIRK_INVALID_DEVICE_SOF) }, /* Logitech Rally Bar Huddle */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c index 7cbf4692bd87..51f4f653b983 100644 --- a/drivers/media/usb/uvc/uvc_video.c +++ b/drivers/media/usb/uvc/uvc_video.c @@ -529,6 +529,17 @@ uvc_video_clock_decode(struct uvc_streaming *stream, struct uvc_buffer *buf, stream->clock.last_sof = dev_sof; host_sof = usb_get_current_frame_number(stream->dev->udev); + + /* + * On some devices, like the Logitech C922, the device SOF does not run + * at a stable rate of 1kHz. For those devices use the host SOF instead. + * In the tests performed so far, this improves the timestamp precision. + * This is probably explained by a small packet handling jitter from the + * host, but the exact reason hasn't been fully determined. 
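/*
 * Illustrative sketch, not part of the patch: what the new quirk amounts to
 * in uvc_video_clock_decode().  For devices flagged with
 * UVC_QUIRK_INVALID_DEVICE_SOF the device-provided SOF sample is simply
 * replaced by the host controller's frame number, as the check that follows
 * does inline.  The helper name is hypothetical.
 */
static int uvc_pick_sof_sketch(struct uvc_streaming *stream, int dev_sof)
{
	int host_sof = usb_get_current_frame_number(stream->dev->udev);

	if (stream->dev->quirks & UVC_QUIRK_INVALID_DEVICE_SOF)
		return host_sof;
	return dev_sof;
}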
+ */ + if (stream->dev->quirks & UVC_QUIRK_INVALID_DEVICE_SOF) + dev_sof = host_sof; + time = uvc_video_get_time(); /* @@ -709,11 +720,11 @@ void uvc_video_clock_update(struct uvc_streaming *stream, unsigned long flags; u64 timestamp; u32 delta_stc; - u32 y1, y2; + u32 y1; u32 x1, x2; u32 mean; u32 sof; - u64 y; + u64 y, y2; if (!uvc_hw_timestamps_param) return; @@ -753,7 +764,7 @@ void uvc_video_clock_update(struct uvc_streaming *stream, sof = y; uvc_dbg(stream->dev, CLOCK, - "%s: PTS %u y %llu.%06llu SOF %u.%06llu (x1 %u x2 %u y1 %u y2 %u SOF offset %u)\n", + "%s: PTS %u y %llu.%06llu SOF %u.%06llu (x1 %u x2 %u y1 %u y2 %llu SOF offset %u)\n", stream->dev->name, buf->pts, y >> 16, div_u64((y & 0xffff) * 1000000, 65536), sof >> 16, div_u64(((u64)sof & 0xffff) * 1000000LLU, 65536), @@ -768,7 +779,7 @@ void uvc_video_clock_update(struct uvc_streaming *stream, goto done; y1 = NSEC_PER_SEC; - y2 = (u32)ktime_to_ns(ktime_sub(last->host_time, first->host_time)) + y1; + y2 = ktime_to_ns(ktime_sub(last->host_time, first->host_time)) + y1; /* * Interpolated and host SOF timestamps can wrap around at slightly @@ -789,7 +800,7 @@ void uvc_video_clock_update(struct uvc_streaming *stream, timestamp = ktime_to_ns(first->host_time) + y - y1; uvc_dbg(stream->dev, CLOCK, - "%s: SOF %u.%06llu y %llu ts %llu buf ts %llu (x1 %u/%u/%u x2 %u/%u/%u y1 %u y2 %u)\n", + "%s: SOF %u.%06llu y %llu ts %llu buf ts %llu (x1 %u/%u/%u x2 %u/%u/%u y1 %u y2 %llu)\n", stream->dev->name, sof >> 16, div_u64(((u64)sof & 0xffff) * 1000000LLU, 65536), y, timestamp, vbuf->vb2_buf.timestamp, diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h index 3653b2c8a86c..e5b12717016f 100644 --- a/drivers/media/usb/uvc/uvcvideo.h +++ b/drivers/media/usb/uvc/uvcvideo.h @@ -75,6 +75,7 @@ #define UVC_QUIRK_WAKE_AUTOSUSPEND 0x00002000 #define UVC_QUIRK_NO_RESET_RESUME 0x00004000 #define UVC_QUIRK_DISABLE_AUTOSUSPEND 0x00008000 +#define UVC_QUIRK_INVALID_DEVICE_SOF 0x00010000 /* Format flags */ #define UVC_FMT_FLAG_COMPRESSED 0x00000001 diff --git a/drivers/media/v4l2-core/v4l2-async.c b/drivers/media/v4l2-core/v4l2-async.c index 222f01665f7c..c477723c07bf 100644 --- a/drivers/media/v4l2-core/v4l2-async.c +++ b/drivers/media/v4l2-core/v4l2-async.c @@ -323,6 +323,9 @@ static int v4l2_async_create_ancillary_links(struct v4l2_async_notifier *n, sd->entity.function != MEDIA_ENT_F_FLASH) return 0; + if (!n->sd) + return 0; + link = media_create_ancillary_link(&n->sd->entity, &sd->entity); return IS_ERR(link) ? PTR_ERR(link) : 0; diff --git a/drivers/memory/Kconfig b/drivers/memory/Kconfig index 8efdd1f97139..c82d8d8a16ea 100644 --- a/drivers/memory/Kconfig +++ b/drivers/memory/Kconfig @@ -167,7 +167,7 @@ config FSL_CORENET_CF represents a coherency violation. 
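/*
 * Illustrative sketch, not part of the patch, of the interpolation done in
 * the uvc_video_clock_update() hunk above and of why y2 was widened to u64:
 * y2 holds the host-time span of the sample window in nanoseconds plus
 * NSEC_PER_SEC, which no longer fits in a u32 once that span exceeds roughly
 * 3.3 seconds.  The sketch ignores the driver's SOF wrap-around handling and
 * rounding details; x1, x2 and sof are SOF values in the driver's 16.16
 * fixed-point form, and the helper name is hypothetical.
 */
static u64 uvc_interpolate_ts_sketch(u64 first_host_ns, u64 last_host_ns,
				     u32 x1, u32 x2, u32 sof)
{
	u32 y1 = NSEC_PER_SEC;
	u64 y2 = last_host_ns - first_host_ns + y1;
	u64 y;

	if (x2 == x1)
		return first_host_ns;

	/* Linear interpolation of the buffer's SOF between the two samples. */
	y = div_u64((y2 - y1) * (u64)(sof - x1), x2 - x1) + y1;

	return first_host_ns + y - y1;
}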
config FSL_IFC - bool "Freescale IFC driver" if COMPILE_TEST + bool "Freescale IFC driver" depends on FSL_SOC || ARCH_LAYERSCAPE || SOC_LS1021A || COMPILE_TEST depends on HAS_IOMEM diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile index c66f07edcd0e..db1ba39de3b5 100644 --- a/drivers/mfd/Makefile +++ b/drivers/mfd/Makefile @@ -280,7 +280,5 @@ obj-$(CONFIG_MFD_INTEL_M10_BMC_PMCI) += intel-m10-bmc-pmci.o obj-$(CONFIG_MFD_ATC260X) += atc260x-core.o obj-$(CONFIG_MFD_ATC260X_I2C) += atc260x-i2c.o -rsmu-i2c-objs := rsmu_core.o rsmu_i2c.o -rsmu-spi-objs := rsmu_core.o rsmu_spi.o -obj-$(CONFIG_MFD_RSMU_I2C) += rsmu-i2c.o -obj-$(CONFIG_MFD_RSMU_SPI) += rsmu-spi.o +obj-$(CONFIG_MFD_RSMU_I2C) += rsmu_i2c.o rsmu_core.o +obj-$(CONFIG_MFD_RSMU_SPI) += rsmu_spi.o rsmu_core.o diff --git a/drivers/mfd/omap-usb-tll.c b/drivers/mfd/omap-usb-tll.c index b6303ddb013b..f68dd0281463 100644 --- a/drivers/mfd/omap-usb-tll.c +++ b/drivers/mfd/omap-usb-tll.c @@ -230,8 +230,7 @@ static int usbtll_omap_probe(struct platform_device *pdev) break; } - tll = devm_kzalloc(dev, sizeof(*tll) + sizeof(tll->ch_clk[nch]), - GFP_KERNEL); + tll = devm_kzalloc(dev, struct_size(tll, ch_clk, nch), GFP_KERNEL); if (!tll) { pm_runtime_put_sync(dev); pm_runtime_disable(dev); diff --git a/drivers/mfd/rsmu_core.c b/drivers/mfd/rsmu_core.c index 29437fd0bd5b..fd04a6e5dfa3 100644 --- a/drivers/mfd/rsmu_core.c +++ b/drivers/mfd/rsmu_core.c @@ -78,11 +78,13 @@ int rsmu_core_init(struct rsmu_ddata *rsmu) return ret; } +EXPORT_SYMBOL_GPL(rsmu_core_init); void rsmu_core_exit(struct rsmu_ddata *rsmu) { mutex_destroy(&rsmu->lock); } +EXPORT_SYMBOL_GPL(rsmu_core_exit); MODULE_DESCRIPTION("Renesas SMU core driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/misc/eeprom/ee1004.c b/drivers/misc/eeprom/ee1004.c index 21feebc3044c..71ca66d1df82 100644 --- a/drivers/misc/eeprom/ee1004.c +++ b/drivers/misc/eeprom/ee1004.c @@ -185,6 +185,8 @@ BIN_ATTRIBUTE_GROUPS(ee1004); static void ee1004_probe_temp_sensor(struct i2c_client *client) { struct i2c_board_info info = { .type = "jc42" }; + unsigned short addr = 0x18 | (client->addr & 7); + unsigned short addr_list[] = { addr, I2C_CLIENT_END }; u8 byte14; int ret; @@ -193,9 +195,7 @@ static void ee1004_probe_temp_sensor(struct i2c_client *client) if (ret != 1 || !(byte14 & BIT(7))) return; - info.addr = 0x18 | (client->addr & 7); - - i2c_new_client_device(client->adapter, &info); + i2c_new_scanned_device(client->adapter, &info, addr_list, NULL); } static void ee1004_cleanup(int idx, struct ee1004_bus_data *bd) diff --git a/drivers/mtd/nand/raw/Kconfig b/drivers/mtd/nand/raw/Kconfig index cbf8ae85e1ae..614257308516 100644 --- a/drivers/mtd/nand/raw/Kconfig +++ b/drivers/mtd/nand/raw/Kconfig @@ -234,8 +234,7 @@ config MTD_NAND_FSL_IFC tristate "Freescale IFC NAND controller" depends on FSL_SOC || ARCH_LAYERSCAPE || SOC_LS1021A || COMPILE_TEST depends on HAS_IOMEM - select FSL_IFC - select MEMORY + depends on FSL_IFC help Various Freescale chips e.g P1010, include a NAND Flash machine with built-in hardware ECC capabilities. 
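/*
 * Illustrative sketch, not part of the patch: the probing pattern the ee1004
 * hunk above switches to.  Instead of unconditionally instantiating the
 * "jc42" sensor, its address is put on a one-entry probe list and
 * i2c_new_scanned_device() only creates the client if a device actually
 * responds there.  The helper name is hypothetical.
 */
static void ee1004_add_temp_sensor_sketch(struct i2c_client *client)
{
	struct i2c_board_info info = { .type = "jc42" };
	unsigned short addr = 0x18 | (client->addr & 7);
	unsigned short addr_list[] = { addr, I2C_CLIENT_END };

	/* NULL probe callback: let the I2C core use its default probe. */
	i2c_new_scanned_device(client->adapter, &info, addr_list, NULL);
}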
diff --git a/drivers/mtd/spi-nor/winbond.c b/drivers/mtd/spi-nor/winbond.c index 142fb27b2ea9..e065e4fd42a3 100644 --- a/drivers/mtd/spi-nor/winbond.c +++ b/drivers/mtd/spi-nor/winbond.c @@ -105,7 +105,9 @@ static const struct flash_info winbond_nor_parts[] = { }, { .id = SNOR_ID(0xef, 0x40, 0x18), .name = "w25q128", + .size = SZ_16M, .flags = SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB, + .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ, }, { .id = SNOR_ID(0xef, 0x40, 0x19), .name = "w25q256", diff --git a/drivers/mtd/tests/Makefile b/drivers/mtd/tests/Makefile index 5de0378f90db..7dae831ee8b6 100644 --- a/drivers/mtd/tests/Makefile +++ b/drivers/mtd/tests/Makefile @@ -1,19 +1,19 @@ # SPDX-License-Identifier: GPL-2.0 -obj-$(CONFIG_MTD_TESTS) += mtd_oobtest.o -obj-$(CONFIG_MTD_TESTS) += mtd_pagetest.o -obj-$(CONFIG_MTD_TESTS) += mtd_readtest.o -obj-$(CONFIG_MTD_TESTS) += mtd_speedtest.o -obj-$(CONFIG_MTD_TESTS) += mtd_stresstest.o -obj-$(CONFIG_MTD_TESTS) += mtd_subpagetest.o -obj-$(CONFIG_MTD_TESTS) += mtd_torturetest.o -obj-$(CONFIG_MTD_TESTS) += mtd_nandecctest.o -obj-$(CONFIG_MTD_TESTS) += mtd_nandbiterrs.o +obj-$(CONFIG_MTD_TESTS) += mtd_oobtest.o mtd_test.o +obj-$(CONFIG_MTD_TESTS) += mtd_pagetest.o mtd_test.o +obj-$(CONFIG_MTD_TESTS) += mtd_readtest.o mtd_test.o +obj-$(CONFIG_MTD_TESTS) += mtd_speedtest.o mtd_test.o +obj-$(CONFIG_MTD_TESTS) += mtd_stresstest.o mtd_test.o +obj-$(CONFIG_MTD_TESTS) += mtd_subpagetest.o mtd_test.o +obj-$(CONFIG_MTD_TESTS) += mtd_torturetest.o mtd_test.o +obj-$(CONFIG_MTD_TESTS) += mtd_nandecctest.o mtd_test.o +obj-$(CONFIG_MTD_TESTS) += mtd_nandbiterrs.o mtd_test.o -mtd_oobtest-objs := oobtest.o mtd_test.o -mtd_pagetest-objs := pagetest.o mtd_test.o -mtd_readtest-objs := readtest.o mtd_test.o -mtd_speedtest-objs := speedtest.o mtd_test.o -mtd_stresstest-objs := stresstest.o mtd_test.o -mtd_subpagetest-objs := subpagetest.o mtd_test.o -mtd_torturetest-objs := torturetest.o mtd_test.o -mtd_nandbiterrs-objs := nandbiterrs.o mtd_test.o +mtd_oobtest-objs := oobtest.o +mtd_pagetest-objs := pagetest.o +mtd_readtest-objs := readtest.o +mtd_speedtest-objs := speedtest.o +mtd_stresstest-objs := stresstest.o +mtd_subpagetest-objs := subpagetest.o +mtd_torturetest-objs := torturetest.o +mtd_nandbiterrs-objs := nandbiterrs.o diff --git a/drivers/mtd/tests/mtd_test.c b/drivers/mtd/tests/mtd_test.c index c84250beffdc..f391e0300cdc 100644 --- a/drivers/mtd/tests/mtd_test.c +++ b/drivers/mtd/tests/mtd_test.c @@ -25,6 +25,7 @@ int mtdtest_erase_eraseblock(struct mtd_info *mtd, unsigned int ebnum) return 0; } +EXPORT_SYMBOL_GPL(mtdtest_erase_eraseblock); static int is_block_bad(struct mtd_info *mtd, unsigned int ebnum) { @@ -57,6 +58,7 @@ int mtdtest_scan_for_bad_eraseblocks(struct mtd_info *mtd, unsigned char *bbt, return 0; } +EXPORT_SYMBOL_GPL(mtdtest_scan_for_bad_eraseblocks); int mtdtest_erase_good_eraseblocks(struct mtd_info *mtd, unsigned char *bbt, unsigned int eb, int ebcnt) @@ -75,6 +77,7 @@ int mtdtest_erase_good_eraseblocks(struct mtd_info *mtd, unsigned char *bbt, return 0; } +EXPORT_SYMBOL_GPL(mtdtest_erase_good_eraseblocks); int mtdtest_read(struct mtd_info *mtd, loff_t addr, size_t size, void *buf) { @@ -92,6 +95,7 @@ int mtdtest_read(struct mtd_info *mtd, loff_t addr, size_t size, void *buf) return err; } +EXPORT_SYMBOL_GPL(mtdtest_read); int mtdtest_write(struct mtd_info *mtd, loff_t addr, size_t size, const void *buf) @@ -107,3 +111,8 @@ int mtdtest_write(struct mtd_info *mtd, loff_t addr, size_t size, return err; } 
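/*
 * Illustrative sketch, not part of the patch: with mtd_test.o now built as a
 * module of its own (see the tests Makefile hunk above), the individual test
 * modules just call the exported helpers.  The erase/write/read round trip
 * below is a hypothetical usage example; the helper name and buffers are
 * assumptions.
 */
static int mtdtest_roundtrip_sketch(struct mtd_info *mtd, unsigned int ebnum,
				    const void *wbuf, void *rbuf, size_t len)
{
	loff_t addr = (loff_t)ebnum * mtd->erasesize;
	int err;

	err = mtdtest_erase_eraseblock(mtd, ebnum);
	if (err)
		return err;

	err = mtdtest_write(mtd, addr, len, wbuf);
	if (err)
		return err;

	return mtdtest_read(mtd, addr, len, rbuf);
}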
+EXPORT_SYMBOL_GPL(mtdtest_write); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("MTD function test helpers"); +MODULE_AUTHOR("Akinobu Mita"); diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c index e5ac3cd0bbae..c7ba7a15c9f7 100644 --- a/drivers/mtd/ubi/eba.c +++ b/drivers/mtd/ubi/eba.c @@ -1564,6 +1564,7 @@ int self_check_eba(struct ubi_device *ubi, struct ubi_attach_info *ai_fastmap, GFP_KERNEL); if (!fm_eba[i]) { ret = -ENOMEM; + kfree(scan_eba[i]); goto out_free; } @@ -1599,7 +1600,7 @@ int self_check_eba(struct ubi_device *ubi, struct ubi_attach_info *ai_fastmap, } out_free: - for (i = 0; i < num_volumes; i++) { + while (--i >= 0) { if (!ubi->volumes[i]) continue; diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index d19aabf5d4fb..2ed0da068490 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -1121,13 +1121,10 @@ static struct slave *bond_find_best_slave(struct bonding *bond) return bestslave; } +/* must be called in RCU critical section or with RTNL held */ static bool bond_should_notify_peers(struct bonding *bond) { - struct slave *slave; - - rcu_read_lock(); - slave = rcu_dereference(bond->curr_active_slave); - rcu_read_unlock(); + struct slave *slave = rcu_dereference_rtnl(bond->curr_active_slave); if (!slave || !bond->send_peer_notif || bond->send_peer_notif % diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c index 8f50abe739b7..0783fc121bbb 100644 --- a/drivers/net/dsa/b53/b53_common.c +++ b/drivers/net/dsa/b53/b53_common.c @@ -2256,6 +2256,9 @@ static int b53_change_mtu(struct dsa_switch *ds, int port, int mtu) if (is5325(dev) || is5365(dev)) return -EOPNOTSUPP; + if (!dsa_is_cpu_port(ds, port)) + return 0; + enable_jumbo = (mtu >= JMS_MIN_SIZE); allow_10_100 = (dev->chip_id == BCM583XX_DEVICE_ID); diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c index 0580b2fee21c..baa1eeb9a1b0 100644 --- a/drivers/net/dsa/microchip/ksz_common.c +++ b/drivers/net/dsa/microchip/ksz_common.c @@ -3917,6 +3917,13 @@ static int ksz_hsr_join(struct dsa_switch *ds, int port, struct net_device *hsr, return -EOPNOTSUPP; } + /* KSZ9477 can only perform HSR offloading for up to two ports */ + if (hweight8(dev->hsr_ports) >= 2) { + NL_SET_ERR_MSG_MOD(extack, + "Cannot offload more than two ports - using software HSR"); + return -EOPNOTSUPP; + } + /* Self MAC address filtering, to avoid frames traversing * the HSR ring more than once. 
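/*
 * Illustrative sketch, not part of the patch: the limit added to
 * ksz_hsr_join() above just counts the ports already recorded in the
 * dev->hsr_ports bitmask; the KSZ9477 can offload a single HSR ring, i.e. at
 * most two ports, so a third port falls back to software HSR.  The helper
 * name is hypothetical.
 */
static bool ksz_can_offload_hsr_sketch(u8 hsr_ports_mask)
{
	/* hweight8() returns the number of bits set (ports already in a ring). */
	return hweight8(hsr_ports_mask) < 2;
}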
*/ diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 07c897b13de1..5b4e2ce5470d 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -3626,7 +3626,8 @@ static int mv88e6xxx_change_mtu(struct dsa_switch *ds, int port, int new_mtu) mv88e6xxx_reg_lock(chip); if (chip->info->ops->port_set_jumbo_size) ret = chip->info->ops->port_set_jumbo_size(chip, port, new_mtu); - else if (chip->info->ops->set_max_frame_size) + else if (chip->info->ops->set_max_frame_size && + dsa_is_cpu_port(ds, port)) ret = chip->info->ops->set_max_frame_size(chip, new_mtu); mv88e6xxx_reg_unlock(chip); diff --git a/drivers/net/ethernet/brocade/bna/bna_types.h b/drivers/net/ethernet/brocade/bna/bna_types.h index a5ebd7110e07..986f43d27711 100644 --- a/drivers/net/ethernet/brocade/bna/bna_types.h +++ b/drivers/net/ethernet/brocade/bna/bna_types.h @@ -416,7 +416,7 @@ struct bna_ib { /* Tx object */ /* Tx datapath control structure */ -#define BNA_Q_NAME_SIZE 16 +#define BNA_Q_NAME_SIZE (IFNAMSIZ + 6) struct bna_tcb { /* Fast path */ void **sw_qpt; diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c index fe121d36112d..ece6f3b48327 100644 --- a/drivers/net/ethernet/brocade/bna/bnad.c +++ b/drivers/net/ethernet/brocade/bna/bnad.c @@ -1534,8 +1534,9 @@ bnad_tx_msix_register(struct bnad *bnad, struct bnad_tx_info *tx_info, for (i = 0; i < num_txqs; i++) { vector_num = tx_info->tcb[i]->intr_vector; - sprintf(tx_info->tcb[i]->name, "%s TXQ %d", bnad->netdev->name, - tx_id + tx_info->tcb[i]->id); + snprintf(tx_info->tcb[i]->name, BNA_Q_NAME_SIZE, "%s TXQ %d", + bnad->netdev->name, + tx_id + tx_info->tcb[i]->id); err = request_irq(bnad->msix_table[vector_num].vector, (irq_handler_t)bnad_msix_tx, 0, tx_info->tcb[i]->name, @@ -1585,9 +1586,9 @@ bnad_rx_msix_register(struct bnad *bnad, struct bnad_rx_info *rx_info, for (i = 0; i < num_rxps; i++) { vector_num = rx_info->rx_ctrl[i].ccb->intr_vector; - sprintf(rx_info->rx_ctrl[i].ccb->name, "%s CQ %d", - bnad->netdev->name, - rx_id + rx_info->rx_ctrl[i].ccb->id); + snprintf(rx_info->rx_ctrl[i].ccb->name, BNA_Q_NAME_SIZE, + "%s CQ %d", bnad->netdev->name, + rx_id + rx_info->rx_ctrl[i].ccb->id); err = request_irq(bnad->msix_table[vector_num].vector, (irq_handler_t)bnad_msix_rx, 0, rx_info->rx_ctrl[i].ccb->name, diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c index 5f0c9e1771db..7ebd61a3a49b 100644 --- a/drivers/net/ethernet/cortina/gemini.c +++ b/drivers/net/ethernet/cortina/gemini.c @@ -79,7 +79,8 @@ MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); #define GMAC0_IRQ4_8 (GMAC0_MIB_INT_BIT | GMAC0_RX_OVERRUN_INT_BIT) #define GMAC_OFFLOAD_FEATURES (NETIF_F_SG | NETIF_F_IP_CSUM | \ - NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM) + NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM | \ + NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6) /** * struct gmac_queue_page - page buffer per-page info @@ -1148,13 +1149,25 @@ static int gmac_map_tx_bufs(struct net_device *netdev, struct sk_buff *skb, skb_frag_t *skb_frag; dma_addr_t mapping; void *buffer; + u16 mss; int ret; - /* TODO: implement proper TSO using MTU in word3 */ word1 = skb->len; word3 = SOF_BIT; - if (skb->len >= ETH_FRAME_LEN) { + mss = skb_shinfo(skb)->gso_size; + if (mss) { + /* This means we are dealing with TCP and skb->len is the + * sum total of all the segments. The TSO will deal with + * chopping this up for us. 
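/*
 * Illustrative sketch, not part of the patch: the TSO setup added to
 * gmac_map_tx_bufs() in the hunk around this point.  For GSO skbs the
 * hardware is given the full per-frame size (payload MSS plus the
 * TCP/IP/Ethernet headers) in word3 and segmentation is enabled via
 * TSS_MTU_ENABLE_BIT; non-GSO frames keep the existing >= ETH_FRAME_LEN
 * bypass path.  The helper name is hypothetical.
 */
static void gmac_fill_tso_words_sketch(struct sk_buff *skb,
				       u32 *word1, u32 *word3)
{
	u16 mss = skb_shinfo(skb)->gso_size;

	if (!mss)
		return;

	/* The accelerator wants headers + payload, not just the MSS. */
	mss += skb_tcp_all_headers(skb);
	*word1 |= TSS_MTU_ENABLE_BIT;
	*word3 |= mss;
}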
+ */ + /* The accelerator needs the full frame size here */ + mss += skb_tcp_all_headers(skb); + netdev_dbg(netdev, "segment offloading mss = %04x len=%04x\n", + mss, skb->len); + word1 |= TSS_MTU_ENABLE_BIT; + word3 |= mss; + } else if (skb->len >= ETH_FRAME_LEN) { /* Hardware offloaded checksumming isn't working on frames * bigger than 1514 bytes. A hypothesis about this is that the * checksum buffer is only 1518 bytes, so when the frames get @@ -1169,7 +1182,9 @@ static int gmac_map_tx_bufs(struct net_device *netdev, struct sk_buff *skb, return ret; } word1 |= TSS_BYPASS_BIT; - } else if (skb->ip_summed == CHECKSUM_PARTIAL) { + } + + if (skb->ip_summed == CHECKSUM_PARTIAL) { int tcp = 0; /* We do not switch off the checksumming on non TCP/UDP diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 881ece735dcf..fb19295529a2 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -1361,6 +1361,12 @@ fec_stop(struct net_device *ndev) writel(FEC_ECR_ETHEREN, fep->hwp + FEC_ECNTRL); writel(rmii_mode, fep->hwp + FEC_R_CNTRL); } + + if (fep->bufdesc_ex) { + val = readl(fep->hwp + FEC_ECNTRL); + val |= FEC_ECR_EN1588; + writel(val, fep->hwp + FEC_ECNTRL); + } } static void diff --git a/drivers/net/ethernet/google/gve/gve_tx.c b/drivers/net/ethernet/google/gve/gve_tx.c index 24a64ec1073e..e7fb7d6d283d 100644 --- a/drivers/net/ethernet/google/gve/gve_tx.c +++ b/drivers/net/ethernet/google/gve/gve_tx.c @@ -158,15 +158,16 @@ static int gve_clean_xdp_done(struct gve_priv *priv, struct gve_tx_ring *tx, u32 to_do) { struct gve_tx_buffer_state *info; - u32 clean_end = tx->done + to_do; u64 pkts = 0, bytes = 0; size_t space_freed = 0; u32 xsk_complete = 0; u32 idx; + int i; - for (; tx->done < clean_end; tx->done++) { + for (i = 0; i < to_do; i++) { idx = tx->done & tx->mask; info = &tx->info[idx]; + tx->done++; if (unlikely(!info->xdp.size)) continue; diff --git a/drivers/net/ethernet/google/gve/gve_tx_dqo.c b/drivers/net/ethernet/google/gve/gve_tx_dqo.c index 0b3cca3fc792..f879426cb552 100644 --- a/drivers/net/ethernet/google/gve/gve_tx_dqo.c +++ b/drivers/net/ethernet/google/gve/gve_tx_dqo.c @@ -866,22 +866,42 @@ static bool gve_can_send_tso(const struct sk_buff *skb) const int header_len = skb_tcp_all_headers(skb); const int gso_size = shinfo->gso_size; int cur_seg_num_bufs; + int prev_frag_size; int cur_seg_size; int i; cur_seg_size = skb_headlen(skb) - header_len; + prev_frag_size = skb_headlen(skb); cur_seg_num_bufs = cur_seg_size > 0; for (i = 0; i < shinfo->nr_frags; i++) { if (cur_seg_size >= gso_size) { cur_seg_size %= gso_size; cur_seg_num_bufs = cur_seg_size > 0; + + if (prev_frag_size > GVE_TX_MAX_BUF_SIZE_DQO) { + int prev_frag_remain = prev_frag_size % + GVE_TX_MAX_BUF_SIZE_DQO; + + /* If the last descriptor of the previous frag + * is less than cur_seg_size, the segment will + * span two descriptors in the previous frag. + * Since max gso size (9728) is less than + * GVE_TX_MAX_BUF_SIZE_DQO, it is impossible + * for the segment to span more than two + * descriptors. 
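/*
 * Illustrative sketch, not part of the patch: the extra-buffer accounting
 * that the check following this comment performs.  A frag larger than
 * GVE_TX_MAX_BUF_SIZE_DQO is split into max-size descriptors plus a short
 * tail; if the bytes a TSO segment still needs from that frag exceed the
 * tail descriptor, the segment touches one more buffer than the plain byte
 * count suggests.  The helper name is hypothetical.
 */
static int gve_seg_bufs_in_prev_frag_sketch(int prev_frag_size,
					    int cur_seg_size)
{
	int bufs = cur_seg_size > 0;	/* at least one buffer if bytes remain */
	int tail = prev_frag_size % GVE_TX_MAX_BUF_SIZE_DQO;

	if (prev_frag_size > GVE_TX_MAX_BUF_SIZE_DQO &&
	    tail && cur_seg_size > tail)
		bufs++;		/* the segment spills across the tail boundary */

	return bufs;
}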
+ */ + if (prev_frag_remain && + cur_seg_size > prev_frag_remain) + cur_seg_num_bufs++; + } } if (unlikely(++cur_seg_num_bufs > max_bufs_per_seg)) return false; - cur_seg_size += skb_frag_size(&shinfo->frags[i]); + prev_frag_size = skb_frag_size(&shinfo->frags[i]); + cur_seg_size += prev_frag_size; } return true; diff --git a/drivers/net/ethernet/hisilicon/hns3/Makefile b/drivers/net/ethernet/hisilicon/hns3/Makefile index 8e9293e57bfd..e8af26da1fc1 100644 --- a/drivers/net/ethernet/hisilicon/hns3/Makefile +++ b/drivers/net/ethernet/hisilicon/hns3/Makefile @@ -15,15 +15,14 @@ hns3-objs = hns3_enet.o hns3_ethtool.o hns3_debugfs.o hns3-$(CONFIG_HNS3_DCB) += hns3_dcbnl.o -obj-$(CONFIG_HNS3_HCLGEVF) += hclgevf.o +obj-$(CONFIG_HNS3_HCLGEVF) += hclgevf.o hclge-common.o -hclgevf-objs = hns3vf/hclgevf_main.o hns3vf/hclgevf_mbx.o hns3vf/hclgevf_devlink.o hns3vf/hclgevf_regs.o \ - hns3_common/hclge_comm_cmd.o hns3_common/hclge_comm_rss.o hns3_common/hclge_comm_tqp_stats.o +hclge-common-objs += hns3_common/hclge_comm_cmd.o hns3_common/hclge_comm_rss.o hns3_common/hclge_comm_tqp_stats.o -obj-$(CONFIG_HNS3_HCLGE) += hclge.o +hclgevf-objs = hns3vf/hclgevf_main.o hns3vf/hclgevf_mbx.o hns3vf/hclgevf_devlink.o hns3vf/hclgevf_regs.o + +obj-$(CONFIG_HNS3_HCLGE) += hclge.o hclge-common.o hclge-objs = hns3pf/hclge_main.o hns3pf/hclge_mdio.o hns3pf/hclge_tm.o hns3pf/hclge_regs.o \ hns3pf/hclge_mbx.o hns3pf/hclge_err.o hns3pf/hclge_debugfs.o hns3pf/hclge_ptp.o hns3pf/hclge_devlink.o \ - hns3_common/hclge_comm_cmd.o hns3_common/hclge_comm_rss.o hns3_common/hclge_comm_tqp_stats.o - hclge-$(CONFIG_HNS3_DCB) += hns3pf/hclge_dcb.o diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c index ea40b594dbac..4ad4e8ab2f1f 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c @@ -48,6 +48,7 @@ void hclge_comm_cmd_reuse_desc(struct hclge_desc *desc, bool is_read) else desc->flag &= cpu_to_le16(~HCLGE_COMM_CMD_FLAG_WR); } +EXPORT_SYMBOL_GPL(hclge_comm_cmd_reuse_desc); static void hclge_comm_set_default_capability(struct hnae3_ae_dev *ae_dev, bool is_pf) @@ -72,6 +73,7 @@ void hclge_comm_cmd_setup_basic_desc(struct hclge_desc *desc, if (is_read) desc->flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_WR); } +EXPORT_SYMBOL_GPL(hclge_comm_cmd_setup_basic_desc); int hclge_comm_firmware_compat_config(struct hnae3_ae_dev *ae_dev, struct hclge_comm_hw *hw, bool en) @@ -517,6 +519,7 @@ int hclge_comm_cmd_send(struct hclge_comm_hw *hw, struct hclge_desc *desc, return ret; } +EXPORT_SYMBOL_GPL(hclge_comm_cmd_send); static void hclge_comm_cmd_uninit_regs(struct hclge_comm_hw *hw) { @@ -553,6 +556,7 @@ void hclge_comm_cmd_uninit(struct hnae3_ae_dev *ae_dev, hclge_comm_free_cmd_desc(&cmdq->csq); hclge_comm_free_cmd_desc(&cmdq->crq); } +EXPORT_SYMBOL_GPL(hclge_comm_cmd_uninit); int hclge_comm_cmd_queue_init(struct pci_dev *pdev, struct hclge_comm_hw *hw) { @@ -591,6 +595,7 @@ int hclge_comm_cmd_queue_init(struct pci_dev *pdev, struct hclge_comm_hw *hw) hclge_comm_free_cmd_desc(&hw->cmq.csq); return ret; } +EXPORT_SYMBOL_GPL(hclge_comm_cmd_queue_init); void hclge_comm_cmd_init_ops(struct hclge_comm_hw *hw, const struct hclge_comm_cmq_ops *ops) @@ -602,6 +607,7 @@ void hclge_comm_cmd_init_ops(struct hclge_comm_hw *hw, cmdq->ops.trace_cmd_get = ops->trace_cmd_get; } } +EXPORT_SYMBOL_GPL(hclge_comm_cmd_init_ops); int hclge_comm_cmd_init(struct hnae3_ae_dev *ae_dev, struct 
hclge_comm_hw *hw, u32 *fw_version, bool is_pf, @@ -672,3 +678,8 @@ int hclge_comm_cmd_init(struct hnae3_ae_dev *ae_dev, struct hclge_comm_hw *hw, return ret; } +EXPORT_SYMBOL_GPL(hclge_comm_cmd_init); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("HNS3: Hisilicon Ethernet PF/VF Common Library"); +MODULE_AUTHOR("Huawei Tech. Co., Ltd."); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.c b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.c index b4ae2160aff4..4e2bb6556b1c 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.c @@ -62,6 +62,7 @@ int hclge_comm_rss_init_cfg(struct hnae3_handle *nic, return 0; } +EXPORT_SYMBOL_GPL(hclge_comm_rss_init_cfg); void hclge_comm_get_rss_tc_info(u16 rss_size, u8 hw_tc_map, u16 *tc_offset, u16 *tc_valid, u16 *tc_size) @@ -78,6 +79,7 @@ void hclge_comm_get_rss_tc_info(u16 rss_size, u8 hw_tc_map, u16 *tc_offset, tc_offset[i] = (hw_tc_map & BIT(i)) ? rss_size * i : 0; } } +EXPORT_SYMBOL_GPL(hclge_comm_get_rss_tc_info); int hclge_comm_set_rss_tc_mode(struct hclge_comm_hw *hw, u16 *tc_offset, u16 *tc_valid, u16 *tc_size) @@ -113,6 +115,7 @@ int hclge_comm_set_rss_tc_mode(struct hclge_comm_hw *hw, u16 *tc_offset, return ret; } +EXPORT_SYMBOL_GPL(hclge_comm_set_rss_tc_mode); int hclge_comm_set_rss_hash_key(struct hclge_comm_rss_cfg *rss_cfg, struct hclge_comm_hw *hw, const u8 *key, @@ -143,6 +146,7 @@ int hclge_comm_set_rss_hash_key(struct hclge_comm_rss_cfg *rss_cfg, return 0; } +EXPORT_SYMBOL_GPL(hclge_comm_set_rss_hash_key); int hclge_comm_set_rss_tuple(struct hnae3_ae_dev *ae_dev, struct hclge_comm_hw *hw, @@ -185,11 +189,13 @@ int hclge_comm_set_rss_tuple(struct hnae3_ae_dev *ae_dev, rss_cfg->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en; return 0; } +EXPORT_SYMBOL_GPL(hclge_comm_set_rss_tuple); u32 hclge_comm_get_rss_key_size(struct hnae3_handle *handle) { return HCLGE_COMM_RSS_KEY_SIZE; } +EXPORT_SYMBOL_GPL(hclge_comm_get_rss_key_size); int hclge_comm_parse_rss_hfunc(struct hclge_comm_rss_cfg *rss_cfg, const u8 hfunc, u8 *hash_algo) @@ -217,6 +223,7 @@ void hclge_comm_rss_indir_init_cfg(struct hnae3_ae_dev *ae_dev, for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++) rss_cfg->rss_indirection_tbl[i] = i % rss_cfg->rss_size; } +EXPORT_SYMBOL_GPL(hclge_comm_rss_indir_init_cfg); int hclge_comm_get_rss_tuple(struct hclge_comm_rss_cfg *rss_cfg, int flow_type, u8 *tuple_sets) @@ -250,6 +257,7 @@ int hclge_comm_get_rss_tuple(struct hclge_comm_rss_cfg *rss_cfg, int flow_type, return 0; } +EXPORT_SYMBOL_GPL(hclge_comm_get_rss_tuple); static void hclge_comm_append_rss_msb_info(struct hclge_comm_rss_ind_tbl_cmd *req, @@ -304,6 +312,7 @@ int hclge_comm_set_rss_indir_table(struct hnae3_ae_dev *ae_dev, } return 0; } +EXPORT_SYMBOL_GPL(hclge_comm_set_rss_indir_table); int hclge_comm_set_rss_input_tuple(struct hclge_comm_hw *hw, struct hclge_comm_rss_cfg *rss_cfg) @@ -332,6 +341,7 @@ int hclge_comm_set_rss_input_tuple(struct hclge_comm_hw *hw, "failed to configure rss input, ret = %d.\n", ret); return ret; } +EXPORT_SYMBOL_GPL(hclge_comm_set_rss_input_tuple); void hclge_comm_get_rss_hash_info(struct hclge_comm_rss_cfg *rss_cfg, u8 *key, u8 *hfunc) @@ -355,6 +365,7 @@ void hclge_comm_get_rss_hash_info(struct hclge_comm_rss_cfg *rss_cfg, u8 *key, if (key) memcpy(key, rss_cfg->rss_hash_key, HCLGE_COMM_RSS_KEY_SIZE); } +EXPORT_SYMBOL_GPL(hclge_comm_get_rss_hash_info); void hclge_comm_get_rss_indir_tbl(struct hclge_comm_rss_cfg 
*rss_cfg, u32 *indir, u16 rss_ind_tbl_size) @@ -367,6 +378,7 @@ void hclge_comm_get_rss_indir_tbl(struct hclge_comm_rss_cfg *rss_cfg, for (i = 0; i < rss_ind_tbl_size; i++) indir[i] = rss_cfg->rss_indirection_tbl[i]; } +EXPORT_SYMBOL_GPL(hclge_comm_get_rss_indir_tbl); int hclge_comm_set_rss_algo_key(struct hclge_comm_hw *hw, const u8 hfunc, const u8 *key) @@ -408,6 +420,7 @@ int hclge_comm_set_rss_algo_key(struct hclge_comm_hw *hw, const u8 hfunc, return 0; } +EXPORT_SYMBOL_GPL(hclge_comm_set_rss_algo_key); static u8 hclge_comm_get_rss_hash_bits(struct ethtool_rxnfc *nfc) { @@ -502,3 +515,4 @@ u64 hclge_comm_convert_rss_tuple(u8 tuple_sets) return tuple_data; } +EXPORT_SYMBOL_GPL(hclge_comm_convert_rss_tuple); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c index 618f66d9586b..2b31188ff555 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c @@ -26,6 +26,7 @@ u64 *hclge_comm_tqps_get_stats(struct hnae3_handle *handle, u64 *data) return buff; } +EXPORT_SYMBOL_GPL(hclge_comm_tqps_get_stats); int hclge_comm_tqps_get_sset_count(struct hnae3_handle *handle) { @@ -33,6 +34,7 @@ int hclge_comm_tqps_get_sset_count(struct hnae3_handle *handle) return kinfo->num_tqps * HCLGE_COMM_QUEUE_PAIR_SIZE; } +EXPORT_SYMBOL_GPL(hclge_comm_tqps_get_sset_count); u8 *hclge_comm_tqps_get_strings(struct hnae3_handle *handle, u8 *data) { @@ -56,6 +58,7 @@ u8 *hclge_comm_tqps_get_strings(struct hnae3_handle *handle, u8 *data) return buff; } +EXPORT_SYMBOL_GPL(hclge_comm_tqps_get_strings); int hclge_comm_tqps_update_stats(struct hnae3_handle *handle, struct hclge_comm_hw *hw) @@ -99,6 +102,7 @@ int hclge_comm_tqps_update_stats(struct hnae3_handle *handle, return 0; } +EXPORT_SYMBOL_GPL(hclge_comm_tqps_update_stats); void hclge_comm_reset_tqp_stats(struct hnae3_handle *handle) { @@ -113,3 +117,4 @@ void hclge_comm_reset_tqp_stats(struct hnae3_handle *handle) memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats)); } } +EXPORT_SYMBOL_GPL(hclge_comm_reset_tqp_stats); diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c index e3cab8e98f52..5412eff8ef23 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c @@ -534,7 +534,7 @@ ice_parse_rx_flow_user_data(struct ethtool_rx_flow_spec *fsp, * * Returns the number of available flow director filters to this VSI */ -static int ice_fdir_num_avail_fltr(struct ice_hw *hw, struct ice_vsi *vsi) +int ice_fdir_num_avail_fltr(struct ice_hw *hw, struct ice_vsi *vsi) { u16 vsi_num = ice_get_hw_vsi_num(hw, vsi->idx); u16 num_guar; diff --git a/drivers/net/ethernet/intel/ice/ice_fdir.h b/drivers/net/ethernet/intel/ice/ice_fdir.h index 021ecbac7848..ab5b118daa2d 100644 --- a/drivers/net/ethernet/intel/ice/ice_fdir.h +++ b/drivers/net/ethernet/intel/ice/ice_fdir.h @@ -207,6 +207,8 @@ struct ice_fdir_base_pkt { const u8 *tun_pkt; }; +struct ice_vsi; + int ice_alloc_fd_res_cntr(struct ice_hw *hw, u16 *cntr_id); int ice_free_fd_res_cntr(struct ice_hw *hw, u16 cntr_id); int ice_alloc_fd_guar_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr); @@ -218,6 +220,7 @@ int ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input, u8 *pkt, bool frag, bool tun); int ice_get_fdir_cnt_all(struct ice_hw *hw); +int ice_fdir_num_avail_fltr(struct ice_hw *hw, 
struct ice_vsi *vsi); bool ice_fdir_is_dup_fltr(struct ice_hw *hw, struct ice_fdir_fltr *input); bool ice_fdir_has_frag(enum ice_fltr_ptype flow); struct ice_fdir_fltr * diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c index 1191031b2a43..ffd6c42bda1e 100644 --- a/drivers/net/ethernet/intel/ice/ice_switch.c +++ b/drivers/net/ethernet/intel/ice/ice_switch.c @@ -2413,10 +2413,10 @@ ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid, /* Propagate some data to the recipe database */ recps[idx].is_root = !!is_root; recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority; - recps[idx].need_pass_l2 = root_bufs.content.act_ctrl & - ICE_AQ_RECIPE_ACT_NEED_PASS_L2; - recps[idx].allow_pass_l2 = root_bufs.content.act_ctrl & - ICE_AQ_RECIPE_ACT_ALLOW_PASS_L2; + recps[idx].need_pass_l2 = !!(root_bufs.content.act_ctrl & + ICE_AQ_RECIPE_ACT_NEED_PASS_L2); + recps[idx].allow_pass_l2 = !!(root_bufs.content.act_ctrl & + ICE_AQ_RECIPE_ACT_ALLOW_PASS_L2); bitmap_zero(recps[idx].res_idxs, ICE_MAX_FV_WORDS); if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) { recps[idx].chain_idx = root_bufs.content.result_indx & diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c index 8e4ff3af86c6..b4feb0927687 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c @@ -536,6 +536,8 @@ static void ice_vc_fdir_reset_cnt_all(struct ice_vf_fdir *fdir) fdir->fdir_fltr_cnt[flow][0] = 0; fdir->fdir_fltr_cnt[flow][1] = 0; } + + fdir->fdir_fltr_cnt_total = 0; } /** @@ -1560,6 +1562,7 @@ ice_vc_add_fdir_fltr_post(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx, resp->status = status; resp->flow_id = conf->flow_id; vf->fdir.fdir_fltr_cnt[conf->input.flow_type][is_tun]++; + vf->fdir.fdir_fltr_cnt_total++; ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret, (u8 *)resp, len); @@ -1624,6 +1627,7 @@ ice_vc_del_fdir_fltr_post(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx, resp->status = status; ice_vc_fdir_remove_entry(vf, conf, conf->flow_id); vf->fdir.fdir_fltr_cnt[conf->input.flow_type][is_tun]--; + vf->fdir.fdir_fltr_cnt_total--; ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret, (u8 *)resp, len); @@ -1790,6 +1794,7 @@ int ice_vc_add_fdir_fltr(struct ice_vf *vf, u8 *msg) struct virtchnl_fdir_add *stat = NULL; struct virtchnl_fdir_fltr_conf *conf; enum virtchnl_status_code v_ret; + struct ice_vsi *vf_vsi; struct device *dev; struct ice_pf *pf; int is_tun = 0; @@ -1798,6 +1803,17 @@ int ice_vc_add_fdir_fltr(struct ice_vf *vf, u8 *msg) pf = vf->pf; dev = ice_pf_to_dev(pf); + vf_vsi = ice_get_vf_vsi(vf); + +#define ICE_VF_MAX_FDIR_FILTERS 128 + if (!ice_fdir_num_avail_fltr(&pf->hw, vf_vsi) || + vf->fdir.fdir_fltr_cnt_total >= ICE_VF_MAX_FDIR_FILTERS) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + dev_err(dev, "Max number of FDIR filters for VF %d is reached\n", + vf->vf_id); + goto err_exit; + } + ret = ice_vc_fdir_param_check(vf, fltr->vsi_id); if (ret) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.h index c5bcc8d7481c..ac6dcab454b4 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.h +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.h @@ -29,6 +29,7 @@ struct ice_vf_fdir_ctx { struct ice_vf_fdir { u16 fdir_fltr_cnt[ICE_FLTR_PTYPE_MAX][ICE_FD_HW_SEG_MAX]; int 
prof_entry_cnt[ICE_FLTR_PTYPE_MAX][ICE_FD_HW_SEG_MAX]; + u16 fdir_fltr_cnt_total; struct ice_fd_hw_prof **fdir_prof; struct idr fdir_rule_idr; diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index c84ce54a84a0..c11bb0f0b8c4 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -4198,8 +4198,6 @@ static int mtk_free_dev(struct mtk_eth *eth) metadata_dst_free(eth->dsa_meta[i]); } - free_netdev(eth->dummy_dev); - return 0; } @@ -5048,6 +5046,7 @@ static void mtk_remove(struct platform_device *pdev) netif_napi_del(ð->tx_napi); netif_napi_del(ð->rx_napi); mtk_cleanup(eth); + free_netdev(eth->dummy_dev); mtk_mdio_cleanup(eth); } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c index 4b713832fdd5..f5c0a4214c4e 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c @@ -391,7 +391,8 @@ mlxsw_sp_acl_atcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp, if (err) return err; - lkey_id = aregion->ops->lkey_id_get(aregion, aentry->enc_key, erp_id); + lkey_id = aregion->ops->lkey_id_get(aregion, aentry->ht_key.enc_key, + erp_id); if (IS_ERR(lkey_id)) return PTR_ERR(lkey_id); aentry->lkey_id = lkey_id; @@ -399,7 +400,7 @@ mlxsw_sp_acl_atcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp, kvdl_index = mlxsw_afa_block_first_kvdl_index(rulei->act_block); mlxsw_reg_ptce3_pack(ptce3_pl, true, MLXSW_REG_PTCE3_OP_WRITE_WRITE, priority, region->tcam_region_info, - aentry->enc_key, erp_id, + aentry->ht_key.enc_key, erp_id, aentry->delta_info.start, aentry->delta_info.mask, aentry->delta_info.value, @@ -428,7 +429,7 @@ mlxsw_sp_acl_atcam_region_entry_remove(struct mlxsw_sp *mlxsw_sp, mlxsw_reg_ptce3_pack(ptce3_pl, false, MLXSW_REG_PTCE3_OP_WRITE_WRITE, 0, region->tcam_region_info, - aentry->enc_key, erp_id, + aentry->ht_key.enc_key, erp_id, aentry->delta_info.start, aentry->delta_info.mask, aentry->delta_info.value, @@ -457,7 +458,7 @@ mlxsw_sp_acl_atcam_region_entry_action_replace(struct mlxsw_sp *mlxsw_sp, kvdl_index = mlxsw_afa_block_first_kvdl_index(rulei->act_block); mlxsw_reg_ptce3_pack(ptce3_pl, true, MLXSW_REG_PTCE3_OP_WRITE_UPDATE, priority, region->tcam_region_info, - aentry->enc_key, erp_id, + aentry->ht_key.enc_key, erp_id, aentry->delta_info.start, aentry->delta_info.mask, aentry->delta_info.value, @@ -480,15 +481,13 @@ __mlxsw_sp_acl_atcam_entry_add(struct mlxsw_sp *mlxsw_sp, int err; mlxsw_afk_encode(afk, region->key_info, &rulei->values, - aentry->ht_key.full_enc_key, mask); + aentry->ht_key.enc_key, mask); erp_mask = mlxsw_sp_acl_erp_mask_get(aregion, mask, false); if (IS_ERR(erp_mask)) return PTR_ERR(erp_mask); aentry->erp_mask = erp_mask; aentry->ht_key.erp_id = mlxsw_sp_acl_erp_mask_erp_id(erp_mask); - memcpy(aentry->enc_key, aentry->ht_key.full_enc_key, - sizeof(aentry->enc_key)); /* Compute all needed delta information and clear the delta bits * from the encrypted key. 
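/* Standalone sketch of the bounded per-VF filter accounting added in the
 * ice_virtchnl_fdir hunks above: one running total is checked against a hard
 * cap before an add is accepted, incremented on a successful add, decremented
 * on delete, and cleared wholesale on reset. The cap value mirrors
 * ICE_VF_MAX_FDIR_FILTERS from the hunk; the struct and return codes here are
 * simplified stand-ins, not the driver's real types.
 */
#include <stdbool.h>
#include <stdio.h>

#define VF_MAX_FDIR_FILTERS 128		/* mirrors ICE_VF_MAX_FDIR_FILTERS */

struct vf_fdir {
	unsigned int fltr_cnt_total;
};

static bool vf_fdir_can_add(const struct vf_fdir *fdir)
{
	return fdir->fltr_cnt_total < VF_MAX_FDIR_FILTERS;
}

static int vf_fdir_add(struct vf_fdir *fdir)
{
	if (!vf_fdir_can_add(fdir))
		return -1;		/* reject, as the driver reports ERR_PARAM */
	fdir->fltr_cnt_total++;
	return 0;
}

static void vf_fdir_del(struct vf_fdir *fdir)
{
	if (fdir->fltr_cnt_total)
		fdir->fltr_cnt_total--;
}

static void vf_fdir_reset(struct vf_fdir *fdir)
{
	fdir->fltr_cnt_total = 0;	/* mirrors ice_vc_fdir_reset_cnt_all() */
}

int main(void)
{
	struct vf_fdir fdir = { 0 };
	int accepted = 0;

	for (int i = 0; i < 200; i++)
		if (vf_fdir_add(&fdir) == 0)
			accepted++;

	printf("accepted %d of 200 add requests\n", accepted);	/* 128 */
	vf_fdir_del(&fdir);
	vf_fdir_reset(&fdir);
	return 0;
}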
@@ -497,9 +496,8 @@ __mlxsw_sp_acl_atcam_entry_add(struct mlxsw_sp *mlxsw_sp, aentry->delta_info.start = mlxsw_sp_acl_erp_delta_start(delta); aentry->delta_info.mask = mlxsw_sp_acl_erp_delta_mask(delta); aentry->delta_info.value = - mlxsw_sp_acl_erp_delta_value(delta, - aentry->ht_key.full_enc_key); - mlxsw_sp_acl_erp_delta_clear(delta, aentry->enc_key); + mlxsw_sp_acl_erp_delta_value(delta, aentry->ht_key.enc_key); + mlxsw_sp_acl_erp_delta_clear(delta, aentry->ht_key.enc_key); /* Add rule to the list of A-TCAM rules, assuming this * rule is intended to A-TCAM. In case this rule does diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c index 95f63fcf4ba1..a54eedb69a3f 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c @@ -249,7 +249,7 @@ __mlxsw_sp_acl_bf_key_encode(struct mlxsw_sp_acl_atcam_region *aregion, memcpy(chunk + pad_bytes, &erp_region_id, sizeof(erp_region_id)); memcpy(chunk + key_offset, - &aentry->enc_key[chunk_key_offsets[chunk_index]], + &aentry->ht_key.enc_key[chunk_key_offsets[chunk_index]], chunk_key_len); chunk += chunk_len; } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c index d231f4d2888b..9eee229303cc 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c @@ -1217,18 +1217,6 @@ static bool mlxsw_sp_acl_erp_delta_check(void *priv, const void *parent_obj, return err ? false : true; } -static int mlxsw_sp_acl_erp_hints_obj_cmp(const void *obj1, const void *obj2) -{ - const struct mlxsw_sp_acl_erp_key *key1 = obj1; - const struct mlxsw_sp_acl_erp_key *key2 = obj2; - - /* For hints purposes, two objects are considered equal - * in case the masks are the same. Does not matter what - * the "ctcam" value is. - */ - return memcmp(key1->mask, key2->mask, sizeof(key1->mask)); -} - static void *mlxsw_sp_acl_erp_delta_create(void *priv, void *parent_obj, void *obj) { @@ -1308,7 +1296,6 @@ static void mlxsw_sp_acl_erp_root_destroy(void *priv, void *root_priv) static const struct objagg_ops mlxsw_sp_acl_erp_objagg_ops = { .obj_size = sizeof(struct mlxsw_sp_acl_erp_key), .delta_check = mlxsw_sp_acl_erp_delta_check, - .hints_obj_cmp = mlxsw_sp_acl_erp_hints_obj_cmp, .delta_create = mlxsw_sp_acl_erp_delta_create, .delta_destroy = mlxsw_sp_acl_erp_delta_destroy, .root_create = mlxsw_sp_acl_erp_root_create, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h index 79a1d8606512..010204f73ea4 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h @@ -167,9 +167,9 @@ struct mlxsw_sp_acl_atcam_region { }; struct mlxsw_sp_acl_atcam_entry_ht_key { - char full_enc_key[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN]; /* Encoded - * key. - */ + char enc_key[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN]; /* Encoded key, minus + * delta bits. + */ u8 erp_id; }; @@ -181,9 +181,6 @@ struct mlxsw_sp_acl_atcam_entry { struct rhash_head ht_node; struct list_head list; /* Member in entries_list */ struct mlxsw_sp_acl_atcam_entry_ht_key ht_key; - char enc_key[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN]; /* Encoded key, - * minus delta bits. 
- */ struct { u16 start; u8 mask; diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c index 608ad31a9702..ad7ae7ba2b8f 100644 --- a/drivers/net/ethernet/microsoft/mana/mana_en.c +++ b/drivers/net/ethernet/microsoft/mana/mana_en.c @@ -2950,3 +2950,22 @@ void mana_remove(struct gdma_dev *gd, bool suspending) gd->gdma_context = NULL; kfree(ac); } + +struct net_device *mana_get_primary_netdev_rcu(struct mana_context *ac, u32 port_index) +{ + struct net_device *ndev; + + RCU_LOCKDEP_WARN(!rcu_read_lock_held(), + "Taking primary netdev without holding the RCU read lock"); + if (port_index >= ac->num_ports) + return NULL; + + /* When mana is used in netvsc, the upper netdevice should be returned. */ + if (ac->ports[port_index]->flags & IFF_SLAVE) + ndev = netdev_master_upper_dev_get_rcu(ac->ports[port_index]); + else + ndev = ac->ports[port_index]; + + return ndev; +} +EXPORT_SYMBOL_NS(mana_get_primary_netdev_rcu, NET_MANA); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c index b25774d69195..8e2049ed6015 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c @@ -982,7 +982,7 @@ static void dwmac4_set_mac_loopback(void __iomem *ioaddr, bool enable) } static void dwmac4_update_vlan_hash(struct mac_device_info *hw, u32 hash, - __le16 perfect_match, bool is_double) + u16 perfect_match, bool is_double) { void __iomem *ioaddr = hw->pcsr; u32 value; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c index f8e7775bb633..9a705a5a3a1a 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c @@ -615,7 +615,7 @@ static int dwxgmac2_rss_configure(struct mac_device_info *hw, } static void dwxgmac2_update_vlan_hash(struct mac_device_info *hw, u32 hash, - __le16 perfect_match, bool is_double) + u16 perfect_match, bool is_double) { void __iomem *ioaddr = hw->pcsr; diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h index 90384db228b5..a318c84ddb8a 100644 --- a/drivers/net/ethernet/stmicro/stmmac/hwif.h +++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h @@ -394,7 +394,7 @@ struct stmmac_ops { struct stmmac_rss *cfg, u32 num_rxq); /* VLAN */ void (*update_vlan_hash)(struct mac_device_info *hw, u32 hash, - __le16 perfect_match, bool is_double); + u16 perfect_match, bool is_double); void (*enable_vlan)(struct mac_device_info *hw, u32 type); void (*rx_hw_vlan)(struct mac_device_info *hw, struct dma_desc *rx_desc, struct sk_buff *skb); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index c58782c41417..33e2bd5a351c 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -6640,7 +6640,7 @@ static u32 stmmac_vid_crc32_le(__le16 vid_le) static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double) { u32 crc, hash = 0; - __le16 pmatch = 0; + u16 pmatch = 0; int count = 0; u16 vid = 0; @@ -6655,7 +6655,7 @@ static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double) if (count > 2) /* VID = 0 always passes filter */ return -EOPNOTSUPP; - pmatch = cpu_to_le16(vid); + pmatch = vid; hash = 0; } diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c index d7070dd4fe73..aa66c923790f 100644 --- 
a/drivers/net/netconsole.c +++ b/drivers/net/netconsole.c @@ -974,6 +974,7 @@ static int netconsole_netdev_event(struct notifier_block *this, /* rtnl_lock already held * we might sleep in __netpoll_cleanup() */ + nt->enabled = false; spin_unlock_irqrestore(&target_list_lock, flags); __netpoll_cleanup(&nt->np); @@ -981,7 +982,6 @@ static int netconsole_netdev_event(struct notifier_block *this, spin_lock_irqsave(&target_list_lock, flags); netdev_put(nt->np.dev, &nt->np.dev_tracker); nt->np.dev = NULL; - nt->enabled = false; stopped = true; netconsole_target_put(nt); goto restart; diff --git a/drivers/net/pse-pd/pse_core.c b/drivers/net/pse-pd/pse_core.c index 795ab264eaf2..513cd7f85933 100644 --- a/drivers/net/pse-pd/pse_core.c +++ b/drivers/net/pse-pd/pse_core.c @@ -719,13 +719,13 @@ int pse_ethtool_set_config(struct pse_control *psec, { int err = 0; - if (pse_has_c33(psec)) { + if (pse_has_c33(psec) && config->c33_admin_control) { err = pse_ethtool_c33_set_config(psec, config); if (err) return err; } - if (pse_has_podl(psec)) + if (pse_has_podl(psec) && config->podl_admin_control) err = pse_ethtool_podl_set_config(psec, config); return err; diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index ea10db9a09fa..5161e7efda2c 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -2313,7 +2313,7 @@ static int virtnet_receive(struct receive_queue *rq, int budget, return packets; } -static void virtnet_poll_cleantx(struct receive_queue *rq) +static void virtnet_poll_cleantx(struct receive_queue *rq, int budget) { struct virtnet_info *vi = rq->vq->vdev->priv; unsigned int index = vq2rxq(rq->vq); @@ -2331,7 +2331,7 @@ static void virtnet_poll_cleantx(struct receive_queue *rq) do { virtqueue_disable_cb(sq->vq); - free_old_xmit(sq, true); + free_old_xmit(sq, !!budget); } while (unlikely(!virtqueue_enable_cb_delayed(sq->vq))); if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) { @@ -2375,7 +2375,7 @@ static int virtnet_poll(struct napi_struct *napi, int budget) unsigned int xdp_xmit = 0; bool napi_complete; - virtnet_poll_cleantx(rq); + virtnet_poll_cleantx(rq, budget); received = virtnet_receive(rq, budget, &xdp_xmit); rq->packets_in_napi += received; @@ -2489,7 +2489,7 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget) txq = netdev_get_tx_queue(vi->dev, index); __netif_tx_lock(txq, raw_smp_processor_id()); virtqueue_disable_cb(sq->vq); - free_old_xmit(sq, true); + free_old_xmit(sq, !!budget); if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) { if (netif_tx_queue_stopped(txq)) { diff --git a/drivers/net/wireless/ath/ath11k/ce.h b/drivers/net/wireless/ath/ath11k/ce.h index 69946fc70077..bcde2fcf02cf 100644 --- a/drivers/net/wireless/ath/ath11k/ce.h +++ b/drivers/net/wireless/ath/ath11k/ce.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: BSD-3-Clause-Clear */ /* * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. - * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved. */ #ifndef ATH11K_CE_H @@ -146,7 +146,7 @@ struct ath11k_ce_ring { /* Host address space */ void *base_addr_owner_space_unaligned; /* CE address space */ - u32 base_addr_ce_space_unaligned; + dma_addr_t base_addr_ce_space_unaligned; /* Actual start of descriptors. * Aligned to descriptor-size boundary. 
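/* Standalone sketch of the budget handling pattern in the virtio_net hunks
 * above: a NAPI-style poll callback can be invoked with budget == 0 (for
 * example from a netpoll context), and the tx-completion helper must then
 * avoid the NAPI-only free path; the !! simply collapses the integer budget
 * into a boolean "called from a real NAPI poll" flag. The function names
 * below echo the diff but everything is a simplified stand-in, not the
 * driver's real data structures.
 */
#include <stdbool.h>
#include <stdio.h>

static void free_old_xmit(int queue, bool in_napi)
{
	/* in_napi == false must take the "any context" free path */
	printf("queue %d: freeing completed tx buffers via %s\n",
	       queue, in_napi ? "napi_consume_skb" : "dev_consume_skb_any");
}

static int poll_queue(int queue, int budget)
{
	/* always reap tx completions, but tell the helper how we were called */
	free_old_xmit(queue, !!budget);

	if (!budget)
		return 0;	/* budget 0: no rx processing is allowed */

	/* ... rx processing bounded by budget would go here ... */
	return budget;
}

int main(void)
{
	poll_queue(0, 64);	/* normal NAPI poll */
	poll_queue(0, 0);	/* netpoll-style call */
	return 0;
}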
@@ -156,7 +156,7 @@ struct ath11k_ce_ring { void *base_addr_owner_space; /* CE address space */ - u32 base_addr_ce_space; + dma_addr_t base_addr_ce_space; /* HAL ring id */ u32 hal_ring_id; diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c index b82e8fb28541..47554c361963 100644 --- a/drivers/net/wireless/ath/ath11k/core.c +++ b/drivers/net/wireless/ath/ath11k/core.c @@ -1009,6 +1009,16 @@ int ath11k_core_resume(struct ath11k_base *ab) return -ETIMEDOUT; } + if (ab->hw_params.current_cc_support && + ar->alpha2[0] != 0 && ar->alpha2[1] != 0) { + ret = ath11k_reg_set_cc(ar); + if (ret) { + ath11k_warn(ab, "failed to set country code during resume: %d\n", + ret); + return ret; + } + } + ret = ath11k_dp_rx_pktlog_start(ab); if (ret) ath11k_warn(ab, "failed to start rx pktlog during resume: %d\n", @@ -1978,23 +1988,20 @@ static void ath11k_update_11d(struct work_struct *work) struct ath11k_base *ab = container_of(work, struct ath11k_base, update_11d_work); struct ath11k *ar; struct ath11k_pdev *pdev; - struct wmi_set_current_country_params set_current_param = {}; int ret, i; - spin_lock_bh(&ab->base_lock); - memcpy(&set_current_param.alpha2, &ab->new_alpha2, 2); - spin_unlock_bh(&ab->base_lock); - - ath11k_dbg(ab, ATH11K_DBG_WMI, "update 11d new cc %c%c\n", - set_current_param.alpha2[0], - set_current_param.alpha2[1]); - for (i = 0; i < ab->num_radios; i++) { pdev = &ab->pdevs[i]; ar = pdev->ar; - memcpy(&ar->alpha2, &set_current_param.alpha2, 2); - ret = ath11k_wmi_send_set_current_country_cmd(ar, &set_current_param); + spin_lock_bh(&ab->base_lock); + memcpy(&ar->alpha2, &ab->new_alpha2, 2); + spin_unlock_bh(&ab->base_lock); + + ath11k_dbg(ab, ATH11K_DBG_WMI, "update 11d new cc %c%c for pdev %d\n", + ar->alpha2[0], ar->alpha2[1], i); + + ret = ath11k_reg_set_cc(ar); if (ret) ath11k_warn(ar->ab, "pdev id %d failed set current country code: %d\n", diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.c b/drivers/net/wireless/ath/ath11k/dp_rx.c index afd481f5858f..aabde24d8763 100644 --- a/drivers/net/wireless/ath/ath11k/dp_rx.c +++ b/drivers/net/wireless/ath/ath11k/dp_rx.c @@ -1877,8 +1877,7 @@ static void ath11k_dp_rx_h_csum_offload(struct ath11k *ar, struct sk_buff *msdu) CHECKSUM_NONE : CHECKSUM_UNNECESSARY; } -static int ath11k_dp_rx_crypto_mic_len(struct ath11k *ar, - enum hal_encrypt_type enctype) +int ath11k_dp_rx_crypto_mic_len(struct ath11k *ar, enum hal_encrypt_type enctype) { switch (enctype) { case HAL_ENCRYPT_TYPE_OPEN: diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.h b/drivers/net/wireless/ath/ath11k/dp_rx.h index 623da3bf9dc8..c322e30caa96 100644 --- a/drivers/net/wireless/ath/ath11k/dp_rx.h +++ b/drivers/net/wireless/ath/ath11k/dp_rx.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: BSD-3-Clause-Clear */ /* * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. + * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
*/ #ifndef ATH11K_DP_RX_H #define ATH11K_DP_RX_H @@ -95,4 +96,6 @@ int ath11k_peer_rx_frag_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id int ath11k_dp_rx_pktlog_start(struct ath11k_base *ab); int ath11k_dp_rx_pktlog_stop(struct ath11k_base *ab, bool stop_timer); +int ath11k_dp_rx_crypto_mic_len(struct ath11k *ar, enum hal_encrypt_type enctype); + #endif /* ATH11K_DP_RX_H */ diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c index 9b96dbb21d83..eaa53bc39ab2 100644 --- a/drivers/net/wireless/ath/ath11k/mac.c +++ b/drivers/net/wireless/ath/ath11k/mac.c @@ -4229,6 +4229,7 @@ static int ath11k_install_key(struct ath11k_vif *arvif, switch (key->cipher) { case WLAN_CIPHER_SUITE_CCMP: + case WLAN_CIPHER_SUITE_CCMP_256: arg.key_cipher = WMI_CIPHER_AES_CCM; /* TODO: Re-check if flag is valid */ key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV_MGMT; @@ -4238,12 +4239,10 @@ static int ath11k_install_key(struct ath11k_vif *arvif, arg.key_txmic_len = 8; arg.key_rxmic_len = 8; break; - case WLAN_CIPHER_SUITE_CCMP_256: - arg.key_cipher = WMI_CIPHER_AES_CCM; - break; case WLAN_CIPHER_SUITE_GCMP: case WLAN_CIPHER_SUITE_GCMP_256: arg.key_cipher = WMI_CIPHER_AES_GCM; + key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV_MGMT; break; default: ath11k_warn(ar->ab, "cipher %d is not supported\n", key->cipher); @@ -5903,7 +5902,10 @@ static int ath11k_mac_mgmt_tx_wmi(struct ath11k *ar, struct ath11k_vif *arvif, { struct ath11k_base *ab = ar->ab; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + struct ath11k_skb_cb *skb_cb = ATH11K_SKB_CB(skb); struct ieee80211_tx_info *info; + enum hal_encrypt_type enctype; + unsigned int mic_len; dma_addr_t paddr; int buf_id; int ret; @@ -5927,7 +5929,12 @@ static int ath11k_mac_mgmt_tx_wmi(struct ath11k *ar, struct ath11k_vif *arvif, ieee80211_is_deauth(hdr->frame_control) || ieee80211_is_disassoc(hdr->frame_control)) && ieee80211_has_protected(hdr->frame_control)) { - skb_put(skb, IEEE80211_CCMP_MIC_LEN); + if (!(skb_cb->flags & ATH11K_SKB_CIPHER_SET)) + ath11k_warn(ab, "WMI management tx frame without ATH11K_SKB_CIPHER_SET"); + + enctype = ath11k_dp_tx_get_encrypt_type(skb_cb->cipher); + mic_len = ath11k_dp_rx_crypto_mic_len(ar, enctype); + skb_put(skb, mic_len); } } @@ -8851,12 +8858,8 @@ ath11k_mac_op_reconfig_complete(struct ieee80211_hw *hw, ieee80211_wake_queues(ar->hw); if (ar->ab->hw_params.current_cc_support && - ar->alpha2[0] != 0 && ar->alpha2[1] != 0) { - struct wmi_set_current_country_params set_current_param = {}; - - memcpy(&set_current_param.alpha2, ar->alpha2, 2); - ath11k_wmi_send_set_current_country_cmd(ar, &set_current_param); - } + ar->alpha2[0] != 0 && ar->alpha2[1] != 0) + ath11k_reg_set_cc(ar); if (ab->is_reset) { recovery_count = atomic_inc_return(&ab->recovery_count); @@ -10325,11 +10328,8 @@ static int __ath11k_mac_register(struct ath11k *ar) } if (ab->hw_params.current_cc_support && ab->new_alpha2[0]) { - struct wmi_set_current_country_params set_current_param = {}; - - memcpy(&set_current_param.alpha2, ab->new_alpha2, 2); memcpy(&ar->alpha2, ab->new_alpha2, 2); - ret = ath11k_wmi_send_set_current_country_cmd(ar, &set_current_param); + ret = ath11k_reg_set_cc(ar); if (ret) ath11k_warn(ar->ab, "failed set cc code for mac register: %d\n", ret); diff --git a/drivers/net/wireless/ath/ath11k/reg.c b/drivers/net/wireless/ath/ath11k/reg.c index 737fcd450d4b..39232b8f52ba 100644 --- a/drivers/net/wireless/ath/ath11k/reg.c +++ b/drivers/net/wireless/ath/ath11k/reg.c @@ -49,7 +49,6 @@ 
ath11k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request) { struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); struct wmi_init_country_params init_country_param; - struct wmi_set_current_country_params set_current_param = {}; struct ath11k *ar = hw->priv; int ret; @@ -83,9 +82,8 @@ ath11k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request) * reg info */ if (ar->ab->hw_params.current_cc_support) { - memcpy(&set_current_param.alpha2, request->alpha2, 2); - memcpy(&ar->alpha2, &set_current_param.alpha2, 2); - ret = ath11k_wmi_send_set_current_country_cmd(ar, &set_current_param); + memcpy(&ar->alpha2, request->alpha2, 2); + ret = ath11k_reg_set_cc(ar); if (ret) ath11k_warn(ar->ab, "failed set current country code: %d\n", ret); @@ -1017,3 +1015,11 @@ void ath11k_reg_free(struct ath11k_base *ab) kfree(ab->new_regd[i]); } } + +int ath11k_reg_set_cc(struct ath11k *ar) +{ + struct wmi_set_current_country_params set_current_param = {}; + + memcpy(&set_current_param.alpha2, ar->alpha2, 2); + return ath11k_wmi_send_set_current_country_cmd(ar, &set_current_param); +} diff --git a/drivers/net/wireless/ath/ath11k/reg.h b/drivers/net/wireless/ath/ath11k/reg.h index 64edb794260a..263ea9061948 100644 --- a/drivers/net/wireless/ath/ath11k/reg.h +++ b/drivers/net/wireless/ath/ath11k/reg.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: BSD-3-Clause-Clear */ /* * Copyright (c) 2019 The Linux Foundation. All rights reserved. - * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. */ #ifndef ATH11K_REG_H @@ -45,5 +45,5 @@ ath11k_reg_ap_pwr_convert(enum ieee80211_ap_reg_power power_type); int ath11k_reg_handle_chan_list(struct ath11k_base *ab, struct cur_regulatory_info *reg_info, enum ieee80211_ap_reg_power power_type); - +int ath11k_reg_set_cc(struct ath11k *ar); #endif diff --git a/drivers/net/wireless/ath/ath12k/acpi.c b/drivers/net/wireless/ath/ath12k/acpi.c index 443ba12e01f3..0555d35aab47 100644 --- a/drivers/net/wireless/ath/ath12k/acpi.c +++ b/drivers/net/wireless/ath/ath12k/acpi.c @@ -391,4 +391,6 @@ void ath12k_acpi_stop(struct ath12k_base *ab) acpi_remove_notify_handler(ACPI_HANDLE(ab->dev), ACPI_DEVICE_NOTIFY, ath12k_acpi_dsm_notify); + + memset(&ab->acpi, 0, sizeof(ab->acpi)); } diff --git a/drivers/net/wireless/ath/ath12k/ce.h b/drivers/net/wireless/ath/ath12k/ce.h index 79af3b6159f1..857bc5f9e946 100644 --- a/drivers/net/wireless/ath/ath12k/ce.h +++ b/drivers/net/wireless/ath/ath12k/ce.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: BSD-3-Clause-Clear */ /* * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved. */ #ifndef ATH12K_CE_H @@ -119,7 +119,7 @@ struct ath12k_ce_ring { /* Host address space */ void *base_addr_owner_space_unaligned; /* CE address space */ - u32 base_addr_ce_space_unaligned; + dma_addr_t base_addr_ce_space_unaligned; /* Actual start of descriptors. * Aligned to descriptor-size boundary. 
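/* Standalone sketch of the "reserve MIC room per cipher" idea behind the
 * ath11k management-tx changes above: instead of always appending the 8-byte
 * CCMP MIC to protected deauth/disassoc frames, the tail length is looked up
 * from the negotiated cipher. The sizes below follow the usual 802.11 MIC
 * lengths (CCMP-128/TKIP: 8 bytes, CCMP-256/GCMP: 16 bytes); the enum and
 * helper are simplified stand-ins for the driver's hal_encrypt_type handling.
 */
#include <stdio.h>

enum cipher {
	CIPHER_NONE,
	CIPHER_TKIP,
	CIPHER_CCMP_128,
	CIPHER_CCMP_256,
	CIPHER_GCMP_128,
	CIPHER_GCMP_256,
};

static unsigned int crypto_mic_len(enum cipher c)
{
	switch (c) {
	case CIPHER_CCMP_128:
	case CIPHER_TKIP:
		return 8;
	case CIPHER_CCMP_256:
	case CIPHER_GCMP_128:
	case CIPHER_GCMP_256:
		return 16;
	case CIPHER_NONE:
	default:
		return 0;
	}
}

int main(void)
{
	unsigned int frame_len = 64;	/* pretend management frame body */

	/* protected deauth/disassoc: grow the frame by the cipher's MIC size */
	frame_len += crypto_mic_len(CIPHER_GCMP_256);
	printf("tx length with MIC reserved: %u\n", frame_len);
	return 0;
}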
@@ -129,7 +129,7 @@ struct ath12k_ce_ring { void *base_addr_owner_space; /* CE address space */ - u32 base_addr_ce_space; + dma_addr_t base_addr_ce_space; /* HAL ring id */ u32 hal_ring_id; diff --git a/drivers/net/wireless/ath/ath12k/core.c b/drivers/net/wireless/ath/ath12k/core.c index 6663f4e1792d..52969a1bb5a5 100644 --- a/drivers/net/wireless/ath/ath12k/core.c +++ b/drivers/net/wireless/ath/ath12k/core.c @@ -50,19 +50,16 @@ int ath12k_core_suspend(struct ath12k_base *ab) if (!ab->hw_params->supports_suspend) return -EOPNOTSUPP; - rcu_read_lock(); for (i = 0; i < ab->num_radios; i++) { - ar = ath12k_mac_get_ar_by_pdev_id(ab, i); + ar = ab->pdevs[i].ar; if (!ar) continue; ret = ath12k_mac_wait_tx_complete(ar); if (ret) { ath12k_warn(ab, "failed to wait tx complete: %d\n", ret); - rcu_read_unlock(); return ret; } } - rcu_read_unlock(); /* PM framework skips suspend_late/resume_early callbacks * if other devices report errors in their suspend callbacks. @@ -86,6 +83,8 @@ int ath12k_core_suspend_late(struct ath12k_base *ab) if (!ab->hw_params->supports_suspend) return -EOPNOTSUPP; + ath12k_acpi_stop(ab); + ath12k_hif_irq_disable(ab); ath12k_hif_ce_irq_disable(ab); diff --git a/drivers/net/wireless/ath/ath12k/dp.c b/drivers/net/wireless/ath/ath12k/dp.c index 7843c76a82c1..90476f38e8d4 100644 --- a/drivers/net/wireless/ath/ath12k/dp.c +++ b/drivers/net/wireless/ath/ath12k/dp.c @@ -132,7 +132,9 @@ static int ath12k_dp_srng_find_ring_in_mask(int ring_num, const u8 *grp_mask) static int ath12k_dp_srng_calculate_msi_group(struct ath12k_base *ab, enum hal_ring_type type, int ring_num) { + const struct ath12k_hal_tcl_to_wbm_rbm_map *map; const u8 *grp_mask; + int i; switch (type) { case HAL_WBM2SW_RELEASE: @@ -140,6 +142,14 @@ static int ath12k_dp_srng_calculate_msi_group(struct ath12k_base *ab, grp_mask = &ab->hw_params->ring_mask->rx_wbm_rel[0]; ring_num = 0; } else { + map = ab->hw_params->hal_ops->tcl_to_wbm_rbm_map; + for (i = 0; i < ab->hw_params->max_tx_ring; i++) { + if (ring_num == map[i].wbm_ring_num) { + ring_num = i; + break; + } + } + grp_mask = &ab->hw_params->ring_mask->tx[0]; } break; @@ -881,11 +891,9 @@ int ath12k_dp_service_srng(struct ath12k_base *ab, enum dp_monitor_mode monitor_mode; u8 ring_mask; - while (i < ab->hw_params->max_tx_ring) { - if (ab->hw_params->ring_mask->tx[grp_id] & - BIT(ab->hw_params->hal_ops->tcl_to_wbm_rbm_map[i].wbm_ring_num)) - ath12k_dp_tx_completion_handler(ab, i); - i++; + if (ab->hw_params->ring_mask->tx[grp_id]) { + i = fls(ab->hw_params->ring_mask->tx[grp_id]) - 1; + ath12k_dp_tx_completion_handler(ab, i); } if (ab->hw_params->ring_mask->rx_err[grp_id]) { diff --git a/drivers/net/wireless/ath/ath12k/dp.h b/drivers/net/wireless/ath/ath12k/dp.h index 5cf0d21ef184..4dfbff326030 100644 --- a/drivers/net/wireless/ath/ath12k/dp.h +++ b/drivers/net/wireless/ath/ath12k/dp.h @@ -334,6 +334,7 @@ struct ath12k_dp { struct dp_srng reo_except_ring; struct dp_srng reo_cmd_ring; struct dp_srng reo_status_ring; + enum ath12k_peer_metadata_version peer_metadata_ver; struct dp_srng reo_dst_ring[DP_REO_DST_RING_MAX]; struct dp_tx_ring tx_ring[DP_TCL_NUM_RING_MAX]; struct hal_wbm_idle_scatter_list scatter_list[DP_IDLE_SCATTER_BUFS_MAX]; diff --git a/drivers/net/wireless/ath/ath12k/dp_rx.c b/drivers/net/wireless/ath/ath12k/dp_rx.c index 75df622f25d8..121f27284be5 100644 --- a/drivers/net/wireless/ath/ath12k/dp_rx.c +++ b/drivers/net/wireless/ath/ath12k/dp_rx.c @@ -2383,8 +2383,10 @@ void ath12k_dp_rx_h_ppdu(struct ath12k *ar, struct hal_rx_desc *rx_desc, channel_num 
= meta_data; center_freq = meta_data >> 16; - if (center_freq >= 5935 && center_freq <= 7105) { + if (center_freq >= ATH12K_MIN_6G_FREQ && + center_freq <= ATH12K_MAX_6G_FREQ) { rx_status->band = NL80211_BAND_6GHZ; + rx_status->freq = center_freq; } else if (channel_num >= 1 && channel_num <= 14) { rx_status->band = NL80211_BAND_2GHZ; } else if (channel_num >= 36 && channel_num <= 173) { @@ -2402,8 +2404,9 @@ void ath12k_dp_rx_h_ppdu(struct ath12k *ar, struct hal_rx_desc *rx_desc, rx_desc, sizeof(*rx_desc)); } - rx_status->freq = ieee80211_channel_to_frequency(channel_num, - rx_status->band); + if (rx_status->band != NL80211_BAND_6GHZ) + rx_status->freq = ieee80211_channel_to_frequency(channel_num, + rx_status->band); ath12k_dp_rx_h_rate(ar, rx_desc, rx_status); } @@ -2604,6 +2607,29 @@ static void ath12k_dp_rx_process_received_packets(struct ath12k_base *ab, rcu_read_unlock(); } +static u16 ath12k_dp_rx_get_peer_id(struct ath12k_base *ab, + enum ath12k_peer_metadata_version ver, + __le32 peer_metadata) +{ + switch (ver) { + default: + ath12k_warn(ab, "Unknown peer metadata version: %d", ver); + fallthrough; + case ATH12K_PEER_METADATA_V0: + return le32_get_bits(peer_metadata, + RX_MPDU_DESC_META_DATA_V0_PEER_ID); + case ATH12K_PEER_METADATA_V1: + return le32_get_bits(peer_metadata, + RX_MPDU_DESC_META_DATA_V1_PEER_ID); + case ATH12K_PEER_METADATA_V1A: + return le32_get_bits(peer_metadata, + RX_MPDU_DESC_META_DATA_V1A_PEER_ID); + case ATH12K_PEER_METADATA_V1B: + return le32_get_bits(peer_metadata, + RX_MPDU_DESC_META_DATA_V1B_PEER_ID); + } +} + int ath12k_dp_rx_process(struct ath12k_base *ab, int ring_id, struct napi_struct *napi, int budget) { @@ -2632,6 +2658,8 @@ int ath12k_dp_rx_process(struct ath12k_base *ab, int ring_id, ath12k_hal_srng_access_begin(ab, srng); while ((desc = ath12k_hal_srng_dst_get_next_entry(ab, srng))) { + struct rx_mpdu_desc *mpdu_info; + struct rx_msdu_desc *msdu_info; enum hal_reo_dest_ring_push_reason push_reason; u32 cookie; @@ -2678,16 +2706,19 @@ int ath12k_dp_rx_process(struct ath12k_base *ab, int ring_id, continue; } - rxcb->is_first_msdu = !!(le32_to_cpu(desc->rx_msdu_info.info0) & + msdu_info = &desc->rx_msdu_info; + mpdu_info = &desc->rx_mpdu_info; + + rxcb->is_first_msdu = !!(le32_to_cpu(msdu_info->info0) & RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU); - rxcb->is_last_msdu = !!(le32_to_cpu(desc->rx_msdu_info.info0) & + rxcb->is_last_msdu = !!(le32_to_cpu(msdu_info->info0) & RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU); - rxcb->is_continuation = !!(le32_to_cpu(desc->rx_msdu_info.info0) & + rxcb->is_continuation = !!(le32_to_cpu(msdu_info->info0) & RX_MSDU_DESC_INFO0_MSDU_CONTINUATION); rxcb->mac_id = mac_id; - rxcb->peer_id = le32_get_bits(desc->rx_mpdu_info.peer_meta_data, - RX_MPDU_DESC_META_DATA_PEER_ID); - rxcb->tid = le32_get_bits(desc->rx_mpdu_info.info0, + rxcb->peer_id = ath12k_dp_rx_get_peer_id(ab, dp->peer_metadata_ver, + mpdu_info->peer_meta_data); + rxcb->tid = le32_get_bits(mpdu_info->info0, RX_MPDU_DESC_INFO0_TID); __skb_queue_tail(&msdu_list, msdu); @@ -2991,7 +3022,7 @@ static int ath12k_dp_rx_h_defrag_reo_reinject(struct ath12k *ar, struct hal_srng *srng; dma_addr_t link_paddr, buf_paddr; u32 desc_bank, msdu_info, msdu_ext_info, mpdu_info; - u32 cookie, hal_rx_desc_sz, dest_ring_info0; + u32 cookie, hal_rx_desc_sz, dest_ring_info0, queue_addr_hi; int ret; struct ath12k_rx_desc_info *desc_info; u8 dst_ind; @@ -3027,7 +3058,7 @@ static int ath12k_dp_rx_h_defrag_reo_reinject(struct ath12k *ar, buf_paddr = dma_map_single(ab->dev, defrag_skb->data, 
defrag_skb->len + skb_tailroom(defrag_skb), - DMA_FROM_DEVICE); + DMA_TO_DEVICE); if (dma_mapping_error(ab->dev, buf_paddr)) return -ENOMEM; @@ -3083,13 +3114,11 @@ static int ath12k_dp_rx_h_defrag_reo_reinject(struct ath12k *ar, reo_ent_ring->rx_mpdu_info.peer_meta_data = reo_dest_ring->rx_mpdu_info.peer_meta_data; - /* Firmware expects physical address to be filled in queue_addr_lo in - * the MLO scenario and in case of non MLO peer meta data needs to be - * filled. - * TODO: Need to handle for MLO scenario. - */ - reo_ent_ring->queue_addr_lo = reo_dest_ring->rx_mpdu_info.peer_meta_data; - reo_ent_ring->info0 = le32_encode_bits(dst_ind, + reo_ent_ring->queue_addr_lo = cpu_to_le32(lower_32_bits(rx_tid->paddr)); + queue_addr_hi = upper_32_bits(rx_tid->paddr); + reo_ent_ring->info0 = le32_encode_bits(queue_addr_hi, + HAL_REO_ENTR_RING_INFO0_QUEUE_ADDR_HI) | + le32_encode_bits(dst_ind, HAL_REO_ENTR_RING_INFO0_DEST_IND); reo_ent_ring->info1 = le32_encode_bits(rx_tid->cur_sn, @@ -3113,7 +3142,7 @@ static int ath12k_dp_rx_h_defrag_reo_reinject(struct ath12k *ar, spin_unlock_bh(&dp->rx_desc_lock); err_unmap_dma: dma_unmap_single(ab->dev, buf_paddr, defrag_skb->len + skb_tailroom(defrag_skb), - DMA_FROM_DEVICE); + DMA_TO_DEVICE); return ret; } diff --git a/drivers/net/wireless/ath/ath12k/dp_tx.c b/drivers/net/wireless/ath/ath12k/dp_tx.c index 9b6d7d72f57c..a7c7a868c14c 100644 --- a/drivers/net/wireless/ath/ath12k/dp_tx.c +++ b/drivers/net/wireless/ath/ath12k/dp_tx.c @@ -352,15 +352,15 @@ static void ath12k_dp_tx_free_txbuf(struct ath12k_base *ab, u8 pdev_id = ath12k_hw_mac_id_to_pdev_id(ab->hw_params, mac_id); skb_cb = ATH12K_SKB_CB(msdu); + ar = ab->pdevs[pdev_id].ar; dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE); if (skb_cb->paddr_ext_desc) dma_unmap_single(ab->dev, skb_cb->paddr_ext_desc, sizeof(struct hal_tx_msdu_ext_desc), DMA_TO_DEVICE); - dev_kfree_skb_any(msdu); + ieee80211_free_txskb(ar->ah->hw, msdu); - ar = ab->pdevs[pdev_id].ar; if (atomic_dec_and_test(&ar->dp.num_tx_pending)) wake_up(&ar->dp.tx_empty_waitq); } @@ -448,6 +448,7 @@ static void ath12k_dp_tx_complete_msdu(struct ath12k *ar, struct hal_tx_status *ts) { struct ath12k_base *ab = ar->ab; + struct ath12k_hw *ah = ar->ah; struct ieee80211_tx_info *info; struct ath12k_skb_cb *skb_cb; @@ -466,12 +467,12 @@ static void ath12k_dp_tx_complete_msdu(struct ath12k *ar, rcu_read_lock(); if (!rcu_dereference(ab->pdevs_active[ar->pdev_idx])) { - dev_kfree_skb_any(msdu); + ieee80211_free_txskb(ah->hw, msdu); goto exit; } if (!skb_cb->vif) { - dev_kfree_skb_any(msdu); + ieee80211_free_txskb(ah->hw, msdu); goto exit; } @@ -481,18 +482,36 @@ static void ath12k_dp_tx_complete_msdu(struct ath12k *ar, /* skip tx rate update from ieee80211_status*/ info->status.rates[0].idx = -1; - if (ts->status == HAL_WBM_TQM_REL_REASON_FRAME_ACKED && - !(info->flags & IEEE80211_TX_CTL_NO_ACK)) { - info->flags |= IEEE80211_TX_STAT_ACK; - info->status.ack_signal = ATH12K_DEFAULT_NOISE_FLOOR + - ts->ack_rssi; - info->status.flags = IEEE80211_TX_STATUS_ACK_SIGNAL_VALID; + switch (ts->status) { + case HAL_WBM_TQM_REL_REASON_FRAME_ACKED: + if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) { + info->flags |= IEEE80211_TX_STAT_ACK; + info->status.ack_signal = ATH12K_DEFAULT_NOISE_FLOOR + + ts->ack_rssi; + info->status.flags = IEEE80211_TX_STATUS_ACK_SIGNAL_VALID; + } + break; + case HAL_WBM_TQM_REL_REASON_CMD_REMOVE_TX: + if (info->flags & IEEE80211_TX_CTL_NO_ACK) { + info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED; + break; + } + fallthrough; + 
case HAL_WBM_TQM_REL_REASON_CMD_REMOVE_MPDU: + case HAL_WBM_TQM_REL_REASON_DROP_THRESHOLD: + case HAL_WBM_TQM_REL_REASON_CMD_REMOVE_AGED_FRAMES: + /* The failure status is due to internal firmware tx failure + * hence drop the frame; do not update the status of frame to + * the upper layer + */ + ieee80211_free_txskb(ah->hw, msdu); + goto exit; + default: + ath12k_dbg(ab, ATH12K_DBG_DP_TX, "tx frame is not acked status %d\n", + ts->status); + break; } - if (ts->status == HAL_WBM_TQM_REL_REASON_CMD_REMOVE_TX && - (info->flags & IEEE80211_TX_CTL_NO_ACK)) - info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED; - /* NOTE: Tx rate status reporting. Tx completion status does not have * necessary information (for example nss) to build the tx rate. * Might end up reporting it out-of-band from HTT stats. diff --git a/drivers/net/wireless/ath/ath12k/hal_desc.h b/drivers/net/wireless/ath/ath12k/hal_desc.h index 63340256d3f6..072e36365808 100644 --- a/drivers/net/wireless/ath/ath12k/hal_desc.h +++ b/drivers/net/wireless/ath/ath12k/hal_desc.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: BSD-3-Clause-Clear */ /* * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved. */ #include "core.h" @@ -597,8 +597,30 @@ struct hal_tlv_64_hdr { #define RX_MPDU_DESC_INFO0_MPDU_QOS_CTRL_VALID BIT(27) #define RX_MPDU_DESC_INFO0_TID GENMASK(31, 28) -/* TODO revisit after meta data is concluded */ -#define RX_MPDU_DESC_META_DATA_PEER_ID GENMASK(15, 0) +/* Peer Metadata classification */ + +/* Version 0 */ +#define RX_MPDU_DESC_META_DATA_V0_PEER_ID GENMASK(15, 0) +#define RX_MPDU_DESC_META_DATA_V0_VDEV_ID GENMASK(23, 16) + +/* Version 1 */ +#define RX_MPDU_DESC_META_DATA_V1_PEER_ID GENMASK(13, 0) +#define RX_MPDU_DESC_META_DATA_V1_LOGICAL_LINK_ID GENMASK(15, 14) +#define RX_MPDU_DESC_META_DATA_V1_VDEV_ID GENMASK(23, 16) +#define RX_MPDU_DESC_META_DATA_V1_LMAC_ID GENMASK(25, 24) +#define RX_MPDU_DESC_META_DATA_V1_DEVICE_ID GENMASK(28, 26) + +/* Version 1A */ +#define RX_MPDU_DESC_META_DATA_V1A_PEER_ID GENMASK(13, 0) +#define RX_MPDU_DESC_META_DATA_V1A_VDEV_ID GENMASK(21, 14) +#define RX_MPDU_DESC_META_DATA_V1A_LOGICAL_LINK_ID GENMASK(25, 22) +#define RX_MPDU_DESC_META_DATA_V1A_DEVICE_ID GENMASK(28, 26) + +/* Version 1B */ +#define RX_MPDU_DESC_META_DATA_V1B_PEER_ID GENMASK(13, 0) +#define RX_MPDU_DESC_META_DATA_V1B_VDEV_ID GENMASK(21, 14) +#define RX_MPDU_DESC_META_DATA_V1B_HW_LINK_ID GENMASK(25, 22) +#define RX_MPDU_DESC_META_DATA_V1B_DEVICE_ID GENMASK(28, 26) struct rx_mpdu_desc { __le32 info0; /* %RX_MPDU_DESC_INFO */ @@ -2048,6 +2070,19 @@ struct hal_wbm_release_ring { * fw with fw_reason2. * @HAL_WBM_TQM_REL_REASON_CMD_REMOVE_RESEAON3: Remove command initiated by * fw with fw_reason3. + * @HAL_WBM_TQM_REL_REASON_CMD_DISABLE_QUEUE: Remove command initiated by + * fw with disable queue. + * @HAL_WBM_TQM_REL_REASON_CMD_TILL_NONMATCHING: Remove command initiated by + * fw to remove all mpdu until 1st non-match. 
+ * @HAL_WBM_TQM_REL_REASON_DROP_THRESHOLD: Dropped due to drop threshold + * criteria + * @HAL_WBM_TQM_REL_REASON_DROP_LINK_DESC_UNAVAIL: Dropped due to link desc + * not available + * @HAL_WBM_TQM_REL_REASON_DROP_OR_INVALID_MSDU: Dropped due drop bit set or + * null flow + * @HAL_WBM_TQM_REL_REASON_MULTICAST_DROP: Dropped due mcast drop set for VDEV + * @HAL_WBM_TQM_REL_REASON_VDEV_MISMATCH_DROP: Dropped due to being set with + * 'TCL_drop_reason' */ enum hal_wbm_tqm_rel_reason { HAL_WBM_TQM_REL_REASON_FRAME_ACKED, @@ -2058,6 +2093,13 @@ enum hal_wbm_tqm_rel_reason { HAL_WBM_TQM_REL_REASON_CMD_REMOVE_RESEAON1, HAL_WBM_TQM_REL_REASON_CMD_REMOVE_RESEAON2, HAL_WBM_TQM_REL_REASON_CMD_REMOVE_RESEAON3, + HAL_WBM_TQM_REL_REASON_CMD_DISABLE_QUEUE, + HAL_WBM_TQM_REL_REASON_CMD_TILL_NONMATCHING, + HAL_WBM_TQM_REL_REASON_DROP_THRESHOLD, + HAL_WBM_TQM_REL_REASON_DROP_LINK_DESC_UNAVAIL, + HAL_WBM_TQM_REL_REASON_DROP_OR_INVALID_MSDU, + HAL_WBM_TQM_REL_REASON_MULTICAST_DROP, + HAL_WBM_TQM_REL_REASON_VDEV_MISMATCH_DROP, }; struct hal_wbm_buffer_ring { diff --git a/drivers/net/wireless/ath/ath12k/hw.c b/drivers/net/wireless/ath/ath12k/hw.c index f4c827015821..bff8cf97a18c 100644 --- a/drivers/net/wireless/ath/ath12k/hw.c +++ b/drivers/net/wireless/ath/ath12k/hw.c @@ -544,9 +544,6 @@ static const struct ath12k_hw_ring_mask ath12k_hw_ring_mask_qcn9274 = { }, .rx_mon_dest = { 0, 0, 0, - ATH12K_RX_MON_RING_MASK_0, - ATH12K_RX_MON_RING_MASK_1, - ATH12K_RX_MON_RING_MASK_2, }, .rx = { 0, 0, 0, 0, @@ -572,16 +569,15 @@ static const struct ath12k_hw_ring_mask ath12k_hw_ring_mask_qcn9274 = { ATH12K_HOST2RXDMA_RING_MASK_0, }, .tx_mon_dest = { - ATH12K_TX_MON_RING_MASK_0, - ATH12K_TX_MON_RING_MASK_1, + 0, 0, 0, }, }; static const struct ath12k_hw_ring_mask ath12k_hw_ring_mask_wcn7850 = { .tx = { ATH12K_TX_RING_MASK_0, + ATH12K_TX_RING_MASK_1, ATH12K_TX_RING_MASK_2, - ATH12K_TX_RING_MASK_4, }, .rx_mon_dest = { }, diff --git a/drivers/net/wireless/ath/ath12k/hw.h b/drivers/net/wireless/ath/ath12k/hw.h index 3f450ee93f34..2a314cfc8cb8 100644 --- a/drivers/net/wireless/ath/ath12k/hw.h +++ b/drivers/net/wireless/ath/ath12k/hw.h @@ -78,8 +78,7 @@ #define TARGET_NUM_WDS_ENTRIES 32 #define TARGET_DMA_BURST_SIZE 1 #define TARGET_RX_BATCHMODE 1 -#define TARGET_RX_PEER_METADATA_VER_V1A 2 -#define TARGET_RX_PEER_METADATA_VER_V1B 3 +#define TARGET_EMA_MAX_PROFILE_PERIOD 8 #define ATH12K_HW_DEFAULT_QUEUE 0 #define ATH12K_HW_MAX_QUEUES 4 diff --git a/drivers/net/wireless/ath/ath12k/mac.c b/drivers/net/wireless/ath/ath12k/mac.c index 805cb084484a..ead37a4e002a 100644 --- a/drivers/net/wireless/ath/ath12k/mac.c +++ b/drivers/net/wireless/ath/ath12k/mac.c @@ -7386,7 +7386,8 @@ ath12k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw, arvif->is_started = false; } - if (arvif->vdev_type != WMI_VDEV_TYPE_STA) { + if (arvif->vdev_type != WMI_VDEV_TYPE_STA && + arvif->vdev_type != WMI_VDEV_TYPE_MONITOR) { ath12k_bss_disassoc(ar, arvif); ret = ath12k_mac_vdev_stop(arvif); if (ret) @@ -8488,19 +8489,23 @@ static int ath12k_mac_setup_iface_combinations(struct ath12k_hw *ah) static const u8 ath12k_if_types_ext_capa[] = { [0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING, + [2] = WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT, [7] = WLAN_EXT_CAPA8_OPMODE_NOTIF, }; static const u8 ath12k_if_types_ext_capa_sta[] = { [0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING, + [2] = WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT, [7] = WLAN_EXT_CAPA8_OPMODE_NOTIF, [9] = WLAN_EXT_CAPA10_TWT_REQUESTER_SUPPORT, }; static const u8 ath12k_if_types_ext_capa_ap[] = { [0] = 
WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING, + [2] = WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT, [7] = WLAN_EXT_CAPA8_OPMODE_NOTIF, [9] = WLAN_EXT_CAPA10_TWT_RESPONDER_SUPPORT, + [10] = WLAN_EXT_CAPA11_EMA_SUPPORT, }; static const struct wiphy_iftype_ext_capab ath12k_iftypes_ext_capa[] = { @@ -8605,6 +8610,7 @@ static int ath12k_mac_hw_register(struct ath12k_hw *ah) u32 ht_cap = U32_MAX, antennas_rx = 0, antennas_tx = 0; bool is_6ghz = false, is_raw_mode = false, is_monitor_disable = false; u8 *mac_addr = NULL; + u8 mbssid_max_interfaces = 0; wiphy->max_ap_assoc_sta = 0; @@ -8648,6 +8654,8 @@ static int ath12k_mac_hw_register(struct ath12k_hw *ah) mac_addr = ar->mac_addr; else mac_addr = ab->mac_addr; + + mbssid_max_interfaces += TARGET_NUM_VDEVS; } wiphy->available_antennas_rx = antennas_rx; @@ -8739,6 +8747,9 @@ static int ath12k_mac_hw_register(struct ath12k_hw *ah) wiphy->iftype_ext_capab = ath12k_iftypes_ext_capa; wiphy->num_iftype_ext_capab = ARRAY_SIZE(ath12k_iftypes_ext_capa); + wiphy->mbssid_max_interfaces = mbssid_max_interfaces; + wiphy->ema_max_profile_periodicity = TARGET_EMA_MAX_PROFILE_PERIOD; + if (is_6ghz) { wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_FILS_DISCOVERY); @@ -8777,9 +8788,9 @@ static int ath12k_mac_hw_register(struct ath12k_hw *ah) ath12k_err(ar->ab, "ath12k regd update failed: %d\n", ret); goto err_unregister_hw; } - } - ath12k_debugfs_register(ar); + ath12k_debugfs_register(ar); + } return 0; diff --git a/drivers/net/wireless/ath/ath12k/wmi.c b/drivers/net/wireless/ath/ath12k/wmi.c index 7a52d2082b79..ef775af25093 100644 --- a/drivers/net/wireless/ath/ath12k/wmi.c +++ b/drivers/net/wireless/ath/ath12k/wmi.c @@ -228,9 +228,12 @@ void ath12k_wmi_init_qcn9274(struct ath12k_base *ab, config->peer_map_unmap_version = 0x32; config->twt_ap_pdev_count = ab->num_radios; config->twt_ap_sta_count = 1000; + config->ema_max_vap_cnt = ab->num_radios; + config->ema_max_profile_period = TARGET_EMA_MAX_PROFILE_PERIOD; + config->beacon_tx_offload_max_vdev += config->ema_max_vap_cnt; if (test_bit(WMI_TLV_SERVICE_PEER_METADATA_V1A_V1B_SUPPORT, ab->wmi_ab.svc_map)) - config->dp_peer_meta_data_ver = TARGET_RX_PEER_METADATA_VER_V1B; + config->peer_metadata_ver = ATH12K_PEER_METADATA_V1B; } void ath12k_wmi_init_wcn7850(struct ath12k_base *ab, @@ -3473,11 +3476,13 @@ ath12k_wmi_copy_resource_config(struct ath12k_wmi_resource_config_params *wmi_cf wmi_cfg->sched_params = cpu_to_le32(tg_cfg->sched_params); wmi_cfg->twt_ap_pdev_count = cpu_to_le32(tg_cfg->twt_ap_pdev_count); wmi_cfg->twt_ap_sta_count = cpu_to_le32(tg_cfg->twt_ap_sta_count); - wmi_cfg->flags2 = le32_encode_bits(tg_cfg->dp_peer_meta_data_ver, + wmi_cfg->flags2 = le32_encode_bits(tg_cfg->peer_metadata_ver, WMI_RSRC_CFG_FLAGS2_RX_PEER_METADATA_VERSION); - wmi_cfg->host_service_flags = cpu_to_le32(tg_cfg->is_reg_cc_ext_event_supported << WMI_RSRC_CFG_HOST_SVC_FLAG_REG_CC_EXT_SUPPORT_BIT); + wmi_cfg->ema_max_vap_cnt = cpu_to_le32(tg_cfg->ema_max_vap_cnt); + wmi_cfg->ema_max_profile_period = cpu_to_le32(tg_cfg->ema_max_profile_period); + wmi_cfg->flags2 |= cpu_to_le32(WMI_RSRC_CFG_FLAGS2_CALC_NEXT_DTIM_COUNT_SET); } static int ath12k_init_cmd_send(struct ath12k_wmi_pdev *wmi, @@ -3701,6 +3706,8 @@ int ath12k_wmi_cmd_init(struct ath12k_base *ab) arg.num_band_to_mac = ab->num_radios; ath12k_fill_band_to_mac_param(ab, arg.band_to_mac); + ab->dp.peer_metadata_ver = arg.res_cfg.peer_metadata_ver; + return ath12k_init_cmd_send(&wmi_ab->wmi[0], &arg); } @@ -6022,8 +6029,10 @@ static void ath12k_mgmt_rx_event(struct ath12k_base *ab, struct 
sk_buff *skb) if (rx_ev.status & WMI_RX_STATUS_ERR_MIC) status->flag |= RX_FLAG_MMIC_ERROR; - if (rx_ev.chan_freq >= ATH12K_MIN_6G_FREQ) { + if (rx_ev.chan_freq >= ATH12K_MIN_6G_FREQ && + rx_ev.chan_freq <= ATH12K_MAX_6G_FREQ) { status->band = NL80211_BAND_6GHZ; + status->freq = rx_ev.chan_freq; } else if (rx_ev.channel >= 1 && rx_ev.channel <= 14) { status->band = NL80211_BAND_2GHZ; } else if (rx_ev.channel >= 36 && rx_ev.channel <= ATH12K_MAX_5G_CHAN) { @@ -6044,8 +6053,10 @@ static void ath12k_mgmt_rx_event(struct ath12k_base *ab, struct sk_buff *skb) sband = &ar->mac.sbands[status->band]; - status->freq = ieee80211_channel_to_frequency(rx_ev.channel, - status->band); + if (status->band != NL80211_BAND_6GHZ) + status->freq = ieee80211_channel_to_frequency(rx_ev.channel, + status->band); + status->signal = rx_ev.snr + ATH12K_DEFAULT_NOISE_FLOOR; status->rate_idx = ath12k_mac_bitrate_to_idx(sband, rx_ev.rate / 100); diff --git a/drivers/net/wireless/ath/ath12k/wmi.h b/drivers/net/wireless/ath/ath12k/wmi.h index 496866673aea..742fe0b36cf2 100644 --- a/drivers/net/wireless/ath/ath12k/wmi.h +++ b/drivers/net/wireless/ath/ath12k/wmi.h @@ -2292,6 +2292,13 @@ struct ath12k_wmi_host_mem_chunk_arg { u32 req_id; }; +enum ath12k_peer_metadata_version { + ATH12K_PEER_METADATA_V0, + ATH12K_PEER_METADATA_V1, + ATH12K_PEER_METADATA_V1A, + ATH12K_PEER_METADATA_V1B +}; + struct ath12k_wmi_resource_config_arg { u32 num_vdevs; u32 num_peers; @@ -2354,8 +2361,10 @@ struct ath12k_wmi_resource_config_arg { u32 sched_params; u32 twt_ap_pdev_count; u32 twt_ap_sta_count; + enum ath12k_peer_metadata_version peer_metadata_ver; + u32 ema_max_vap_cnt; + u32 ema_max_profile_period; bool is_reg_cc_ext_event_supported; - u8 dp_peer_meta_data_ver; }; struct ath12k_wmi_init_cmd_arg { @@ -2410,6 +2419,7 @@ struct wmi_init_cmd { #define WMI_RSRC_CFG_HOST_SVC_FLAG_REG_CC_EXT_SUPPORT_BIT 4 #define WMI_RSRC_CFG_FLAGS2_RX_PEER_METADATA_VERSION GENMASK(5, 4) #define WMI_RSRC_CFG_FLAG1_BSS_CHANNEL_INFO_64 BIT(5) +#define WMI_RSRC_CFG_FLAGS2_CALC_NEXT_DTIM_COUNT_SET BIT(9) struct ath12k_wmi_resource_config_params { __le32 tlv_header; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c index aae2cf95fe95..e472591f321b 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c @@ -2567,7 +2567,6 @@ wlc_lcnphy_tx_iqlo_cal(struct brcms_phy *pi, struct lcnphy_txgains cal_gains, temp_gains; u16 hash; - u8 band_idx; int j; u16 ncorr_override[5]; u16 syst_coeffs[] = { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, @@ -2599,6 +2598,9 @@ wlc_lcnphy_tx_iqlo_cal(struct brcms_phy *pi, u16 *values_to_save; struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy; + if (WARN_ON(CHSPEC_IS5G(pi->radio_chanspec))) + return; + values_to_save = kmalloc_array(20, sizeof(u16), GFP_ATOMIC); if (NULL == values_to_save) return; @@ -2662,20 +2664,18 @@ wlc_lcnphy_tx_iqlo_cal(struct brcms_phy *pi, hash = (target_gains->gm_gain << 8) | (target_gains->pga_gain << 4) | (target_gains->pad_gain); - band_idx = (CHSPEC_IS5G(pi->radio_chanspec) ? 
1 : 0); - cal_gains = *target_gains; memset(ncorr_override, 0, sizeof(ncorr_override)); - for (j = 0; j < iqcal_gainparams_numgains_lcnphy[band_idx]; j++) { - if (hash == tbl_iqcal_gainparams_lcnphy[band_idx][j][0]) { + for (j = 0; j < iqcal_gainparams_numgains_lcnphy[0]; j++) { + if (hash == tbl_iqcal_gainparams_lcnphy[0][j][0]) { cal_gains.gm_gain = - tbl_iqcal_gainparams_lcnphy[band_idx][j][1]; + tbl_iqcal_gainparams_lcnphy[0][j][1]; cal_gains.pga_gain = - tbl_iqcal_gainparams_lcnphy[band_idx][j][2]; + tbl_iqcal_gainparams_lcnphy[0][j][2]; cal_gains.pad_gain = - tbl_iqcal_gainparams_lcnphy[band_idx][j][3]; + tbl_iqcal_gainparams_lcnphy[0][j][3]; memcpy(ncorr_override, - &tbl_iqcal_gainparams_lcnphy[band_idx][j][3], + &tbl_iqcal_gainparams_lcnphy[0][j][3], sizeof(ncorr_override)); break; } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/link.c b/drivers/net/wireless/intel/iwlwifi/mvm/link.c index 6ec9a8e21a34..92ac6cc40faa 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/link.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/link.c @@ -1082,6 +1082,15 @@ static void iwl_mvm_esr_unblocked(struct iwl_mvm *mvm, IWL_DEBUG_INFO(mvm, "EMLSR is unblocked\n"); + /* If we exited due to an EXIT reason, and the exit was in less than + * 30 seconds, then a MLO scan was scheduled already. + */ + if (!need_new_sel && + !(mvmvif->last_esr_exit.reason & IWL_MVM_BLOCK_ESR_REASONS)) { + IWL_DEBUG_INFO(mvm, "Wait for MLO scan\n"); + return; + } + /* * If EMLSR was blocked for more than 30 seconds, or the last link * selection decided to not enter EMLSR, trigger a new scan. diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index dac6155ae1bd..259afecd1a98 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -4861,6 +4861,7 @@ int iwl_mvm_roc_common(struct ieee80211_hw *hw, struct ieee80211_vif *vif, const struct iwl_mvm_roc_ops *ops) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + struct ieee80211_vif *bss_vif = iwl_mvm_get_bss_vif(mvm); u32 lmac_id; int ret; @@ -4873,9 +4874,12 @@ int iwl_mvm_roc_common(struct ieee80211_hw *hw, struct ieee80211_vif *vif, */ flush_work(&mvm->roc_done_wk); - ret = iwl_mvm_esr_non_bss_link(mvm, vif, 0, true); - if (ret) - return ret; + if (!IS_ERR_OR_NULL(bss_vif)) { + ret = iwl_mvm_block_esr_sync(mvm, bss_vif, + IWL_MVM_ESR_BLOCKED_ROC); + if (ret) + return ret; + } mutex_lock(&mvm->mutex); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index 0a1959bd4079..ded094b6b63d 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -360,7 +360,9 @@ struct iwl_mvm_vif_link_info { * @IWL_MVM_ESR_BLOCKED_WOWLAN: WOWLAN is preventing the enablement of EMLSR * @IWL_MVM_ESR_BLOCKED_TPT: block EMLSR when there is not enough traffic * @IWL_MVM_ESR_BLOCKED_FW: FW didn't recommended/forced exit from EMLSR - * @IWL_MVM_ESR_BLOCKED_NON_BSS: An active non-bssid link's preventing EMLSR + * @IWL_MVM_ESR_BLOCKED_NON_BSS: An active non-BSS interface's link is + * preventing EMLSR + * @IWL_MVM_ESR_BLOCKED_ROC: remain-on-channel is preventing EMLSR * @IWL_MVM_ESR_EXIT_MISSED_BEACON: exited EMLSR due to missed beacons * @IWL_MVM_ESR_EXIT_LOW_RSSI: link is deactivated/not allowed for EMLSR * due to low RSSI. 
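/* Standalone sketch of the reason-bitmask layout used by the iwlwifi EMLSR
 * state above: "blocked" reasons occupy the low bits and "exit" reasons the
 * high bits of one enum, so a single mask test tells whether the most recent
 * exit was caused by a blocking condition; if it was not, an MLO scan has
 * already been scheduled and no new link selection is triggered. The enum
 * values follow the hunk, but the 0xffff mask is an assumption standing in
 * for IWL_MVM_BLOCK_ESR_REASONS, based on the exit values starting at
 * 0x10000.
 */
#include <stdbool.h>
#include <stdio.h>

enum esr_state {
	ESR_BLOCKED_TPT		= 0x4,
	ESR_BLOCKED_FW		= 0x8,
	ESR_BLOCKED_NON_BSS	= 0x10,
	ESR_BLOCKED_ROC		= 0x20,		/* the reason added in the hunk */
	ESR_EXIT_MISSED_BEACON	= 0x10000,
	ESR_EXIT_LOW_RSSI	= 0x20000,
};

#define BLOCK_ESR_REASONS 0xffff		/* low half: blocking reasons (assumed) */

static bool exit_was_blocking(unsigned int last_exit_reason)
{
	return last_exit_reason & BLOCK_ESR_REASONS;
}

int main(void)
{
	printf("ROC exit blocking? %d\n",
	       exit_was_blocking(ESR_BLOCKED_ROC));		/* 1 */
	printf("low-RSSI exit blocking? %d\n",
	       exit_was_blocking(ESR_EXIT_LOW_RSSI));		/* 0 */
	return 0;
}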
@@ -377,6 +379,7 @@ enum iwl_mvm_esr_state { IWL_MVM_ESR_BLOCKED_TPT = 0x4, IWL_MVM_ESR_BLOCKED_FW = 0x8, IWL_MVM_ESR_BLOCKED_NON_BSS = 0x10, + IWL_MVM_ESR_BLOCKED_ROC = 0x20, IWL_MVM_ESR_EXIT_MISSED_BEACON = 0x10000, IWL_MVM_ESR_EXIT_LOW_RSSI = 0x20000, IWL_MVM_ESR_EXIT_COEX = 0x40000, @@ -1860,10 +1863,10 @@ static inline u8 iwl_mvm_get_valid_tx_ant(struct iwl_mvm *mvm) static inline u8 iwl_mvm_get_valid_rx_ant(struct iwl_mvm *mvm) { - u8 rx_ant = mvm->fw->valid_tx_ant; + u8 rx_ant = mvm->fw->valid_rx_ant; if (mvm->nvm_data && mvm->nvm_data->valid_rx_ant) - rx_ant &= mvm->nvm_data->valid_tx_ant; + rx_ant &= mvm->nvm_data->valid_rx_ant; if (mvm->set_rx_ant) rx_ant &= mvm->set_rx_ant; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c index 31bc80cdcb7d..9d681377cbab 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c @@ -47,6 +47,7 @@ void iwl_mvm_te_clear_data(struct iwl_mvm *mvm, static void iwl_mvm_cleanup_roc(struct iwl_mvm *mvm) { + struct ieee80211_vif *bss_vif = iwl_mvm_get_bss_vif(mvm); struct ieee80211_vif *vif = mvm->p2p_device_vif; lockdep_assert_held(&mvm->mutex); @@ -119,9 +120,9 @@ static void iwl_mvm_cleanup_roc(struct iwl_mvm *mvm) iwl_mvm_rm_aux_sta(mvm); } + if (!IS_ERR_OR_NULL(bss_vif)) + iwl_mvm_unblock_esr(mvm, bss_vif, IWL_MVM_ESR_BLOCKED_ROC); mutex_unlock(&mvm->mutex); - if (vif) - iwl_mvm_esr_non_bss_link(mvm, vif, 0, false); } void iwl_mvm_roc_done_wk(struct work_struct *wk) diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c index b909a7665e9c..155eb0fab12a 100644 --- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c +++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c @@ -926,6 +926,8 @@ mwifiex_init_new_priv_params(struct mwifiex_private *priv, return -EOPNOTSUPP; } + priv->bss_num = mwifiex_get_unused_bss_num(adapter, priv->bss_type); + spin_lock_irqsave(&adapter->main_proc_lock, flags); adapter->main_locked = false; spin_unlock_irqrestore(&adapter->main_proc_lock, flags); diff --git a/drivers/net/wireless/realtek/rtl8xxxu/8188f.c b/drivers/net/wireless/realtek/rtl8xxxu/8188f.c index bd5a0603b4a2..3abf14d7044f 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/8188f.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/8188f.c @@ -697,9 +697,14 @@ static void rtl8188fu_init_statistics(struct rtl8xxxu_priv *priv) rtl8xxxu_write32(priv, REG_OFDM0_FA_RSTC, val32); } +#define TX_POWER_INDEX_MAX 0x3F +#define TX_POWER_INDEX_DEFAULT_CCK 0x22 +#define TX_POWER_INDEX_DEFAULT_HT40 0x27 + static int rtl8188fu_parse_efuse(struct rtl8xxxu_priv *priv) { struct rtl8188fu_efuse *efuse = &priv->efuse_wifi.efuse8188fu; + int i; if (efuse->rtl_id != cpu_to_le16(0x8129)) return -EINVAL; @@ -713,6 +718,16 @@ static int rtl8188fu_parse_efuse(struct rtl8xxxu_priv *priv) efuse->tx_power_index_A.ht40_base, sizeof(efuse->tx_power_index_A.ht40_base)); + for (i = 0; i < ARRAY_SIZE(priv->cck_tx_power_index_A); i++) { + if (priv->cck_tx_power_index_A[i] > TX_POWER_INDEX_MAX) + priv->cck_tx_power_index_A[i] = TX_POWER_INDEX_DEFAULT_CCK; + } + + for (i = 0; i < ARRAY_SIZE(priv->ht40_1s_tx_power_index_A); i++) { + if (priv->ht40_1s_tx_power_index_A[i] > TX_POWER_INDEX_MAX) + priv->ht40_1s_tx_power_index_A[i] = TX_POWER_INDEX_DEFAULT_HT40; + } + priv->ofdm_tx_power_diff[0].a = efuse->tx_power_index_A.ht20_ofdm_1s_diff.a; priv->ht20_tx_power_diff[0].a = efuse->tx_power_index_A.ht20_ofdm_1s_diff.b; diff 
--git a/drivers/net/wireless/realtek/rtw88/mac.c b/drivers/net/wireless/realtek/rtw88/mac.c index 0dba8aae7716..564f5988ee82 100644 --- a/drivers/net/wireless/realtek/rtw88/mac.c +++ b/drivers/net/wireless/realtek/rtw88/mac.c @@ -1201,6 +1201,15 @@ static int __priority_queue_cfg(struct rtw_dev *rtwdev, rtw_write16(rtwdev, REG_FIFOPAGE_CTRL_2 + 2, fifo->rsvd_boundary); rtw_write16(rtwdev, REG_BCNQ1_BDNY_V1, fifo->rsvd_boundary); rtw_write32(rtwdev, REG_RXFF_BNDY, chip->rxff_size - C2H_PKT_BUF - 1); + + if (rtwdev->hci.type == RTW_HCI_TYPE_USB) { + rtw_write8_mask(rtwdev, REG_AUTO_LLT_V1, BIT_MASK_BLK_DESC_NUM, + chip->usb_tx_agg_desc_num); + + rtw_write8(rtwdev, REG_AUTO_LLT_V1 + 3, chip->usb_tx_agg_desc_num); + rtw_write8_set(rtwdev, REG_TXDMA_OFFSET_CHK + 1, BIT(1)); + } + rtw_write8_set(rtwdev, REG_AUTO_LLT_V1, BIT_AUTO_INIT_LLT_V1); if (!check_hw_ready(rtwdev, REG_AUTO_LLT_V1, BIT_AUTO_INIT_LLT_V1, 0)) diff --git a/drivers/net/wireless/realtek/rtw88/main.h b/drivers/net/wireless/realtek/rtw88/main.h index 49894331f7b4..49a3fd4fb7dc 100644 --- a/drivers/net/wireless/realtek/rtw88/main.h +++ b/drivers/net/wireless/realtek/rtw88/main.h @@ -1197,6 +1197,8 @@ struct rtw_chip_info { u16 fw_fifo_addr[RTW_FW_FIFO_MAX]; const struct rtw_fwcd_segs *fwcd_segs; + u8 usb_tx_agg_desc_num; + u8 default_1ss_tx_path; bool path_div_supported; diff --git a/drivers/net/wireless/realtek/rtw88/reg.h b/drivers/net/wireless/realtek/rtw88/reg.h index b122f226924b..02ef9a77316b 100644 --- a/drivers/net/wireless/realtek/rtw88/reg.h +++ b/drivers/net/wireless/realtek/rtw88/reg.h @@ -270,6 +270,7 @@ #define BIT_MASK_BCN_HEAD_1_V1 0xfff #define REG_AUTO_LLT_V1 0x0208 #define BIT_AUTO_INIT_LLT_V1 BIT(0) +#define BIT_MASK_BLK_DESC_NUM GENMASK(7, 4) #define REG_DWBCN0_CTRL 0x0208 #define BIT_BCN_VALID BIT(16) #define REG_TXDMA_OFFSET_CHK 0x020C diff --git a/drivers/net/wireless/realtek/rtw88/rtw8703b.c b/drivers/net/wireless/realtek/rtw88/rtw8703b.c index 8919f9e11f03..222608de33cd 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8703b.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8703b.c @@ -2013,6 +2013,7 @@ const struct rtw_chip_info rtw8703b_hw_spec = { .tx_stbc = false, .max_power_index = 0x3f, .ampdu_density = IEEE80211_HT_MPDU_DENSITY_16, + .usb_tx_agg_desc_num = 1, /* Not sure if this chip has USB interface */ .path_div_supported = false, .ht_supported = true, diff --git a/drivers/net/wireless/realtek/rtw88/rtw8723d.c b/drivers/net/wireless/realtek/rtw88/rtw8723d.c index f8df4c84d39f..3fba4054d45f 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8723d.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8723d.c @@ -2171,6 +2171,7 @@ const struct rtw_chip_info rtw8723d_hw_spec = { .band = RTW_BAND_2G, .page_size = TX_PAGE_SIZE, .dig_min = 0x20, + .usb_tx_agg_desc_num = 1, .ht_supported = true, .vht_supported = false, .lps_deep_mode_supported = 0, diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821c.c b/drivers/net/wireless/realtek/rtw88/rtw8821c.c index fe5d8e188350..526e8de77b3e 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8821c.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8821c.c @@ -2008,6 +2008,7 @@ const struct rtw_chip_info rtw8821c_hw_spec = { .band = RTW_BAND_2G | RTW_BAND_5G, .page_size = TX_PAGE_SIZE, .dig_min = 0x1c, + .usb_tx_agg_desc_num = 3, .ht_supported = true, .vht_supported = true, .lps_deep_mode_supported = BIT(LPS_DEEP_MODE_LCLK), diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822b.c b/drivers/net/wireless/realtek/rtw88/rtw8822b.c index 3017a9760da8..2456ff242818 100644 --- 
a/drivers/net/wireless/realtek/rtw88/rtw8822b.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8822b.c @@ -2548,6 +2548,7 @@ const struct rtw_chip_info rtw8822b_hw_spec = { .band = RTW_BAND_2G | RTW_BAND_5G, .page_size = TX_PAGE_SIZE, .dig_min = 0x1c, + .usb_tx_agg_desc_num = 3, .ht_supported = true, .vht_supported = true, .lps_deep_mode_supported = BIT(LPS_DEEP_MODE_LCLK), diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.c b/drivers/net/wireless/realtek/rtw88/rtw8822c.c index cd965edc29ce..62376d1cca22 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8822c.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.c @@ -5366,6 +5366,7 @@ const struct rtw_chip_info rtw8822c_hw_spec = { .band = RTW_BAND_2G | RTW_BAND_5G, .page_size = TX_PAGE_SIZE, .dig_min = 0x20, + .usb_tx_agg_desc_num = 3, .default_1ss_tx_path = BB_PATH_A, .path_div_supported = true, .ht_supported = true, diff --git a/drivers/net/wireless/realtek/rtw88/usb.c b/drivers/net/wireless/realtek/rtw88/usb.c index a0188511099a..0001a1ab6f38 100644 --- a/drivers/net/wireless/realtek/rtw88/usb.c +++ b/drivers/net/wireless/realtek/rtw88/usb.c @@ -273,6 +273,8 @@ static void rtw_usb_write_port_tx_complete(struct urb *urb) info = IEEE80211_SKB_CB(skb); tx_data = rtw_usb_get_tx_data(skb); + skb_pull(skb, rtwdev->chip->tx_pkt_desc_sz); + /* enqueue to wait for tx report */ if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) { rtw_tx_report_enqueue(rtwdev, skb, tx_data->sn); @@ -377,7 +379,9 @@ static bool rtw_usb_tx_agg_skb(struct rtw_usb *rtwusb, struct sk_buff_head *list skb_iter = skb_peek(list); - if (skb_iter && skb_iter->len + skb_head->len <= RTW_USB_MAX_XMITBUF_SZ) + if (skb_iter && + skb_iter->len + skb_head->len <= RTW_USB_MAX_XMITBUF_SZ && + agg_num < rtwdev->chip->usb_tx_agg_desc_num) __skb_unlink(skb_iter, list); else skb_iter = NULL; diff --git a/drivers/net/wireless/realtek/rtw89/debug.c b/drivers/net/wireless/realtek/rtw89/debug.c index affffc4092ba..5b4077c9fd28 100644 --- a/drivers/net/wireless/realtek/rtw89/debug.c +++ b/drivers/net/wireless/realtek/rtw89/debug.c @@ -3531,7 +3531,7 @@ static void rtw89_sta_info_get_iter(void *data, struct ieee80211_sta *sta) case RX_ENC_HE: seq_printf(m, "HE %dSS MCS-%d GI:%s", status->nss, status->rate_idx, status->he_gi <= NL80211_RATE_INFO_HE_GI_3_2 ? - he_gi_str[rate->he_gi] : "N/A"); + he_gi_str[status->he_gi] : "N/A"); break; case RX_ENC_EHT: seq_printf(m, "EHT %dSS MCS-%d GI:%s", status->nss, status->rate_idx, diff --git a/drivers/net/wireless/realtek/rtw89/fw.c b/drivers/net/wireless/realtek/rtw89/fw.c index 044a5b90c7f4..6cd692288221 100644 --- a/drivers/net/wireless/realtek/rtw89/fw.c +++ b/drivers/net/wireless/realtek/rtw89/fw.c @@ -6245,7 +6245,14 @@ void rtw89_hw_scan_abort(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif) ret = rtw89_hw_scan_offload(rtwdev, vif, false); if (ret) - rtw89_hw_scan_complete(rtwdev, vif, true); + rtw89_warn(rtwdev, "rtw89_hw_scan_offload failed ret %d\n", ret); + + /* Indicate ieee80211_scan_completed() before returning, which is safe + * because scan abort command always waits for completion of + * RTW89_SCAN_END_SCAN_NOTIFY, so that ieee80211_stop() can flush scan + * work properly. 
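The comment just above explains why the abort path now owns scan completion: the abort command waits for the firmware's end-of-scan notification, so rtw89_hw_scan_abort() can always signal completion itself, while the notification handler (patched further below in mac.c) skips completion when an abort is in flight. The following toy, single-threaded sketch shows that "abort owns completion" pattern in isolation; the demo_* names are invented and there is no real locking or firmware I/O.

#include <stdbool.h>
#include <stdio.h>

struct demo_scan {
	bool abort;		/* set while an abort request is in flight */
	bool completed;		/* stand-in for ieee80211_scan_completed() */
};

static void demo_scan_complete(struct demo_scan *s, bool aborted)
{
	if (s->completed)
		return;
	s->completed = true;
	printf("scan completed, aborted=%d\n", aborted);
}

/* Firmware "scan end" notification: ignored while an abort is pending. */
static void demo_scan_end_notify(struct demo_scan *s)
{
	if (s->abort)
		return;			/* the abort path signals completion */
	demo_scan_complete(s, false);
}

static void demo_scan_abort(struct demo_scan *s)
{
	s->abort = true;
	/* ...send the abort command; its failure is only logged... */
	demo_scan_complete(s, true);	/* always release the scan state */
	s->abort = false;
}

int main(void)
{
	struct demo_scan s = { 0 };

	demo_scan_abort(&s);		/* completes exactly once */
	demo_scan_end_notify(&s);	/* a late notification is a no-op */
	return 0;
}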
+ */ + rtw89_hw_scan_complete(rtwdev, vif, true); } static bool rtw89_is_any_vif_connected_or_connecting(struct rtw89_dev *rtwdev) @@ -6715,10 +6722,8 @@ int rtw89_fw_h2c_wow_gtk_ofld(struct rtw89_dev *rtwdev, skb_put(skb, len); h2c = (struct rtw89_h2c_wow_gtk_ofld *)skb->data; - if (!enable) { - skb_put_zero(skb, sizeof(*gtk_info)); + if (!enable) goto hdr; - } ret = rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif, RTW89_PKT_OFLD_TYPE_EAPOL_KEY, diff --git a/drivers/net/wireless/realtek/rtw89/mac.c b/drivers/net/wireless/realtek/rtw89/mac.c index 3fe0046f6eaa..185b9cd283ed 100644 --- a/drivers/net/wireless/realtek/rtw89/mac.c +++ b/drivers/net/wireless/realtek/rtw89/mac.c @@ -4757,6 +4757,9 @@ rtw89_mac_c2h_scanofld_rsp(struct rtw89_dev *rtwdev, struct sk_buff *skb, } return; case RTW89_SCAN_END_SCAN_NOTIFY: + if (rtwdev->scan_info.abort) + return; + if (rtwvif && rtwvif->scan_req && last_chan < rtwvif->scan_req->n_channels) { ret = rtw89_hw_scan_offload(rtwdev, vif, true); @@ -4765,7 +4768,7 @@ rtw89_mac_c2h_scanofld_rsp(struct rtw89_dev *rtwdev, struct sk_buff *skb, rtw89_warn(rtwdev, "HW scan failed: %d\n", ret); } } else { - rtw89_hw_scan_complete(rtwdev, vif, rtwdev->scan_info.abort); + rtw89_hw_scan_complete(rtwdev, vif, false); } break; case RTW89_SCAN_ENTER_OP_NOTIFY: diff --git a/drivers/net/wireless/realtek/rtw89/pci.c b/drivers/net/wireless/realtek/rtw89/pci.c index 03bbcf9b6737..b36aa9a6bb3f 100644 --- a/drivers/net/wireless/realtek/rtw89/pci.c +++ b/drivers/net/wireless/realtek/rtw89/pci.c @@ -2330,21 +2330,20 @@ static void rtw89_pci_disable_eq(struct rtw89_dev *rtwdev) u32 backup_aspm; u32 phy_offset; u16 oobs_val; - u16 val16; int ret; if (rtwdev->chip->chip_id != RTL8852C) return; - backup_aspm = rtw89_read32(rtwdev, R_AX_PCIE_MIX_CFG_V1); - rtw89_write32_clr(rtwdev, R_AX_PCIE_MIX_CFG_V1, B_AX_ASPM_CTRL_MASK); - g1_oobs = rtw89_read16_mask(rtwdev, R_RAC_DIRECT_OFFSET_G1 + RAC_ANA09 * RAC_MULT, BAC_OOBS_SEL); g2_oobs = rtw89_read16_mask(rtwdev, R_RAC_DIRECT_OFFSET_G2 + RAC_ANA09 * RAC_MULT, BAC_OOBS_SEL); if (g1_oobs && g2_oobs) - goto out; + return; + + backup_aspm = rtw89_read32(rtwdev, R_AX_PCIE_MIX_CFG_V1); + rtw89_write32_clr(rtwdev, R_AX_PCIE_MIX_CFG_V1, B_AX_ASPM_CTRL_MASK); ret = rtw89_pci_get_phy_offset_by_link_speed(rtwdev, &phy_offset); if (ret) @@ -2354,15 +2353,16 @@ static void rtw89_pci_disable_eq(struct rtw89_dev *rtwdev) rtw89_write16(rtwdev, phy_offset + RAC_ANA10 * RAC_MULT, ADDR_SEL_PINOUT_DIS_VAL); rtw89_write16_set(rtwdev, phy_offset + RAC_ANA19 * RAC_MULT, B_PCIE_BIT_RD_SEL); - val16 = rtw89_read16_mask(rtwdev, phy_offset + RAC_ANA1F * RAC_MULT, - OOBS_LEVEL_MASK); - oobs_val = u16_encode_bits(val16, OOBS_SEN_MASK); + oobs_val = rtw89_read16_mask(rtwdev, phy_offset + RAC_ANA1F * RAC_MULT, + OOBS_LEVEL_MASK); - rtw89_write16(rtwdev, R_RAC_DIRECT_OFFSET_G1 + RAC_ANA03 * RAC_MULT, oobs_val); + rtw89_write16_mask(rtwdev, R_RAC_DIRECT_OFFSET_G1 + RAC_ANA03 * RAC_MULT, + OOBS_SEN_MASK, oobs_val); rtw89_write16_set(rtwdev, R_RAC_DIRECT_OFFSET_G1 + RAC_ANA09 * RAC_MULT, BAC_OOBS_SEL); - rtw89_write16(rtwdev, R_RAC_DIRECT_OFFSET_G2 + RAC_ANA03 * RAC_MULT, oobs_val); + rtw89_write16_mask(rtwdev, R_RAC_DIRECT_OFFSET_G2 + RAC_ANA03 * RAC_MULT, + OOBS_SEN_MASK, oobs_val); rtw89_write16_set(rtwdev, R_RAC_DIRECT_OFFSET_G2 + RAC_ANA09 * RAC_MULT, BAC_OOBS_SEL); @@ -2783,7 +2783,6 @@ static int rtw89_pci_ops_mac_pre_init_ax(struct rtw89_dev *rtwdev) const struct rtw89_pci_info *info = rtwdev->pci_info; int ret; - rtw89_pci_disable_eq(rtwdev); rtw89_pci_ber(rtwdev); 
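The rtw89 PCI hunk above replaces a whole-register write of the OOBS value with rtw89_write16_mask(), i.e. a read-modify-write that updates only the OOBS_SEN field and leaves neighbouring bits intact. The sketch below shows the same masked field copy on plain integers; the demo_* masks and register images are invented for illustration.

#include <stdint.h>
#include <stdio.h>

#define DEMO_SRC_LEVEL_MASK	0x00f0u	/* field to read in the source register */
#define DEMO_DST_SEN_MASK	0x0f00u	/* field to update in the destination */

/* Extract a field value (shifted down to bit 0) from a register image. */
static uint16_t demo_field_get(uint16_t reg, uint16_t mask)
{
	return (uint16_t)((reg & mask) / (mask & -mask));
}

/* Insert a field value into a register image, preserving the other bits. */
static uint16_t demo_field_set(uint16_t reg, uint16_t mask, uint16_t val)
{
	return (uint16_t)((reg & ~mask) | ((val * (mask & -mask)) & mask));
}

int main(void)
{
	uint16_t src = 0x00a0;	/* level field holds 0xa */
	uint16_t dst = 0xf00f;	/* unrelated bits that must survive the update */
	uint16_t level = demo_field_get(src, DEMO_SRC_LEVEL_MASK);

	dst = demo_field_set(dst, DEMO_DST_SEN_MASK, level);
	printf("dst = 0x%04x\n", (unsigned int)dst);	/* 0xfa0f: only the SEN field changed */
	return 0;
}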
rtw89_pci_rxdma_prefth(rtwdev); rtw89_pci_l1off_pwroff(rtwdev); @@ -4155,6 +4154,7 @@ static int __maybe_unused rtw89_pci_resume(struct device *dev) B_AX_SEL_REQ_ENTR_L1); } rtw89_pci_l2_hci_ldo(rtwdev); + rtw89_pci_disable_eq(rtwdev); rtw89_pci_filter_out(rtwdev); rtw89_pci_link_cfg(rtwdev); rtw89_pci_l1ss_cfg(rtwdev); @@ -4289,6 +4289,7 @@ int rtw89_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) goto err_clear_resource; } + rtw89_pci_disable_eq(rtwdev); rtw89_pci_filter_out(rtwdev); rtw89_pci_link_cfg(rtwdev); rtw89_pci_l1ss_cfg(rtwdev); diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b.c b/drivers/net/wireless/realtek/rtw89/rtw8852b.c index d351096fa4b4..767de9a2de7e 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852b.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852b.c @@ -403,6 +403,8 @@ static int rtw8852b_pwr_on_func(struct rtw89_dev *rtwdev) u32 val32; u32 ret; + rtw8852b_pwr_sps_ana(rtwdev); + rtw89_write32_clr(rtwdev, R_AX_SYS_PW_CTRL, B_AX_AFSM_WLSUS_EN | B_AX_AFSM_PCIE_SUS_EN); rtw89_write32_set(rtwdev, R_AX_SYS_PW_CTRL, B_AX_DIS_WLBT_PDNSUSEN_SOPC); @@ -530,9 +532,7 @@ static int rtw8852b_pwr_off_func(struct rtw89_dev *rtwdev) u32 val32; u32 ret; - /* Only do once during probe stage after reading efuse */ - if (!test_bit(RTW89_FLAG_PROBE_DONE, rtwdev->flags)) - rtw8852b_pwr_sps_ana(rtwdev); + rtw8852b_pwr_sps_ana(rtwdev); ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, XTAL_SI_RFC2RF, XTAL_SI_RFC2RF); diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.c index 259df67836a0..a2fa1d339bc2 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.c @@ -20,7 +20,7 @@ #define RTW8852B_RF_REL_VERSION 34 #define RTW8852B_DPK_VER 0x0d #define RTW8852B_DPK_RF_PATH 2 -#define RTW8852B_DPK_KIP_REG_NUM 2 +#define RTW8852B_DPK_KIP_REG_NUM 3 #define _TSSI_DE_MASK GENMASK(21, 12) #define ADDC_T_AVG 100 diff --git a/drivers/net/wireless/virtual/virt_wifi.c b/drivers/net/wireless/virtual/virt_wifi.c index 6a84ec58d618..4ee374080466 100644 --- a/drivers/net/wireless/virtual/virt_wifi.c +++ b/drivers/net/wireless/virtual/virt_wifi.c @@ -136,6 +136,9 @@ static struct ieee80211_supported_band band_5ghz = { /* Assigned at module init. Guaranteed locally-administered and unicast. 
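The virt_wifi hunk that continues below remembers the SSID passed to connect() and only reports a successful association when it matches the fake AP's SSID. Here is a compact userspace sketch of that bookkeeping; the demo_* names and sizes stand in for the cfg80211 structures and are not the driver's code.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define DEMO_MAX_SSID_LEN	32
#define DEMO_AP_SSID		"VirtWifi"
#define DEMO_AP_SSID_LEN	8

struct demo_priv {
	unsigned int requested_ssid_len;
	unsigned char requested_ssid[DEMO_MAX_SSID_LEN];
};

static int demo_connect(struct demo_priv *priv, const void *ssid, unsigned int len)
{
	if (!ssid || len > DEMO_MAX_SSID_LEN)
		return -1;			/* reject before scheduling any work */
	priv->requested_ssid_len = len;
	memcpy(priv->requested_ssid, ssid, len);
	return 0;
}

static bool demo_connect_complete(const struct demo_priv *priv)
{
	/* Association succeeds only for the SSID the fake AP advertises. */
	return priv->requested_ssid_len == DEMO_AP_SSID_LEN &&
	       !memcmp(priv->requested_ssid, DEMO_AP_SSID, DEMO_AP_SSID_LEN);
}

int main(void)
{
	struct demo_priv priv = { 0 };

	demo_connect(&priv, "VirtWifi", 8);
	printf("right ssid accepted: %d\n", demo_connect_complete(&priv));
	demo_connect(&priv, "OtherNet", 8);
	printf("wrong ssid accepted: %d\n", demo_connect_complete(&priv));
	return 0;
}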
*/ static u8 fake_router_bssid[ETH_ALEN] __ro_after_init = {}; +#define VIRT_WIFI_SSID "VirtWifi" +#define VIRT_WIFI_SSID_LEN 8 + static void virt_wifi_inform_bss(struct wiphy *wiphy) { u64 tsf = div_u64(ktime_get_boottime_ns(), 1000); @@ -146,8 +149,8 @@ static void virt_wifi_inform_bss(struct wiphy *wiphy) u8 ssid[8]; } __packed ssid = { .tag = WLAN_EID_SSID, - .len = 8, - .ssid = "VirtWifi", + .len = VIRT_WIFI_SSID_LEN, + .ssid = VIRT_WIFI_SSID, }; informed_bss = cfg80211_inform_bss(wiphy, &channel_5ghz, @@ -213,6 +216,8 @@ struct virt_wifi_netdev_priv { struct net_device *upperdev; u32 tx_packets; u32 tx_failed; + u32 connect_requested_ssid_len; + u8 connect_requested_ssid[IEEE80211_MAX_SSID_LEN]; u8 connect_requested_bss[ETH_ALEN]; bool is_up; bool is_connected; @@ -229,6 +234,12 @@ static int virt_wifi_connect(struct wiphy *wiphy, struct net_device *netdev, if (priv->being_deleted || !priv->is_up) return -EBUSY; + if (!sme->ssid) + return -EINVAL; + + priv->connect_requested_ssid_len = sme->ssid_len; + memcpy(priv->connect_requested_ssid, sme->ssid, sme->ssid_len); + could_schedule = schedule_delayed_work(&priv->connect, HZ * 2); if (!could_schedule) return -EBUSY; @@ -252,12 +263,15 @@ static void virt_wifi_connect_complete(struct work_struct *work) container_of(work, struct virt_wifi_netdev_priv, connect.work); u8 *requested_bss = priv->connect_requested_bss; bool right_addr = ether_addr_equal(requested_bss, fake_router_bssid); + bool right_ssid = priv->connect_requested_ssid_len == VIRT_WIFI_SSID_LEN && + !memcmp(priv->connect_requested_ssid, VIRT_WIFI_SSID, + priv->connect_requested_ssid_len); u16 status = WLAN_STATUS_SUCCESS; if (is_zero_ether_addr(requested_bss)) requested_bss = NULL; - if (!priv->is_up || (requested_bss && !right_addr)) + if (!priv->is_up || (requested_bss && !right_addr) || !right_ssid) status = WLAN_STATUS_UNSPECIFIED_FAILURE; else priv->is_connected = true; diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 102a9fb0c65f..5a93f021ca4f 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -863,7 +863,8 @@ static blk_status_t nvme_prep_rq(struct nvme_dev *dev, struct request *req) nvme_start_request(req); return BLK_STS_OK; out_unmap_data: - nvme_unmap_data(dev, req); + if (blk_rq_nr_phys_segments(req)) + nvme_unmap_data(dev, req); out_free_cmd: nvme_cleanup_cmd(req); return ret; @@ -1274,7 +1275,7 @@ static void nvme_warn_reset(struct nvme_dev *dev, u32 csts) dev_warn(dev->ctrl.device, "Does your device have a faulty power saving mode enabled?\n"); dev_warn(dev->ctrl.device, - "Try \"nvme_core.default_ps_max_latency_us=0 pcie_aspm=off\" and report a bug\n"); + "Try \"nvme_core.default_ps_max_latency_us=0 pcie_aspm=off pcie_port_pm=off\" and report a bug\n"); } static enum blk_eh_timer_return nvme_timeout(struct request *req) diff --git a/drivers/nvme/target/auth.c b/drivers/nvme/target/auth.c index 7d2633940f9b..8bc3f431c77f 100644 --- a/drivers/nvme/target/auth.c +++ b/drivers/nvme/target/auth.c @@ -314,7 +314,7 @@ int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response, req->sq->dhchap_c1, challenge, shash_len); if (ret) - goto out_free_response; + goto out_free_challenge; } pr_debug("ctrl %d qid %d host response seq %u transaction %d\n", @@ -325,7 +325,7 @@ int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response, GFP_KERNEL); if (!shash) { ret = -ENOMEM; - goto out_free_response; + goto out_free_challenge; } shash->tfm = shash_tfm; ret = crypto_shash_init(shash); @@ -361,9 +361,10 @@ int 
nvmet_auth_host_hash(struct nvmet_req *req, u8 *response, goto out; ret = crypto_shash_final(shash, response); out: + kfree(shash); +out_free_challenge: if (challenge != req->sq->dhchap_c1) kfree(challenge); - kfree(shash); out_free_response: nvme_auth_free_key(transformed_key); out_free_tfm: @@ -427,14 +428,14 @@ int nvmet_auth_ctrl_hash(struct nvmet_req *req, u8 *response, req->sq->dhchap_c2, challenge, shash_len); if (ret) - goto out_free_response; + goto out_free_challenge; } shash = kzalloc(sizeof(*shash) + crypto_shash_descsize(shash_tfm), GFP_KERNEL); if (!shash) { ret = -ENOMEM; - goto out_free_response; + goto out_free_challenge; } shash->tfm = shash_tfm; @@ -471,9 +472,10 @@ int nvmet_auth_ctrl_hash(struct nvmet_req *req, u8 *response, goto out; ret = crypto_shash_final(shash, response); out: + kfree(shash); +out_free_challenge: if (challenge != req->sq->dhchap_c2) kfree(challenge); - kfree(shash); out_free_response: nvme_auth_free_key(transformed_key); out_free_tfm: diff --git a/drivers/nvmem/rockchip-otp.c b/drivers/nvmem/rockchip-otp.c index cb9aa5428350..7107d68a2f8c 100644 --- a/drivers/nvmem/rockchip-otp.c +++ b/drivers/nvmem/rockchip-otp.c @@ -255,6 +255,7 @@ static int rockchip_otp_read(void *context, unsigned int offset, static struct nvmem_config otp_config = { .name = "rockchip-otp", .owner = THIS_MODULE, + .add_legacy_fixed_of_cells = true, .read_only = true, .stride = 1, .word_size = 1, diff --git a/drivers/opp/core.c b/drivers/opp/core.c index cb4611fe1b5b..4e4d293bf5b1 100644 --- a/drivers/opp/core.c +++ b/drivers/opp/core.c @@ -2443,8 +2443,10 @@ static int _opp_attach_genpd(struct opp_table *opp_table, struct device *dev, * Cross check it again and fix if required. */ gdev = dev_to_genpd_dev(virt_dev); - if (IS_ERR(gdev)) - return PTR_ERR(gdev); + if (IS_ERR(gdev)) { + ret = PTR_ERR(gdev); + goto err; + } genpd_table = _find_opp_table(gdev); if (!IS_ERR(genpd_table)) { diff --git a/drivers/opp/ti-opp-supply.c b/drivers/opp/ti-opp-supply.c index e3b97cd1fbbf..ec0056a4bb13 100644 --- a/drivers/opp/ti-opp-supply.c +++ b/drivers/opp/ti-opp-supply.c @@ -393,10 +393,12 @@ static int ti_opp_supply_probe(struct platform_device *pdev) } ret = dev_pm_opp_set_config_regulators(cpu_dev, ti_opp_config_regulators); - if (ret < 0) + if (ret < 0) { _free_optimized_voltages(dev, &opp_data); + return ret; + } - return ret; + return 0; } static struct platform_driver ti_opp_supply_driver = { diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c index bd388560ed59..c2e371c50dcf 100644 --- a/drivers/parport/procfs.c +++ b/drivers/parport/procfs.c @@ -51,12 +51,12 @@ static int do_active_device(struct ctl_table *table, int write, for (dev = port->devices; dev ; dev = dev->next) { if(dev == port->cad) { - len += sprintf(buffer, "%s\n", dev->name); + len += snprintf(buffer, sizeof(buffer), "%s\n", dev->name); } } if(!len) { - len += sprintf(buffer, "%s\n", "none"); + len += snprintf(buffer, sizeof(buffer), "%s\n", "none"); } if (len > *lenp) @@ -87,19 +87,19 @@ static int do_autoprobe(struct ctl_table *table, int write, } if ((str = info->class_name) != NULL) - len += sprintf (buffer + len, "CLASS:%s;\n", str); + len += snprintf (buffer + len, sizeof(buffer) - len, "CLASS:%s;\n", str); if ((str = info->model) != NULL) - len += sprintf (buffer + len, "MODEL:%s;\n", str); + len += snprintf (buffer + len, sizeof(buffer) - len, "MODEL:%s;\n", str); if ((str = info->mfr) != NULL) - len += sprintf (buffer + len, "MANUFACTURER:%s;\n", str); + len += snprintf (buffer + len, 
sizeof(buffer) - len, "MANUFACTURER:%s;\n", str); if ((str = info->description) != NULL) - len += sprintf (buffer + len, "DESCRIPTION:%s;\n", str); + len += snprintf (buffer + len, sizeof(buffer) - len, "DESCRIPTION:%s;\n", str); if ((str = info->cmdset) != NULL) - len += sprintf (buffer + len, "COMMAND SET:%s;\n", str); + len += snprintf (buffer + len, sizeof(buffer) - len, "COMMAND SET:%s;\n", str); if (len > *lenp) len = *lenp; @@ -117,7 +117,7 @@ static int do_hardware_base_addr(struct ctl_table *table, int write, void *result, size_t *lenp, loff_t *ppos) { struct parport *port = (struct parport *)table->extra1; - char buffer[20]; + char buffer[64]; int len = 0; if (*ppos) { @@ -128,7 +128,7 @@ static int do_hardware_base_addr(struct ctl_table *table, int write, if (write) /* permissions prevent this anyway */ return -EACCES; - len += sprintf (buffer, "%lu\t%lu\n", port->base, port->base_hi); + len += snprintf (buffer, sizeof(buffer), "%lu\t%lu\n", port->base, port->base_hi); if (len > *lenp) len = *lenp; @@ -155,7 +155,7 @@ static int do_hardware_irq(struct ctl_table *table, int write, if (write) /* permissions prevent this anyway */ return -EACCES; - len += sprintf (buffer, "%d\n", port->irq); + len += snprintf (buffer, sizeof(buffer), "%d\n", port->irq); if (len > *lenp) len = *lenp; @@ -182,7 +182,7 @@ static int do_hardware_dma(struct ctl_table *table, int write, if (write) /* permissions prevent this anyway */ return -EACCES; - len += sprintf (buffer, "%d\n", port->dma); + len += snprintf (buffer, sizeof(buffer), "%d\n", port->dma); if (len > *lenp) len = *lenp; @@ -213,7 +213,7 @@ static int do_hardware_modes(struct ctl_table *table, int write, #define printmode(x) \ do { \ if (port->modes & PARPORT_MODE_##x) \ - len += sprintf(buffer + len, "%s%s", f++ ? "," : "", #x); \ + len += snprintf(buffer + len, sizeof(buffer) - len, "%s%s", f++ ? "," : "", #x); \ } while (0) int f = 0; printmode(PCSPP); diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c index d3a7d14ee685..cd0e0022f91d 100644 --- a/drivers/pci/controller/dwc/pci-keystone.c +++ b/drivers/pci/controller/dwc/pci-keystone.c @@ -245,8 +245,68 @@ static struct irq_chip ks_pcie_msi_irq_chip = { .irq_unmask = ks_pcie_msi_unmask, }; +/** + * ks_pcie_set_dbi_mode() - Set DBI mode to access overlaid BAR mask registers + * @ks_pcie: A pointer to the keystone_pcie structure which holds the KeyStone + * PCIe host controller driver information. + * + * Since modification of dbi_cs2 involves different clock domain, read the + * status back to ensure the transition is complete. + */ +static void ks_pcie_set_dbi_mode(struct keystone_pcie *ks_pcie) +{ + u32 val; + + val = ks_pcie_app_readl(ks_pcie, CMD_STATUS); + val |= DBI_CS2; + ks_pcie_app_writel(ks_pcie, CMD_STATUS, val); + + do { + val = ks_pcie_app_readl(ks_pcie, CMD_STATUS); + } while (!(val & DBI_CS2)); +} + +/** + * ks_pcie_clear_dbi_mode() - Disable DBI mode + * @ks_pcie: A pointer to the keystone_pcie structure which holds the KeyStone + * PCIe host controller driver information. + * + * Since modification of dbi_cs2 involves different clock domain, read the + * status back to ensure the transition is complete. 
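As the kernel-doc above notes, flipping DBI_CS2 crosses a clock domain, so the write is followed by polling the same register until the new value is observed. The stripped-down sketch below shows that write-then-poll-readback idiom with a plain variable standing in for the MMIO register; the DEMO_* names and bit position are invented, and a real driver would bound the loop with a timeout.

#include <stdint.h>
#include <stdio.h>

#define DEMO_DBI_CS2 (1u << 5)		/* illustrative bit position */

static volatile uint32_t demo_cmd_status;	/* stand-in for the MMIO register */

static void demo_set_dbi_mode(void)
{
	demo_cmd_status |= DEMO_DBI_CS2;

	/* Read back until the other clock domain has latched the bit. */
	while (!(demo_cmd_status & DEMO_DBI_CS2))
		;
}

static void demo_clear_dbi_mode(void)
{
	demo_cmd_status &= ~DEMO_DBI_CS2;

	while (demo_cmd_status & DEMO_DBI_CS2)
		;
}

int main(void)
{
	demo_set_dbi_mode();
	printf("DBI mode on:  0x%08x\n", (unsigned int)demo_cmd_status);
	demo_clear_dbi_mode();
	printf("DBI mode off: 0x%08x\n", (unsigned int)demo_cmd_status);
	return 0;
}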
+ */ +static void ks_pcie_clear_dbi_mode(struct keystone_pcie *ks_pcie) +{ + u32 val; + + val = ks_pcie_app_readl(ks_pcie, CMD_STATUS); + val &= ~DBI_CS2; + ks_pcie_app_writel(ks_pcie, CMD_STATUS, val); + + do { + val = ks_pcie_app_readl(ks_pcie, CMD_STATUS); + } while (val & DBI_CS2); +} + static int ks_pcie_msi_host_init(struct dw_pcie_rp *pp) { + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct keystone_pcie *ks_pcie = to_keystone_pcie(pci); + + /* Configure and set up BAR0 */ + ks_pcie_set_dbi_mode(ks_pcie); + + /* Enable BAR0 */ + dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 1); + dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, SZ_4K - 1); + + ks_pcie_clear_dbi_mode(ks_pcie); + + /* + * For BAR0, just setting bus address for inbound writes (MSI) should + * be sufficient. Use physical address to avoid any conflicts. + */ + dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, ks_pcie->app.start); + pp->msi_irq_chip = &ks_pcie_msi_irq_chip; return dw_pcie_allocate_domains(pp); } @@ -340,59 +400,22 @@ static const struct irq_domain_ops ks_pcie_intx_irq_domain_ops = { .xlate = irq_domain_xlate_onetwocell, }; -/** - * ks_pcie_set_dbi_mode() - Set DBI mode to access overlaid BAR mask registers - * @ks_pcie: A pointer to the keystone_pcie structure which holds the KeyStone - * PCIe host controller driver information. - * - * Since modification of dbi_cs2 involves different clock domain, read the - * status back to ensure the transition is complete. - */ -static void ks_pcie_set_dbi_mode(struct keystone_pcie *ks_pcie) -{ - u32 val; - - val = ks_pcie_app_readl(ks_pcie, CMD_STATUS); - val |= DBI_CS2; - ks_pcie_app_writel(ks_pcie, CMD_STATUS, val); - - do { - val = ks_pcie_app_readl(ks_pcie, CMD_STATUS); - } while (!(val & DBI_CS2)); -} - -/** - * ks_pcie_clear_dbi_mode() - Disable DBI mode - * @ks_pcie: A pointer to the keystone_pcie structure which holds the KeyStone - * PCIe host controller driver information. - * - * Since modification of dbi_cs2 involves different clock domain, read the - * status back to ensure the transition is complete. 
- */ -static void ks_pcie_clear_dbi_mode(struct keystone_pcie *ks_pcie) -{ - u32 val; - - val = ks_pcie_app_readl(ks_pcie, CMD_STATUS); - val &= ~DBI_CS2; - ks_pcie_app_writel(ks_pcie, CMD_STATUS, val); - - do { - val = ks_pcie_app_readl(ks_pcie, CMD_STATUS); - } while (val & DBI_CS2); -} - -static void ks_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie) +static int ks_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie) { u32 val; u32 num_viewport = ks_pcie->num_viewport; struct dw_pcie *pci = ks_pcie->pci; struct dw_pcie_rp *pp = &pci->pp; - u64 start, end; + struct resource_entry *entry; struct resource *mem; + u64 start, end; int i; - mem = resource_list_first_type(&pp->bridge->windows, IORESOURCE_MEM)->res; + entry = resource_list_first_type(&pp->bridge->windows, IORESOURCE_MEM); + if (!entry) + return -ENODEV; + + mem = entry->res; start = mem->start; end = mem->end; @@ -403,7 +426,7 @@ static void ks_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie) ks_pcie_clear_dbi_mode(ks_pcie); if (ks_pcie->is_am6) - return; + return 0; val = ilog2(OB_WIN_SIZE); ks_pcie_app_writel(ks_pcie, OB_SIZE, val); @@ -420,6 +443,8 @@ static void ks_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie) val = ks_pcie_app_readl(ks_pcie, CMD_STATUS); val |= OB_XLAT_EN_VAL; ks_pcie_app_writel(ks_pcie, CMD_STATUS, val); + + return 0; } static void __iomem *ks_pcie_other_map_bus(struct pci_bus *bus, @@ -445,44 +470,10 @@ static struct pci_ops ks_child_pcie_ops = { .write = pci_generic_config_write, }; -/** - * ks_pcie_v3_65_add_bus() - keystone add_bus post initialization - * @bus: A pointer to the PCI bus structure. - * - * This sets BAR0 to enable inbound access for MSI_IRQ register - */ -static int ks_pcie_v3_65_add_bus(struct pci_bus *bus) -{ - struct dw_pcie_rp *pp = bus->sysdata; - struct dw_pcie *pci = to_dw_pcie_from_pp(pp); - struct keystone_pcie *ks_pcie = to_keystone_pcie(pci); - - if (!pci_is_root_bus(bus)) - return 0; - - /* Configure and set up BAR0 */ - ks_pcie_set_dbi_mode(ks_pcie); - - /* Enable BAR0 */ - dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 1); - dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, SZ_4K - 1); - - ks_pcie_clear_dbi_mode(ks_pcie); - - /* - * For BAR0, just setting bus address for inbound writes (MSI) should - * be sufficient. Use physical address to avoid any conflicts. 
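The pcie-designware-ep.c hunk a little further below starts storing free_win + 1 in bar_to_atu[] so that the value 0 can mean "no inbound window allocated", and decodes it with a matching "- 1". A small standalone sketch of that index-plus-one encoding follows; the demo_* names and array sizes are invented for illustration.

#include <stdio.h>

#define DEMO_NUM_BARS		6
#define DEMO_NUM_WINDOWS	8

static unsigned int demo_bar_to_win[DEMO_NUM_BARS];	/* 0 == not mapped */

static int demo_map_bar(unsigned int bar, unsigned int free_win)
{
	if (bar >= DEMO_NUM_BARS || free_win >= DEMO_NUM_WINDOWS)
		return -1;
	demo_bar_to_win[bar] = free_win + 1;	/* bias by one so 0 stays "unused" */
	return 0;
}

static int demo_bar_window(unsigned int bar)
{
	if (bar >= DEMO_NUM_BARS || !demo_bar_to_win[bar])
		return -1;			/* never mapped */
	return (int)demo_bar_to_win[bar] - 1;	/* undo the bias */
}

int main(void)
{
	demo_map_bar(2, 0);	/* window 0 is a valid, distinguishable mapping */
	printf("BAR2 -> window %d\n", demo_bar_window(2));
	printf("BAR3 -> window %d (unmapped)\n", demo_bar_window(3));
	return 0;
}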
- */ - dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, ks_pcie->app.start); - - return 0; -} - static struct pci_ops ks_pcie_ops = { .map_bus = dw_pcie_own_conf_map_bus, .read = pci_generic_config_read, .write = pci_generic_config_write, - .add_bus = ks_pcie_v3_65_add_bus, }; /** @@ -814,7 +805,10 @@ static int __init ks_pcie_host_init(struct dw_pcie_rp *pp) return ret; ks_pcie_stop_link(pci); - ks_pcie_setup_rc_app_regs(ks_pcie); + ret = ks_pcie_setup_rc_app_regs(ks_pcie); + if (ret) + return ret; + writew(PCI_IO_RANGE_TYPE_32 | (PCI_IO_RANGE_TYPE_32 << 8), pci->dbi_base + PCI_IO_BASE); diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c index 47391d7d3a73..769e84824687 100644 --- a/drivers/pci/controller/dwc/pcie-designware-ep.c +++ b/drivers/pci/controller/dwc/pcie-designware-ep.c @@ -161,7 +161,7 @@ static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, u8 func_no, int type, if (!ep->bar_to_atu[bar]) free_win = find_first_zero_bit(ep->ib_window_map, pci->num_ib_windows); else - free_win = ep->bar_to_atu[bar]; + free_win = ep->bar_to_atu[bar] - 1; if (free_win >= pci->num_ib_windows) { dev_err(pci->dev, "No free inbound window\n"); @@ -175,7 +175,11 @@ static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, u8 func_no, int type, return ret; } - ep->bar_to_atu[bar] = free_win; + /* + * Always increment free_win before assignment, since value 0 is used to identify + * unallocated mapping. + */ + ep->bar_to_atu[bar] = free_win + 1; set_bit(free_win, ep->ib_window_map); return 0; @@ -212,7 +216,10 @@ static void dw_pcie_ep_clear_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no, struct dw_pcie_ep *ep = epc_get_drvdata(epc); struct dw_pcie *pci = to_dw_pcie_from_ep(ep); enum pci_barno bar = epf_bar->barno; - u32 atu_index = ep->bar_to_atu[bar]; + u32 atu_index = ep->bar_to_atu[bar] - 1; + + if (!ep->bar_to_atu[bar]) + return; __dw_pcie_ep_reset_bar(pci, func_no, bar, epf_bar->flags); diff --git a/drivers/pci/controller/dwc/pcie-dw-rockchip.c b/drivers/pci/controller/dwc/pcie-dw-rockchip.c index d6842141d384..a909e42b4273 100644 --- a/drivers/pci/controller/dwc/pcie-dw-rockchip.c +++ b/drivers/pci/controller/dwc/pcie-dw-rockchip.c @@ -240,7 +240,7 @@ static int rockchip_pcie_resource_get(struct platform_device *pdev, return PTR_ERR(rockchip->apb_base); rockchip->rst_gpio = devm_gpiod_get_optional(&pdev->dev, "reset", - GPIOD_OUT_HIGH); + GPIOD_OUT_LOW); if (IS_ERR(rockchip->rst_gpio)) return PTR_ERR(rockchip->rst_gpio); diff --git a/drivers/pci/controller/dwc/pcie-qcom-ep.c b/drivers/pci/controller/dwc/pcie-qcom-ep.c index 2fb8c15e7a91..50b1635e3cbb 100644 --- a/drivers/pci/controller/dwc/pcie-qcom-ep.c +++ b/drivers/pci/controller/dwc/pcie-qcom-ep.c @@ -500,12 +500,6 @@ static int qcom_pcie_perst_deassert(struct dw_pcie *pci) static void qcom_pcie_perst_assert(struct dw_pcie *pci) { struct qcom_pcie_ep *pcie_ep = to_pcie_ep(pci); - struct device *dev = pci->dev; - - if (pcie_ep->link_status == QCOM_PCIE_EP_LINK_DISABLED) { - dev_dbg(dev, "Link is already disabled\n"); - return; - } dw_pcie_ep_cleanup(&pci->ep); qcom_pcie_disable_resources(pcie_ep); diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c index 93f5433c5c55..4537313ef37a 100644 --- a/drivers/pci/controller/dwc/pcie-tegra194.c +++ b/drivers/pci/controller/dwc/pcie-tegra194.c @@ -2015,6 +2015,7 @@ static const struct pci_epc_features tegra_pcie_epc_features = { .bar[BAR_3] = { .type = BAR_RESERVED, }, .bar[BAR_4] = { .type = 
BAR_RESERVED, }, .bar[BAR_5] = { .type = BAR_RESERVED, }, + .align = SZ_64K, }; static const struct pci_epc_features* diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c index 5992280e8110..cdd5be16021d 100644 --- a/drivers/pci/controller/pci-hyperv.c +++ b/drivers/pci/controller/pci-hyperv.c @@ -1130,8 +1130,8 @@ static void _hv_pcifront_read_config(struct hv_pci_dev *hpdev, int where, PCI_CAPABILITY_LIST) { /* ROM BARs are unimplemented */ *val = 0; - } else if (where >= PCI_INTERRUPT_LINE && where + size <= - PCI_INTERRUPT_PIN) { + } else if ((where >= PCI_INTERRUPT_LINE && where + size <= PCI_INTERRUPT_PIN) || + (where >= PCI_INTERRUPT_PIN && where + size <= PCI_MIN_GNT)) { /* * Interrupt Line and Interrupt PIN are hard-wired to zero * because this front-end only supports message-signaled diff --git a/drivers/pci/controller/pci-loongson.c b/drivers/pci/controller/pci-loongson.c index 8b34ccff073a..bc630ab8a283 100644 --- a/drivers/pci/controller/pci-loongson.c +++ b/drivers/pci/controller/pci-loongson.c @@ -163,6 +163,19 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON, DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON, DEV_LS7A_HDMI, loongson_pci_pin_quirk); +static void loongson_pci_msi_quirk(struct pci_dev *dev) +{ + u16 val, class = dev->class >> 8; + + if (class != PCI_CLASS_BRIDGE_HOST) + return; + + pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &val); + val |= PCI_MSI_FLAGS_ENABLE; + pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, val); +} +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON, DEV_LS7A_PCIE_PORT5, loongson_pci_msi_quirk); + static struct loongson_pci *pci_bus_to_loongson_pci(struct pci_bus *bus) { struct pci_config_window *cfg; diff --git a/drivers/pci/controller/pcie-rcar-host.c b/drivers/pci/controller/pcie-rcar-host.c index 996077ab7cfd..c01efc6ea64f 100644 --- a/drivers/pci/controller/pcie-rcar-host.c +++ b/drivers/pci/controller/pcie-rcar-host.c @@ -78,7 +78,11 @@ static int rcar_pcie_wakeup(struct device *pcie_dev, void __iomem *pcie_base) writel(L1IATN, pcie_base + PMCTLR); ret = readl_poll_timeout_atomic(pcie_base + PMSR, val, val & L1FAEG, 10, 1000); - WARN(ret, "Timeout waiting for L1 link state, ret=%d\n", ret); + if (ret) { + dev_warn_ratelimited(pcie_dev, + "Timeout waiting for L1 link state, ret=%d\n", + ret); + } writel(L1FAEG | PMEL1RX, pcie_base + PMSR); } diff --git a/drivers/pci/controller/pcie-rockchip.c b/drivers/pci/controller/pcie-rockchip.c index 0ef2e622d36e..c07d7129f1c7 100644 --- a/drivers/pci/controller/pcie-rockchip.c +++ b/drivers/pci/controller/pcie-rockchip.c @@ -121,7 +121,7 @@ int rockchip_pcie_parse_dt(struct rockchip_pcie *rockchip) if (rockchip->is_rc) { rockchip->ep_gpio = devm_gpiod_get_optional(dev, "ep", - GPIOD_OUT_HIGH); + GPIOD_OUT_LOW); if (IS_ERR(rockchip->ep_gpio)) return dev_err_probe(dev, PTR_ERR(rockchip->ep_gpio), "failed to get ep GPIO\n"); diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c index 977fb79c1567..546d2a27955c 100644 --- a/drivers/pci/endpoint/functions/pci-epf-test.c +++ b/drivers/pci/endpoint/functions/pci-epf-test.c @@ -735,20 +735,12 @@ static int pci_epf_test_core_init(struct pci_epf *epf) { struct pci_epf_test *epf_test = epf_get_drvdata(epf); struct pci_epf_header *header = epf->header; - const struct pci_epc_features *epc_features; + const struct pci_epc_features *epc_features = epf_test->epc_features; struct pci_epc *epc = epf->epc; struct device *dev = &epf->dev; bool linkup_notifier = 
false; - bool msix_capable = false; - bool msi_capable = true; int ret; - epc_features = pci_epc_get_features(epc, epf->func_no, epf->vfunc_no); - if (epc_features) { - msix_capable = epc_features->msix_capable; - msi_capable = epc_features->msi_capable; - } - if (epf->vfunc_no <= 1) { ret = pci_epc_write_header(epc, epf->func_no, epf->vfunc_no, header); if (ret) { @@ -761,7 +753,7 @@ static int pci_epf_test_core_init(struct pci_epf *epf) if (ret) return ret; - if (msi_capable) { + if (epc_features->msi_capable) { ret = pci_epc_set_msi(epc, epf->func_no, epf->vfunc_no, epf->msi_interrupts); if (ret) { @@ -770,7 +762,7 @@ static int pci_epf_test_core_init(struct pci_epf *epf) } } - if (msix_capable) { + if (epc_features->msix_capable) { ret = pci_epc_set_msix(epc, epf->func_no, epf->vfunc_no, epf->msix_interrupts, epf_test->test_reg_bar, diff --git a/drivers/pci/endpoint/functions/pci-epf-vntb.c b/drivers/pci/endpoint/functions/pci-epf-vntb.c index 8e779eecd62d..874cb097b093 100644 --- a/drivers/pci/endpoint/functions/pci-epf-vntb.c +++ b/drivers/pci/endpoint/functions/pci-epf-vntb.c @@ -799,8 +799,9 @@ static int epf_ntb_epc_init(struct epf_ntb *ntb) */ static void epf_ntb_epc_cleanup(struct epf_ntb *ntb) { - epf_ntb_db_bar_clear(ntb); epf_ntb_mw_bar_clear(ntb, ntb->num_mws); + epf_ntb_db_bar_clear(ntb); + epf_ntb_config_sspad_bar_clear(ntb); } #define EPF_NTB_R(_name) \ @@ -1018,8 +1019,10 @@ static int vpci_scan_bus(void *sysdata) struct epf_ntb *ndev = sysdata; vpci_bus = pci_scan_bus(ndev->vbus_number, &vpci_ops, sysdata); - if (vpci_bus) - pr_err("create pci bus\n"); + if (!vpci_bus) { + pr_err("create pci bus failed\n"); + return -EINVAL; + } pci_bus_add_devices(vpci_bus); @@ -1335,13 +1338,19 @@ static int epf_ntb_bind(struct pci_epf *epf) ret = pci_register_driver(&vntb_pci_driver); if (ret) { dev_err(dev, "failure register vntb pci driver\n"); - goto err_bar_alloc; + goto err_epc_cleanup; } - vpci_scan_bus(ntb); + ret = vpci_scan_bus(ntb); + if (ret) + goto err_unregister; return 0; +err_unregister: + pci_unregister_driver(&vntb_pci_driver); +err_epc_cleanup: + epf_ntb_epc_cleanup(ntb); err_bar_alloc: epf_ntb_config_spad_bar_free(ntb); diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 35fb1f17a589..dff09e4892d3 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -4753,7 +4753,7 @@ static int pci_bus_max_d3cold_delay(const struct pci_bus *bus) */ int pci_bridge_wait_for_secondary_bus(struct pci_dev *dev, char *reset_type) { - struct pci_dev *child; + struct pci_dev *child __free(pci_dev_put) = NULL; int delay; if (pci_dev_is_disconnected(dev)) @@ -4782,8 +4782,8 @@ int pci_bridge_wait_for_secondary_bus(struct pci_dev *dev, char *reset_type) return 0; } - child = list_first_entry(&dev->subordinate->devices, struct pci_dev, - bus_list); + child = pci_dev_get(list_first_entry(&dev->subordinate->devices, + struct pci_dev, bus_list)); up_read(&pci_bus_sem); /* diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c index 909e6a7c3cc3..141d6b31959b 100644 --- a/drivers/pci/setup-bus.c +++ b/drivers/pci/setup-bus.c @@ -829,11 +829,9 @@ static resource_size_t calculate_memsize(resource_size_t size, size = min_size; if (old_size == 1) old_size = 0; - if (size < old_size) - size = old_size; - size = ALIGN(max(size, add_size) + children_add_size, align); - return size; + size = max(size, add_size) + children_add_size; + return ALIGN(max(size, old_size), align); } resource_size_t __weak pcibios_window_alignment(struct pci_bus *bus, diff --git a/drivers/perf/arm_pmuv3.c 
b/drivers/perf/arm_pmuv3.c index 23fa6c5da82c..8ed5c3358920 100644 --- a/drivers/perf/arm_pmuv3.c +++ b/drivers/perf/arm_pmuv3.c @@ -338,6 +338,11 @@ static bool armv8pmu_event_want_user_access(struct perf_event *event) return ATTR_CFG_GET_FLD(&event->attr, rdpmc); } +static u32 armv8pmu_event_get_threshold(struct perf_event_attr *attr) +{ + return ATTR_CFG_GET_FLD(attr, threshold); +} + static u8 armv8pmu_event_threshold_control(struct perf_event_attr *attr) { u8 th_compare = ATTR_CFG_GET_FLD(attr, threshold_compare); @@ -941,7 +946,8 @@ static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc, unsigned long evtype = hwc->config_base & ARMV8_PMU_EVTYPE_EVENT; /* Always prefer to place a cycle counter into the cycle counter. */ - if (evtype == ARMV8_PMUV3_PERFCTR_CPU_CYCLES) { + if ((evtype == ARMV8_PMUV3_PERFCTR_CPU_CYCLES) && + !armv8pmu_event_get_threshold(&event->attr)) { if (!test_and_set_bit(ARMV8_IDX_CYCLE_COUNTER, cpuc->used_mask)) return ARMV8_IDX_CYCLE_COUNTER; else if (armv8pmu_event_is_64bit(event) && @@ -1033,7 +1039,7 @@ static int armv8pmu_set_event_filter(struct hw_perf_event *event, * If FEAT_PMUv3_TH isn't implemented, then THWIDTH (threshold_max) will * be 0 and will also trigger this check, preventing it from being used. */ - th = ATTR_CFG_GET_FLD(attr, threshold); + th = armv8pmu_event_get_threshold(attr); if (th > threshold_max(cpu_pmu)) { pr_debug("PMU event threshold exceeds max value\n"); return -EINVAL; diff --git a/drivers/phy/cadence/phy-cadence-torrent.c b/drivers/phy/cadence/phy-cadence-torrent.c index 95924a09960c..6113f0022e6e 100644 --- a/drivers/phy/cadence/phy-cadence-torrent.c +++ b/drivers/phy/cadence/phy-cadence-torrent.c @@ -1156,6 +1156,9 @@ static int cdns_torrent_dp_set_power_state(struct cdns_torrent_phy *cdns_phy, ret = regmap_read_poll_timeout(regmap, PHY_PMA_XCVR_POWER_STATE_ACK, read_val, (read_val & mask) == value, 0, POLL_TIMEOUT_US); + if (ret) + return ret; + cdns_torrent_dp_write(regmap, PHY_PMA_XCVR_POWER_STATE_REQ, 0x00000000); ndelay(100); diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c b/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c index 6c796723c8f5..8fcdcb193d24 100644 --- a/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c +++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c @@ -3730,14 +3730,11 @@ static int phy_aux_clk_register(struct qmp_pcie *qmp, struct device_node *np) { struct clk_fixed_rate *fixed = &qmp->aux_clk_fixed; struct clk_init_data init = { }; - int ret; + char name[64]; - ret = of_property_read_string_index(np, "clock-output-names", 1, &init.name); - if (ret) { - dev_err(qmp->dev, "%pOFn: No clock-output-names index 1\n", np); - return ret; - } + snprintf(name, sizeof(name), "%s::phy_aux_clk", dev_name(qmp->dev)); + init.name = name; init.ops = &clk_fixed_rate_ops; fixed->fixed_rate = qmp->cfg->aux_clock_rate; diff --git a/drivers/phy/rockchip/Kconfig b/drivers/phy/rockchip/Kconfig index 08b0f4345760..490263375057 100644 --- a/drivers/phy/rockchip/Kconfig +++ b/drivers/phy/rockchip/Kconfig @@ -86,7 +86,9 @@ config PHY_ROCKCHIP_PCIE config PHY_ROCKCHIP_SAMSUNG_HDPTX tristate "Rockchip Samsung HDMI/eDP Combo PHY driver" depends on (ARCH_ROCKCHIP || COMPILE_TEST) && OF + depends on HAS_IOMEM select GENERIC_PHY + select MFD_SYSCON select RATIONAL help Enable this to support the Rockchip HDMI/eDP Combo PHY diff --git a/drivers/phy/xilinx/phy-zynqmp.c b/drivers/phy/xilinx/phy-zynqmp.c index dc8319bda43d..f2bff7f25f05 100644 --- a/drivers/phy/xilinx/phy-zynqmp.c +++ b/drivers/phy/xilinx/phy-zynqmp.c @@ -80,7 +80,8 @@ /* Reference 
clock selection parameters */ #define L0_Ln_REF_CLK_SEL(n) (0x2860 + (n) * 4) -#define L0_REF_CLK_SEL_MASK 0x8f +#define L0_REF_CLK_LCL_SEL BIT(7) +#define L0_REF_CLK_SEL_MASK 0x9f /* Calibration digital logic parameters */ #define L3_TM_CALIB_DIG19 0xec4c @@ -349,11 +350,12 @@ static void xpsgtr_configure_pll(struct xpsgtr_phy *gtr_phy) PLL_FREQ_MASK, ssc->pll_ref_clk); /* Enable lane clock sharing, if required */ - if (gtr_phy->refclk != gtr_phy->lane) { - /* Lane3 Ref Clock Selection Register */ + if (gtr_phy->refclk == gtr_phy->lane) + xpsgtr_clr_set(gtr_phy->dev, L0_Ln_REF_CLK_SEL(gtr_phy->lane), + L0_REF_CLK_SEL_MASK, L0_REF_CLK_LCL_SEL); + else xpsgtr_clr_set(gtr_phy->dev, L0_Ln_REF_CLK_SEL(gtr_phy->lane), L0_REF_CLK_SEL_MASK, 1 << gtr_phy->refclk); - } /* SSC step size [7:0] */ xpsgtr_clr_set_phy(gtr_phy, L0_PLL_SS_STEP_SIZE_0_LSB, @@ -573,7 +575,7 @@ static int xpsgtr_phy_init(struct phy *phy) mutex_lock(&gtr_dev->gtr_mutex); /* Configure and enable the clock when peripheral phy_init call */ - if (clk_prepare_enable(gtr_dev->clk[gtr_phy->lane])) + if (clk_prepare_enable(gtr_dev->clk[gtr_phy->refclk])) goto out; /* Skip initialization if not required. */ @@ -625,7 +627,7 @@ static int xpsgtr_phy_exit(struct phy *phy) gtr_phy->skip_phy_init = false; /* Ensure that disable clock only, which configure for lane */ - clk_disable_unprepare(gtr_dev->clk[gtr_phy->lane]); + clk_disable_unprepare(gtr_dev->clk[gtr_phy->refclk]); return 0; } diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c index f424a57f0013..4438f3b4b5ef 100644 --- a/drivers/pinctrl/core.c +++ b/drivers/pinctrl/core.c @@ -2080,6 +2080,14 @@ pinctrl_init_controller(struct pinctrl_desc *pctldesc, struct device *dev, return ERR_PTR(ret); } +static void pinctrl_uninit_controller(struct pinctrl_dev *pctldev, struct pinctrl_desc *pctldesc) +{ + pinctrl_free_pindescs(pctldev, pctldesc->pins, + pctldesc->npins); + mutex_destroy(&pctldev->mutex); + kfree(pctldev); +} + static int pinctrl_claim_hogs(struct pinctrl_dev *pctldev) { pctldev->p = create_pinctrl(pctldev->dev, pctldev); @@ -2160,8 +2168,10 @@ struct pinctrl_dev *pinctrl_register(struct pinctrl_desc *pctldesc, return pctldev; error = pinctrl_enable(pctldev); - if (error) + if (error) { + pinctrl_uninit_controller(pctldev, pctldesc); return ERR_PTR(error); + } return pctldev; } diff --git a/drivers/pinctrl/freescale/pinctrl-mxs.c b/drivers/pinctrl/freescale/pinctrl-mxs.c index e77311f26262..4813a9e16cb3 100644 --- a/drivers/pinctrl/freescale/pinctrl-mxs.c +++ b/drivers/pinctrl/freescale/pinctrl-mxs.c @@ -413,8 +413,8 @@ static int mxs_pinctrl_probe_dt(struct platform_device *pdev, int ret; u32 val; - child = of_get_next_child(np, NULL); - if (!child) { + val = of_get_child_count(np); + if (val == 0) { dev_err(&pdev->dev, "no group is defined\n"); return -ENOENT; } diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c index 3f56991f5b89..6a7461978630 100644 --- a/drivers/pinctrl/pinctrl-rockchip.c +++ b/drivers/pinctrl/pinctrl-rockchip.c @@ -915,9 +915,8 @@ static struct rockchip_mux_route_data rk3308_mux_route_data[] = { RK_MUXROUTE_SAME(0, RK_PC3, 1, 0x314, BIT(16 + 0) | BIT(0)), /* rtc_clk */ RK_MUXROUTE_SAME(1, RK_PC6, 2, 0x314, BIT(16 + 2) | BIT(16 + 3)), /* uart2_rxm0 */ RK_MUXROUTE_SAME(4, RK_PD2, 2, 0x314, BIT(16 + 2) | BIT(16 + 3) | BIT(2)), /* uart2_rxm1 */ - RK_MUXROUTE_SAME(0, RK_PB7, 2, 0x608, BIT(16 + 8) | BIT(16 + 9)), /* i2c3_sdam0 */ - RK_MUXROUTE_SAME(3, RK_PB4, 2, 0x608, BIT(16 + 8) | BIT(16 + 9) | BIT(8)), /* 
i2c3_sdam1 */ - RK_MUXROUTE_SAME(2, RK_PA0, 3, 0x608, BIT(16 + 8) | BIT(16 + 9) | BIT(9)), /* i2c3_sdam2 */ + RK_MUXROUTE_SAME(0, RK_PB7, 2, 0x314, BIT(16 + 4)), /* i2c3_sdam0 */ + RK_MUXROUTE_SAME(3, RK_PB4, 2, 0x314, BIT(16 + 4) | BIT(4)), /* i2c3_sdam1 */ RK_MUXROUTE_SAME(1, RK_PA3, 2, 0x308, BIT(16 + 3)), /* i2s-8ch-1-sclktxm0 */ RK_MUXROUTE_SAME(1, RK_PA4, 2, 0x308, BIT(16 + 3)), /* i2s-8ch-1-sclkrxm0 */ RK_MUXROUTE_SAME(1, RK_PB5, 2, 0x308, BIT(16 + 3) | BIT(3)), /* i2s-8ch-1-sclktxm1 */ @@ -926,18 +925,6 @@ static struct rockchip_mux_route_data rk3308_mux_route_data[] = { RK_MUXROUTE_SAME(1, RK_PB6, 4, 0x308, BIT(16 + 12) | BIT(16 + 13) | BIT(12)), /* pdm-clkm1 */ RK_MUXROUTE_SAME(2, RK_PA6, 2, 0x308, BIT(16 + 12) | BIT(16 + 13) | BIT(13)), /* pdm-clkm2 */ RK_MUXROUTE_SAME(2, RK_PA4, 3, 0x600, BIT(16 + 2) | BIT(2)), /* pdm-clkm-m2 */ - RK_MUXROUTE_SAME(3, RK_PB2, 3, 0x314, BIT(16 + 9)), /* spi1_miso */ - RK_MUXROUTE_SAME(2, RK_PA4, 2, 0x314, BIT(16 + 9) | BIT(9)), /* spi1_miso_m1 */ - RK_MUXROUTE_SAME(0, RK_PB3, 3, 0x314, BIT(16 + 10) | BIT(16 + 11)), /* owire_m0 */ - RK_MUXROUTE_SAME(1, RK_PC6, 7, 0x314, BIT(16 + 10) | BIT(16 + 11) | BIT(10)), /* owire_m1 */ - RK_MUXROUTE_SAME(2, RK_PA2, 5, 0x314, BIT(16 + 10) | BIT(16 + 11) | BIT(11)), /* owire_m2 */ - RK_MUXROUTE_SAME(0, RK_PB3, 2, 0x314, BIT(16 + 12) | BIT(16 + 13)), /* can_rxd_m0 */ - RK_MUXROUTE_SAME(1, RK_PC6, 5, 0x314, BIT(16 + 12) | BIT(16 + 13) | BIT(12)), /* can_rxd_m1 */ - RK_MUXROUTE_SAME(2, RK_PA2, 4, 0x314, BIT(16 + 12) | BIT(16 + 13) | BIT(13)), /* can_rxd_m2 */ - RK_MUXROUTE_SAME(1, RK_PC4, 3, 0x314, BIT(16 + 14)), /* mac_rxd0_m0 */ - RK_MUXROUTE_SAME(4, RK_PA2, 2, 0x314, BIT(16 + 14) | BIT(14)), /* mac_rxd0_m1 */ - RK_MUXROUTE_SAME(3, RK_PB4, 4, 0x314, BIT(16 + 15)), /* uart3_rx */ - RK_MUXROUTE_SAME(0, RK_PC1, 3, 0x314, BIT(16 + 15) | BIT(15)), /* uart3_rx_m1 */ }; static struct rockchip_mux_route_data rk3328_mux_route_data[] = { diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c index a798f31d6954..4c6bfabb6bd7 100644 --- a/drivers/pinctrl/pinctrl-single.c +++ b/drivers/pinctrl/pinctrl-single.c @@ -1329,7 +1329,6 @@ static void pcs_irq_free(struct pcs_device *pcs) static void pcs_free_resources(struct pcs_device *pcs) { pcs_irq_free(pcs); - pinctrl_unregister(pcs->pctl); #if IS_BUILTIN(CONFIG_PINCTRL_SINGLE) if (pcs->missing_nr_pinctrl_cells) @@ -1879,7 +1878,7 @@ static int pcs_probe(struct platform_device *pdev) if (ret < 0) goto free; - ret = pinctrl_register_and_init(&pcs->desc, pcs->dev, pcs, &pcs->pctl); + ret = devm_pinctrl_register_and_init(pcs->dev, &pcs->desc, pcs, &pcs->pctl); if (ret) { dev_err(pcs->dev, "could not register single pinctrl driver\n"); goto free; @@ -1912,8 +1911,10 @@ static int pcs_probe(struct platform_device *pdev) dev_info(pcs->dev, "%i pins, size %u\n", pcs->desc.npins, pcs->size); - return pinctrl_enable(pcs->pctl); + if (pinctrl_enable(pcs->pctl)) + goto free; + return 0; free: pcs_free_resources(pcs); diff --git a/drivers/pinctrl/renesas/pfc-r8a779g0.c b/drivers/pinctrl/renesas/pfc-r8a779g0.c index d2de526a3b58..bb843e333c88 100644 --- a/drivers/pinctrl/renesas/pfc-r8a779g0.c +++ b/drivers/pinctrl/renesas/pfc-r8a779g0.c @@ -68,20 +68,20 @@ #define GPSR0_9 F_(MSIOF5_SYNC, IP1SR0_7_4) #define GPSR0_8 F_(MSIOF5_SS1, IP1SR0_3_0) #define GPSR0_7 F_(MSIOF5_SS2, IP0SR0_31_28) -#define GPSR0_6 F_(IRQ0, IP0SR0_27_24) -#define GPSR0_5 F_(IRQ1, IP0SR0_23_20) -#define GPSR0_4 F_(IRQ2, IP0SR0_19_16) -#define GPSR0_3 F_(IRQ3, IP0SR0_15_12) +#define GPSR0_6 
F_(IRQ0_A, IP0SR0_27_24) +#define GPSR0_5 F_(IRQ1_A, IP0SR0_23_20) +#define GPSR0_4 F_(IRQ2_A, IP0SR0_19_16) +#define GPSR0_3 F_(IRQ3_A, IP0SR0_15_12) #define GPSR0_2 F_(GP0_02, IP0SR0_11_8) #define GPSR0_1 F_(GP0_01, IP0SR0_7_4) #define GPSR0_0 F_(GP0_00, IP0SR0_3_0) /* GPSR1 */ -#define GPSR1_28 F_(HTX3, IP3SR1_19_16) -#define GPSR1_27 F_(HCTS3_N, IP3SR1_15_12) -#define GPSR1_26 F_(HRTS3_N, IP3SR1_11_8) -#define GPSR1_25 F_(HSCK3, IP3SR1_7_4) -#define GPSR1_24 F_(HRX3, IP3SR1_3_0) +#define GPSR1_28 F_(HTX3_A, IP3SR1_19_16) +#define GPSR1_27 F_(HCTS3_N_A, IP3SR1_15_12) +#define GPSR1_26 F_(HRTS3_N_A, IP3SR1_11_8) +#define GPSR1_25 F_(HSCK3_A, IP3SR1_7_4) +#define GPSR1_24 F_(HRX3_A, IP3SR1_3_0) #define GPSR1_23 F_(GP1_23, IP2SR1_31_28) #define GPSR1_22 F_(AUDIO_CLKIN, IP2SR1_27_24) #define GPSR1_21 F_(AUDIO_CLKOUT, IP2SR1_23_20) @@ -119,14 +119,14 @@ #define GPSR2_11 F_(CANFD0_RX, IP1SR2_15_12) #define GPSR2_10 F_(CANFD0_TX, IP1SR2_11_8) #define GPSR2_9 F_(CAN_CLK, IP1SR2_7_4) -#define GPSR2_8 F_(TPU0TO0, IP1SR2_3_0) -#define GPSR2_7 F_(TPU0TO1, IP0SR2_31_28) +#define GPSR2_8 F_(TPU0TO0_A, IP1SR2_3_0) +#define GPSR2_7 F_(TPU0TO1_A, IP0SR2_31_28) #define GPSR2_6 F_(FXR_TXDB, IP0SR2_27_24) -#define GPSR2_5 F_(FXR_TXENB_N, IP0SR2_23_20) +#define GPSR2_5 F_(FXR_TXENB_N_A, IP0SR2_23_20) #define GPSR2_4 F_(RXDB_EXTFXR, IP0SR2_19_16) #define GPSR2_3 F_(CLK_EXTFXR, IP0SR2_15_12) #define GPSR2_2 F_(RXDA_EXTFXR, IP0SR2_11_8) -#define GPSR2_1 F_(FXR_TXENA_N, IP0SR2_7_4) +#define GPSR2_1 F_(FXR_TXENA_N_A, IP0SR2_7_4) #define GPSR2_0 F_(FXR_TXDA, IP0SR2_3_0) /* GPSR3 */ @@ -275,13 +275,13 @@ /* SR0 */ /* IP0SR0 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */ -#define IP0SR0_3_0 F_(0, 0) FM(ERROROUTC_N_B) FM(TCLK2_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP0SR0_3_0 F_(0, 0) FM(ERROROUTC_N_B) FM(TCLK2_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) #define IP0SR0_7_4 F_(0, 0) FM(MSIOF3_SS1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) #define IP0SR0_11_8 F_(0, 0) FM(MSIOF3_SS2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) -#define IP0SR0_15_12 FM(IRQ3) FM(MSIOF3_SCK) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) -#define IP0SR0_19_16 FM(IRQ2) FM(MSIOF3_TXD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) -#define IP0SR0_23_20 FM(IRQ1) FM(MSIOF3_RXD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) -#define IP0SR0_27_24 FM(IRQ0) FM(MSIOF3_SYNC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP0SR0_15_12 FM(IRQ3_A) FM(MSIOF3_SCK) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP0SR0_19_16 FM(IRQ2_A) FM(MSIOF3_TXD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP0SR0_23_20 FM(IRQ1_A) FM(MSIOF3_RXD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) 
F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP0SR0_27_24 FM(IRQ0_A) FM(MSIOF3_SYNC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) #define IP0SR0_31_28 FM(MSIOF5_SS2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) /* IP1SR0 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */ @@ -290,72 +290,72 @@ #define IP1SR0_11_8 FM(MSIOF5_TXD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) #define IP1SR0_15_12 FM(MSIOF5_SCK) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) #define IP1SR0_19_16 FM(MSIOF5_RXD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) -#define IP1SR0_23_20 FM(MSIOF2_SS2) FM(TCLK1) FM(IRQ2_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) -#define IP1SR0_27_24 FM(MSIOF2_SS1) FM(HTX1) FM(TX1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) -#define IP1SR0_31_28 FM(MSIOF2_SYNC) FM(HRX1) FM(RX1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP1SR0_23_20 FM(MSIOF2_SS2) FM(TCLK1_A) FM(IRQ2_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP1SR0_27_24 FM(MSIOF2_SS1) FM(HTX1_A) FM(TX1_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP1SR0_31_28 FM(MSIOF2_SYNC) FM(HRX1_A) FM(RX1_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) /* IP2SR0 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */ -#define IP2SR0_3_0 FM(MSIOF2_TXD) FM(HCTS1_N) FM(CTS1_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) -#define IP2SR0_7_4 FM(MSIOF2_SCK) FM(HRTS1_N) FM(RTS1_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) -#define IP2SR0_11_8 FM(MSIOF2_RXD) FM(HSCK1) FM(SCK1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP2SR0_3_0 FM(MSIOF2_TXD) FM(HCTS1_N_A) FM(CTS1_N_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP2SR0_7_4 FM(MSIOF2_SCK) FM(HRTS1_N_A) FM(RTS1_N_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP2SR0_11_8 FM(MSIOF2_RXD) FM(HSCK1_A) FM(SCK1_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) /* SR1 */ /* IP0SR1 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */ -#define IP0SR1_3_0 FM(MSIOF1_SS2) FM(HTX3_A) FM(TX3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) -#define IP0SR1_7_4 FM(MSIOF1_SS1) FM(HCTS3_N_A) FM(RX3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) 
F_(0, 0) F_(0, 0) F_(0, 0) -#define IP0SR1_11_8 FM(MSIOF1_SYNC) FM(HRTS3_N_A) FM(RTS3_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) -#define IP0SR1_15_12 FM(MSIOF1_SCK) FM(HSCK3_A) FM(CTS3_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) -#define IP0SR1_19_16 FM(MSIOF1_TXD) FM(HRX3_A) FM(SCK3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP0SR1_3_0 FM(MSIOF1_SS2) FM(HTX3_B) FM(TX3_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP0SR1_7_4 FM(MSIOF1_SS1) FM(HCTS3_N_B) FM(RX3_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP0SR1_11_8 FM(MSIOF1_SYNC) FM(HRTS3_N_B) FM(RTS3_N_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP0SR1_15_12 FM(MSIOF1_SCK) FM(HSCK3_B) FM(CTS3_N_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP0SR1_19_16 FM(MSIOF1_TXD) FM(HRX3_B) FM(SCK3_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) #define IP0SR1_23_20 FM(MSIOF1_RXD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) -#define IP0SR1_27_24 FM(MSIOF0_SS2) FM(HTX1_X) FM(TX1_X) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) -#define IP0SR1_31_28 FM(MSIOF0_SS1) FM(HRX1_X) FM(RX1_X) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP0SR1_27_24 FM(MSIOF0_SS2) FM(HTX1_B) FM(TX1_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP0SR1_31_28 FM(MSIOF0_SS1) FM(HRX1_B) FM(RX1_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) /* IP1SR1 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */ -#define IP1SR1_3_0 FM(MSIOF0_SYNC) FM(HCTS1_N_X) FM(CTS1_N_X) FM(CANFD5_TX_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) -#define IP1SR1_7_4 FM(MSIOF0_TXD) FM(HRTS1_N_X) FM(RTS1_N_X) FM(CANFD5_RX_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) -#define IP1SR1_11_8 FM(MSIOF0_SCK) FM(HSCK1_X) FM(SCK1_X) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP1SR1_3_0 FM(MSIOF0_SYNC) FM(HCTS1_N_B) FM(CTS1_N_B) FM(CANFD5_TX_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP1SR1_7_4 FM(MSIOF0_TXD) FM(HRTS1_N_B) FM(RTS1_N_B) FM(CANFD5_RX_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP1SR1_11_8 FM(MSIOF0_SCK) FM(HSCK1_B) FM(SCK1_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) #define IP1SR1_15_12 FM(MSIOF0_RXD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) 
F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) #define IP1SR1_19_16 FM(HTX0) FM(TX0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) -#define IP1SR1_23_20 FM(HCTS0_N) FM(CTS0_N) FM(PWM8_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) -#define IP1SR1_27_24 FM(HRTS0_N) FM(RTS0_N) FM(PWM9_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) -#define IP1SR1_31_28 FM(HSCK0) FM(SCK0) FM(PWM0_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP1SR1_23_20 FM(HCTS0_N) FM(CTS0_N) FM(PWM8) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP1SR1_27_24 FM(HRTS0_N) FM(RTS0_N) FM(PWM9) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP1SR1_31_28 FM(HSCK0) FM(SCK0) FM(PWM0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) /* IP2SR1 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */ #define IP2SR1_3_0 FM(HRX0) FM(RX0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) #define IP2SR1_7_4 FM(SCIF_CLK) FM(IRQ4_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) -#define IP2SR1_11_8 FM(SSI_SCK) FM(TCLK3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) -#define IP2SR1_15_12 FM(SSI_WS) FM(TCLK4) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) -#define IP2SR1_19_16 FM(SSI_SD) FM(IRQ0_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) -#define IP2SR1_23_20 FM(AUDIO_CLKOUT) FM(IRQ1_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP2SR1_11_8 FM(SSI_SCK) FM(TCLK3_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP2SR1_15_12 FM(SSI_WS) FM(TCLK4_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP2SR1_19_16 FM(SSI_SD) FM(IRQ0_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP2SR1_23_20 FM(AUDIO_CLKOUT) FM(IRQ1_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) #define IP2SR1_27_24 FM(AUDIO_CLKIN) FM(PWM3_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) -#define IP2SR1_31_28 F_(0, 0) FM(TCLK2) FM(MSIOF4_SS1) FM(IRQ3_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP2SR1_31_28 F_(0, 0) FM(TCLK2_A) FM(MSIOF4_SS1) FM(IRQ3_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 
0) F_(0, 0) F_(0, 0) F_(0, 0) /* IP3SR1 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */ -#define IP3SR1_3_0 FM(HRX3) FM(SCK3_A) FM(MSIOF4_SS2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) -#define IP3SR1_7_4 FM(HSCK3) FM(CTS3_N_A) FM(MSIOF4_SCK) FM(TPU0TO0_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) -#define IP3SR1_11_8 FM(HRTS3_N) FM(RTS3_N_A) FM(MSIOF4_TXD) FM(TPU0TO1_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) -#define IP3SR1_15_12 FM(HCTS3_N) FM(RX3_A) FM(MSIOF4_RXD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) -#define IP3SR1_19_16 FM(HTX3) FM(TX3_A) FM(MSIOF4_SYNC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP3SR1_3_0 FM(HRX3_A) FM(SCK3_A) FM(MSIOF4_SS2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP3SR1_7_4 FM(HSCK3_A) FM(CTS3_N_A) FM(MSIOF4_SCK) FM(TPU0TO0_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP3SR1_11_8 FM(HRTS3_N_A) FM(RTS3_N_A) FM(MSIOF4_TXD) FM(TPU0TO1_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP3SR1_15_12 FM(HCTS3_N_A) FM(RX3_A) FM(MSIOF4_RXD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP3SR1_19_16 FM(HTX3_A) FM(TX3_A) FM(MSIOF4_SYNC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) /* SR2 */ /* IP0SR2 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */ -#define IP0SR2_3_0 FM(FXR_TXDA) FM(CANFD1_TX) FM(TPU0TO2_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) -#define IP0SR2_7_4 FM(FXR_TXENA_N) FM(CANFD1_RX) FM(TPU0TO3_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) -#define IP0SR2_11_8 FM(RXDA_EXTFXR) FM(CANFD5_TX) FM(IRQ5) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) -#define IP0SR2_15_12 FM(CLK_EXTFXR) FM(CANFD5_RX) FM(IRQ4_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP0SR2_3_0 FM(FXR_TXDA) FM(CANFD1_TX) FM(TPU0TO2_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP0SR2_7_4 FM(FXR_TXENA_N_A) FM(CANFD1_RX) FM(TPU0TO3_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP0SR2_11_8 FM(RXDA_EXTFXR) FM(CANFD5_TX_A) FM(IRQ5) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP0SR2_15_12 FM(CLK_EXTFXR) FM(CANFD5_RX_A) FM(IRQ4_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) #define IP0SR2_19_16 FM(RXDB_EXTFXR) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 
0) F_(0, 0) -#define IP0SR2_23_20 FM(FXR_TXENB_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP0SR2_23_20 FM(FXR_TXENB_N_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) #define IP0SR2_27_24 FM(FXR_TXDB) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) -#define IP0SR2_31_28 FM(TPU0TO1) FM(CANFD6_TX) F_(0, 0) FM(TCLK2_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP0SR2_31_28 FM(TPU0TO1_A) FM(CANFD6_TX) F_(0, 0) FM(TCLK2_C) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) /* IP1SR2 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */ -#define IP1SR2_3_0 FM(TPU0TO0) FM(CANFD6_RX) F_(0, 0) FM(TCLK1_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) -#define IP1SR2_7_4 FM(CAN_CLK) FM(FXR_TXENA_N_X) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) -#define IP1SR2_11_8 FM(CANFD0_TX) FM(FXR_TXENB_N_X) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP1SR2_3_0 FM(TPU0TO0_A) FM(CANFD6_RX) F_(0, 0) FM(TCLK1_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP1SR2_7_4 FM(CAN_CLK) FM(FXR_TXENA_N_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP1SR2_11_8 FM(CANFD0_TX) FM(FXR_TXENB_N_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) #define IP1SR2_15_12 FM(CANFD0_RX) FM(STPWT_EXTFXR) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) -#define IP1SR2_19_16 FM(CANFD2_TX) FM(TPU0TO2) F_(0, 0) FM(TCLK3_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) -#define IP1SR2_23_20 FM(CANFD2_RX) FM(TPU0TO3) FM(PWM1_B) FM(TCLK4_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) -#define IP1SR2_27_24 FM(CANFD3_TX) F_(0, 0) FM(PWM2_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP1SR2_19_16 FM(CANFD2_TX) FM(TPU0TO2_A) F_(0, 0) FM(TCLK3_C) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP1SR2_23_20 FM(CANFD2_RX) FM(TPU0TO3_A) FM(PWM1_B) FM(TCLK4_C) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP1SR2_27_24 FM(CANFD3_TX) F_(0, 0) FM(PWM2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) #define IP1SR2_31_28 FM(CANFD3_RX) F_(0, 0) FM(PWM3_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) /* IP2SR2 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */ @@ -381,8 +381,8 @@ #define IP1SR3_11_8 FM(MMC_SD_CMD) 
F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) #define IP1SR3_15_12 FM(SD_CD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) #define IP1SR3_19_16 FM(SD_WP) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) -#define IP1SR3_23_20 FM(IPC_CLKIN) FM(IPC_CLKEN_IN) FM(PWM1_A) FM(TCLK3_X) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) -#define IP1SR3_27_24 FM(IPC_CLKOUT) FM(IPC_CLKEN_OUT) FM(ERROROUTC_N_A) FM(TCLK4_X) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP1SR3_23_20 FM(IPC_CLKIN) FM(IPC_CLKEN_IN) FM(PWM1_A) FM(TCLK3_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) +#define IP1SR3_27_24 FM(IPC_CLKOUT) FM(IPC_CLKEN_OUT) FM(ERROROUTC_N_A) FM(TCLK4_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) #define IP1SR3_31_28 FM(QSPI0_SSL) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) /* IP2SR3 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */ @@ -718,22 +718,22 @@ static const u16 pinmux_data[] = { /* IP0SR0 */ PINMUX_IPSR_GPSR(IP0SR0_3_0, ERROROUTC_N_B), - PINMUX_IPSR_GPSR(IP0SR0_3_0, TCLK2_A), + PINMUX_IPSR_GPSR(IP0SR0_3_0, TCLK2_B), PINMUX_IPSR_GPSR(IP0SR0_7_4, MSIOF3_SS1), PINMUX_IPSR_GPSR(IP0SR0_11_8, MSIOF3_SS2), - PINMUX_IPSR_GPSR(IP0SR0_15_12, IRQ3), + PINMUX_IPSR_GPSR(IP0SR0_15_12, IRQ3_A), PINMUX_IPSR_GPSR(IP0SR0_15_12, MSIOF3_SCK), - PINMUX_IPSR_GPSR(IP0SR0_19_16, IRQ2), + PINMUX_IPSR_GPSR(IP0SR0_19_16, IRQ2_A), PINMUX_IPSR_GPSR(IP0SR0_19_16, MSIOF3_TXD), - PINMUX_IPSR_GPSR(IP0SR0_23_20, IRQ1), + PINMUX_IPSR_GPSR(IP0SR0_23_20, IRQ1_A), PINMUX_IPSR_GPSR(IP0SR0_23_20, MSIOF3_RXD), - PINMUX_IPSR_GPSR(IP0SR0_27_24, IRQ0), + PINMUX_IPSR_GPSR(IP0SR0_27_24, IRQ0_A), PINMUX_IPSR_GPSR(IP0SR0_27_24, MSIOF3_SYNC), PINMUX_IPSR_GPSR(IP0SR0_31_28, MSIOF5_SS2), @@ -750,75 +750,75 @@ static const u16 pinmux_data[] = { PINMUX_IPSR_GPSR(IP1SR0_19_16, MSIOF5_RXD), PINMUX_IPSR_GPSR(IP1SR0_23_20, MSIOF2_SS2), - PINMUX_IPSR_GPSR(IP1SR0_23_20, TCLK1), - PINMUX_IPSR_GPSR(IP1SR0_23_20, IRQ2_A), + PINMUX_IPSR_GPSR(IP1SR0_23_20, TCLK1_A), + PINMUX_IPSR_GPSR(IP1SR0_23_20, IRQ2_B), PINMUX_IPSR_GPSR(IP1SR0_27_24, MSIOF2_SS1), - PINMUX_IPSR_GPSR(IP1SR0_27_24, HTX1), - PINMUX_IPSR_GPSR(IP1SR0_27_24, TX1), + PINMUX_IPSR_GPSR(IP1SR0_27_24, HTX1_A), + PINMUX_IPSR_GPSR(IP1SR0_27_24, TX1_A), PINMUX_IPSR_GPSR(IP1SR0_31_28, MSIOF2_SYNC), - PINMUX_IPSR_GPSR(IP1SR0_31_28, HRX1), - PINMUX_IPSR_GPSR(IP1SR0_31_28, RX1), + PINMUX_IPSR_GPSR(IP1SR0_31_28, HRX1_A), + PINMUX_IPSR_GPSR(IP1SR0_31_28, RX1_A), /* IP2SR0 */ PINMUX_IPSR_GPSR(IP2SR0_3_0, MSIOF2_TXD), - PINMUX_IPSR_GPSR(IP2SR0_3_0, HCTS1_N), - PINMUX_IPSR_GPSR(IP2SR0_3_0, CTS1_N), + PINMUX_IPSR_GPSR(IP2SR0_3_0, HCTS1_N_A), + PINMUX_IPSR_GPSR(IP2SR0_3_0, CTS1_N_A), PINMUX_IPSR_GPSR(IP2SR0_7_4, MSIOF2_SCK), - PINMUX_IPSR_GPSR(IP2SR0_7_4, HRTS1_N), - PINMUX_IPSR_GPSR(IP2SR0_7_4, RTS1_N), + PINMUX_IPSR_GPSR(IP2SR0_7_4, HRTS1_N_A), + PINMUX_IPSR_GPSR(IP2SR0_7_4, RTS1_N_A), PINMUX_IPSR_GPSR(IP2SR0_11_8, MSIOF2_RXD), - PINMUX_IPSR_GPSR(IP2SR0_11_8, HSCK1), - 
PINMUX_IPSR_GPSR(IP2SR0_11_8, SCK1), + PINMUX_IPSR_GPSR(IP2SR0_11_8, HSCK1_A), + PINMUX_IPSR_GPSR(IP2SR0_11_8, SCK1_A), /* IP0SR1 */ PINMUX_IPSR_GPSR(IP0SR1_3_0, MSIOF1_SS2), - PINMUX_IPSR_GPSR(IP0SR1_3_0, HTX3_A), - PINMUX_IPSR_GPSR(IP0SR1_3_0, TX3), + PINMUX_IPSR_GPSR(IP0SR1_3_0, HTX3_B), + PINMUX_IPSR_GPSR(IP0SR1_3_0, TX3_B), PINMUX_IPSR_GPSR(IP0SR1_7_4, MSIOF1_SS1), - PINMUX_IPSR_GPSR(IP0SR1_7_4, HCTS3_N_A), - PINMUX_IPSR_GPSR(IP0SR1_7_4, RX3), + PINMUX_IPSR_GPSR(IP0SR1_7_4, HCTS3_N_B), + PINMUX_IPSR_GPSR(IP0SR1_7_4, RX3_B), PINMUX_IPSR_GPSR(IP0SR1_11_8, MSIOF1_SYNC), - PINMUX_IPSR_GPSR(IP0SR1_11_8, HRTS3_N_A), - PINMUX_IPSR_GPSR(IP0SR1_11_8, RTS3_N), + PINMUX_IPSR_GPSR(IP0SR1_11_8, HRTS3_N_B), + PINMUX_IPSR_GPSR(IP0SR1_11_8, RTS3_N_B), PINMUX_IPSR_GPSR(IP0SR1_15_12, MSIOF1_SCK), - PINMUX_IPSR_GPSR(IP0SR1_15_12, HSCK3_A), - PINMUX_IPSR_GPSR(IP0SR1_15_12, CTS3_N), + PINMUX_IPSR_GPSR(IP0SR1_15_12, HSCK3_B), + PINMUX_IPSR_GPSR(IP0SR1_15_12, CTS3_N_B), PINMUX_IPSR_GPSR(IP0SR1_19_16, MSIOF1_TXD), - PINMUX_IPSR_GPSR(IP0SR1_19_16, HRX3_A), - PINMUX_IPSR_GPSR(IP0SR1_19_16, SCK3), + PINMUX_IPSR_GPSR(IP0SR1_19_16, HRX3_B), + PINMUX_IPSR_GPSR(IP0SR1_19_16, SCK3_B), PINMUX_IPSR_GPSR(IP0SR1_23_20, MSIOF1_RXD), PINMUX_IPSR_GPSR(IP0SR1_27_24, MSIOF0_SS2), - PINMUX_IPSR_GPSR(IP0SR1_27_24, HTX1_X), - PINMUX_IPSR_GPSR(IP0SR1_27_24, TX1_X), + PINMUX_IPSR_GPSR(IP0SR1_27_24, HTX1_B), + PINMUX_IPSR_GPSR(IP0SR1_27_24, TX1_B), PINMUX_IPSR_GPSR(IP0SR1_31_28, MSIOF0_SS1), - PINMUX_IPSR_GPSR(IP0SR1_31_28, HRX1_X), - PINMUX_IPSR_GPSR(IP0SR1_31_28, RX1_X), + PINMUX_IPSR_GPSR(IP0SR1_31_28, HRX1_B), + PINMUX_IPSR_GPSR(IP0SR1_31_28, RX1_B), /* IP1SR1 */ PINMUX_IPSR_GPSR(IP1SR1_3_0, MSIOF0_SYNC), - PINMUX_IPSR_GPSR(IP1SR1_3_0, HCTS1_N_X), - PINMUX_IPSR_GPSR(IP1SR1_3_0, CTS1_N_X), + PINMUX_IPSR_GPSR(IP1SR1_3_0, HCTS1_N_B), + PINMUX_IPSR_GPSR(IP1SR1_3_0, CTS1_N_B), PINMUX_IPSR_GPSR(IP1SR1_3_0, CANFD5_TX_B), PINMUX_IPSR_GPSR(IP1SR1_7_4, MSIOF0_TXD), - PINMUX_IPSR_GPSR(IP1SR1_7_4, HRTS1_N_X), - PINMUX_IPSR_GPSR(IP1SR1_7_4, RTS1_N_X), + PINMUX_IPSR_GPSR(IP1SR1_7_4, HRTS1_N_B), + PINMUX_IPSR_GPSR(IP1SR1_7_4, RTS1_N_B), PINMUX_IPSR_GPSR(IP1SR1_7_4, CANFD5_RX_B), PINMUX_IPSR_GPSR(IP1SR1_11_8, MSIOF0_SCK), - PINMUX_IPSR_GPSR(IP1SR1_11_8, HSCK1_X), - PINMUX_IPSR_GPSR(IP1SR1_11_8, SCK1_X), + PINMUX_IPSR_GPSR(IP1SR1_11_8, HSCK1_B), + PINMUX_IPSR_GPSR(IP1SR1_11_8, SCK1_B), PINMUX_IPSR_GPSR(IP1SR1_15_12, MSIOF0_RXD), @@ -827,15 +827,15 @@ static const u16 pinmux_data[] = { PINMUX_IPSR_GPSR(IP1SR1_23_20, HCTS0_N), PINMUX_IPSR_GPSR(IP1SR1_23_20, CTS0_N), - PINMUX_IPSR_GPSR(IP1SR1_23_20, PWM8_A), + PINMUX_IPSR_GPSR(IP1SR1_23_20, PWM8), PINMUX_IPSR_GPSR(IP1SR1_27_24, HRTS0_N), PINMUX_IPSR_GPSR(IP1SR1_27_24, RTS0_N), - PINMUX_IPSR_GPSR(IP1SR1_27_24, PWM9_A), + PINMUX_IPSR_GPSR(IP1SR1_27_24, PWM9), PINMUX_IPSR_GPSR(IP1SR1_31_28, HSCK0), PINMUX_IPSR_GPSR(IP1SR1_31_28, SCK0), - PINMUX_IPSR_GPSR(IP1SR1_31_28, PWM0_A), + PINMUX_IPSR_GPSR(IP1SR1_31_28, PWM0), /* IP2SR1 */ PINMUX_IPSR_GPSR(IP2SR1_3_0, HRX0), @@ -845,99 +845,99 @@ static const u16 pinmux_data[] = { PINMUX_IPSR_GPSR(IP2SR1_7_4, IRQ4_A), PINMUX_IPSR_GPSR(IP2SR1_11_8, SSI_SCK), - PINMUX_IPSR_GPSR(IP2SR1_11_8, TCLK3), + PINMUX_IPSR_GPSR(IP2SR1_11_8, TCLK3_B), PINMUX_IPSR_GPSR(IP2SR1_15_12, SSI_WS), - PINMUX_IPSR_GPSR(IP2SR1_15_12, TCLK4), + PINMUX_IPSR_GPSR(IP2SR1_15_12, TCLK4_B), PINMUX_IPSR_GPSR(IP2SR1_19_16, SSI_SD), - PINMUX_IPSR_GPSR(IP2SR1_19_16, IRQ0_A), + PINMUX_IPSR_GPSR(IP2SR1_19_16, IRQ0_B), PINMUX_IPSR_GPSR(IP2SR1_23_20, AUDIO_CLKOUT), - PINMUX_IPSR_GPSR(IP2SR1_23_20, 
IRQ1_A), + PINMUX_IPSR_GPSR(IP2SR1_23_20, IRQ1_B), PINMUX_IPSR_GPSR(IP2SR1_27_24, AUDIO_CLKIN), PINMUX_IPSR_GPSR(IP2SR1_27_24, PWM3_A), - PINMUX_IPSR_GPSR(IP2SR1_31_28, TCLK2), + PINMUX_IPSR_GPSR(IP2SR1_31_28, TCLK2_A), PINMUX_IPSR_GPSR(IP2SR1_31_28, MSIOF4_SS1), PINMUX_IPSR_GPSR(IP2SR1_31_28, IRQ3_B), /* IP3SR1 */ - PINMUX_IPSR_GPSR(IP3SR1_3_0, HRX3), + PINMUX_IPSR_GPSR(IP3SR1_3_0, HRX3_A), PINMUX_IPSR_GPSR(IP3SR1_3_0, SCK3_A), PINMUX_IPSR_GPSR(IP3SR1_3_0, MSIOF4_SS2), - PINMUX_IPSR_GPSR(IP3SR1_7_4, HSCK3), + PINMUX_IPSR_GPSR(IP3SR1_7_4, HSCK3_A), PINMUX_IPSR_GPSR(IP3SR1_7_4, CTS3_N_A), PINMUX_IPSR_GPSR(IP3SR1_7_4, MSIOF4_SCK), - PINMUX_IPSR_GPSR(IP3SR1_7_4, TPU0TO0_A), + PINMUX_IPSR_GPSR(IP3SR1_7_4, TPU0TO0_B), - PINMUX_IPSR_GPSR(IP3SR1_11_8, HRTS3_N), + PINMUX_IPSR_GPSR(IP3SR1_11_8, HRTS3_N_A), PINMUX_IPSR_GPSR(IP3SR1_11_8, RTS3_N_A), PINMUX_IPSR_GPSR(IP3SR1_11_8, MSIOF4_TXD), - PINMUX_IPSR_GPSR(IP3SR1_11_8, TPU0TO1_A), + PINMUX_IPSR_GPSR(IP3SR1_11_8, TPU0TO1_B), - PINMUX_IPSR_GPSR(IP3SR1_15_12, HCTS3_N), + PINMUX_IPSR_GPSR(IP3SR1_15_12, HCTS3_N_A), PINMUX_IPSR_GPSR(IP3SR1_15_12, RX3_A), PINMUX_IPSR_GPSR(IP3SR1_15_12, MSIOF4_RXD), - PINMUX_IPSR_GPSR(IP3SR1_19_16, HTX3), + PINMUX_IPSR_GPSR(IP3SR1_19_16, HTX3_A), PINMUX_IPSR_GPSR(IP3SR1_19_16, TX3_A), PINMUX_IPSR_GPSR(IP3SR1_19_16, MSIOF4_SYNC), /* IP0SR2 */ PINMUX_IPSR_GPSR(IP0SR2_3_0, FXR_TXDA), PINMUX_IPSR_GPSR(IP0SR2_3_0, CANFD1_TX), - PINMUX_IPSR_GPSR(IP0SR2_3_0, TPU0TO2_A), + PINMUX_IPSR_GPSR(IP0SR2_3_0, TPU0TO2_B), - PINMUX_IPSR_GPSR(IP0SR2_7_4, FXR_TXENA_N), + PINMUX_IPSR_GPSR(IP0SR2_7_4, FXR_TXENA_N_A), PINMUX_IPSR_GPSR(IP0SR2_7_4, CANFD1_RX), - PINMUX_IPSR_GPSR(IP0SR2_7_4, TPU0TO3_A), + PINMUX_IPSR_GPSR(IP0SR2_7_4, TPU0TO3_B), PINMUX_IPSR_GPSR(IP0SR2_11_8, RXDA_EXTFXR), - PINMUX_IPSR_GPSR(IP0SR2_11_8, CANFD5_TX), + PINMUX_IPSR_GPSR(IP0SR2_11_8, CANFD5_TX_A), PINMUX_IPSR_GPSR(IP0SR2_11_8, IRQ5), PINMUX_IPSR_GPSR(IP0SR2_15_12, CLK_EXTFXR), - PINMUX_IPSR_GPSR(IP0SR2_15_12, CANFD5_RX), + PINMUX_IPSR_GPSR(IP0SR2_15_12, CANFD5_RX_A), PINMUX_IPSR_GPSR(IP0SR2_15_12, IRQ4_B), PINMUX_IPSR_GPSR(IP0SR2_19_16, RXDB_EXTFXR), - PINMUX_IPSR_GPSR(IP0SR2_23_20, FXR_TXENB_N), + PINMUX_IPSR_GPSR(IP0SR2_23_20, FXR_TXENB_N_A), PINMUX_IPSR_GPSR(IP0SR2_27_24, FXR_TXDB), - PINMUX_IPSR_GPSR(IP0SR2_31_28, TPU0TO1), + PINMUX_IPSR_GPSR(IP0SR2_31_28, TPU0TO1_A), PINMUX_IPSR_GPSR(IP0SR2_31_28, CANFD6_TX), - PINMUX_IPSR_GPSR(IP0SR2_31_28, TCLK2_B), + PINMUX_IPSR_GPSR(IP0SR2_31_28, TCLK2_C), /* IP1SR2 */ - PINMUX_IPSR_GPSR(IP1SR2_3_0, TPU0TO0), + PINMUX_IPSR_GPSR(IP1SR2_3_0, TPU0TO0_A), PINMUX_IPSR_GPSR(IP1SR2_3_0, CANFD6_RX), - PINMUX_IPSR_GPSR(IP1SR2_3_0, TCLK1_A), + PINMUX_IPSR_GPSR(IP1SR2_3_0, TCLK1_B), PINMUX_IPSR_GPSR(IP1SR2_7_4, CAN_CLK), - PINMUX_IPSR_GPSR(IP1SR2_7_4, FXR_TXENA_N_X), + PINMUX_IPSR_GPSR(IP1SR2_7_4, FXR_TXENA_N_B), PINMUX_IPSR_GPSR(IP1SR2_11_8, CANFD0_TX), - PINMUX_IPSR_GPSR(IP1SR2_11_8, FXR_TXENB_N_X), + PINMUX_IPSR_GPSR(IP1SR2_11_8, FXR_TXENB_N_B), PINMUX_IPSR_GPSR(IP1SR2_15_12, CANFD0_RX), PINMUX_IPSR_GPSR(IP1SR2_15_12, STPWT_EXTFXR), PINMUX_IPSR_GPSR(IP1SR2_19_16, CANFD2_TX), - PINMUX_IPSR_GPSR(IP1SR2_19_16, TPU0TO2), - PINMUX_IPSR_GPSR(IP1SR2_19_16, TCLK3_A), + PINMUX_IPSR_GPSR(IP1SR2_19_16, TPU0TO2_A), + PINMUX_IPSR_GPSR(IP1SR2_19_16, TCLK3_C), PINMUX_IPSR_GPSR(IP1SR2_23_20, CANFD2_RX), - PINMUX_IPSR_GPSR(IP1SR2_23_20, TPU0TO3), + PINMUX_IPSR_GPSR(IP1SR2_23_20, TPU0TO3_A), PINMUX_IPSR_GPSR(IP1SR2_23_20, PWM1_B), - PINMUX_IPSR_GPSR(IP1SR2_23_20, TCLK4_A), + PINMUX_IPSR_GPSR(IP1SR2_23_20, TCLK4_C), PINMUX_IPSR_GPSR(IP1SR2_27_24, 
CANFD3_TX), - PINMUX_IPSR_GPSR(IP1SR2_27_24, PWM2_B), + PINMUX_IPSR_GPSR(IP1SR2_27_24, PWM2), PINMUX_IPSR_GPSR(IP1SR2_31_28, CANFD3_RX), PINMUX_IPSR_GPSR(IP1SR2_31_28, PWM3_B), @@ -979,12 +979,12 @@ static const u16 pinmux_data[] = { PINMUX_IPSR_GPSR(IP1SR3_23_20, IPC_CLKIN), PINMUX_IPSR_GPSR(IP1SR3_23_20, IPC_CLKEN_IN), PINMUX_IPSR_GPSR(IP1SR3_23_20, PWM1_A), - PINMUX_IPSR_GPSR(IP1SR3_23_20, TCLK3_X), + PINMUX_IPSR_GPSR(IP1SR3_23_20, TCLK3_A), PINMUX_IPSR_GPSR(IP1SR3_27_24, IPC_CLKOUT), PINMUX_IPSR_GPSR(IP1SR3_27_24, IPC_CLKEN_OUT), PINMUX_IPSR_GPSR(IP1SR3_27_24, ERROROUTC_N_A), - PINMUX_IPSR_GPSR(IP1SR3_27_24, TCLK4_X), + PINMUX_IPSR_GPSR(IP1SR3_27_24, TCLK4_A), PINMUX_IPSR_GPSR(IP1SR3_31_28, QSPI0_SSL), @@ -1531,15 +1531,14 @@ static const unsigned int canfd4_data_mux[] = { }; /* - CANFD5 ----------------------------------------------------------------- */ -static const unsigned int canfd5_data_pins[] = { - /* CANFD5_TX, CANFD5_RX */ +static const unsigned int canfd5_data_a_pins[] = { + /* CANFD5_TX_A, CANFD5_RX_A */ RCAR_GP_PIN(2, 2), RCAR_GP_PIN(2, 3), }; -static const unsigned int canfd5_data_mux[] = { - CANFD5_TX_MARK, CANFD5_RX_MARK, +static const unsigned int canfd5_data_a_mux[] = { + CANFD5_TX_A_MARK, CANFD5_RX_A_MARK, }; -/* - CANFD5_B ----------------------------------------------------------------- */ static const unsigned int canfd5_data_b_pins[] = { /* CANFD5_TX_B, CANFD5_RX_B */ RCAR_GP_PIN(1, 8), RCAR_GP_PIN(1, 9), @@ -1599,49 +1598,48 @@ static const unsigned int hscif0_ctrl_mux[] = { }; /* - HSCIF1 ----------------------------------------------------------------- */ -static const unsigned int hscif1_data_pins[] = { - /* HRX1, HTX1 */ +static const unsigned int hscif1_data_a_pins[] = { + /* HRX1_A, HTX1_A */ RCAR_GP_PIN(0, 15), RCAR_GP_PIN(0, 14), }; -static const unsigned int hscif1_data_mux[] = { - HRX1_MARK, HTX1_MARK, +static const unsigned int hscif1_data_a_mux[] = { + HRX1_A_MARK, HTX1_A_MARK, }; -static const unsigned int hscif1_clk_pins[] = { - /* HSCK1 */ +static const unsigned int hscif1_clk_a_pins[] = { + /* HSCK1_A */ RCAR_GP_PIN(0, 18), }; -static const unsigned int hscif1_clk_mux[] = { - HSCK1_MARK, +static const unsigned int hscif1_clk_a_mux[] = { + HSCK1_A_MARK, }; -static const unsigned int hscif1_ctrl_pins[] = { - /* HRTS1_N, HCTS1_N */ +static const unsigned int hscif1_ctrl_a_pins[] = { + /* HRTS1_N_A, HCTS1_N_A */ RCAR_GP_PIN(0, 17), RCAR_GP_PIN(0, 16), }; -static const unsigned int hscif1_ctrl_mux[] = { - HRTS1_N_MARK, HCTS1_N_MARK, +static const unsigned int hscif1_ctrl_a_mux[] = { + HRTS1_N_A_MARK, HCTS1_N_A_MARK, }; -/* - HSCIF1_X---------------------------------------------------------------- */ -static const unsigned int hscif1_data_x_pins[] = { - /* HRX1_X, HTX1_X */ +static const unsigned int hscif1_data_b_pins[] = { + /* HRX1_B, HTX1_B */ RCAR_GP_PIN(1, 7), RCAR_GP_PIN(1, 6), }; -static const unsigned int hscif1_data_x_mux[] = { - HRX1_X_MARK, HTX1_X_MARK, +static const unsigned int hscif1_data_b_mux[] = { + HRX1_B_MARK, HTX1_B_MARK, }; -static const unsigned int hscif1_clk_x_pins[] = { - /* HSCK1_X */ +static const unsigned int hscif1_clk_b_pins[] = { + /* HSCK1_B */ RCAR_GP_PIN(1, 10), }; -static const unsigned int hscif1_clk_x_mux[] = { - HSCK1_X_MARK, +static const unsigned int hscif1_clk_b_mux[] = { + HSCK1_B_MARK, }; -static const unsigned int hscif1_ctrl_x_pins[] = { - /* HRTS1_N_X, HCTS1_N_X */ +static const unsigned int hscif1_ctrl_b_pins[] = { + /* HRTS1_N_B, HCTS1_N_B */ RCAR_GP_PIN(1, 9), RCAR_GP_PIN(1, 8), }; -static const 
unsigned int hscif1_ctrl_x_mux[] = { - HRTS1_N_X_MARK, HCTS1_N_X_MARK, +static const unsigned int hscif1_ctrl_b_mux[] = { + HRTS1_N_B_MARK, HCTS1_N_B_MARK, }; /* - HSCIF2 ----------------------------------------------------------------- */ @@ -1668,49 +1666,48 @@ static const unsigned int hscif2_ctrl_mux[] = { }; /* - HSCIF3 ----------------------------------------------------------------- */ -static const unsigned int hscif3_data_pins[] = { - /* HRX3, HTX3 */ +static const unsigned int hscif3_data_a_pins[] = { + /* HRX3_A, HTX3_A */ RCAR_GP_PIN(1, 24), RCAR_GP_PIN(1, 28), }; -static const unsigned int hscif3_data_mux[] = { - HRX3_MARK, HTX3_MARK, +static const unsigned int hscif3_data_a_mux[] = { + HRX3_A_MARK, HTX3_A_MARK, }; -static const unsigned int hscif3_clk_pins[] = { - /* HSCK3 */ +static const unsigned int hscif3_clk_a_pins[] = { + /* HSCK3_A */ RCAR_GP_PIN(1, 25), }; -static const unsigned int hscif3_clk_mux[] = { - HSCK3_MARK, +static const unsigned int hscif3_clk_a_mux[] = { + HSCK3_A_MARK, }; -static const unsigned int hscif3_ctrl_pins[] = { - /* HRTS3_N, HCTS3_N */ +static const unsigned int hscif3_ctrl_a_pins[] = { + /* HRTS3_N_A, HCTS3_N_A */ RCAR_GP_PIN(1, 26), RCAR_GP_PIN(1, 27), }; -static const unsigned int hscif3_ctrl_mux[] = { - HRTS3_N_MARK, HCTS3_N_MARK, +static const unsigned int hscif3_ctrl_a_mux[] = { + HRTS3_N_A_MARK, HCTS3_N_A_MARK, }; -/* - HSCIF3_A ----------------------------------------------------------------- */ -static const unsigned int hscif3_data_a_pins[] = { - /* HRX3_A, HTX3_A */ +static const unsigned int hscif3_data_b_pins[] = { + /* HRX3_B, HTX3_B */ RCAR_GP_PIN(1, 4), RCAR_GP_PIN(1, 0), }; -static const unsigned int hscif3_data_a_mux[] = { - HRX3_A_MARK, HTX3_A_MARK, +static const unsigned int hscif3_data_b_mux[] = { + HRX3_B_MARK, HTX3_B_MARK, }; -static const unsigned int hscif3_clk_a_pins[] = { - /* HSCK3_A */ +static const unsigned int hscif3_clk_b_pins[] = { + /* HSCK3_B */ RCAR_GP_PIN(1, 3), }; -static const unsigned int hscif3_clk_a_mux[] = { - HSCK3_A_MARK, +static const unsigned int hscif3_clk_b_mux[] = { + HSCK3_B_MARK, }; -static const unsigned int hscif3_ctrl_a_pins[] = { - /* HRTS3_N_A, HCTS3_N_A */ +static const unsigned int hscif3_ctrl_b_pins[] = { + /* HRTS3_N_B, HCTS3_N_B */ RCAR_GP_PIN(1, 2), RCAR_GP_PIN(1, 1), }; -static const unsigned int hscif3_ctrl_a_mux[] = { - HRTS3_N_A_MARK, HCTS3_N_A_MARK, +static const unsigned int hscif3_ctrl_b_mux[] = { + HRTS3_N_B_MARK, HCTS3_N_B_MARK, }; /* - I2C0 ------------------------------------------------------------------- */ @@ -2093,13 +2090,13 @@ static const unsigned int pcie1_clkreq_n_mux[] = { PCIE1_CLKREQ_N_MARK, }; -/* - PWM0_A ------------------------------------------------------------------- */ -static const unsigned int pwm0_a_pins[] = { - /* PWM0_A */ +/* - PWM0 ------------------------------------------------------------------- */ +static const unsigned int pwm0_pins[] = { + /* PWM0 */ RCAR_GP_PIN(1, 15), }; -static const unsigned int pwm0_a_mux[] = { - PWM0_A_MARK, +static const unsigned int pwm0_mux[] = { + PWM0_MARK, }; /* - PWM1_A ------------------------------------------------------------------- */ @@ -2120,13 +2117,13 @@ static const unsigned int pwm1_b_mux[] = { PWM1_B_MARK, }; -/* - PWM2_B ------------------------------------------------------------------- */ -static const unsigned int pwm2_b_pins[] = { - /* PWM2_B */ +/* - PWM2 ------------------------------------------------------------------- */ +static const unsigned int pwm2_pins[] = { + /* PWM2 */ 
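/*
 * Editorial note on the sh-pfc renames above: each pin group is declared as a
 * pair of arrays, <group>_pins[] (GPIO positions) and <group>_mux[] (the
 * matching *_MARK values), and SH_PFC_PIN_GROUP(<group>) picks both up by
 * name. Renaming a signal suffix (e.g. _X to _B, or dropping a stray _A)
 * therefore has to touch the pins array, the mux array, the group entry and
 * the function's group list consistently, which is why the hunks above ripple
 * through all four tables. A minimal sketch with a hypothetical "foo"
 * interface (the FOO_* marks and pin positions are invented for illustration,
 * not part of this driver):
 */
static const unsigned int foo_data_a_pins[] = {
	/* FOO_RX_A, FOO_TX_A */
	RCAR_GP_PIN(1, 7), RCAR_GP_PIN(1, 6),
};
static const unsigned int foo_data_a_mux[] = {
	FOO_RX_A_MARK, FOO_TX_A_MARK,
};
/* ... later: SH_PFC_PIN_GROUP(foo_data_a), and "foo_data_a" in foo_groups[]. */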
RCAR_GP_PIN(2, 14), }; -static const unsigned int pwm2_b_mux[] = { - PWM2_B_MARK, +static const unsigned int pwm2_mux[] = { + PWM2_MARK, }; /* - PWM3_A ------------------------------------------------------------------- */ @@ -2183,22 +2180,22 @@ static const unsigned int pwm7_mux[] = { PWM7_MARK, }; -/* - PWM8_A ------------------------------------------------------------------- */ -static const unsigned int pwm8_a_pins[] = { - /* PWM8_A */ +/* - PWM8 ------------------------------------------------------------------- */ +static const unsigned int pwm8_pins[] = { + /* PWM8 */ RCAR_GP_PIN(1, 13), }; -static const unsigned int pwm8_a_mux[] = { - PWM8_A_MARK, +static const unsigned int pwm8_mux[] = { + PWM8_MARK, }; -/* - PWM9_A ------------------------------------------------------------------- */ -static const unsigned int pwm9_a_pins[] = { - /* PWM9_A */ +/* - PWM9 ------------------------------------------------------------------- */ +static const unsigned int pwm9_pins[] = { + /* PWM9 */ RCAR_GP_PIN(1, 14), }; -static const unsigned int pwm9_a_mux[] = { - PWM9_A_MARK, +static const unsigned int pwm9_mux[] = { + PWM9_MARK, }; /* - QSPI0 ------------------------------------------------------------------ */ @@ -2261,75 +2258,51 @@ static const unsigned int scif0_ctrl_mux[] = { }; /* - SCIF1 ------------------------------------------------------------------ */ -static const unsigned int scif1_data_pins[] = { - /* RX1, TX1 */ +static const unsigned int scif1_data_a_pins[] = { + /* RX1_A, TX1_A */ RCAR_GP_PIN(0, 15), RCAR_GP_PIN(0, 14), }; -static const unsigned int scif1_data_mux[] = { - RX1_MARK, TX1_MARK, +static const unsigned int scif1_data_a_mux[] = { + RX1_A_MARK, TX1_A_MARK, }; -static const unsigned int scif1_clk_pins[] = { - /* SCK1 */ +static const unsigned int scif1_clk_a_pins[] = { + /* SCK1_A */ RCAR_GP_PIN(0, 18), }; -static const unsigned int scif1_clk_mux[] = { - SCK1_MARK, +static const unsigned int scif1_clk_a_mux[] = { + SCK1_A_MARK, }; -static const unsigned int scif1_ctrl_pins[] = { - /* RTS1_N, CTS1_N */ +static const unsigned int scif1_ctrl_a_pins[] = { + /* RTS1_N_A, CTS1_N_A */ RCAR_GP_PIN(0, 17), RCAR_GP_PIN(0, 16), }; -static const unsigned int scif1_ctrl_mux[] = { - RTS1_N_MARK, CTS1_N_MARK, +static const unsigned int scif1_ctrl_a_mux[] = { + RTS1_N_A_MARK, CTS1_N_A_MARK, }; -/* - SCIF1_X ------------------------------------------------------------------ */ -static const unsigned int scif1_data_x_pins[] = { - /* RX1_X, TX1_X */ +static const unsigned int scif1_data_b_pins[] = { + /* RX1_B, TX1_B */ RCAR_GP_PIN(1, 7), RCAR_GP_PIN(1, 6), }; -static const unsigned int scif1_data_x_mux[] = { - RX1_X_MARK, TX1_X_MARK, +static const unsigned int scif1_data_b_mux[] = { + RX1_B_MARK, TX1_B_MARK, }; -static const unsigned int scif1_clk_x_pins[] = { - /* SCK1_X */ +static const unsigned int scif1_clk_b_pins[] = { + /* SCK1_B */ RCAR_GP_PIN(1, 10), }; -static const unsigned int scif1_clk_x_mux[] = { - SCK1_X_MARK, +static const unsigned int scif1_clk_b_mux[] = { + SCK1_B_MARK, }; -static const unsigned int scif1_ctrl_x_pins[] = { - /* RTS1_N_X, CTS1_N_X */ +static const unsigned int scif1_ctrl_b_pins[] = { + /* RTS1_N_B, CTS1_N_B */ RCAR_GP_PIN(1, 9), RCAR_GP_PIN(1, 8), }; -static const unsigned int scif1_ctrl_x_mux[] = { - RTS1_N_X_MARK, CTS1_N_X_MARK, +static const unsigned int scif1_ctrl_b_mux[] = { + RTS1_N_B_MARK, CTS1_N_B_MARK, }; /* - SCIF3 ------------------------------------------------------------------ */ -static const unsigned int scif3_data_pins[] = { - /* RX3, 
TX3 */ - RCAR_GP_PIN(1, 1), RCAR_GP_PIN(1, 0), -}; -static const unsigned int scif3_data_mux[] = { - RX3_MARK, TX3_MARK, -}; -static const unsigned int scif3_clk_pins[] = { - /* SCK3 */ - RCAR_GP_PIN(1, 4), -}; -static const unsigned int scif3_clk_mux[] = { - SCK3_MARK, -}; -static const unsigned int scif3_ctrl_pins[] = { - /* RTS3_N, CTS3_N */ - RCAR_GP_PIN(1, 2), RCAR_GP_PIN(1, 3), -}; -static const unsigned int scif3_ctrl_mux[] = { - RTS3_N_MARK, CTS3_N_MARK, -}; - -/* - SCIF3_A ------------------------------------------------------------------ */ static const unsigned int scif3_data_a_pins[] = { /* RX3_A, TX3_A */ RCAR_GP_PIN(1, 27), RCAR_GP_PIN(1, 28), @@ -2352,6 +2325,28 @@ static const unsigned int scif3_ctrl_a_mux[] = { RTS3_N_A_MARK, CTS3_N_A_MARK, }; +static const unsigned int scif3_data_b_pins[] = { + /* RX3_B, TX3_B */ + RCAR_GP_PIN(1, 1), RCAR_GP_PIN(1, 0), +}; +static const unsigned int scif3_data_b_mux[] = { + RX3_B_MARK, TX3_B_MARK, +}; +static const unsigned int scif3_clk_b_pins[] = { + /* SCK3_B */ + RCAR_GP_PIN(1, 4), +}; +static const unsigned int scif3_clk_b_mux[] = { + SCK3_B_MARK, +}; +static const unsigned int scif3_ctrl_b_pins[] = { + /* RTS3_N_B, CTS3_N_B */ + RCAR_GP_PIN(1, 2), RCAR_GP_PIN(1, 3), +}; +static const unsigned int scif3_ctrl_b_mux[] = { + RTS3_N_B_MARK, CTS3_N_B_MARK, +}; + /* - SCIF4 ------------------------------------------------------------------ */ static const unsigned int scif4_data_pins[] = { /* RX4, TX4 */ @@ -2408,64 +2403,63 @@ static const unsigned int ssi_ctrl_mux[] = { SSI_SCK_MARK, SSI_WS_MARK, }; -/* - TPU ------------------------------------------------------------------- */ -static const unsigned int tpu_to0_pins[] = { - /* TPU0TO0 */ +/* - TPU -------------------------------------------------------------------- */ +static const unsigned int tpu_to0_a_pins[] = { + /* TPU0TO0_A */ RCAR_GP_PIN(2, 8), }; -static const unsigned int tpu_to0_mux[] = { - TPU0TO0_MARK, +static const unsigned int tpu_to0_a_mux[] = { + TPU0TO0_A_MARK, }; -static const unsigned int tpu_to1_pins[] = { - /* TPU0TO1 */ +static const unsigned int tpu_to1_a_pins[] = { + /* TPU0TO1_A */ RCAR_GP_PIN(2, 7), }; -static const unsigned int tpu_to1_mux[] = { - TPU0TO1_MARK, +static const unsigned int tpu_to1_a_mux[] = { + TPU0TO1_A_MARK, }; -static const unsigned int tpu_to2_pins[] = { - /* TPU0TO2 */ +static const unsigned int tpu_to2_a_pins[] = { + /* TPU0TO2_A */ RCAR_GP_PIN(2, 12), }; -static const unsigned int tpu_to2_mux[] = { - TPU0TO2_MARK, +static const unsigned int tpu_to2_a_mux[] = { + TPU0TO2_A_MARK, }; -static const unsigned int tpu_to3_pins[] = { - /* TPU0TO3 */ +static const unsigned int tpu_to3_a_pins[] = { + /* TPU0TO3_A */ RCAR_GP_PIN(2, 13), }; -static const unsigned int tpu_to3_mux[] = { - TPU0TO3_MARK, +static const unsigned int tpu_to3_a_mux[] = { + TPU0TO3_A_MARK, }; -/* - TPU_A ------------------------------------------------------------------- */ -static const unsigned int tpu_to0_a_pins[] = { - /* TPU0TO0_A */ +static const unsigned int tpu_to0_b_pins[] = { + /* TPU0TO0_B */ RCAR_GP_PIN(1, 25), }; -static const unsigned int tpu_to0_a_mux[] = { - TPU0TO0_A_MARK, +static const unsigned int tpu_to0_b_mux[] = { + TPU0TO0_B_MARK, }; -static const unsigned int tpu_to1_a_pins[] = { - /* TPU0TO1_A */ +static const unsigned int tpu_to1_b_pins[] = { + /* TPU0TO1_B */ RCAR_GP_PIN(1, 26), }; -static const unsigned int tpu_to1_a_mux[] = { - TPU0TO1_A_MARK, +static const unsigned int tpu_to1_b_mux[] = { + TPU0TO1_B_MARK, }; -static const unsigned int 
tpu_to2_a_pins[] = { - /* TPU0TO2_A */ +static const unsigned int tpu_to2_b_pins[] = { + /* TPU0TO2_B */ RCAR_GP_PIN(2, 0), }; -static const unsigned int tpu_to2_a_mux[] = { - TPU0TO2_A_MARK, +static const unsigned int tpu_to2_b_mux[] = { + TPU0TO2_B_MARK, }; -static const unsigned int tpu_to3_a_pins[] = { - /* TPU0TO3_A */ +static const unsigned int tpu_to3_b_pins[] = { + /* TPU0TO3_B */ RCAR_GP_PIN(2, 1), }; -static const unsigned int tpu_to3_a_mux[] = { - TPU0TO3_A_MARK, +static const unsigned int tpu_to3_b_mux[] = { + TPU0TO3_B_MARK, }; /* - TSN0 ------------------------------------------------ */ @@ -2578,8 +2572,8 @@ static const struct sh_pfc_pin_group pinmux_groups[] = { SH_PFC_PIN_GROUP(canfd2_data), SH_PFC_PIN_GROUP(canfd3_data), SH_PFC_PIN_GROUP(canfd4_data), - SH_PFC_PIN_GROUP(canfd5_data), /* suffix might be updated */ - SH_PFC_PIN_GROUP(canfd5_data_b), /* suffix might be updated */ + SH_PFC_PIN_GROUP(canfd5_data_a), + SH_PFC_PIN_GROUP(canfd5_data_b), SH_PFC_PIN_GROUP(canfd6_data), SH_PFC_PIN_GROUP(canfd7_data), SH_PFC_PIN_GROUP(can_clk), @@ -2587,21 +2581,21 @@ static const struct sh_pfc_pin_group pinmux_groups[] = { SH_PFC_PIN_GROUP(hscif0_data), SH_PFC_PIN_GROUP(hscif0_clk), SH_PFC_PIN_GROUP(hscif0_ctrl), - SH_PFC_PIN_GROUP(hscif1_data), /* suffix might be updated */ - SH_PFC_PIN_GROUP(hscif1_clk), /* suffix might be updated */ - SH_PFC_PIN_GROUP(hscif1_ctrl), /* suffix might be updated */ - SH_PFC_PIN_GROUP(hscif1_data_x), /* suffix might be updated */ - SH_PFC_PIN_GROUP(hscif1_clk_x), /* suffix might be updated */ - SH_PFC_PIN_GROUP(hscif1_ctrl_x), /* suffix might be updated */ + SH_PFC_PIN_GROUP(hscif1_data_a), + SH_PFC_PIN_GROUP(hscif1_clk_a), + SH_PFC_PIN_GROUP(hscif1_ctrl_a), + SH_PFC_PIN_GROUP(hscif1_data_b), + SH_PFC_PIN_GROUP(hscif1_clk_b), + SH_PFC_PIN_GROUP(hscif1_ctrl_b), SH_PFC_PIN_GROUP(hscif2_data), SH_PFC_PIN_GROUP(hscif2_clk), SH_PFC_PIN_GROUP(hscif2_ctrl), - SH_PFC_PIN_GROUP(hscif3_data), /* suffix might be updated */ - SH_PFC_PIN_GROUP(hscif3_clk), /* suffix might be updated */ - SH_PFC_PIN_GROUP(hscif3_ctrl), /* suffix might be updated */ - SH_PFC_PIN_GROUP(hscif3_data_a), /* suffix might be updated */ - SH_PFC_PIN_GROUP(hscif3_clk_a), /* suffix might be updated */ - SH_PFC_PIN_GROUP(hscif3_ctrl_a), /* suffix might be updated */ + SH_PFC_PIN_GROUP(hscif3_data_a), + SH_PFC_PIN_GROUP(hscif3_clk_a), + SH_PFC_PIN_GROUP(hscif3_ctrl_a), + SH_PFC_PIN_GROUP(hscif3_data_b), + SH_PFC_PIN_GROUP(hscif3_clk_b), + SH_PFC_PIN_GROUP(hscif3_ctrl_b), SH_PFC_PIN_GROUP(i2c0), SH_PFC_PIN_GROUP(i2c1), @@ -2663,18 +2657,18 @@ static const struct sh_pfc_pin_group pinmux_groups[] = { SH_PFC_PIN_GROUP(pcie0_clkreq_n), SH_PFC_PIN_GROUP(pcie1_clkreq_n), - SH_PFC_PIN_GROUP(pwm0_a), /* suffix might be updated */ + SH_PFC_PIN_GROUP(pwm0), SH_PFC_PIN_GROUP(pwm1_a), SH_PFC_PIN_GROUP(pwm1_b), - SH_PFC_PIN_GROUP(pwm2_b), /* suffix might be updated */ + SH_PFC_PIN_GROUP(pwm2), SH_PFC_PIN_GROUP(pwm3_a), SH_PFC_PIN_GROUP(pwm3_b), SH_PFC_PIN_GROUP(pwm4), SH_PFC_PIN_GROUP(pwm5), SH_PFC_PIN_GROUP(pwm6), SH_PFC_PIN_GROUP(pwm7), - SH_PFC_PIN_GROUP(pwm8_a), /* suffix might be updated */ - SH_PFC_PIN_GROUP(pwm9_a), /* suffix might be updated */ + SH_PFC_PIN_GROUP(pwm8), + SH_PFC_PIN_GROUP(pwm9), SH_PFC_PIN_GROUP(qspi0_ctrl), BUS_DATA_PIN_GROUP(qspi0_data, 2), @@ -2686,18 +2680,18 @@ static const struct sh_pfc_pin_group pinmux_groups[] = { SH_PFC_PIN_GROUP(scif0_data), SH_PFC_PIN_GROUP(scif0_clk), SH_PFC_PIN_GROUP(scif0_ctrl), - SH_PFC_PIN_GROUP(scif1_data), /* suffix might be updated */ - 
SH_PFC_PIN_GROUP(scif1_clk), /* suffix might be updated */ - SH_PFC_PIN_GROUP(scif1_ctrl), /* suffix might be updated */ - SH_PFC_PIN_GROUP(scif1_data_x), /* suffix might be updated */ - SH_PFC_PIN_GROUP(scif1_clk_x), /* suffix might be updated */ - SH_PFC_PIN_GROUP(scif1_ctrl_x), /* suffix might be updated */ - SH_PFC_PIN_GROUP(scif3_data), /* suffix might be updated */ - SH_PFC_PIN_GROUP(scif3_clk), /* suffix might be updated */ - SH_PFC_PIN_GROUP(scif3_ctrl), /* suffix might be updated */ - SH_PFC_PIN_GROUP(scif3_data_a), /* suffix might be updated */ - SH_PFC_PIN_GROUP(scif3_clk_a), /* suffix might be updated */ - SH_PFC_PIN_GROUP(scif3_ctrl_a), /* suffix might be updated */ + SH_PFC_PIN_GROUP(scif1_data_a), + SH_PFC_PIN_GROUP(scif1_clk_a), + SH_PFC_PIN_GROUP(scif1_ctrl_a), + SH_PFC_PIN_GROUP(scif1_data_b), + SH_PFC_PIN_GROUP(scif1_clk_b), + SH_PFC_PIN_GROUP(scif1_ctrl_b), + SH_PFC_PIN_GROUP(scif3_data_a), + SH_PFC_PIN_GROUP(scif3_clk_a), + SH_PFC_PIN_GROUP(scif3_ctrl_a), + SH_PFC_PIN_GROUP(scif3_data_b), + SH_PFC_PIN_GROUP(scif3_clk_b), + SH_PFC_PIN_GROUP(scif3_ctrl_b), SH_PFC_PIN_GROUP(scif4_data), SH_PFC_PIN_GROUP(scif4_clk), SH_PFC_PIN_GROUP(scif4_ctrl), @@ -2707,14 +2701,14 @@ static const struct sh_pfc_pin_group pinmux_groups[] = { SH_PFC_PIN_GROUP(ssi_data), SH_PFC_PIN_GROUP(ssi_ctrl), - SH_PFC_PIN_GROUP(tpu_to0), /* suffix might be updated */ - SH_PFC_PIN_GROUP(tpu_to0_a), /* suffix might be updated */ - SH_PFC_PIN_GROUP(tpu_to1), /* suffix might be updated */ - SH_PFC_PIN_GROUP(tpu_to1_a), /* suffix might be updated */ - SH_PFC_PIN_GROUP(tpu_to2), /* suffix might be updated */ - SH_PFC_PIN_GROUP(tpu_to2_a), /* suffix might be updated */ - SH_PFC_PIN_GROUP(tpu_to3), /* suffix might be updated */ - SH_PFC_PIN_GROUP(tpu_to3_a), /* suffix might be updated */ + SH_PFC_PIN_GROUP(tpu_to0_a), + SH_PFC_PIN_GROUP(tpu_to0_b), + SH_PFC_PIN_GROUP(tpu_to1_a), + SH_PFC_PIN_GROUP(tpu_to1_b), + SH_PFC_PIN_GROUP(tpu_to2_a), + SH_PFC_PIN_GROUP(tpu_to2_b), + SH_PFC_PIN_GROUP(tpu_to3_a), + SH_PFC_PIN_GROUP(tpu_to3_b), SH_PFC_PIN_GROUP(tsn0_link), SH_PFC_PIN_GROUP(tsn0_phy_int), @@ -2788,8 +2782,7 @@ static const char * const canfd4_groups[] = { }; static const char * const canfd5_groups[] = { - /* suffix might be updated */ - "canfd5_data", + "canfd5_data_a", "canfd5_data_b", }; @@ -2812,13 +2805,12 @@ static const char * const hscif0_groups[] = { }; static const char * const hscif1_groups[] = { - /* suffix might be updated */ - "hscif1_data", - "hscif1_clk", - "hscif1_ctrl", - "hscif1_data_x", - "hscif1_clk_x", - "hscif1_ctrl_x", + "hscif1_data_a", + "hscif1_clk_a", + "hscif1_ctrl_a", + "hscif1_data_b", + "hscif1_clk_b", + "hscif1_ctrl_b", }; static const char * const hscif2_groups[] = { @@ -2828,13 +2820,12 @@ static const char * const hscif2_groups[] = { }; static const char * const hscif3_groups[] = { - /* suffix might be updated */ - "hscif3_data", - "hscif3_clk", - "hscif3_ctrl", "hscif3_data_a", "hscif3_clk_a", "hscif3_ctrl_a", + "hscif3_data_b", + "hscif3_clk_b", + "hscif3_ctrl_b", }; static const char * const i2c0_groups[] = { @@ -2931,8 +2922,7 @@ static const char * const pcie_groups[] = { }; static const char * const pwm0_groups[] = { - /* suffix might be updated */ - "pwm0_a", + "pwm0", }; static const char * const pwm1_groups[] = { @@ -2941,8 +2931,7 @@ static const char * const pwm1_groups[] = { }; static const char * const pwm2_groups[] = { - /* suffix might be updated */ - "pwm2_b", + "pwm2", }; static const char * const pwm3_groups[] = { @@ -2967,13 +2956,11 @@ static const char 
* const pwm7_groups[] = { }; static const char * const pwm8_groups[] = { - /* suffix might be updated */ - "pwm8_a", + "pwm8", }; static const char * const pwm9_groups[] = { - /* suffix might be updated */ - "pwm9_a", + "pwm9", }; static const char * const qspi0_groups[] = { @@ -2995,23 +2982,21 @@ static const char * const scif0_groups[] = { }; static const char * const scif1_groups[] = { - /* suffix might be updated */ - "scif1_data", - "scif1_clk", - "scif1_ctrl", - "scif1_data_x", - "scif1_clk_x", - "scif1_ctrl_x", + "scif1_data_a", + "scif1_clk_a", + "scif1_ctrl_a", + "scif1_data_b", + "scif1_clk_b", + "scif1_ctrl_b", }; static const char * const scif3_groups[] = { - /* suffix might be updated */ - "scif3_data", - "scif3_clk", - "scif3_ctrl", "scif3_data_a", "scif3_clk_a", "scif3_ctrl_a", + "scif3_data_b", + "scif3_clk_b", + "scif3_ctrl_b", }; static const char * const scif4_groups[] = { @@ -3034,15 +3019,14 @@ static const char * const ssi_groups[] = { }; static const char * const tpu_groups[] = { - /* suffix might be updated */ - "tpu_to0", "tpu_to0_a", - "tpu_to1", + "tpu_to0_b", "tpu_to1_a", - "tpu_to2", + "tpu_to1_b", "tpu_to2_a", - "tpu_to3", + "tpu_to2_b", "tpu_to3_a", + "tpu_to3_b", }; static const char * const tsn0_groups[] = { diff --git a/drivers/pinctrl/ti/pinctrl-ti-iodelay.c b/drivers/pinctrl/ti/pinctrl-ti-iodelay.c index 040f2c46a868..ef9758638501 100644 --- a/drivers/pinctrl/ti/pinctrl-ti-iodelay.c +++ b/drivers/pinctrl/ti/pinctrl-ti-iodelay.c @@ -876,7 +876,7 @@ static int ti_iodelay_probe(struct platform_device *pdev) iod->desc.name = dev_name(dev); iod->desc.owner = THIS_MODULE; - ret = pinctrl_register_and_init(&iod->desc, dev, iod, &iod->pctl); + ret = devm_pinctrl_register_and_init(dev, &iod->desc, iod, &iod->pctl); if (ret) { dev_err(dev, "Failed to register pinctrl\n"); goto exit_out; @@ -884,7 +884,11 @@ static int ti_iodelay_probe(struct platform_device *pdev) platform_set_drvdata(pdev, iod); - return pinctrl_enable(iod->pctl); + ret = pinctrl_enable(iod->pctl); + if (ret) + goto exit_out; + + return 0; exit_out: of_node_put(np); @@ -899,9 +903,6 @@ static void ti_iodelay_remove(struct platform_device *pdev) { struct ti_iodelay_device *iod = platform_get_drvdata(pdev); - if (iod->pctl) - pinctrl_unregister(iod->pctl); - ti_iodelay_pinconf_deinit_dev(iod); /* Expect other allocations to be freed by devm */ diff --git a/drivers/platform/Makefile b/drivers/platform/Makefile index fbbe4f77aa5d..837202842a6f 100644 --- a/drivers/platform/Makefile +++ b/drivers/platform/Makefile @@ -11,4 +11,4 @@ obj-$(CONFIG_OLPC_EC) += olpc/ obj-$(CONFIG_GOLDFISH) += goldfish/ obj-$(CONFIG_CHROME_PLATFORMS) += chrome/ obj-$(CONFIG_SURFACE_PLATFORMS) += surface/ -obj-$(CONFIG_ARM64) += arm64/ +obj-$(CONFIG_ARM64_PLATFORM_DEVICES) += arm64/ diff --git a/drivers/platform/chrome/cros_ec_debugfs.c b/drivers/platform/chrome/cros_ec_debugfs.c index e1d313246beb..5996e9d53c38 100644 --- a/drivers/platform/chrome/cros_ec_debugfs.c +++ b/drivers/platform/chrome/cros_ec_debugfs.c @@ -330,6 +330,7 @@ static int ec_read_version_supported(struct cros_ec_dev *ec) if (!msg) return 0; + msg->version = 1; msg->command = EC_CMD_GET_CMD_VERSIONS + ec->cmd_offset; msg->outsize = sizeof(*params); msg->insize = sizeof(*response); diff --git a/drivers/platform/mips/cpu_hwmon.c b/drivers/platform/mips/cpu_hwmon.c index d8c5f9195f85..2ac2f31090f9 100644 --- a/drivers/platform/mips/cpu_hwmon.c +++ b/drivers/platform/mips/cpu_hwmon.c @@ -139,6 +139,9 @@ static int __init loongson_hwmon_init(void) 
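/*
 * Editorial note on the pinctrl-ti-iodelay hunk above: switching to
 * devm_pinctrl_register_and_init() ties the pin controller's lifetime to the
 * device, so the remove path no longer needs an explicit pinctrl_unregister(),
 * while pinctrl_enable() failures still have to be propagated. A minimal
 * probe sketch of that pattern (struct foo_pinctrl and its desc/pctl members
 * are hypothetical; error handling is reduced to the essentials):
 */
static int foo_pinctrl_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct foo_pinctrl *priv;
	int ret;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->desc.name = dev_name(dev);
	priv->desc.owner = THIS_MODULE;

	/* Device-managed: unregistered automatically when the device goes away. */
	ret = devm_pinctrl_register_and_init(dev, &priv->desc, priv, &priv->pctl);
	if (ret)
		return dev_err_probe(dev, ret, "Failed to register pinctrl\n");

	/* Enabling can still fail and must be reported, as in the hunk above. */
	return pinctrl_enable(priv->pctl);
}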
csr_temp_enable = csr_readl(LOONGSON_CSR_FEATURES) & LOONGSON_CSRF_TEMP; + if (!csr_temp_enable && !loongson_chiptemp[0]) + return -ENODEV; + nr_packages = loongson_sysconf.nr_cpus / loongson_sysconf.cores_per_package; diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c index 3f9b6285c9a6..bc9c5db38324 100644 --- a/drivers/platform/x86/asus-wmi.c +++ b/drivers/platform/x86/asus-wmi.c @@ -879,10 +879,14 @@ static ssize_t kbd_rgb_mode_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - struct asus_wmi *asus = dev_get_drvdata(dev); u32 cmd, mode, r, g, b, speed; + struct led_classdev *led; + struct asus_wmi *asus; int err; + led = dev_get_drvdata(dev); + asus = container_of(led, struct asus_wmi, kbd_led); + if (sscanf(buf, "%d %d %d %d %d %d", &cmd, &mode, &r, &g, &b, &speed) != 6) return -EINVAL; diff --git a/drivers/power/supply/ab8500_charger.c b/drivers/power/supply/ab8500_charger.c index 9b34d1a60f66..4b0ad1b4b4c9 100644 --- a/drivers/power/supply/ab8500_charger.c +++ b/drivers/power/supply/ab8500_charger.c @@ -488,8 +488,10 @@ static int ab8500_charger_get_ac_voltage(struct ab8500_charger *di) /* Only measure voltage if the charger is connected */ if (di->ac.charger_connected) { ret = iio_read_channel_processed(di->adc_main_charger_v, &vch); - if (ret < 0) + if (ret < 0) { dev_err(di->dev, "%s ADC conv failed,\n", __func__); + return ret; + } } else { vch = 0; } @@ -540,8 +542,10 @@ static int ab8500_charger_get_vbus_voltage(struct ab8500_charger *di) /* Only measure voltage if the charger is connected */ if (di->usb.charger_connected) { ret = iio_read_channel_processed(di->adc_vbus_v, &vch); - if (ret < 0) + if (ret < 0) { dev_err(di->dev, "%s ADC conv failed,\n", __func__); + return ret; + } } else { vch = 0; } @@ -563,8 +567,10 @@ static int ab8500_charger_get_usb_current(struct ab8500_charger *di) /* Only measure current if the charger is online */ if (di->usb.charger_online) { ret = iio_read_channel_processed(di->adc_usb_charger_c, &ich); - if (ret < 0) + if (ret < 0) { dev_err(di->dev, "%s ADC conv failed,\n", __func__); + return ret; + } } else { ich = 0; } @@ -586,8 +592,10 @@ static int ab8500_charger_get_ac_current(struct ab8500_charger *di) /* Only measure current if the charger is online */ if (di->ac.charger_online) { ret = iio_read_channel_processed(di->adc_main_charger_c, &ich); - if (ret < 0) + if (ret < 0) { dev_err(di->dev, "%s ADC conv failed,\n", __func__); + return ret; + } } else { ich = 0; } diff --git a/drivers/power/supply/ingenic-battery.c b/drivers/power/supply/ingenic-battery.c index 2e7fdfde47ec..0a40f425c277 100644 --- a/drivers/power/supply/ingenic-battery.c +++ b/drivers/power/supply/ingenic-battery.c @@ -31,8 +31,9 @@ static int ingenic_battery_get_property(struct power_supply *psy, switch (psp) { case POWER_SUPPLY_PROP_HEALTH: - ret = iio_read_channel_processed(bat->channel, &val->intval); - val->intval *= 1000; + ret = iio_read_channel_processed_scale(bat->channel, + &val->intval, + 1000); if (val->intval < info->voltage_min_design_uv) val->intval = POWER_SUPPLY_HEALTH_DEAD; else if (val->intval > info->voltage_max_design_uv) @@ -41,8 +42,9 @@ static int ingenic_battery_get_property(struct power_supply *psy, val->intval = POWER_SUPPLY_HEALTH_GOOD; return ret; case POWER_SUPPLY_PROP_VOLTAGE_NOW: - ret = iio_read_channel_processed(bat->channel, &val->intval); - val->intval *= 1000; + ret = iio_read_channel_processed_scale(bat->channel, + &val->intval, + 1000); return ret; case 
POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN: val->intval = info->voltage_min_design_uv; diff --git a/drivers/pwm/pwm-atmel-tcb.c b/drivers/pwm/pwm-atmel-tcb.c index 528e54c5999d..aca11493239a 100644 --- a/drivers/pwm/pwm-atmel-tcb.c +++ b/drivers/pwm/pwm-atmel-tcb.c @@ -81,7 +81,8 @@ static int atmel_tcb_pwm_request(struct pwm_chip *chip, tcbpwm->period = 0; tcbpwm->div = 0; - spin_lock(&tcbpwmc->lock); + guard(spinlock)(&tcbpwmc->lock); + regmap_read(tcbpwmc->regmap, ATMEL_TC_REG(tcbpwmc->channel, CMR), &cmr); /* * Get init config from Timer Counter registers if @@ -107,7 +108,6 @@ static int atmel_tcb_pwm_request(struct pwm_chip *chip, cmr |= ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO | ATMEL_TC_EEVT_XC0; regmap_write(tcbpwmc->regmap, ATMEL_TC_REG(tcbpwmc->channel, CMR), cmr); - spin_unlock(&tcbpwmc->lock); return 0; } @@ -137,7 +137,6 @@ static void atmel_tcb_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm, if (tcbpwm->duty == 0) polarity = !polarity; - spin_lock(&tcbpwmc->lock); regmap_read(tcbpwmc->regmap, ATMEL_TC_REG(tcbpwmc->channel, CMR), &cmr); /* flush old setting and set the new one */ @@ -172,8 +171,6 @@ static void atmel_tcb_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm, ATMEL_TC_SWTRG); tcbpwmc->bkup.enabled = 0; } - - spin_unlock(&tcbpwmc->lock); } static int atmel_tcb_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm, @@ -194,7 +191,6 @@ static int atmel_tcb_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm, if (tcbpwm->duty == 0) polarity = !polarity; - spin_lock(&tcbpwmc->lock); regmap_read(tcbpwmc->regmap, ATMEL_TC_REG(tcbpwmc->channel, CMR), &cmr); /* flush old setting and set the new one */ @@ -256,7 +252,6 @@ static int atmel_tcb_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm, regmap_write(tcbpwmc->regmap, ATMEL_TC_REG(tcbpwmc->channel, CCR), ATMEL_TC_SWTRG | ATMEL_TC_CLKEN); tcbpwmc->bkup.enabled = 1; - spin_unlock(&tcbpwmc->lock); return 0; } @@ -341,9 +336,12 @@ static int atmel_tcb_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, static int atmel_tcb_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, const struct pwm_state *state) { + struct atmel_tcb_pwm_chip *tcbpwmc = to_tcb_chip(chip); int duty_cycle, period; int ret; + guard(spinlock)(&tcbpwmc->lock); + if (!state->enabled) { atmel_tcb_pwm_disable(chip, pwm, state->polarity); return 0; diff --git a/drivers/pwm/pwm-stm32.c b/drivers/pwm/pwm-stm32.c index 8bae3fd2b330..c586029caf23 100644 --- a/drivers/pwm/pwm-stm32.c +++ b/drivers/pwm/pwm-stm32.c @@ -452,8 +452,9 @@ static int stm32_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, enabled = pwm->state.enabled; - if (enabled && !state->enabled) { - stm32_pwm_disable(priv, pwm->hwpwm); + if (!state->enabled) { + if (enabled) + stm32_pwm_disable(priv, pwm->hwpwm); return 0; } diff --git a/drivers/remoteproc/imx_rproc.c b/drivers/remoteproc/imx_rproc.c index 5a3fb902acc9..144c8e9a642e 100644 --- a/drivers/remoteproc/imx_rproc.c +++ b/drivers/remoteproc/imx_rproc.c @@ -726,31 +726,37 @@ static int imx_rproc_addr_init(struct imx_rproc *priv, struct resource res; node = of_parse_phandle(np, "memory-region", a); + if (!node) + continue; /* Not map vdevbuffer, vdevring region */ if (!strncmp(node->name, "vdev", strlen("vdev"))) { of_node_put(node); continue; } err = of_address_to_resource(node, 0, &res); - of_node_put(node); if (err) { dev_err(dev, "unable to resolve memory region\n"); + of_node_put(node); return err; } - if (b >= IMX_RPROC_MEM_MAX) + if (b >= IMX_RPROC_MEM_MAX) { + of_node_put(node); break; 
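/*
 * Editorial note on the imx_rproc hunk this sits in: every successful
 * of_parse_phandle() takes a reference on the returned node, so each exit
 * path out of the loop (skip, error, early break, normal completion) has to
 * drop it with of_node_put(). A stripped-down sketch of the pattern
 * (foo_map_region() is a hypothetical helper, and how missing entries are
 * treated is driver-specific):
 */
static int foo_map_regions(struct device *dev, struct device_node *np)
{
	struct device_node *node;
	struct resource res;
	int i, err;

	for (i = 0; ; i++) {
		node = of_parse_phandle(np, "memory-region", i);
		if (!node)
			break;				/* no more entries */

		err = of_address_to_resource(node, 0, &res);
		if (err) {
			of_node_put(node);		/* drop the ref on the error path too */
			return err;
		}

		err = foo_map_region(dev, &res);	/* hypothetical mapping helper */
		of_node_put(node);			/* ... and on the normal path */
		if (err)
			return err;
	}

	return 0;
}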
+ } /* Not use resource version, because we might share region */ priv->mem[b].cpu_addr = devm_ioremap_wc(&pdev->dev, res.start, resource_size(&res)); if (!priv->mem[b].cpu_addr) { dev_err(dev, "failed to remap %pr\n", &res); + of_node_put(node); return -ENOMEM; } priv->mem[b].sys_addr = res.start; priv->mem[b].size = resource_size(&res); if (!strcmp(node->name, "rsc-table")) priv->rsc_table = priv->mem[b].cpu_addr; + of_node_put(node); b++; } diff --git a/drivers/remoteproc/mtk_scp.c b/drivers/remoteproc/mtk_scp.c index b8498772dba1..abf7b371b860 100644 --- a/drivers/remoteproc/mtk_scp.c +++ b/drivers/remoteproc/mtk_scp.c @@ -1344,14 +1344,12 @@ static int scp_probe(struct platform_device *pdev) /* l1tcm is an optional memory region */ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "l1tcm"); - scp_cluster->l1tcm_base = devm_ioremap_resource(dev, res); - if (IS_ERR(scp_cluster->l1tcm_base)) { - ret = PTR_ERR(scp_cluster->l1tcm_base); - if (ret != -EINVAL) - return dev_err_probe(dev, ret, "Failed to map l1tcm memory\n"); + if (res) { + scp_cluster->l1tcm_base = devm_ioremap_resource(dev, res); + if (IS_ERR(scp_cluster->l1tcm_base)) + return dev_err_probe(dev, PTR_ERR(scp_cluster->l1tcm_base), + "Failed to map l1tcm memory\n"); - scp_cluster->l1tcm_base = NULL; - } else { scp_cluster->l1tcm_size = resource_size(res); scp_cluster->l1tcm_phys = res->start; } @@ -1390,7 +1388,7 @@ static const struct mtk_scp_sizes_data default_scp_sizes = { }; static const struct mtk_scp_sizes_data mt8188_scp_sizes = { - .max_dram_size = 0x500000, + .max_dram_size = 0x800000, .ipi_share_buffer_size = 600, }; @@ -1399,6 +1397,11 @@ static const struct mtk_scp_sizes_data mt8188_scp_c1_sizes = { .ipi_share_buffer_size = 600, }; +static const struct mtk_scp_sizes_data mt8195_scp_sizes = { + .max_dram_size = 0x800000, + .ipi_share_buffer_size = 288, +}; + static const struct mtk_scp_of_data mt8183_of_data = { .scp_clk_get = mt8183_scp_clk_get, .scp_before_load = mt8183_scp_before_load, @@ -1476,7 +1479,7 @@ static const struct mtk_scp_of_data mt8195_of_data = { .scp_da_to_va = mt8192_scp_da_to_va, .host_to_scp_reg = MT8192_GIPC_IN_SET, .host_to_scp_int_bit = MT8192_HOST_IPC_INT_BIT, - .scp_sizes = &default_scp_sizes, + .scp_sizes = &mt8195_scp_sizes, }; static const struct mtk_scp_of_data mt8195_of_data_c1 = { diff --git a/drivers/remoteproc/stm32_rproc.c b/drivers/remoteproc/stm32_rproc.c index 88623df7d0c3..8c7f7950b80e 100644 --- a/drivers/remoteproc/stm32_rproc.c +++ b/drivers/remoteproc/stm32_rproc.c @@ -294,7 +294,7 @@ static void stm32_rproc_mb_vq_work(struct work_struct *work) mutex_lock(&rproc->lock); - if (rproc->state != RPROC_RUNNING) + if (rproc->state != RPROC_RUNNING && rproc->state != RPROC_ATTACHED) goto unlock_mutex; if (rproc_vq_interrupt(rproc, mb->vq_id) == IRQ_NONE) diff --git a/drivers/remoteproc/ti_k3_r5_remoteproc.c b/drivers/remoteproc/ti_k3_r5_remoteproc.c index 50e486bcfa10..39a47540c590 100644 --- a/drivers/remoteproc/ti_k3_r5_remoteproc.c +++ b/drivers/remoteproc/ti_k3_r5_remoteproc.c @@ -1144,6 +1144,7 @@ static int k3_r5_rproc_configure_mode(struct k3_r5_rproc *kproc) u32 atcm_enable, btcm_enable, loczrama; struct k3_r5_core *core0; enum cluster_mode mode = cluster->mode; + int reset_ctrl_status; int ret; core0 = list_first_entry(&cluster->cores, struct k3_r5_core, elem); @@ -1160,11 +1161,11 @@ static int k3_r5_rproc_configure_mode(struct k3_r5_rproc *kproc) r_state, c_state); } - ret = reset_control_status(core->reset); - if (ret < 0) { + reset_ctrl_status = 
reset_control_status(core->reset); + if (reset_ctrl_status < 0) { dev_err(cdev, "failed to get initial local reset status, ret = %d\n", - ret); - return ret; + reset_ctrl_status); + return reset_ctrl_status; } /* @@ -1199,7 +1200,7 @@ static int k3_r5_rproc_configure_mode(struct k3_r5_rproc *kproc) * irrelevant if module reset is asserted (POR value has local reset * deasserted), and is deemed as remoteproc mode */ - if (c_state && !ret && !halted) { + if (c_state && !reset_ctrl_status && !halted) { dev_info(cdev, "configured R5F for IPC-only mode\n"); kproc->rproc->state = RPROC_DETACHED; ret = 1; @@ -1217,7 +1218,7 @@ static int k3_r5_rproc_configure_mode(struct k3_r5_rproc *kproc) ret = 0; } else { dev_err(cdev, "mismatched mode: local_reset = %s, module_reset = %s, core_state = %s\n", - !ret ? "deasserted" : "asserted", + !reset_ctrl_status ? "deasserted" : "asserted", c_state ? "deasserted" : "asserted", halted ? "halted" : "unhalted"); ret = -EINVAL; diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c index 5faafb4aa55c..cca650b2e0b9 100644 --- a/drivers/rtc/interface.c +++ b/drivers/rtc/interface.c @@ -274,10 +274,9 @@ int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) return err; /* full-function RTCs won't have such missing fields */ - if (rtc_valid_tm(&alarm->time) == 0) { - rtc_add_offset(rtc, &alarm->time); - return 0; - } + err = rtc_valid_tm(&alarm->time); + if (!err) + goto done; /* get the "after" timestamp, to detect wrapped fields */ err = rtc_read_time(rtc, &now); @@ -379,6 +378,8 @@ int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) if (err && alarm->enabled) dev_warn(&rtc->dev, "invalid alarm value: %ptR\n", &alarm->time); + else + rtc_add_offset(rtc, &alarm->time); return err; } diff --git a/drivers/rtc/rtc-abx80x.c b/drivers/rtc/rtc-abx80x.c index fde2b8054c2e..1298962402ff 100644 --- a/drivers/rtc/rtc-abx80x.c +++ b/drivers/rtc/rtc-abx80x.c @@ -705,14 +705,18 @@ static int abx80x_nvmem_xfer(struct abx80x_priv *priv, unsigned int offset, if (ret) return ret; - if (write) + if (write) { ret = i2c_smbus_write_i2c_block_data(priv->client, reg, len, val); - else + if (ret) + return ret; + } else { ret = i2c_smbus_read_i2c_block_data(priv->client, reg, len, val); - if (ret) - return ret; + if (ret <= 0) + return ret ? ret : -EIO; + len = ret; + } offset += len; val += len; diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c index 7d99cd2c37a0..35dca2accbb8 100644 --- a/drivers/rtc/rtc-cmos.c +++ b/drivers/rtc/rtc-cmos.c @@ -643,11 +643,10 @@ static int cmos_nvram_read(void *priv, unsigned int off, void *val, size_t count) { unsigned char *buf = val; - int retval; off += NVRAM_OFFSET; spin_lock_irq(&rtc_lock); - for (retval = 0; count; count--, off++, retval++) { + for (; count; count--, off++) { if (off < 128) *buf++ = CMOS_READ(off); else if (can_bank2) @@ -657,7 +656,7 @@ static int cmos_nvram_read(void *priv, unsigned int off, void *val, } spin_unlock_irq(&rtc_lock); - return retval; + return count ? -EIO : 0; } static int cmos_nvram_write(void *priv, unsigned int off, void *val, @@ -665,7 +664,6 @@ static int cmos_nvram_write(void *priv, unsigned int off, void *val, { struct cmos_rtc *cmos = priv; unsigned char *buf = val; - int retval; /* NOTE: on at least PCs and Ataris, the boot firmware uses a * checksum on part of the NVRAM data. 
That's currently ignored @@ -674,7 +672,7 @@ static int cmos_nvram_write(void *priv, unsigned int off, void *val, */ off += NVRAM_OFFSET; spin_lock_irq(&rtc_lock); - for (retval = 0; count; count--, off++, retval++) { + for (; count; count--, off++) { /* don't trash RTC registers */ if (off == cmos->day_alrm || off == cmos->mon_alrm @@ -689,7 +687,7 @@ static int cmos_nvram_write(void *priv, unsigned int off, void *val, } spin_unlock_irq(&rtc_lock); - return retval; + return count ? -EIO : 0; } /*----------------------------------------------------------------*/ diff --git a/drivers/rtc/rtc-isl1208.c b/drivers/rtc/rtc-isl1208.c index e50c23ee1646..206f96b90f58 100644 --- a/drivers/rtc/rtc-isl1208.c +++ b/drivers/rtc/rtc-isl1208.c @@ -775,14 +775,13 @@ static int isl1208_nvmem_read(void *priv, unsigned int off, void *buf, { struct isl1208_state *isl1208 = priv; struct i2c_client *client = to_i2c_client(isl1208->rtc->dev.parent); - int ret; /* nvmem sanitizes offset/count for us, but count==0 is possible */ if (!count) return count; - ret = isl1208_i2c_read_regs(client, ISL1208_REG_USR1 + off, buf, + + return isl1208_i2c_read_regs(client, ISL1208_REG_USR1 + off, buf, count); - return ret == 0 ? count : ret; } static int isl1208_nvmem_write(void *priv, unsigned int off, void *buf, @@ -790,15 +789,13 @@ static int isl1208_nvmem_write(void *priv, unsigned int off, void *buf, { struct isl1208_state *isl1208 = priv; struct i2c_client *client = to_i2c_client(isl1208->rtc->dev.parent); - int ret; /* nvmem sanitizes off/count for us, but count==0 is possible */ if (!count) return count; - ret = isl1208_i2c_set_regs(client, ISL1208_REG_USR1 + off, buf, - count); - return ret == 0 ? count : ret; + return isl1208_i2c_set_regs(client, ISL1208_REG_USR1 + off, buf, + count); } static const struct nvmem_config isl1208_nvmem_config = { diff --git a/drivers/rtc/rtc-tps6594.c b/drivers/rtc/rtc-tps6594.c index 838ae8562a35..bc8dc735aa23 100644 --- a/drivers/rtc/rtc-tps6594.c +++ b/drivers/rtc/rtc-tps6594.c @@ -360,10 +360,6 @@ static int tps6594_rtc_probe(struct platform_device *pdev) int irq; int ret; - rtc = devm_kzalloc(dev, sizeof(*rtc), GFP_KERNEL); - if (!rtc) - return -ENOMEM; - rtc = devm_rtc_allocate_device(dev); if (IS_ERR(rtc)) return PTR_ERR(rtc); diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c index 0316c20823ee..6adaeb985dde 100644 --- a/drivers/s390/block/dasd_devmap.c +++ b/drivers/s390/block/dasd_devmap.c @@ -2248,13 +2248,19 @@ static ssize_t dasd_copy_pair_store(struct device *dev, /* allocate primary devmap if needed */ prim_devmap = dasd_find_busid(prim_busid); - if (IS_ERR(prim_devmap)) + if (IS_ERR(prim_devmap)) { prim_devmap = dasd_add_busid(prim_busid, DASD_FEATURE_DEFAULT); + if (IS_ERR(prim_devmap)) + return PTR_ERR(prim_devmap); + } /* allocate secondary devmap if needed */ sec_devmap = dasd_find_busid(sec_busid); - if (IS_ERR(sec_devmap)) + if (IS_ERR(sec_devmap)) { sec_devmap = dasd_add_busid(sec_busid, DASD_FEATURE_DEFAULT); + if (IS_ERR(sec_devmap)) + return PTR_ERR(sec_devmap); + } /* setting copy relation is only allowed for offline secondary */ if (sec_devmap->device) diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index a46c73e8d7c4..0a9d6978cb0c 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -1907,6 +1907,11 @@ lpfc_xcvr_data_show(struct device *dev, struct device_attribute *attr, /* Get transceiver information */ rdp_context = kmalloc(sizeof(*rdp_context), GFP_KERNEL); + if 
(!rdp_context) { + len = scnprintf(buf, PAGE_SIZE - len, + "SPF info NA: alloc failure\n"); + return len; + } rc = lpfc_get_sfp_info_wait(phba, rdp_context); if (rc) { diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index 153770bdc56a..13b08c85440f 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -5725,7 +5725,7 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did) return ndlp; if (ndlp->nlp_state > NLP_STE_UNUSED_NODE && - ndlp->nlp_state < NLP_STE_PRLI_ISSUE) { + ndlp->nlp_state <= NLP_STE_PRLI_ISSUE) { lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RECOVERY); } diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index f475e7ece41a..3e55d5edd60a 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -10579,10 +10579,11 @@ lpfc_prep_embed_io(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd) { struct lpfc_iocbq *piocb = &lpfc_cmd->cur_iocbq; union lpfc_wqe128 *wqe = &lpfc_cmd->cur_iocbq.wqe; - struct sli4_sge *sgl; + struct sli4_sge_le *sgl; + u32 type_size; /* 128 byte wqe support here */ - sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl; + sgl = (struct sli4_sge_le *)lpfc_cmd->dma_sgl; if (phba->fcp_embed_io) { struct fcp_cmnd *fcp_cmnd; @@ -10591,9 +10592,9 @@ lpfc_prep_embed_io(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd) fcp_cmnd = lpfc_cmd->fcp_cmnd; /* Word 0-2 - FCP_CMND */ - wqe->generic.bde.tus.f.bdeFlags = - BUFF_TYPE_BDE_IMMED; - wqe->generic.bde.tus.f.bdeSize = sgl->sge_len; + type_size = le32_to_cpu(sgl->sge_len); + type_size |= ULP_BDE64_TYPE_BDE_IMMED; + wqe->generic.bde.tus.w = type_size; wqe->generic.bde.addrHigh = 0; wqe->generic.bde.addrLow = 72; /* Word 18 */ @@ -10602,13 +10603,13 @@ lpfc_prep_embed_io(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd) /* Word 18-29 FCP CMND Payload */ ptr = &wqe->words[18]; - memcpy(ptr, fcp_cmnd, sgl->sge_len); + lpfc_sli_pcimem_bcopy(fcp_cmnd, ptr, le32_to_cpu(sgl->sge_len)); } else { /* Word 0-2 - Inline BDE */ wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64; - wqe->generic.bde.tus.f.bdeSize = sgl->sge_len; - wqe->generic.bde.addrHigh = sgl->addr_hi; - wqe->generic.bde.addrLow = sgl->addr_lo; + wqe->generic.bde.tus.f.bdeSize = le32_to_cpu(sgl->sge_len); + wqe->generic.bde.addrHigh = le32_to_cpu(sgl->addr_hi); + wqe->generic.bde.addrLow = le32_to_cpu(sgl->addr_lo); /* Word 10 */ bf_set(wqe_dbde, &wqe->generic.wqe_com, 1); diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c index 19bb64bdd88b..52dc9604f567 100644 --- a/drivers/scsi/qla2xxx/qla_bsg.c +++ b/drivers/scsi/qla2xxx/qla_bsg.c @@ -324,7 +324,7 @@ qla2x00_process_els(struct bsg_job *bsg_job) "request_sg_cnt=%x reply_sg_cnt=%x.\n", bsg_job->request_payload.sg_cnt, bsg_job->reply_payload.sg_cnt); - rval = -EPERM; + rval = -ENOBUFS; goto done; } @@ -3059,17 +3059,61 @@ qla24xx_bsg_request(struct bsg_job *bsg_job) return ret; } -int -qla24xx_bsg_timeout(struct bsg_job *bsg_job) +static bool qla_bsg_found(struct qla_qpair *qpair, struct bsg_job *bsg_job) { + bool found = false; struct fc_bsg_reply *bsg_reply = bsg_job->reply; scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job)); struct qla_hw_data *ha = vha->hw; - srb_t *sp; - int cnt, que; + srb_t *sp = NULL; + int cnt; unsigned long flags; struct req_que *req; + spin_lock_irqsave(qpair->qp_lock_ptr, flags); + req = qpair->req; + + for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) { + sp = req->outstanding_cmds[cnt]; + if (sp && + (sp->type == 
SRB_CT_CMD || + sp->type == SRB_ELS_CMD_HST || + sp->type == SRB_ELS_CMD_HST_NOLOGIN) && + sp->u.bsg_job == bsg_job) { + req->outstanding_cmds[cnt] = NULL; + spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); + + if (!ha->flags.eeh_busy && ha->isp_ops->abort_command(sp)) { + ql_log(ql_log_warn, vha, 0x7089, + "mbx abort_command failed.\n"); + bsg_reply->result = -EIO; + } else { + ql_dbg(ql_dbg_user, vha, 0x708a, + "mbx abort_command success.\n"); + bsg_reply->result = 0; + } + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); + + found = true; + goto done; + } + } + spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); + +done: + return found; +} + +int +qla24xx_bsg_timeout(struct bsg_job *bsg_job) +{ + struct fc_bsg_reply *bsg_reply = bsg_job->reply; + scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job)); + struct qla_hw_data *ha = vha->hw; + int i; + struct qla_qpair *qpair; + ql_log(ql_log_info, vha, 0x708b, "%s CMD timeout. bsg ptr %p.\n", __func__, bsg_job); @@ -3079,48 +3123,22 @@ qla24xx_bsg_timeout(struct bsg_job *bsg_job) qla_pci_set_eeh_busy(vha); } + if (qla_bsg_found(ha->base_qpair, bsg_job)) + goto done; + /* find the bsg job from the active list of commands */ - spin_lock_irqsave(&ha->hardware_lock, flags); - for (que = 0; que < ha->max_req_queues; que++) { - req = ha->req_q_map[que]; - if (!req) + for (i = 0; i < ha->max_qpairs; i++) { + qpair = vha->hw->queue_pair_map[i]; + if (!qpair) continue; - - for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) { - sp = req->outstanding_cmds[cnt]; - if (sp && - (sp->type == SRB_CT_CMD || - sp->type == SRB_ELS_CMD_HST || - sp->type == SRB_ELS_CMD_HST_NOLOGIN || - sp->type == SRB_FXIOCB_BCMD) && - sp->u.bsg_job == bsg_job) { - req->outstanding_cmds[cnt] = NULL; - spin_unlock_irqrestore(&ha->hardware_lock, flags); - - if (!ha->flags.eeh_busy && ha->isp_ops->abort_command(sp)) { - ql_log(ql_log_warn, vha, 0x7089, - "mbx abort_command failed.\n"); - bsg_reply->result = -EIO; - } else { - ql_dbg(ql_dbg_user, vha, 0x708a, - "mbx abort_command success.\n"); - bsg_reply->result = 0; - } - spin_lock_irqsave(&ha->hardware_lock, flags); - goto done; - - } - } + if (qla_bsg_found(qpair, bsg_job)) + goto done; } - spin_unlock_irqrestore(&ha->hardware_lock, flags); + ql_log(ql_log_info, vha, 0x708b, "SRB not found to abort.\n"); bsg_reply->result = -ENXIO; - return 0; done: - spin_unlock_irqrestore(&ha->hardware_lock, flags); - /* ref: INIT */ - kref_put(&sp->cmd_kref, qla2x00_sp_release); return 0; } diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 2f49baf131e2..7cf998e3cc68 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -3309,9 +3309,20 @@ struct fab_scan_rp { u8 node_name[8]; }; +enum scan_step { + FAB_SCAN_START, + FAB_SCAN_GPNFT_FCP, + FAB_SCAN_GNNFT_FCP, + FAB_SCAN_GPNFT_NVME, + FAB_SCAN_GNNFT_NVME, +}; + struct fab_scan { struct fab_scan_rp *l; u32 size; + u32 rscn_gen_start; + u32 rscn_gen_end; + enum scan_step step; u16 scan_retry; #define MAX_SCAN_RETRIES 5 enum scan_flags_t scan_flags; @@ -3537,9 +3548,8 @@ enum qla_work_type { QLA_EVT_RELOGIN, QLA_EVT_ASYNC_PRLO, QLA_EVT_ASYNC_PRLO_DONE, - QLA_EVT_GPNFT, - QLA_EVT_GPNFT_DONE, - QLA_EVT_GNNFT_DONE, + QLA_EVT_SCAN_CMD, + QLA_EVT_SCAN_FINISH, QLA_EVT_GFPNID, QLA_EVT_SP_RETRY, QLA_EVT_IIDMA, @@ -5030,6 +5040,7 @@ typedef struct scsi_qla_host { /* Counter to detect races between ELS and RSCN events */ atomic_t generation_tick; + atomic_t rscn_gen; /* Time when global fcport update has been scheduled */ 
int total_fcport_update_gen; /* List of pending LOGOs, protected by tgt_mutex */ diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h index 7309310d2ab9..cededfda9d0e 100644 --- a/drivers/scsi/qla2xxx/qla_gbl.h +++ b/drivers/scsi/qla2xxx/qla_gbl.h @@ -728,9 +728,9 @@ int qla24xx_async_gpsc(scsi_qla_host_t *, fc_port_t *); void qla24xx_handle_gpsc_event(scsi_qla_host_t *, struct event_arg *); int qla2x00_mgmt_svr_login(scsi_qla_host_t *); int qla24xx_async_gffid(scsi_qla_host_t *vha, fc_port_t *fcport, bool); -int qla24xx_async_gpnft(scsi_qla_host_t *, u8, srb_t *); -void qla24xx_async_gpnft_done(scsi_qla_host_t *, srb_t *); -void qla24xx_async_gnnft_done(scsi_qla_host_t *, srb_t *); +int qla_fab_async_scan(scsi_qla_host_t *, srb_t *); +void qla_fab_scan_start(struct scsi_qla_host *); +void qla_fab_scan_finish(scsi_qla_host_t *, srb_t *); int qla24xx_post_gfpnid_work(struct scsi_qla_host *, fc_port_t *); int qla24xx_async_gfpnid(scsi_qla_host_t *, fc_port_t *); void qla24xx_handle_gfpnid_event(scsi_qla_host_t *, struct event_arg *); diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c index 1cf9d200d563..d2bddca7045a 100644 --- a/drivers/scsi/qla2xxx/qla_gs.c +++ b/drivers/scsi/qla2xxx/qla_gs.c @@ -1710,7 +1710,7 @@ qla2x00_hba_attributes(scsi_qla_host_t *vha, void *entries, eiter->type = cpu_to_be16(FDMI_HBA_OPTION_ROM_VERSION); alen = scnprintf( eiter->a.orom_version, sizeof(eiter->a.orom_version), - "%d.%02d", ha->bios_revision[1], ha->bios_revision[0]); + "%d.%02d", ha->efi_revision[1], ha->efi_revision[0]); alen += FDMI_ATTR_ALIGNMENT(alen); alen += FDMI_ATTR_TYPELEN(eiter); eiter->len = cpu_to_be16(alen); @@ -3168,7 +3168,30 @@ static int qla2x00_is_a_vp(scsi_qla_host_t *vha, u64 wwn) return rc; } -void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp) +static bool qla_ok_to_clear_rscn(scsi_qla_host_t *vha, fc_port_t *fcport) +{ + u32 rscn_gen; + + rscn_gen = atomic_read(&vha->rscn_gen); + ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x2017, + "%s %d %8phC rscn_gen %x start %x end %x current %x\n", + __func__, __LINE__, fcport->port_name, fcport->rscn_gen, + vha->scan.rscn_gen_start, vha->scan.rscn_gen_end, rscn_gen); + + if (val_is_in_range(fcport->rscn_gen, vha->scan.rscn_gen_start, + vha->scan.rscn_gen_end)) + /* rscn came in before fabric scan */ + return true; + + if (val_is_in_range(fcport->rscn_gen, vha->scan.rscn_gen_end, rscn_gen)) + /* rscn came in after fabric scan */ + return false; + + /* rare: fcport's scan_needed + rscn_gen must be stale */ + return true; +} + +void qla_fab_scan_finish(scsi_qla_host_t *vha, srb_t *sp) { fc_port_t *fcport; u32 i, rc; @@ -3281,10 +3304,10 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp) (fcport->scan_needed && fcport->port_type != FCT_INITIATOR && fcport->port_type != FCT_NVME_INITIATOR)) { + fcport->scan_needed = 0; qlt_schedule_sess_for_deletion(fcport); } fcport->d_id.b24 = rp->id.b24; - fcport->scan_needed = 0; break; } @@ -3325,7 +3348,9 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp) do_delete = true; } - fcport->scan_needed = 0; + if (qla_ok_to_clear_rscn(vha, fcport)) + fcport->scan_needed = 0; + if (((qla_dual_mode_enabled(vha) || qla_ini_mode_enabled(vha)) && atomic_read(&fcport->state) == FCS_ONLINE) || @@ -3355,7 +3380,9 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp) fcport->port_name, fcport->loop_id, fcport->login_retry); } - fcport->scan_needed = 0; + + if (qla_ok_to_clear_rscn(vha, fcport)) + fcport->scan_needed = 0; 
qla24xx_fcport_handle_login(vha, fcport); } } @@ -3379,14 +3406,11 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp) } } -static int qla2x00_post_gnnft_gpnft_done_work(struct scsi_qla_host *vha, +static int qla2x00_post_next_scan_work(struct scsi_qla_host *vha, srb_t *sp, int cmd) { struct qla_work_evt *e; - if (cmd != QLA_EVT_GPNFT_DONE && cmd != QLA_EVT_GNNFT_DONE) - return QLA_PARAMETER_ERROR; - e = qla2x00_alloc_work(vha, cmd); if (!e) return QLA_FUNCTION_FAILED; @@ -3396,37 +3420,15 @@ static int qla2x00_post_gnnft_gpnft_done_work(struct scsi_qla_host *vha, return qla2x00_post_work(vha, e); } -static int qla2x00_post_nvme_gpnft_work(struct scsi_qla_host *vha, - srb_t *sp, int cmd) -{ - struct qla_work_evt *e; - - if (cmd != QLA_EVT_GPNFT) - return QLA_PARAMETER_ERROR; - - e = qla2x00_alloc_work(vha, cmd); - if (!e) - return QLA_FUNCTION_FAILED; - - e->u.gpnft.fc4_type = FC4_TYPE_NVME; - e->u.gpnft.sp = sp; - - return qla2x00_post_work(vha, e); -} - static void qla2x00_find_free_fcp_nvme_slot(struct scsi_qla_host *vha, struct srb *sp) { struct qla_hw_data *ha = vha->hw; int num_fibre_dev = ha->max_fibre_devices; - struct ct_sns_req *ct_req = - (struct ct_sns_req *)sp->u.iocb_cmd.u.ctarg.req; struct ct_sns_gpnft_rsp *ct_rsp = (struct ct_sns_gpnft_rsp *)sp->u.iocb_cmd.u.ctarg.rsp; struct ct_sns_gpn_ft_data *d; struct fab_scan_rp *rp; - u16 cmd = be16_to_cpu(ct_req->command); - u8 fc4_type = sp->gen2; int i, j, k; port_id_t id; u8 found; @@ -3445,85 +3447,83 @@ static void qla2x00_find_free_fcp_nvme_slot(struct scsi_qla_host *vha, if (id.b24 == 0 || wwn == 0) continue; - if (fc4_type == FC4_TYPE_FCP_SCSI) { - if (cmd == GPN_FT_CMD) { - rp = &vha->scan.l[j]; - rp->id = id; - memcpy(rp->port_name, d->port_name, 8); - j++; - rp->fc4type = FS_FC4TYPE_FCP; - } else { - for (k = 0; k < num_fibre_dev; k++) { - rp = &vha->scan.l[k]; - if (id.b24 == rp->id.b24) { - memcpy(rp->node_name, - d->port_name, 8); - break; - } + ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x2025, + "%s %06x %8ph \n", + __func__, id.b24, d->port_name); + + switch (vha->scan.step) { + case FAB_SCAN_GPNFT_FCP: + rp = &vha->scan.l[j]; + rp->id = id; + memcpy(rp->port_name, d->port_name, 8); + j++; + rp->fc4type = FS_FC4TYPE_FCP; + break; + case FAB_SCAN_GNNFT_FCP: + for (k = 0; k < num_fibre_dev; k++) { + rp = &vha->scan.l[k]; + if (id.b24 == rp->id.b24) { + memcpy(rp->node_name, + d->port_name, 8); + break; } } - } else { - /* Search if the fibre device supports FC4_TYPE_NVME */ - if (cmd == GPN_FT_CMD) { - found = 0; - - for (k = 0; k < num_fibre_dev; k++) { - rp = &vha->scan.l[k]; - if (!memcmp(rp->port_name, - d->port_name, 8)) { - /* - * Supports FC-NVMe & FCP - */ - rp->fc4type |= FS_FC4TYPE_NVME; - found = 1; - break; - } + break; + case FAB_SCAN_GPNFT_NVME: + found = 0; + + for (k = 0; k < num_fibre_dev; k++) { + rp = &vha->scan.l[k]; + if (!memcmp(rp->port_name, d->port_name, 8)) { + /* + * Supports FC-NVMe & FCP + */ + rp->fc4type |= FS_FC4TYPE_NVME; + found = 1; + break; } + } - /* We found new FC-NVMe only port */ - if (!found) { - for (k = 0; k < num_fibre_dev; k++) { - rp = &vha->scan.l[k]; - if (wwn_to_u64(rp->port_name)) { - continue; - } else { - rp->id = id; - memcpy(rp->port_name, - d->port_name, 8); - rp->fc4type = - FS_FC4TYPE_NVME; - break; - } - } - } - } else { + /* We found new FC-NVMe only port */ + if (!found) { for (k = 0; k < num_fibre_dev; k++) { rp = &vha->scan.l[k]; - if (id.b24 == rp->id.b24) { - memcpy(rp->node_name, - d->port_name, 8); + if (wwn_to_u64(rp->port_name)) { + 
continue; + } else { + rp->id = id; + memcpy(rp->port_name, d->port_name, 8); + rp->fc4type = FS_FC4TYPE_NVME; break; } } } + break; + case FAB_SCAN_GNNFT_NVME: + for (k = 0; k < num_fibre_dev; k++) { + rp = &vha->scan.l[k]; + if (id.b24 == rp->id.b24) { + memcpy(rp->node_name, d->port_name, 8); + break; + } + } + break; + default: + break; } } } -static void qla2x00_async_gpnft_gnnft_sp_done(srb_t *sp, int res) +static void qla_async_scan_sp_done(srb_t *sp, int res) { struct scsi_qla_host *vha = sp->vha; - struct ct_sns_req *ct_req = - (struct ct_sns_req *)sp->u.iocb_cmd.u.ctarg.req; - u16 cmd = be16_to_cpu(ct_req->command); - u8 fc4_type = sp->gen2; unsigned long flags; int rc; /* gen2 field is holding the fc4type */ - ql_dbg(ql_dbg_disc, vha, 0xffff, - "Async done-%s res %x FC4Type %x\n", - sp->name, res, sp->gen2); + ql_dbg(ql_dbg_disc, vha, 0x2026, + "Async done-%s res %x step %x\n", + sp->name, res, vha->scan.step); sp->rc = res; if (res) { @@ -3547,8 +3547,7 @@ static void qla2x00_async_gpnft_gnnft_sp_done(srb_t *sp, int res) * sp for GNNFT_DONE work. This will allow all * the resource to get freed up. */ - rc = qla2x00_post_gnnft_gpnft_done_work(vha, sp, - QLA_EVT_GNNFT_DONE); + rc = qla2x00_post_next_scan_work(vha, sp, QLA_EVT_SCAN_FINISH); if (rc) { /* Cleanup here to prevent memory leak */ qla24xx_sp_unmap(vha, sp); @@ -3573,28 +3572,30 @@ static void qla2x00_async_gpnft_gnnft_sp_done(srb_t *sp, int res) qla2x00_find_free_fcp_nvme_slot(vha, sp); - if ((fc4_type == FC4_TYPE_FCP_SCSI) && vha->flags.nvme_enabled && - cmd == GNN_FT_CMD) { - spin_lock_irqsave(&vha->work_lock, flags); - vha->scan.scan_flags &= ~SF_SCANNING; - spin_unlock_irqrestore(&vha->work_lock, flags); + spin_lock_irqsave(&vha->work_lock, flags); + vha->scan.scan_flags &= ~SF_SCANNING; + spin_unlock_irqrestore(&vha->work_lock, flags); - sp->rc = res; - rc = qla2x00_post_nvme_gpnft_work(vha, sp, QLA_EVT_GPNFT); - if (rc) { - qla24xx_sp_unmap(vha, sp); - set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); - set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); - } - return; - } + switch (vha->scan.step) { + case FAB_SCAN_GPNFT_FCP: + case FAB_SCAN_GPNFT_NVME: + rc = qla2x00_post_next_scan_work(vha, sp, QLA_EVT_SCAN_CMD); + break; + case FAB_SCAN_GNNFT_FCP: + if (vha->flags.nvme_enabled) + rc = qla2x00_post_next_scan_work(vha, sp, QLA_EVT_SCAN_CMD); + else + rc = qla2x00_post_next_scan_work(vha, sp, QLA_EVT_SCAN_FINISH); - if (cmd == GPN_FT_CMD) { - rc = qla2x00_post_gnnft_gpnft_done_work(vha, sp, - QLA_EVT_GPNFT_DONE); - } else { - rc = qla2x00_post_gnnft_gpnft_done_work(vha, sp, - QLA_EVT_GNNFT_DONE); + break; + case FAB_SCAN_GNNFT_NVME: + rc = qla2x00_post_next_scan_work(vha, sp, QLA_EVT_SCAN_FINISH); + break; + default: + /* should not be here */ + WARN_ON(1); + rc = QLA_FUNCTION_FAILED; + break; } if (rc) { @@ -3605,127 +3606,16 @@ static void qla2x00_async_gpnft_gnnft_sp_done(srb_t *sp, int res) } } -/* - * Get WWNN list for fc4_type - * - * It is assumed the same SRB is re-used from GPNFT to avoid - * mem free & re-alloc - */ -static int qla24xx_async_gnnft(scsi_qla_host_t *vha, struct srb *sp, - u8 fc4_type) -{ - int rval = QLA_FUNCTION_FAILED; - struct ct_sns_req *ct_req; - struct ct_sns_pkt *ct_sns; - unsigned long flags; - - if (!vha->flags.online) { - spin_lock_irqsave(&vha->work_lock, flags); - vha->scan.scan_flags &= ~SF_SCANNING; - spin_unlock_irqrestore(&vha->work_lock, flags); - goto done_free_sp; - } - - if (!sp->u.iocb_cmd.u.ctarg.req || !sp->u.iocb_cmd.u.ctarg.rsp) { - ql_log(ql_log_warn, vha, 0xffff, - "%s: req 
%p rsp %p are not setup\n", - __func__, sp->u.iocb_cmd.u.ctarg.req, - sp->u.iocb_cmd.u.ctarg.rsp); - spin_lock_irqsave(&vha->work_lock, flags); - vha->scan.scan_flags &= ~SF_SCANNING; - spin_unlock_irqrestore(&vha->work_lock, flags); - WARN_ON(1); - set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); - set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); - goto done_free_sp; - } - - ql_dbg(ql_dbg_disc, vha, 0xfffff, - "%s: FC4Type %x, CT-PASSTHRU %s command ctarg rsp size %d, ctarg req size %d\n", - __func__, fc4_type, sp->name, sp->u.iocb_cmd.u.ctarg.rsp_size, - sp->u.iocb_cmd.u.ctarg.req_size); - - sp->type = SRB_CT_PTHRU_CMD; - sp->name = "gnnft"; - sp->gen1 = vha->hw->base_qpair->chip_reset; - sp->gen2 = fc4_type; - qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2, - qla2x00_async_gpnft_gnnft_sp_done); - - memset(sp->u.iocb_cmd.u.ctarg.rsp, 0, sp->u.iocb_cmd.u.ctarg.rsp_size); - memset(sp->u.iocb_cmd.u.ctarg.req, 0, sp->u.iocb_cmd.u.ctarg.req_size); - - ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req; - /* CT_IU preamble */ - ct_req = qla2x00_prep_ct_req(ct_sns, GNN_FT_CMD, - sp->u.iocb_cmd.u.ctarg.rsp_size); - - /* GPN_FT req */ - ct_req->req.gpn_ft.port_type = fc4_type; - - sp->u.iocb_cmd.u.ctarg.req_size = GNN_FT_REQ_SIZE; - sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS; - - ql_dbg(ql_dbg_disc, vha, 0xffff, - "Async-%s hdl=%x FC4Type %x.\n", sp->name, - sp->handle, ct_req->req.gpn_ft.port_type); - - rval = qla2x00_start_sp(sp); - if (rval != QLA_SUCCESS) { - goto done_free_sp; - } - - return rval; - -done_free_sp: - if (sp->u.iocb_cmd.u.ctarg.req) { - dma_free_coherent(&vha->hw->pdev->dev, - sp->u.iocb_cmd.u.ctarg.req_allocated_size, - sp->u.iocb_cmd.u.ctarg.req, - sp->u.iocb_cmd.u.ctarg.req_dma); - sp->u.iocb_cmd.u.ctarg.req = NULL; - } - if (sp->u.iocb_cmd.u.ctarg.rsp) { - dma_free_coherent(&vha->hw->pdev->dev, - sp->u.iocb_cmd.u.ctarg.rsp_allocated_size, - sp->u.iocb_cmd.u.ctarg.rsp, - sp->u.iocb_cmd.u.ctarg.rsp_dma); - sp->u.iocb_cmd.u.ctarg.rsp = NULL; - } - /* ref: INIT */ - kref_put(&sp->cmd_kref, qla2x00_sp_release); - - spin_lock_irqsave(&vha->work_lock, flags); - vha->scan.scan_flags &= ~SF_SCANNING; - if (vha->scan.scan_flags == 0) { - ql_dbg(ql_dbg_disc, vha, 0xffff, - "%s: schedule\n", __func__); - vha->scan.scan_flags |= SF_QUEUED; - schedule_delayed_work(&vha->scan.scan_work, 5); - } - spin_unlock_irqrestore(&vha->work_lock, flags); - - - return rval; -} /* GNNFT */ - -void qla24xx_async_gpnft_done(scsi_qla_host_t *vha, srb_t *sp) -{ - ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff, - "%s enter\n", __func__); - qla24xx_async_gnnft(vha, sp, sp->gen2); -} - /* Get WWPN list for certain fc4_type */ -int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp) +int qla_fab_async_scan(scsi_qla_host_t *vha, srb_t *sp) { int rval = QLA_FUNCTION_FAILED; struct ct_sns_req *ct_req; struct ct_sns_pkt *ct_sns; - u32 rspsz; + u32 rspsz = 0; unsigned long flags; - ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff, + ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x200c, "%s enter\n", __func__); if (!vha->flags.online) @@ -3734,22 +3624,21 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp) spin_lock_irqsave(&vha->work_lock, flags); if (vha->scan.scan_flags & SF_SCANNING) { spin_unlock_irqrestore(&vha->work_lock, flags); - ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff, + ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x2012, "%s: scan active\n", __func__); return rval; } vha->scan.scan_flags |= SF_SCANNING; + if (!sp) + vha->scan.step = FAB_SCAN_START; 
+ spin_unlock_irqrestore(&vha->work_lock, flags); - if (fc4_type == FC4_TYPE_FCP_SCSI) { - ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff, + switch (vha->scan.step) { + case FAB_SCAN_START: + ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x2018, "%s: Performing FCP Scan\n", __func__); - if (sp) { - /* ref: INIT */ - kref_put(&sp->cmd_kref, qla2x00_sp_release); - } - /* ref: INIT */ sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL); if (!sp) { @@ -3765,7 +3654,7 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp) GFP_KERNEL); sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt); if (!sp->u.iocb_cmd.u.ctarg.req) { - ql_log(ql_log_warn, vha, 0xffff, + ql_log(ql_log_warn, vha, 0x201a, "Failed to allocate ct_sns request.\n"); spin_lock_irqsave(&vha->work_lock, flags); vha->scan.scan_flags &= ~SF_SCANNING; @@ -3773,7 +3662,6 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp) qla2x00_rel_sp(sp); return rval; } - sp->u.iocb_cmd.u.ctarg.req_size = GPN_FT_REQ_SIZE; rspsz = sizeof(struct ct_sns_gpnft_rsp) + vha->hw->max_fibre_devices * @@ -3785,7 +3673,7 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp) GFP_KERNEL); sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = rspsz; if (!sp->u.iocb_cmd.u.ctarg.rsp) { - ql_log(ql_log_warn, vha, 0xffff, + ql_log(ql_log_warn, vha, 0x201b, "Failed to allocate ct_sns request.\n"); spin_lock_irqsave(&vha->work_lock, flags); vha->scan.scan_flags &= ~SF_SCANNING; @@ -3805,35 +3693,95 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp) "%s scan list size %d\n", __func__, vha->scan.size); memset(vha->scan.l, 0, vha->scan.size); - } else if (!sp) { - ql_dbg(ql_dbg_disc, vha, 0xffff, - "NVME scan did not provide SP\n"); + + vha->scan.step = FAB_SCAN_GPNFT_FCP; + break; + case FAB_SCAN_GPNFT_FCP: + vha->scan.step = FAB_SCAN_GNNFT_FCP; + break; + case FAB_SCAN_GNNFT_FCP: + vha->scan.step = FAB_SCAN_GPNFT_NVME; + break; + case FAB_SCAN_GPNFT_NVME: + vha->scan.step = FAB_SCAN_GNNFT_NVME; + break; + case FAB_SCAN_GNNFT_NVME: + default: + /* should not be here */ + WARN_ON(1); + goto done_free_sp; + } + + if (!sp) { + ql_dbg(ql_dbg_disc, vha, 0x201c, + "scan did not provide SP\n"); return rval; } + if (!sp->u.iocb_cmd.u.ctarg.req || !sp->u.iocb_cmd.u.ctarg.rsp) { + ql_log(ql_log_warn, vha, 0x201d, + "%s: req %p rsp %p are not setup\n", + __func__, sp->u.iocb_cmd.u.ctarg.req, + sp->u.iocb_cmd.u.ctarg.rsp); + spin_lock_irqsave(&vha->work_lock, flags); + vha->scan.scan_flags &= ~SF_SCANNING; + spin_unlock_irqrestore(&vha->work_lock, flags); + WARN_ON(1); + set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); + set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); + goto done_free_sp; + } + + rspsz = sp->u.iocb_cmd.u.ctarg.rsp_size; + memset(sp->u.iocb_cmd.u.ctarg.req, 0, sp->u.iocb_cmd.u.ctarg.req_size); + memset(sp->u.iocb_cmd.u.ctarg.rsp, 0, sp->u.iocb_cmd.u.ctarg.rsp_size); + sp->type = SRB_CT_PTHRU_CMD; - sp->name = "gpnft"; sp->gen1 = vha->hw->base_qpair->chip_reset; - sp->gen2 = fc4_type; qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2, - qla2x00_async_gpnft_gnnft_sp_done); - - rspsz = sp->u.iocb_cmd.u.ctarg.rsp_size; - memset(sp->u.iocb_cmd.u.ctarg.rsp, 0, sp->u.iocb_cmd.u.ctarg.rsp_size); - memset(sp->u.iocb_cmd.u.ctarg.req, 0, sp->u.iocb_cmd.u.ctarg.req_size); + qla_async_scan_sp_done); ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req; - /* CT_IU preamble */ - ct_req = qla2x00_prep_ct_req(ct_sns, GPN_FT_CMD, rspsz); - /* GPN_FT req */ - ct_req->req.gpn_ft.port_type = fc4_type; + 
/* CT_IU preamble */ + switch (vha->scan.step) { + case FAB_SCAN_GPNFT_FCP: + sp->name = "gpnft"; + ct_req = qla2x00_prep_ct_req(ct_sns, GPN_FT_CMD, rspsz); + ct_req->req.gpn_ft.port_type = FC4_TYPE_FCP_SCSI; + sp->u.iocb_cmd.u.ctarg.req_size = GPN_FT_REQ_SIZE; + break; + case FAB_SCAN_GNNFT_FCP: + sp->name = "gnnft"; + ct_req = qla2x00_prep_ct_req(ct_sns, GNN_FT_CMD, rspsz); + ct_req->req.gpn_ft.port_type = FC4_TYPE_FCP_SCSI; + sp->u.iocb_cmd.u.ctarg.req_size = GNN_FT_REQ_SIZE; + break; + case FAB_SCAN_GPNFT_NVME: + sp->name = "gpnft"; + ct_req = qla2x00_prep_ct_req(ct_sns, GPN_FT_CMD, rspsz); + ct_req->req.gpn_ft.port_type = FC4_TYPE_NVME; + sp->u.iocb_cmd.u.ctarg.req_size = GPN_FT_REQ_SIZE; + break; + case FAB_SCAN_GNNFT_NVME: + sp->name = "gnnft"; + ct_req = qla2x00_prep_ct_req(ct_sns, GNN_FT_CMD, rspsz); + ct_req->req.gpn_ft.port_type = FC4_TYPE_NVME; + sp->u.iocb_cmd.u.ctarg.req_size = GNN_FT_REQ_SIZE; + break; + default: + /* should not be here */ + WARN_ON(1); + goto done_free_sp; + } sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS; - ql_dbg(ql_dbg_disc, vha, 0xffff, - "Async-%s hdl=%x FC4Type %x.\n", sp->name, - sp->handle, ct_req->req.gpn_ft.port_type); + ql_dbg(ql_dbg_disc, vha, 0x2003, + "%s: step %d, rsp size %d, req size %d hdl %x %s FC4TYPE %x \n", + __func__, vha->scan.step, sp->u.iocb_cmd.u.ctarg.rsp_size, + sp->u.iocb_cmd.u.ctarg.req_size, sp->handle, sp->name, + ct_req->req.gpn_ft.port_type); rval = qla2x00_start_sp(sp); if (rval != QLA_SUCCESS) { @@ -3864,7 +3812,7 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp) spin_lock_irqsave(&vha->work_lock, flags); vha->scan.scan_flags &= ~SF_SCANNING; if (vha->scan.scan_flags == 0) { - ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff, + ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x2007, "%s: Scan scheduled.\n", __func__); vha->scan.scan_flags |= SF_QUEUED; schedule_delayed_work(&vha->scan.scan_work, 5); @@ -3875,6 +3823,15 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp) return rval; } +void qla_fab_scan_start(struct scsi_qla_host *vha) +{ + int rval; + + rval = qla_fab_async_scan(vha, NULL); + if (rval) + set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); +} + void qla_scan_work_fn(struct work_struct *work) { struct fab_scan *s = container_of(to_delayed_work(work), diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 8377624d76c9..eda3bdab934d 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -1842,10 +1842,18 @@ int qla24xx_post_newsess_work(struct scsi_qla_host *vha, port_id_t *id, return qla2x00_post_work(vha, e); } +static void qla_rscn_gen_tick(scsi_qla_host_t *vha, u32 *ret_rscn_gen) +{ + *ret_rscn_gen = atomic_inc_return(&vha->rscn_gen); + /* memory barrier */ + wmb(); +} + void qla2x00_handle_rscn(scsi_qla_host_t *vha, struct event_arg *ea) { fc_port_t *fcport; unsigned long flags; + u32 rscn_gen; switch (ea->id.b.rsvd_1) { case RSCN_PORT_ADDR: @@ -1875,15 +1883,16 @@ void qla2x00_handle_rscn(scsi_qla_host_t *vha, struct event_arg *ea) * Otherwise we're already in the middle of a relogin */ fcport->scan_needed = 1; - fcport->rscn_gen++; + qla_rscn_gen_tick(vha, &fcport->rscn_gen); } } else { fcport->scan_needed = 1; - fcport->rscn_gen++; + qla_rscn_gen_tick(vha, &fcport->rscn_gen); } } break; case RSCN_AREA_ADDR: + qla_rscn_gen_tick(vha, &rscn_gen); list_for_each_entry(fcport, &vha->vp_fcports, list) { if (fcport->flags & FCF_FCP2_DEVICE && atomic_read(&fcport->state) == FCS_ONLINE) @@ -1891,11 +1900,12 @@ void 
qla2x00_handle_rscn(scsi_qla_host_t *vha, struct event_arg *ea) if ((ea->id.b24 & 0xffff00) == (fcport->d_id.b24 & 0xffff00)) { fcport->scan_needed = 1; - fcport->rscn_gen++; + fcport->rscn_gen = rscn_gen; } } break; case RSCN_DOM_ADDR: + qla_rscn_gen_tick(vha, &rscn_gen); list_for_each_entry(fcport, &vha->vp_fcports, list) { if (fcport->flags & FCF_FCP2_DEVICE && atomic_read(&fcport->state) == FCS_ONLINE) @@ -1903,19 +1913,20 @@ void qla2x00_handle_rscn(scsi_qla_host_t *vha, struct event_arg *ea) if ((ea->id.b24 & 0xff0000) == (fcport->d_id.b24 & 0xff0000)) { fcport->scan_needed = 1; - fcport->rscn_gen++; + fcport->rscn_gen = rscn_gen; } } break; case RSCN_FAB_ADDR: default: + qla_rscn_gen_tick(vha, &rscn_gen); list_for_each_entry(fcport, &vha->vp_fcports, list) { if (fcport->flags & FCF_FCP2_DEVICE && atomic_read(&fcport->state) == FCS_ONLINE) continue; fcport->scan_needed = 1; - fcport->rscn_gen++; + fcport->rscn_gen = rscn_gen; } break; } @@ -1924,6 +1935,7 @@ void qla2x00_handle_rscn(scsi_qla_host_t *vha, struct event_arg *ea) if (vha->scan.scan_flags == 0) { ql_dbg(ql_dbg_disc, vha, 0xffff, "%s: schedule\n", __func__); vha->scan.scan_flags |= SF_QUEUED; + vha->scan.rscn_gen_start = atomic_read(&vha->rscn_gen); schedule_delayed_work(&vha->scan.scan_work, 5); } spin_unlock_irqrestore(&vha->work_lock, flags); @@ -6393,10 +6405,9 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha) qlt_do_generation_tick(vha, &discovery_gen); if (USE_ASYNC_SCAN(ha)) { - rval = qla24xx_async_gpnft(vha, FC4_TYPE_FCP_SCSI, - NULL); - if (rval) - set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); + /* start of scan begins here */ + vha->scan.rscn_gen_end = atomic_read(&vha->rscn_gen); + qla_fab_scan_start(vha); } else { list_for_each_entry(fcport, &vha->vp_fcports, list) fcport->scan_state = QLA_FCPORT_SCAN; @@ -8207,15 +8218,21 @@ qla28xx_get_aux_images( struct qla27xx_image_status pri_aux_image_status, sec_aux_image_status; bool valid_pri_image = false, valid_sec_image = false; bool active_pri_image = false, active_sec_image = false; + int rc; if (!ha->flt_region_aux_img_status_pri) { ql_dbg(ql_dbg_init, vha, 0x018a, "Primary aux image not addressed\n"); goto check_sec_image; } - qla24xx_read_flash_data(vha, (uint32_t *)&pri_aux_image_status, + rc = qla24xx_read_flash_data(vha, (uint32_t *)&pri_aux_image_status, ha->flt_region_aux_img_status_pri, sizeof(pri_aux_image_status) >> 2); + if (rc) { + ql_log(ql_log_info, vha, 0x01a1, + "Unable to read Primary aux image(%x).\n", rc); + goto check_sec_image; + } qla27xx_print_image(vha, "Primary aux image", &pri_aux_image_status); if (qla28xx_check_aux_image_status_signature(&pri_aux_image_status)) { @@ -8246,9 +8263,15 @@ qla28xx_get_aux_images( goto check_valid_image; } - qla24xx_read_flash_data(vha, (uint32_t *)&sec_aux_image_status, + rc = qla24xx_read_flash_data(vha, (uint32_t *)&sec_aux_image_status, ha->flt_region_aux_img_status_sec, sizeof(sec_aux_image_status) >> 2); + if (rc) { + ql_log(ql_log_info, vha, 0x01a2, + "Unable to read Secondary aux image(%x).\n", rc); + goto check_valid_image; + } + qla27xx_print_image(vha, "Secondary aux image", &sec_aux_image_status); if (qla28xx_check_aux_image_status_signature(&sec_aux_image_status)) { @@ -8306,6 +8329,7 @@ qla27xx_get_active_image(struct scsi_qla_host *vha, struct qla27xx_image_status pri_image_status, sec_image_status; bool valid_pri_image = false, valid_sec_image = false; bool active_pri_image = false, active_sec_image = false; + int rc; if (!ha->flt_region_img_status_pri) { ql_dbg(ql_dbg_init, vha, 0x018a, 
"Primary image not addressed\n"); @@ -8347,8 +8371,14 @@ qla27xx_get_active_image(struct scsi_qla_host *vha, goto check_valid_image; } - qla24xx_read_flash_data(vha, (uint32_t *)(&sec_image_status), + rc = qla24xx_read_flash_data(vha, (uint32_t *)(&sec_image_status), ha->flt_region_img_status_sec, sizeof(sec_image_status) >> 2); + if (rc) { + ql_log(ql_log_info, vha, 0x01a3, + "Unable to read Secondary image status(%x).\n", rc); + goto check_valid_image; + } + qla27xx_print_image(vha, "Secondary image", &sec_image_status); if (qla27xx_check_image_status_signature(&sec_image_status)) { @@ -8420,11 +8450,10 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr, "FW: Loading firmware from flash (%x).\n", faddr); dcode = (uint32_t *)req->ring; - qla24xx_read_flash_data(vha, dcode, faddr, 8); - if (qla24xx_risc_firmware_invalid(dcode)) { + rval = qla24xx_read_flash_data(vha, dcode, faddr, 8); + if (rval || qla24xx_risc_firmware_invalid(dcode)) { ql_log(ql_log_fatal, vha, 0x008c, - "Unable to verify the integrity of flash firmware " - "image.\n"); + "Unable to verify the integrity of flash firmware image (rval %x).\n", rval); ql_log(ql_log_fatal, vha, 0x008d, "Firmware data: %08x %08x %08x %08x.\n", dcode[0], dcode[1], dcode[2], dcode[3]); @@ -8438,7 +8467,12 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr, for (j = 0; j < segments; j++) { ql_dbg(ql_dbg_init, vha, 0x008d, "-> Loading segment %u...\n", j); - qla24xx_read_flash_data(vha, dcode, faddr, 10); + rval = qla24xx_read_flash_data(vha, dcode, faddr, 10); + if (rval) { + ql_log(ql_log_fatal, vha, 0x016a, + "-> Unable to read segment addr + size .\n"); + return QLA_FUNCTION_FAILED; + } risc_addr = be32_to_cpu((__force __be32)dcode[2]); risc_size = be32_to_cpu((__force __be32)dcode[3]); if (!*srisc_addr) { @@ -8454,7 +8488,13 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr, ql_dbg(ql_dbg_init, vha, 0x008e, "-> Loading fragment %u: %#x <- %#x (%#lx dwords)...\n", fragment, risc_addr, faddr, dlen); - qla24xx_read_flash_data(vha, dcode, faddr, dlen); + rval = qla24xx_read_flash_data(vha, dcode, faddr, dlen); + if (rval) { + ql_log(ql_log_fatal, vha, 0x016b, + "-> Unable to read fragment(faddr %#x dlen %#lx).\n", + faddr, dlen); + return QLA_FUNCTION_FAILED; + } for (i = 0; i < dlen; i++) dcode[i] = swab32(dcode[i]); @@ -8483,7 +8523,14 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr, fwdt->length = 0; dcode = (uint32_t *)req->ring; - qla24xx_read_flash_data(vha, dcode, faddr, 7); + + rval = qla24xx_read_flash_data(vha, dcode, faddr, 7); + if (rval) { + ql_log(ql_log_fatal, vha, 0x016c, + "-> Unable to read template size.\n"); + goto failed; + } + risc_size = be32_to_cpu((__force __be32)dcode[2]); ql_dbg(ql_dbg_init, vha, 0x0161, "-> fwdt%u template array at %#x (%#x dwords)\n", @@ -8509,11 +8556,12 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr, } dcode = fwdt->template; - qla24xx_read_flash_data(vha, dcode, faddr, risc_size); + rval = qla24xx_read_flash_data(vha, dcode, faddr, risc_size); - if (!qla27xx_fwdt_template_valid(dcode)) { + if (rval || !qla27xx_fwdt_template_valid(dcode)) { ql_log(ql_log_warn, vha, 0x0165, - "-> fwdt%u failed template validate\n", j); + "-> fwdt%u failed template validate (rval %x)\n", + j, rval); goto failed; } diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h index a4a56ab0ba74..ef4b3cc1cd77 100644 --- a/drivers/scsi/qla2xxx/qla_inline.h +++ b/drivers/scsi/qla2xxx/qla_inline.h @@ 
-631,3 +631,11 @@ static inline int qla_mapq_alloc_qp_cpu_map(struct qla_hw_data *ha) } return 0; } + +static inline bool val_is_in_range(u32 val, u32 start, u32 end) +{ + if (val >= start && val <= end) + return true; + else + return false; +} diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c index b67416951a5f..76703f2706b8 100644 --- a/drivers/scsi/qla2xxx/qla_mid.c +++ b/drivers/scsi/qla2xxx/qla_mid.c @@ -180,7 +180,7 @@ qla24xx_disable_vp(scsi_qla_host_t *vha) atomic_set(&vha->loop_state, LOOP_DOWN); atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); list_for_each_entry(fcport, &vha->vp_fcports, list) - fcport->logout_on_delete = 0; + fcport->logout_on_delete = 1; if (!vha->hw->flags.edif_enabled) qla2x00_wait_for_sess_deletion(vha); diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c index a8ddf356e662..8f4cc136a9c9 100644 --- a/drivers/scsi/qla2xxx/qla_nvme.c +++ b/drivers/scsi/qla2xxx/qla_nvme.c @@ -49,7 +49,10 @@ int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport) return 0; } - if (!vha->nvme_local_port && qla_nvme_register_hba(vha)) + if (qla_nvme_register_hba(vha)) + return 0; + + if (!vha->nvme_local_port) return 0; if (!(fcport->nvme_prli_service_param & diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index fcb06df2ce4e..bc3b2aea3f8b 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -1875,14 +1875,9 @@ __qla2x00_abort_all_cmds(struct qla_qpair *qp, int res) for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) { sp = req->outstanding_cmds[cnt]; if (sp) { - /* - * perform lockless completion during driver unload - */ if (qla2x00_chip_is_down(vha)) { req->outstanding_cmds[cnt] = NULL; - spin_unlock_irqrestore(qp->qp_lock_ptr, flags); sp->done(sp, res); - spin_lock_irqsave(qp->qp_lock_ptr, flags); continue; } @@ -4689,7 +4684,7 @@ static void qla2x00_number_of_exch(scsi_qla_host_t *vha, u32 *ret_cnt, u16 max_cnt) { u32 temp; - struct init_cb_81xx *icb = (struct init_cb_81xx *)&vha->hw->init_cb; + struct init_cb_81xx *icb = (struct init_cb_81xx *)vha->hw->init_cb; *ret_cnt = FW_DEF_EXCHANGES_CNT; if (max_cnt > vha->hw->max_exchg) @@ -5563,15 +5558,11 @@ qla2x00_do_work(struct scsi_qla_host *vha) qla2x00_async_prlo_done(vha, e->u.logio.fcport, e->u.logio.data); break; - case QLA_EVT_GPNFT: - qla24xx_async_gpnft(vha, e->u.gpnft.fc4_type, - e->u.gpnft.sp); - break; - case QLA_EVT_GPNFT_DONE: - qla24xx_async_gpnft_done(vha, e->u.iosb.sp); + case QLA_EVT_SCAN_CMD: + qla_fab_async_scan(vha, e->u.iosb.sp); break; - case QLA_EVT_GNNFT_DONE: - qla24xx_async_gnnft_done(vha, e->u.iosb.sp); + case QLA_EVT_SCAN_FINISH: + qla_fab_scan_finish(vha, e->u.iosb.sp); break; case QLA_EVT_GFPNID: qla24xx_async_gfpnid(vha, e->u.fcport.fcport); diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c index c092a6b1ced4..6d16546e1729 100644 --- a/drivers/scsi/qla2xxx/qla_sup.c +++ b/drivers/scsi/qla2xxx/qla_sup.c @@ -555,6 +555,7 @@ qla2xxx_find_flt_start(scsi_qla_host_t *vha, uint32_t *start) struct qla_flt_location *fltl = (void *)req->ring; uint32_t *dcode = (uint32_t *)req->ring; uint8_t *buf = (void *)req->ring, *bcode, last_image; + int rc; /* * FLT-location structure resides after the last PCI region. @@ -584,14 +585,24 @@ qla2xxx_find_flt_start(scsi_qla_host_t *vha, uint32_t *start) pcihdr = 0; do { /* Verify PCI expansion ROM header. 
*/ - qla24xx_read_flash_data(vha, dcode, pcihdr >> 2, 0x20); + rc = qla24xx_read_flash_data(vha, dcode, pcihdr >> 2, 0x20); + if (rc) { + ql_log(ql_log_info, vha, 0x016d, + "Unable to read PCI Expansion Rom Header (%x).\n", rc); + return QLA_FUNCTION_FAILED; + } bcode = buf + (pcihdr % 4); if (bcode[0x0] != 0x55 || bcode[0x1] != 0xaa) goto end; /* Locate PCI data structure. */ pcids = pcihdr + ((bcode[0x19] << 8) | bcode[0x18]); - qla24xx_read_flash_data(vha, dcode, pcids >> 2, 0x20); + rc = qla24xx_read_flash_data(vha, dcode, pcids >> 2, 0x20); + if (rc) { + ql_log(ql_log_info, vha, 0x0179, + "Unable to read PCI Data Structure (%x).\n", rc); + return QLA_FUNCTION_FAILED; + } bcode = buf + (pcihdr % 4); /* Validate signature of PCI data structure. */ @@ -606,7 +617,12 @@ qla2xxx_find_flt_start(scsi_qla_host_t *vha, uint32_t *start) } while (!last_image); /* Now verify FLT-location structure. */ - qla24xx_read_flash_data(vha, dcode, pcihdr >> 2, sizeof(*fltl) >> 2); + rc = qla24xx_read_flash_data(vha, dcode, pcihdr >> 2, sizeof(*fltl) >> 2); + if (rc) { + ql_log(ql_log_info, vha, 0x017a, + "Unable to read FLT (%x).\n", rc); + return QLA_FUNCTION_FAILED; + } if (memcmp(fltl->sig, "QFLT", 4)) goto end; @@ -2605,13 +2621,18 @@ qla24xx_read_optrom_data(struct scsi_qla_host *vha, void *buf, uint32_t offset, uint32_t length) { struct qla_hw_data *ha = vha->hw; + int rc; /* Suspend HBA. */ scsi_block_requests(vha->host); set_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags); /* Go with read. */ - qla24xx_read_flash_data(vha, buf, offset >> 2, length >> 2); + rc = qla24xx_read_flash_data(vha, buf, offset >> 2, length >> 2); + if (rc) { + ql_log(ql_log_info, vha, 0x01a0, + "Unable to perform optrom read(%x).\n", rc); + } /* Resume HBA. */ clear_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags); @@ -3412,7 +3433,7 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf) struct active_regions active_regions = { }; if (IS_P3P_TYPE(ha)) - return ret; + return QLA_SUCCESS; if (!mbuf) return QLA_FUNCTION_FAILED; @@ -3432,20 +3453,31 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf) do { /* Verify PCI expansion ROM header. */ - qla24xx_read_flash_data(vha, dcode, pcihdr >> 2, 0x20); + ret = qla24xx_read_flash_data(vha, dcode, pcihdr >> 2, 0x20); + if (ret) { + ql_log(ql_log_info, vha, 0x017d, + "Unable to read PCI EXP Rom Header(%x).\n", ret); + return QLA_FUNCTION_FAILED; + } + bcode = mbuf + (pcihdr % 4); if (memcmp(bcode, "\x55\xaa", 2)) { /* No signature */ ql_log(ql_log_fatal, vha, 0x0059, "No matching ROM signature.\n"); - ret = QLA_FUNCTION_FAILED; - break; + return QLA_FUNCTION_FAILED; } /* Locate PCI data structure. */ pcids = pcihdr + ((bcode[0x19] << 8) | bcode[0x18]); - qla24xx_read_flash_data(vha, dcode, pcids >> 2, 0x20); + ret = qla24xx_read_flash_data(vha, dcode, pcids >> 2, 0x20); + if (ret) { + ql_log(ql_log_info, vha, 0x018e, + "Unable to read PCI Data Structure (%x).\n", ret); + return QLA_FUNCTION_FAILED; + } + bcode = mbuf + (pcihdr % 4); /* Validate signature of PCI data structure. 
*/ @@ -3454,8 +3486,7 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf) ql_log(ql_log_fatal, vha, 0x005a, "PCI data struct not found pcir_adr=%x.\n", pcids); ql_dump_buffer(ql_dbg_init, vha, 0x0059, dcode, 32); - ret = QLA_FUNCTION_FAILED; - break; + return QLA_FUNCTION_FAILED; } /* Read version */ @@ -3507,20 +3538,26 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf) faddr = ha->flt_region_fw_sec; } - qla24xx_read_flash_data(vha, dcode, faddr, 8); - if (qla24xx_risc_firmware_invalid(dcode)) { - ql_log(ql_log_warn, vha, 0x005f, - "Unrecognized fw revision at %x.\n", - ha->flt_region_fw * 4); - ql_dump_buffer(ql_dbg_init, vha, 0x005f, dcode, 32); + ret = qla24xx_read_flash_data(vha, dcode, faddr, 8); + if (ret) { + ql_log(ql_log_info, vha, 0x019e, + "Unable to read FW version (%x).\n", ret); + return ret; } else { - for (i = 0; i < 4; i++) - ha->fw_revision[i] = + if (qla24xx_risc_firmware_invalid(dcode)) { + ql_log(ql_log_warn, vha, 0x005f, + "Unrecognized fw revision at %x.\n", + ha->flt_region_fw * 4); + ql_dump_buffer(ql_dbg_init, vha, 0x005f, dcode, 32); + } else { + for (i = 0; i < 4; i++) + ha->fw_revision[i] = be32_to_cpu((__force __be32)dcode[4+i]); - ql_dbg(ql_dbg_init, vha, 0x0060, - "Firmware revision (flash) %u.%u.%u (%x).\n", - ha->fw_revision[0], ha->fw_revision[1], - ha->fw_revision[2], ha->fw_revision[3]); + ql_dbg(ql_dbg_init, vha, 0x0060, + "Firmware revision (flash) %u.%u.%u (%x).\n", + ha->fw_revision[0], ha->fw_revision[1], + ha->fw_revision[2], ha->fw_revision[3]); + } } /* Check for golden firmware and get version if available */ @@ -3531,18 +3568,23 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf) memset(ha->gold_fw_version, 0, sizeof(ha->gold_fw_version)); faddr = ha->flt_region_gold_fw; - qla24xx_read_flash_data(vha, dcode, ha->flt_region_gold_fw, 8); - if (qla24xx_risc_firmware_invalid(dcode)) { - ql_log(ql_log_warn, vha, 0x0056, - "Unrecognized golden fw at %#x.\n", faddr); - ql_dump_buffer(ql_dbg_init, vha, 0x0056, dcode, 32); + ret = qla24xx_read_flash_data(vha, dcode, ha->flt_region_gold_fw, 8); + if (ret) { + ql_log(ql_log_info, vha, 0x019f, + "Unable to read Gold FW version (%x).\n", ret); return ret; - } - - for (i = 0; i < 4; i++) - ha->gold_fw_version[i] = - be32_to_cpu((__force __be32)dcode[4+i]); + } else { + if (qla24xx_risc_firmware_invalid(dcode)) { + ql_log(ql_log_warn, vha, 0x0056, + "Unrecognized golden fw at %#x.\n", faddr); + ql_dump_buffer(ql_dbg_init, vha, 0x0056, dcode, 32); + return QLA_FUNCTION_FAILED; + } + for (i = 0; i < 4; i++) + ha->gold_fw_version[i] = + be32_to_cpu((__force __be32)dcode[4+i]); + } return ret; } diff --git a/drivers/scsi/sr_ioctl.c b/drivers/scsi/sr_ioctl.c index a0d2556a27bb..089653018d32 100644 --- a/drivers/scsi/sr_ioctl.c +++ b/drivers/scsi/sr_ioctl.c @@ -431,7 +431,7 @@ int sr_select_speed(struct cdrom_device_info *cdi, unsigned long speed) struct packet_command cgc; /* avoid exceeding the max speed or overflowing integer bounds */ - speed = clamp(0, speed, 0xffff / 177); + speed = clamp(speed, 0, 0xffff / 177); if (speed == 0) speed = 0xffff; /* set to max */ diff --git a/drivers/soc/mediatek/mtk-mutex.c b/drivers/soc/mediatek/mtk-mutex.c index b5af1fb5847e..01b129caf1eb 100644 --- a/drivers/soc/mediatek/mtk-mutex.c +++ b/drivers/soc/mediatek/mtk-mutex.c @@ -524,6 +524,7 @@ static const unsigned int mt8188_mdp_mutex_table_mod[MUTEX_MOD_IDX_MAX] = { [MUTEX_MOD_IDX_MDP_PAD0] = MT8195_MUTEX_MOD_MDP_PAD0, [MUTEX_MOD_IDX_MDP_PAD2] = MT8195_MUTEX_MOD_MDP_PAD2, 
[MUTEX_MOD_IDX_MDP_PAD3] = MT8195_MUTEX_MOD_MDP_PAD3, + [MUTEX_MOD_IDX_MDP_TCC0] = MT8195_MUTEX_MOD_MDP_TCC0, [MUTEX_MOD_IDX_MDP_WROT0] = MT8195_MUTEX_MOD_MDP_WROT0, [MUTEX_MOD_IDX_MDP_WROT2] = MT8195_MUTEX_MOD_MDP_WROT2, [MUTEX_MOD_IDX_MDP_WROT3] = MT8195_MUTEX_MOD_MDP_WROT3, diff --git a/drivers/soc/qcom/icc-bwmon.c b/drivers/soc/qcom/icc-bwmon.c index fb323b3364db..ecddb60bd665 100644 --- a/drivers/soc/qcom/icc-bwmon.c +++ b/drivers/soc/qcom/icc-bwmon.c @@ -565,7 +565,7 @@ static void bwmon_start(struct icc_bwmon *bwmon) int window; /* No need to check for errors, as this must have succeeded before. */ - dev_pm_opp_find_bw_ceil(bwmon->dev, &bw_low, 0); + dev_pm_opp_put(dev_pm_opp_find_bw_ceil(bwmon->dev, &bw_low, 0)); bwmon_clear_counters(bwmon, true); @@ -772,11 +772,13 @@ static int bwmon_probe(struct platform_device *pdev) opp = dev_pm_opp_find_bw_floor(dev, &bwmon->max_bw_kbps, 0); if (IS_ERR(opp)) return dev_err_probe(dev, PTR_ERR(opp), "failed to find max peak bandwidth\n"); + dev_pm_opp_put(opp); bwmon->min_bw_kbps = 0; opp = dev_pm_opp_find_bw_ceil(dev, &bwmon->min_bw_kbps, 0); if (IS_ERR(opp)) return dev_err_probe(dev, PTR_ERR(opp), "failed to find min peak bandwidth\n"); + dev_pm_opp_put(opp); bwmon->dev = dev; diff --git a/drivers/soc/qcom/pdr_interface.c b/drivers/soc/qcom/pdr_interface.c index a1b6a4081dea..216166e98fae 100644 --- a/drivers/soc/qcom/pdr_interface.c +++ b/drivers/soc/qcom/pdr_interface.c @@ -76,12 +76,12 @@ static int pdr_locator_new_server(struct qmi_handle *qmi, locator_hdl); struct pdr_service *pds; + mutex_lock(&pdr->lock); /* Create a local client port for QMI communication */ pdr->locator_addr.sq_family = AF_QIPCRTR; pdr->locator_addr.sq_node = svc->node; pdr->locator_addr.sq_port = svc->port; - mutex_lock(&pdr->lock); pdr->locator_init_complete = true; mutex_unlock(&pdr->lock); @@ -104,10 +104,10 @@ static void pdr_locator_del_server(struct qmi_handle *qmi, mutex_lock(&pdr->lock); pdr->locator_init_complete = false; - mutex_unlock(&pdr->lock); pdr->locator_addr.sq_node = 0; pdr->locator_addr.sq_port = 0; + mutex_unlock(&pdr->lock); } static const struct qmi_ops pdr_locator_ops = { @@ -365,12 +365,14 @@ static int pdr_get_domain_list(struct servreg_get_domain_list_req *req, if (ret < 0) return ret; + mutex_lock(&pdr->lock); ret = qmi_send_request(&pdr->locator_hdl, &pdr->locator_addr, &txn, SERVREG_GET_DOMAIN_LIST_REQ, SERVREG_GET_DOMAIN_LIST_REQ_MAX_LEN, servreg_get_domain_list_req_ei, req); + mutex_unlock(&pdr->lock); if (ret < 0) { qmi_txn_cancel(&txn); return ret; @@ -415,7 +417,7 @@ static int pdr_locate_service(struct pdr_handle *pdr, struct pdr_service *pds) if (ret < 0) goto out; - for (i = domains_read; i < resp->domain_list_len; i++) { + for (i = 0; i < resp->domain_list_len; i++) { entry = &resp->domain_list[i]; if (strnlen(entry->name, sizeof(entry->name)) == sizeof(entry->name)) diff --git a/drivers/soc/qcom/pmic_glink.c b/drivers/soc/qcom/pmic_glink.c index 65279243072c..9ebc0ba35947 100644 --- a/drivers/soc/qcom/pmic_glink.c +++ b/drivers/soc/qcom/pmic_glink.c @@ -373,8 +373,17 @@ static struct platform_driver pmic_glink_driver = { static int pmic_glink_init(void) { - platform_driver_register(&pmic_glink_driver); - register_rpmsg_driver(&pmic_glink_rpmsg_driver); + int ret; + + ret = platform_driver_register(&pmic_glink_driver); + if (ret < 0) + return ret; + + ret = register_rpmsg_driver(&pmic_glink_rpmsg_driver); + if (ret < 0) { + platform_driver_unregister(&pmic_glink_driver); + return ret; + } return 0; } diff --git 
a/drivers/soc/qcom/rpmh-rsc.c b/drivers/soc/qcom/rpmh-rsc.c index 561d8037b50a..de86009ecd91 100644 --- a/drivers/soc/qcom/rpmh-rsc.c +++ b/drivers/soc/qcom/rpmh-rsc.c @@ -646,13 +646,14 @@ int rpmh_rsc_send_data(struct rsc_drv *drv, const struct tcs_request *msg) { struct tcs_group *tcs; int tcs_id; - unsigned long flags; + + might_sleep(); tcs = get_tcs_for_msg(drv, msg); if (IS_ERR(tcs)) return PTR_ERR(tcs); - spin_lock_irqsave(&drv->lock, flags); + spin_lock_irq(&drv->lock); /* Wait forever for a free tcs. It better be there eventually! */ wait_event_lock_irq(drv->tcs_wait, @@ -670,7 +671,7 @@ int rpmh_rsc_send_data(struct rsc_drv *drv, const struct tcs_request *msg) write_tcs_reg_sync(drv, drv->regs[RSC_DRV_CMD_ENABLE], tcs_id, 0); enable_tcs_irq(drv, tcs_id, true); } - spin_unlock_irqrestore(&drv->lock, flags); + spin_unlock_irq(&drv->lock); /* * These two can be done after the lock is released because: diff --git a/drivers/soc/qcom/rpmh.c b/drivers/soc/qcom/rpmh.c index 9f26d7f9b9dc..8903ed956312 100644 --- a/drivers/soc/qcom/rpmh.c +++ b/drivers/soc/qcom/rpmh.c @@ -183,7 +183,6 @@ static int __rpmh_write(const struct device *dev, enum rpmh_state state, } if (state == RPMH_ACTIVE_ONLY_STATE) { - WARN_ON(irqs_disabled()); ret = rpmh_rsc_send_data(ctrlr_to_drv(ctrlr), &rpm_msg->msg); } else { /* Clean up our call by spoofing tx_done */ diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c index 277c07a6603d..41342c37916a 100644 --- a/drivers/soc/qcom/socinfo.c +++ b/drivers/soc/qcom/socinfo.c @@ -133,7 +133,8 @@ static const char *const pmic_models[] = { [72] = "PMR735D", [73] = "PM8550", [74] = "PMK8550", - [82] = "SMB2360", + [82] = "PMC8380", + [83] = "SMB2360", }; struct socinfo_params { diff --git a/drivers/soc/xilinx/xlnx_event_manager.c b/drivers/soc/xilinx/xlnx_event_manager.c index 253299e4214d..366018f6a0ee 100644 --- a/drivers/soc/xilinx/xlnx_event_manager.c +++ b/drivers/soc/xilinx/xlnx_event_manager.c @@ -3,6 +3,7 @@ * Xilinx Event Management Driver * * Copyright (C) 2021 Xilinx, Inc. + * Copyright (C) 2024 Advanced Micro Devices, Inc. 
* * Abhyuday Godhasara <abhyuday.godhasara@xxxxxxxxxx> */ @@ -19,7 +20,7 @@ #include <linux/platform_device.h> #include <linux/slab.h> -static DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number1); +static DEFINE_PER_CPU_READ_MOSTLY(int, dummy_cpu_number); static int virq_sgi; static int event_manager_availability = -EACCES; @@ -570,7 +571,6 @@ static void xlnx_disable_percpu_irq(void *data) static int xlnx_event_init_sgi(struct platform_device *pdev) { int ret = 0; - int cpu; /* * IRQ related structures are used for the following: * for each SGI interrupt ensure its mapped by GIC IRQ domain @@ -607,11 +607,8 @@ static int xlnx_event_init_sgi(struct platform_device *pdev) sgi_fwspec.param[0] = sgi_num; virq_sgi = irq_create_fwspec_mapping(&sgi_fwspec); - cpu = get_cpu(); - per_cpu(cpu_number1, cpu) = cpu; ret = request_percpu_irq(virq_sgi, xlnx_event_handler, "xlnx_event_mgmt", - &cpu_number1); - put_cpu(); + &dummy_cpu_number); WARN_ON(ret); if (ret) { @@ -627,16 +624,12 @@ static int xlnx_event_init_sgi(struct platform_device *pdev) static void xlnx_event_cleanup_sgi(struct platform_device *pdev) { - int cpu = smp_processor_id(); - - per_cpu(cpu_number1, cpu) = cpu; - cpuhp_remove_state(CPUHP_AP_ONLINE_DYN); on_each_cpu(xlnx_disable_percpu_irq, NULL, 1); irq_clear_status_flags(virq_sgi, IRQ_PER_CPU); - free_percpu_irq(virq_sgi, &cpu_number1); + free_percpu_irq(virq_sgi, &dummy_cpu_number); irq_dispose_mapping(virq_sgi); } diff --git a/drivers/soc/xilinx/zynqmp_power.c b/drivers/soc/xilinx/zynqmp_power.c index 965b1143936a..b82c01373f53 100644 --- a/drivers/soc/xilinx/zynqmp_power.c +++ b/drivers/soc/xilinx/zynqmp_power.c @@ -190,7 +190,9 @@ static int zynqmp_pm_probe(struct platform_device *pdev) u32 pm_api_version; struct mbox_client *client; - zynqmp_pm_get_api_version(&pm_api_version); + ret = zynqmp_pm_get_api_version(&pm_api_version); + if (ret) + return ret; /* Check PM API version number */ if (pm_api_version < ZYNQMP_PM_VERSION) diff --git a/drivers/spi/atmel-quadspi.c b/drivers/spi/atmel-quadspi.c index 370c4d1572ed..5aaff3bee1b7 100644 --- a/drivers/spi/atmel-quadspi.c +++ b/drivers/spi/atmel-quadspi.c @@ -756,8 +756,15 @@ static int __maybe_unused atmel_qspi_resume(struct device *dev) struct atmel_qspi *aq = spi_controller_get_devdata(ctrl); int ret; - clk_prepare(aq->pclk); - clk_prepare(aq->qspick); + ret = clk_prepare(aq->pclk); + if (ret) + return ret; + + ret = clk_prepare(aq->qspick); + if (ret) { + clk_unprepare(aq->pclk); + return ret; + } ret = pm_runtime_force_resume(dev); if (ret < 0) diff --git a/drivers/spi/spi-microchip-core.c b/drivers/spi/spi-microchip-core.c index 634364c7cfe6..99c25e6a937f 100644 --- a/drivers/spi/spi-microchip-core.c +++ b/drivers/spi/spi-microchip-core.c @@ -21,7 +21,7 @@ #include <linux/spi/spi.h> #define MAX_LEN (0xffff) -#define MAX_CS (8) +#define MAX_CS (1) #define DEFAULT_FRAMESIZE (8) #define FIFO_DEPTH (32) #define CLK_GEN_MODE1_MAX (255) @@ -75,6 +75,7 @@ #define REG_CONTROL (0x00) #define REG_FRAME_SIZE (0x04) +#define FRAME_SIZE_MASK GENMASK(5, 0) #define REG_STATUS (0x08) #define REG_INT_CLEAR (0x0c) #define REG_RX_DATA (0x10) @@ -89,6 +90,9 @@ #define REG_RIS (0x24) #define REG_CONTROL2 (0x28) #define REG_COMMAND (0x2c) +#define COMMAND_CLRFRAMECNT BIT(4) +#define COMMAND_TXFIFORST BIT(3) +#define COMMAND_RXFIFORST BIT(2) #define REG_PKTSIZE (0x30) #define REG_CMD_SIZE (0x34) #define REG_HWSTATUS (0x38) @@ -103,6 +107,7 @@ struct mchp_corespi { u8 *rx_buf; u32 clk_gen; /* divider for spi output clock generated by the controller */ u32 
clk_mode; + u32 pending_slave_select; int irq; int tx_len; int rx_len; @@ -148,62 +153,59 @@ static inline void mchp_corespi_read_fifo(struct mchp_corespi *spi) static void mchp_corespi_enable_ints(struct mchp_corespi *spi) { - u32 control, mask = INT_ENABLE_MASK; - - mchp_corespi_disable(spi); - - control = mchp_corespi_read(spi, REG_CONTROL); - - control |= mask; - mchp_corespi_write(spi, REG_CONTROL, control); + u32 control = mchp_corespi_read(spi, REG_CONTROL); - control |= CONTROL_ENABLE; + control |= INT_ENABLE_MASK; mchp_corespi_write(spi, REG_CONTROL, control); } static void mchp_corespi_disable_ints(struct mchp_corespi *spi) { - u32 control, mask = INT_ENABLE_MASK; - - mchp_corespi_disable(spi); - - control = mchp_corespi_read(spi, REG_CONTROL); - control &= ~mask; - mchp_corespi_write(spi, REG_CONTROL, control); + u32 control = mchp_corespi_read(spi, REG_CONTROL); - control |= CONTROL_ENABLE; + control &= ~INT_ENABLE_MASK; mchp_corespi_write(spi, REG_CONTROL, control); } static inline void mchp_corespi_set_xfer_size(struct mchp_corespi *spi, int len) { u32 control; - u16 lenpart; + u32 lenpart; + u32 frames = mchp_corespi_read(spi, REG_FRAMESUP); /* - * Disable the SPI controller. Writes to transfer length have - * no effect when the controller is enabled. + * Writing to FRAMECNT in REG_CONTROL will reset the frame count, taking + * a shortcut requires an explicit clear. */ - mchp_corespi_disable(spi); + if (frames == len) { + mchp_corespi_write(spi, REG_COMMAND, COMMAND_CLRFRAMECNT); + return; + } /* * The lower 16 bits of the frame count are stored in the control reg * for legacy reasons, but the upper 16 written to a different register: * FRAMESUP. While both the upper and lower bits can be *READ* from the - * FRAMESUP register, writing to the lower 16 bits is a NOP + * FRAMESUP register, writing to the lower 16 bits is (supposedly) a NOP. + * + * The driver used to disable the controller while modifying the frame + * count, and mask off the lower 16 bits of len while writing to + * FRAMES_UP. When the driver was changed to disable the controller as + * infrequently as possible, it was discovered that the logic of + * lenpart = len & 0xffff_0000 + * write(REG_FRAMESUP, lenpart) + * would actually write zeros into the lower 16 bits on an mpfs250t-es, + * despite documentation stating these bits were read-only. + * Writing len unmasked into FRAMES_UP ensures those bits aren't zeroed + * on an mpfs250t-es and will be a NOP for the lower 16 bits on hardware + * that matches the documentation. */ lenpart = len & 0xffff; - control = mchp_corespi_read(spi, REG_CONTROL); control &= ~CONTROL_FRAMECNT_MASK; control |= lenpart << CONTROL_FRAMECNT_SHIFT; mchp_corespi_write(spi, REG_CONTROL, control); - - lenpart = len & 0xffff0000; - mchp_corespi_write(spi, REG_FRAMESUP, lenpart); - - control |= CONTROL_ENABLE; - mchp_corespi_write(spi, REG_CONTROL, control); + mchp_corespi_write(spi, REG_FRAMESUP, len); } static inline void mchp_corespi_write_fifo(struct mchp_corespi *spi) @@ -226,17 +228,22 @@ static inline void mchp_corespi_write_fifo(struct mchp_corespi *spi) static inline void mchp_corespi_set_framesize(struct mchp_corespi *spi, int bt) { + u32 frame_size = mchp_corespi_read(spi, REG_FRAME_SIZE); u32 control; + if ((frame_size & FRAME_SIZE_MASK) == bt) + return; + /* * Disable the SPI controller. Writes to the frame size have * no effect when the controller is enabled. 
*/ - mchp_corespi_disable(spi); + control = mchp_corespi_read(spi, REG_CONTROL); + control &= ~CONTROL_ENABLE; + mchp_corespi_write(spi, REG_CONTROL, control); mchp_corespi_write(spi, REG_FRAME_SIZE, bt); - control = mchp_corespi_read(spi, REG_CONTROL); control |= CONTROL_ENABLE; mchp_corespi_write(spi, REG_CONTROL, control); } @@ -249,8 +256,18 @@ static void mchp_corespi_set_cs(struct spi_device *spi, bool disable) reg = mchp_corespi_read(corespi, REG_SLAVE_SELECT); reg &= ~BIT(spi_get_chipselect(spi, 0)); reg |= !disable << spi_get_chipselect(spi, 0); + corespi->pending_slave_select = reg; - mchp_corespi_write(corespi, REG_SLAVE_SELECT, reg); + /* + * Only deassert chip select immediately. Writing to some registers + * requires the controller to be disabled, which results in the + * output pins being tristated and can cause the SCLK and MOSI lines + * to transition. Therefore asserting the chip select is deferred + * until just before writing to the TX FIFO, to ensure the device + * doesn't see any spurious clock transitions whilst CS is enabled. + */ + if (((spi->mode & SPI_CS_HIGH) == 0) == disable) + mchp_corespi_write(corespi, REG_SLAVE_SELECT, reg); } static int mchp_corespi_setup(struct spi_device *spi) @@ -266,6 +283,7 @@ static int mchp_corespi_setup(struct spi_device *spi) if (spi->mode & SPI_CS_HIGH) { reg = mchp_corespi_read(corespi, REG_SLAVE_SELECT); reg |= BIT(spi_get_chipselect(spi, 0)); + corespi->pending_slave_select = reg; mchp_corespi_write(corespi, REG_SLAVE_SELECT, reg); } return 0; @@ -276,17 +294,13 @@ static void mchp_corespi_init(struct spi_controller *host, struct mchp_corespi * unsigned long clk_hz; u32 control = mchp_corespi_read(spi, REG_CONTROL); - control |= CONTROL_MASTER; + control &= ~CONTROL_ENABLE; + mchp_corespi_write(spi, REG_CONTROL, control); + control |= CONTROL_MASTER; control &= ~CONTROL_MODE_MASK; control |= MOTOROLA_MODE; - mchp_corespi_set_framesize(spi, DEFAULT_FRAMESIZE); - - /* max. possible spi clock rate is the apb clock rate */ - clk_hz = clk_get_rate(spi->clk); - host->max_speed_hz = clk_hz; - /* * The controller must be configured so that it doesn't remove Chip * Select until the entire message has been transferred, even if at @@ -295,11 +309,16 @@ static void mchp_corespi_init(struct spi_controller *host, struct mchp_corespi * * BIGFIFO mode is also enabled, which sets the fifo depth to 32 frames * for the 8 bit transfers that this driver uses. */ - control = mchp_corespi_read(spi, REG_CONTROL); control |= CONTROL_SPS | CONTROL_BIGFIFO; mchp_corespi_write(spi, REG_CONTROL, control); + mchp_corespi_set_framesize(spi, DEFAULT_FRAMESIZE); + + /* max. possible spi clock rate is the apb clock rate */ + clk_hz = clk_get_rate(spi->clk); + host->max_speed_hz = clk_hz; + mchp_corespi_enable_ints(spi); /* @@ -307,7 +326,8 @@ static void mchp_corespi_init(struct spi_controller *host, struct mchp_corespi * * select is relinquished to the hardware. SSELOUT is enabled too so we * can deal with active high targets. 
*/ - mchp_corespi_write(spi, REG_SLAVE_SELECT, SSELOUT | SSEL_DIRECT); + spi->pending_slave_select = SSELOUT | SSEL_DIRECT; + mchp_corespi_write(spi, REG_SLAVE_SELECT, spi->pending_slave_select); control = mchp_corespi_read(spi, REG_CONTROL); @@ -321,8 +341,6 @@ static inline void mchp_corespi_set_clk_gen(struct mchp_corespi *spi) { u32 control; - mchp_corespi_disable(spi); - control = mchp_corespi_read(spi, REG_CONTROL); if (spi->clk_mode) control |= CONTROL_CLKMODE; @@ -331,12 +349,12 @@ static inline void mchp_corespi_set_clk_gen(struct mchp_corespi *spi) mchp_corespi_write(spi, REG_CLK_GEN, spi->clk_gen); mchp_corespi_write(spi, REG_CONTROL, control); - mchp_corespi_write(spi, REG_CONTROL, control | CONTROL_ENABLE); } static inline void mchp_corespi_set_mode(struct mchp_corespi *spi, unsigned int mode) { - u32 control, mode_val; + u32 mode_val; + u32 control = mchp_corespi_read(spi, REG_CONTROL); switch (mode & SPI_MODE_X_MASK) { case SPI_MODE_0: @@ -354,12 +372,13 @@ static inline void mchp_corespi_set_mode(struct mchp_corespi *spi, unsigned int } /* - * Disable the SPI controller. Writes to the frame size have + * Disable the SPI controller. Writes to the frame protocol have * no effect when the controller is enabled. */ - mchp_corespi_disable(spi); - control = mchp_corespi_read(spi, REG_CONTROL); + control &= ~CONTROL_ENABLE; + mchp_corespi_write(spi, REG_CONTROL, control); + control &= ~(SPI_MODE_X_MASK << MODE_X_MASK_SHIFT); control |= mode_val; @@ -380,21 +399,18 @@ static irqreturn_t mchp_corespi_interrupt(int irq, void *dev_id) if (intfield == 0) return IRQ_NONE; - if (intfield & INT_TXDONE) { + if (intfield & INT_TXDONE) mchp_corespi_write(spi, REG_INT_CLEAR, INT_TXDONE); + if (intfield & INT_RXRDY) { + mchp_corespi_write(spi, REG_INT_CLEAR, INT_RXRDY); + if (spi->rx_len) mchp_corespi_read_fifo(spi); - - if (spi->tx_len) - mchp_corespi_write_fifo(spi); - - if (!spi->rx_len) - finalise = true; } - if (intfield & INT_RXRDY) - mchp_corespi_write(spi, REG_INT_CLEAR, INT_RXRDY); + if (!spi->rx_len && !spi->tx_len) + finalise = true; if (intfield & INT_RX_CHANNEL_OVERFLOW) { mchp_corespi_write(spi, REG_INT_CLEAR, INT_RX_CHANNEL_OVERFLOW); @@ -479,8 +495,13 @@ static int mchp_corespi_transfer_one(struct spi_controller *host, mchp_corespi_set_xfer_size(spi, (spi->tx_len > FIFO_DEPTH) ? 
FIFO_DEPTH : spi->tx_len); - if (spi->tx_len) + mchp_corespi_write(spi, REG_COMMAND, COMMAND_RXFIFORST | COMMAND_TXFIFORST); + + mchp_corespi_write(spi, REG_SLAVE_SELECT, spi->pending_slave_select); + + while (spi->tx_len) mchp_corespi_write_fifo(spi); + return 1; } diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c index 95fb5f1c91c1..05e6d007f9a7 100644 --- a/drivers/spi/spidev.c +++ b/drivers/spi/spidev.c @@ -734,6 +734,7 @@ static const struct of_device_id spidev_dt_ids[] = { { .compatible = "lwn,bk4", .data = &spidev_of_check }, { .compatible = "menlo,m53cpld", .data = &spidev_of_check }, { .compatible = "micron,spi-authenta", .data = &spidev_of_check }, + { .compatible = "rohm,bh2228fv", .data = &spidev_of_check }, { .compatible = "rohm,dh2228fv", .data = &spidev_of_check }, { .compatible = "semtech,sx1301", .data = &spidev_of_check }, { .compatible = "silabs,em3581", .data = &spidev_of_check }, diff --git a/drivers/thermal/broadcom/bcm2835_thermal.c b/drivers/thermal/broadcom/bcm2835_thermal.c index 5c1cebe07580..3b1030fc4fbf 100644 --- a/drivers/thermal/broadcom/bcm2835_thermal.c +++ b/drivers/thermal/broadcom/bcm2835_thermal.c @@ -185,7 +185,7 @@ static int bcm2835_thermal_probe(struct platform_device *pdev) return err; } - data->clk = devm_clk_get(&pdev->dev, NULL); + data->clk = devm_clk_get_enabled(&pdev->dev, NULL); if (IS_ERR(data->clk)) { err = PTR_ERR(data->clk); if (err != -EPROBE_DEFER) @@ -193,10 +193,6 @@ static int bcm2835_thermal_probe(struct platform_device *pdev) return err; } - err = clk_prepare_enable(data->clk); - if (err) - return err; - rate = clk_get_rate(data->clk); if ((rate < 1920000) || (rate > 5000000)) dev_warn(&pdev->dev, @@ -211,7 +207,7 @@ static int bcm2835_thermal_probe(struct platform_device *pdev) dev_err(&pdev->dev, "Failed to register the thermal device: %d\n", err); - goto err_clk; + return err; } /* @@ -236,7 +232,7 @@ static int bcm2835_thermal_probe(struct platform_device *pdev) dev_err(&pdev->dev, "Not able to read trip_temp: %d\n", err); - goto err_tz; + return err; } /* set bandgap reference voltage and enable voltage regulator */ @@ -269,17 +265,11 @@ static int bcm2835_thermal_probe(struct platform_device *pdev) */ err = thermal_add_hwmon_sysfs(tz); if (err) - goto err_tz; + return err; bcm2835_thermal_debugfs(pdev); return 0; -err_tz: - devm_thermal_of_zone_unregister(&pdev->dev, tz); -err_clk: - clk_disable_unprepare(data->clk); - - return err; } static void bcm2835_thermal_remove(struct platform_device *pdev) @@ -287,7 +277,6 @@ static void bcm2835_thermal_remove(struct platform_device *pdev) struct bcm2835_thermal_data *data = platform_get_drvdata(pdev); debugfs_remove_recursive(data->debugfsdir); - clk_disable_unprepare(data->clk); } static struct platform_driver bcm2835_thermal_driver = { diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c index 4e7fec406ee5..f2d31bc48f52 100644 --- a/drivers/thermal/thermal_core.c +++ b/drivers/thermal/thermal_core.c @@ -272,6 +272,44 @@ static int __init thermal_register_governors(void) return ret; } +static int __thermal_zone_device_set_mode(struct thermal_zone_device *tz, + enum thermal_device_mode mode) +{ + if (tz->ops.change_mode) { + int ret; + + ret = tz->ops.change_mode(tz, mode); + if (ret) + return ret; + } + + tz->mode = mode; + + return 0; +} + +static void thermal_zone_broken_disable(struct thermal_zone_device *tz) +{ + struct thermal_trip_desc *td; + + dev_err(&tz->device, "Unable to get temperature, disabling!\n"); + /* + * This function only runs 
for enabled thermal zones, so no need to + * check for the current mode. + */ + __thermal_zone_device_set_mode(tz, THERMAL_DEVICE_DISABLED); + thermal_notify_tz_disable(tz); + + for_each_trip_desc(tz, td) { + if (td->trip.type == THERMAL_TRIP_CRITICAL && + td->trip.temperature > THERMAL_TEMP_INVALID) { + dev_crit(&tz->device, + "Disabled thermal zone with critical trip point\n"); + return; + } + } +} + /* * Zone update section: main control loop applied to each zone while monitoring * in polling mode. The monitoring is done using a workqueue. @@ -292,6 +330,34 @@ static void thermal_zone_device_set_polling(struct thermal_zone_device *tz, cancel_delayed_work(&tz->poll_queue); } +static void thermal_zone_recheck(struct thermal_zone_device *tz, int error) +{ + if (error == -EAGAIN) { + thermal_zone_device_set_polling(tz, THERMAL_RECHECK_DELAY); + return; + } + + /* + * Print the message once to reduce log noise. It will be followed by + * another one if the temperature cannot be determined after multiple + * attempts. + */ + if (tz->recheck_delay_jiffies == THERMAL_RECHECK_DELAY) + dev_info(&tz->device, "Temperature check failed (%d)\n", error); + + thermal_zone_device_set_polling(tz, tz->recheck_delay_jiffies); + + tz->recheck_delay_jiffies += max(tz->recheck_delay_jiffies >> 1, 1ULL); + if (tz->recheck_delay_jiffies > THERMAL_MAX_RECHECK_DELAY) { + thermal_zone_broken_disable(tz); + /* + * Restore the original recheck delay value to allow the thermal + * zone to try to recover when it is reenabled by user space. + */ + tz->recheck_delay_jiffies = THERMAL_RECHECK_DELAY; + } +} + static void monitor_thermal_zone(struct thermal_zone_device *tz) { if (tz->mode != THERMAL_DEVICE_ENABLED) @@ -488,10 +554,7 @@ void __thermal_zone_device_update(struct thermal_zone_device *tz, ret = __thermal_zone_get_temp(tz, &temp); if (ret) { - if (ret != -EAGAIN) - dev_info(&tz->device, "Temperature check failed (%d)\n", ret); - - thermal_zone_device_set_polling(tz, msecs_to_jiffies(THERMAL_RECHECK_DELAY_MS)); + thermal_zone_recheck(tz, ret); return; } else if (temp <= THERMAL_TEMP_INVALID) { /* @@ -503,6 +566,8 @@ void __thermal_zone_device_update(struct thermal_zone_device *tz, goto monitor; } + tz->recheck_delay_jiffies = THERMAL_RECHECK_DELAY; + tz->last_temperature = tz->temperature; tz->temperature = temp; @@ -537,7 +602,7 @@ void __thermal_zone_device_update(struct thermal_zone_device *tz, static int thermal_zone_device_set_mode(struct thermal_zone_device *tz, enum thermal_device_mode mode) { - int ret = 0; + int ret; mutex_lock(&tz->lock); @@ -545,14 +610,15 @@ static int thermal_zone_device_set_mode(struct thermal_zone_device *tz, if (mode == tz->mode) { mutex_unlock(&tz->lock); - return ret; + return 0; } - if (tz->ops.change_mode) - ret = tz->ops.change_mode(tz, mode); + ret = __thermal_zone_device_set_mode(tz, mode); + if (ret) { + mutex_unlock(&tz->lock); - if (!ret) - tz->mode = mode; + return ret; + } __thermal_zone_device_update(tz, THERMAL_EVENT_UNSPECIFIED); @@ -563,7 +629,7 @@ static int thermal_zone_device_set_mode(struct thermal_zone_device *tz, else thermal_notify_tz_disable(tz); - return ret; + return 0; } int thermal_zone_device_enable(struct thermal_zone_device *tz) @@ -1433,6 +1499,7 @@ thermal_zone_device_register_with_trips(const char *type, thermal_set_delay_jiffies(&tz->passive_delay_jiffies, passive_delay); thermal_set_delay_jiffies(&tz->polling_delay_jiffies, polling_delay); + tz->recheck_delay_jiffies = THERMAL_RECHECK_DELAY; /* sys I/F */ /* Add nodes that are always present via 
.groups */ diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h index 5afd541d54b0..56113c9db575 100644 --- a/drivers/thermal/thermal_core.h +++ b/drivers/thermal/thermal_core.h @@ -67,6 +67,8 @@ struct thermal_governor { * @polling_delay_jiffies: number of jiffies to wait between polls when * checking whether trip points have been crossed (0 for * interrupt driven systems) + * @recheck_delay_jiffies: delay after a failed attempt to determine the zone + * temperature before trying again * @temperature: current temperature. This is only for core code, * drivers should use thermal_zone_get_temp() to get the * current temperature @@ -108,6 +110,7 @@ struct thermal_zone_device { int num_trips; unsigned long passive_delay_jiffies; unsigned long polling_delay_jiffies; + unsigned long recheck_delay_jiffies; int temperature; int last_temperature; int emul_temperature; @@ -137,10 +140,11 @@ struct thermal_zone_device { #define THERMAL_TEMP_INIT INT_MIN /* - * Default delay after a failing thermal zone temperature check before - * attempting to check it again. + * Default and maximum delay after a failed thermal zone temperature check + * before attempting to check it again (in jiffies). */ -#define THERMAL_RECHECK_DELAY_MS 250 +#define THERMAL_RECHECK_DELAY msecs_to_jiffies(250) +#define THERMAL_MAX_RECHECK_DELAY (120 * HZ) /* Default Thermal Governor */ #if defined(CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE) diff --git a/drivers/ufs/core/ufs-mcq.c b/drivers/ufs/core/ufs-mcq.c index c532416aec22..408fef9c6fd6 100644 --- a/drivers/ufs/core/ufs-mcq.c +++ b/drivers/ufs/core/ufs-mcq.c @@ -230,8 +230,6 @@ int ufshcd_mcq_memory_alloc(struct ufs_hba *hba) /* Operation and runtime registers configuration */ #define MCQ_CFG_n(r, i) ((r) + MCQ_QCFG_SIZE * (i)) -#define MCQ_OPR_OFFSET_n(p, i) \ - (hba->mcq_opr[(p)].offset + hba->mcq_opr[(p)].stride * (i)) static void __iomem *mcq_opr_base(struct ufs_hba *hba, enum ufshcd_mcq_opr n, int i) @@ -342,10 +340,10 @@ void ufshcd_mcq_make_queues_operational(struct ufs_hba *hba) ufsmcq_writelx(hba, upper_32_bits(hwq->sqe_dma_addr), MCQ_CFG_n(REG_SQUBA, i)); /* Submission Queue Doorbell Address Offset */ - ufsmcq_writelx(hba, MCQ_OPR_OFFSET_n(OPR_SQD, i), + ufsmcq_writelx(hba, ufshcd_mcq_opr_offset(hba, OPR_SQD, i), MCQ_CFG_n(REG_SQDAO, i)); /* Submission Queue Interrupt Status Address Offset */ - ufsmcq_writelx(hba, MCQ_OPR_OFFSET_n(OPR_SQIS, i), + ufsmcq_writelx(hba, ufshcd_mcq_opr_offset(hba, OPR_SQIS, i), MCQ_CFG_n(REG_SQISAO, i)); /* Completion Queue Lower Base Address */ @@ -355,10 +353,10 @@ void ufshcd_mcq_make_queues_operational(struct ufs_hba *hba) ufsmcq_writelx(hba, upper_32_bits(hwq->cqe_dma_addr), MCQ_CFG_n(REG_CQUBA, i)); /* Completion Queue Doorbell Address Offset */ - ufsmcq_writelx(hba, MCQ_OPR_OFFSET_n(OPR_CQD, i), + ufsmcq_writelx(hba, ufshcd_mcq_opr_offset(hba, OPR_CQD, i), MCQ_CFG_n(REG_CQDAO, i)); /* Completion Queue Interrupt Status Address Offset */ - ufsmcq_writelx(hba, MCQ_OPR_OFFSET_n(OPR_CQIS, i), + ufsmcq_writelx(hba, ufshcd_mcq_opr_offset(hba, OPR_CQIS, i), MCQ_CFG_n(REG_CQISAO, i)); /* Save the base addresses for quicker access */ diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index 05881153883e..dc1e345ab67e 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -50,6 +50,7 @@ #define PCI_DEVICE_ID_INTEL_DENVERTON_XHCI 0x19d0 #define PCI_DEVICE_ID_INTEL_ICE_LAKE_XHCI 0x8a13 #define PCI_DEVICE_ID_INTEL_TIGER_LAKE_XHCI 0x9a13 +#define PCI_DEVICE_ID_INTEL_TIGER_LAKE_PCH_XHCI 
0xa0ed #define PCI_DEVICE_ID_INTEL_COMET_LAKE_XHCI 0xa3af #define PCI_DEVICE_ID_INTEL_ALDER_LAKE_PCH_XHCI 0x51ed #define PCI_DEVICE_ID_INTEL_ALDER_LAKE_N_PCH_XHCI 0x54ed @@ -373,7 +374,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) xhci->quirks |= XHCI_MISSING_CAS; if (pdev->vendor == PCI_VENDOR_ID_INTEL && - (pdev->device == PCI_DEVICE_ID_INTEL_ALDER_LAKE_PCH_XHCI || + (pdev->device == PCI_DEVICE_ID_INTEL_TIGER_LAKE_PCH_XHCI || + pdev->device == PCI_DEVICE_ID_INTEL_ALDER_LAKE_PCH_XHCI || pdev->device == PCI_DEVICE_ID_INTEL_ALDER_LAKE_N_PCH_XHCI)) xhci->quirks |= XHCI_RESET_TO_DEFAULT; diff --git a/drivers/usb/typec/mux/nb7vpq904m.c b/drivers/usb/typec/mux/nb7vpq904m.c index b17826713753..9fe4ce6f62ac 100644 --- a/drivers/usb/typec/mux/nb7vpq904m.c +++ b/drivers/usb/typec/mux/nb7vpq904m.c @@ -413,7 +413,7 @@ static int nb7vpq904m_probe(struct i2c_client *client) ret = nb7vpq904m_parse_data_lanes_mapping(nb7); if (ret) - return ret; + goto err_switch_put; ret = regulator_enable(nb7->vcc_supply); if (ret) @@ -456,6 +456,9 @@ static int nb7vpq904m_probe(struct i2c_client *client) gpiod_set_value(nb7->enable_gpio, 0); regulator_disable(nb7->vcc_supply); +err_switch_put: + typec_switch_put(nb7->typec_switch); + return ret; } @@ -469,6 +472,8 @@ static void nb7vpq904m_remove(struct i2c_client *client) gpiod_set_value(nb7->enable_gpio, 0); regulator_disable(nb7->vcc_supply); + + typec_switch_put(nb7->typec_switch); } static const struct i2c_device_id nb7vpq904m_table[] = { diff --git a/drivers/usb/typec/mux/ptn36502.c b/drivers/usb/typec/mux/ptn36502.c index 0ec86ef32a87..88136a6d6f31 100644 --- a/drivers/usb/typec/mux/ptn36502.c +++ b/drivers/usb/typec/mux/ptn36502.c @@ -322,8 +322,10 @@ static int ptn36502_probe(struct i2c_client *client) "Failed to acquire orientation-switch\n"); ret = regulator_enable(ptn->vdd18_supply); - if (ret) - return dev_err_probe(dev, ret, "Failed to enable vdd18\n"); + if (ret) { + ret = dev_err_probe(dev, ret, "Failed to enable vdd18\n"); + goto err_switch_put; + } ret = ptn36502_detect(ptn); if (ret) @@ -363,6 +365,9 @@ static int ptn36502_probe(struct i2c_client *client) err_disable_regulator: regulator_disable(ptn->vdd18_supply); +err_switch_put: + typec_switch_put(ptn->typec_switch); + return ret; } @@ -374,6 +379,8 @@ static void ptn36502_remove(struct i2c_client *client) typec_switch_unregister(ptn->sw); regulator_disable(ptn->vdd18_supply); + + typec_switch_put(ptn->typec_switch); } static const struct i2c_device_id ptn36502_table[] = { diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c index ec20ecff85c7..bf664ec9341b 100644 --- a/drivers/vhost/vsock.c +++ b/drivers/vhost/vsock.c @@ -667,6 +667,7 @@ static int vhost_vsock_dev_open(struct inode *inode, struct file *file) } vsock->guest_cid = 0; /* no CID assigned yet */ + vsock->seqpacket_allow = false; atomic_set(&vsock->queued_replies, 0); @@ -810,8 +811,7 @@ static int vhost_vsock_set_features(struct vhost_vsock *vsock, u64 features) goto err; } - if (features & (1ULL << VIRTIO_VSOCK_F_SEQPACKET)) - vsock->seqpacket_allow = true; + vsock->seqpacket_allow = features & (1ULL << VIRTIO_VSOCK_F_SEQPACKET); for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) { vq = &vsock->vqs[i]; diff --git a/drivers/video/fbdev/vesafb.c b/drivers/video/fbdev/vesafb.c index 8ab64ae4cad3..5a161750a3ae 100644 --- a/drivers/video/fbdev/vesafb.c +++ b/drivers/video/fbdev/vesafb.c @@ -271,7 +271,7 @@ static int vesafb_probe(struct platform_device *dev) if (si->orig_video_isVGA != VIDEO_TYPE_VLFB) 
return -ENODEV; - vga_compat = (si->capabilities & 2) ? 0 : 1; + vga_compat = !__screen_info_vbe_mode_nonvga(si); vesafb_fix.smem_start = si->lfb_base; vesafb_defined.bits_per_pixel = si->lfb_depth; if (15 == vesafb_defined.bits_per_pixel) diff --git a/drivers/watchdog/rzg2l_wdt.c b/drivers/watchdog/rzg2l_wdt.c index 1741f98ca67c..7bce093316c4 100644 --- a/drivers/watchdog/rzg2l_wdt.c +++ b/drivers/watchdog/rzg2l_wdt.c @@ -123,8 +123,11 @@ static void rzg2l_wdt_init_timeout(struct watchdog_device *wdev) static int rzg2l_wdt_start(struct watchdog_device *wdev) { struct rzg2l_wdt_priv *priv = watchdog_get_drvdata(wdev); + int ret; - pm_runtime_get_sync(wdev->parent); + ret = pm_runtime_resume_and_get(wdev->parent); + if (ret) + return ret; /* Initialize time out */ rzg2l_wdt_init_timeout(wdev); @@ -141,15 +144,21 @@ static int rzg2l_wdt_start(struct watchdog_device *wdev) static int rzg2l_wdt_stop(struct watchdog_device *wdev) { struct rzg2l_wdt_priv *priv = watchdog_get_drvdata(wdev); + int ret; rzg2l_wdt_reset(priv); - pm_runtime_put(wdev->parent); + + ret = pm_runtime_put(wdev->parent); + if (ret < 0) + return ret; return 0; } static int rzg2l_wdt_set_timeout(struct watchdog_device *wdev, unsigned int timeout) { + int ret = 0; + wdev->timeout = timeout; /* @@ -158,11 +167,14 @@ static int rzg2l_wdt_set_timeout(struct watchdog_device *wdev, unsigned int time * to reset the module) so that it is updated with new timeout values. */ if (watchdog_active(wdev)) { - rzg2l_wdt_stop(wdev); - rzg2l_wdt_start(wdev); + ret = rzg2l_wdt_stop(wdev); + if (ret) + return ret; + + ret = rzg2l_wdt_start(wdev); } - return 0; + return ret; } static int rzg2l_wdt_restart(struct watchdog_device *wdev, diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index 6441e47d8a5e..7c22f5b8c535 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -514,6 +514,7 @@ static noinline int add_ra_bio_pages(struct inode *inode, put_page(page); break; } + add_size = min(em->start + em->len, page_end + 1) - cur; free_extent_map(em); if (page->index == end_index) { @@ -526,7 +527,6 @@ static noinline int add_ra_bio_pages(struct inode *inode, } } - add_size = min(em->start + em->len, page_end + 1) - cur; ret = bio_add_page(orig_bio, page, add_size, offset_in_page(cur)); if (ret != add_size) { unlock_extent(tree, cur, page_end, NULL); diff --git a/fs/ceph/super.c b/fs/ceph/super.c index 885cb5d4e771..0cdf84cd1791 100644 --- a/fs/ceph/super.c +++ b/fs/ceph/super.c @@ -961,7 +961,8 @@ static int __init init_caches(void) if (!ceph_mds_request_cachep) goto bad_mds_req; - ceph_wb_pagevec_pool = mempool_create_kmalloc_pool(10, CEPH_MAX_WRITE_SIZE >> PAGE_SHIFT); + ceph_wb_pagevec_pool = mempool_create_kmalloc_pool(10, + (CEPH_MAX_WRITE_SIZE >> PAGE_SHIFT) * sizeof(struct page *)); if (!ceph_wb_pagevec_pool) goto bad_pagevec_pool; diff --git a/fs/erofs/zutil.c b/fs/erofs/zutil.c index b80f612867c2..9b53883e5caf 100644 --- a/fs/erofs/zutil.c +++ b/fs/erofs/zutil.c @@ -38,11 +38,13 @@ void *z_erofs_get_gbuf(unsigned int requiredpages) { struct z_erofs_gbuf *gbuf; + migrate_disable(); gbuf = &z_erofs_gbufpool[z_erofs_gbuf_id()]; spin_lock(&gbuf->lock); /* check if the buffer is too small */ if (requiredpages > gbuf->nrpages) { spin_unlock(&gbuf->lock); + migrate_enable(); /* (for sparse checker) pretend gbuf->lock is still taken */ __acquire(gbuf->lock); return NULL; @@ -57,6 +59,7 @@ void z_erofs_put_gbuf(void *ptr) __releases(gbuf->lock) gbuf = &z_erofs_gbufpool[z_erofs_gbuf_id()]; DBG_BUGON(gbuf->ptr != ptr); 
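z_erofs_get_gbuf() above brackets the per-CPU buffer lookup with migrate_disable() so the task cannot be moved to another CPU (and hence to another buffer) between computing the buffer index and taking that buffer's spinlock; the matching z_erofs_put_gbuf(), which continues just below, releases the lock first and only then re-enables migration. A rough sketch of that idea with invented names (my_gbuf, my_get_gbuf), not the erofs code itself:

	#include <linux/bug.h>
	#include <linux/cpumask.h>
	#include <linux/init.h>
	#include <linux/percpu.h>
	#include <linux/preempt.h>
	#include <linux/spinlock.h>

	/* One scratch buffer per CPU; allocating ->ptr is left out here. */
	struct my_gbuf {
		spinlock_t lock;
		void *ptr;
	};

	static DEFINE_PER_CPU(struct my_gbuf, my_gbufs);

	static void *my_get_gbuf(void)
	{
		struct my_gbuf *gbuf;

		migrate_disable();		/* CPU, and thus buffer, is now fixed */
		gbuf = this_cpu_ptr(&my_gbufs);
		spin_lock(&gbuf->lock);
		return gbuf->ptr;
	}

	static void my_put_gbuf(void *ptr)
	{
		struct my_gbuf *gbuf = this_cpu_ptr(&my_gbufs);

		WARN_ON(gbuf->ptr != ptr);
		spin_unlock(&gbuf->lock);
		migrate_enable();		/* pairs with my_get_gbuf() */
	}

	static int __init my_gbuf_init(void)
	{
		int cpu;

		for_each_possible_cpu(cpu)
			spin_lock_init(&per_cpu(my_gbufs, cpu).lock);
		return 0;
	}

Unlike preempt_disable(), migrate_disable() keeps the section preemptible, which is what makes this pattern compatible with PREEMPT_RT, where spinlock_t is a sleeping lock.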
spin_unlock(&gbuf->lock); + migrate_enable(); } int z_erofs_gbuf_growsize(unsigned int nrpages) diff --git a/fs/exfat/dir.c b/fs/exfat/dir.c index 84572e11cc05..7446bf09a04a 100644 --- a/fs/exfat/dir.c +++ b/fs/exfat/dir.c @@ -813,7 +813,7 @@ static int __exfat_get_dentry_set(struct exfat_entry_set_cache *es, num_bh = EXFAT_B_TO_BLK_ROUND_UP(off + num_entries * DENTRY_SIZE, sb); if (num_bh > ARRAY_SIZE(es->__bh)) { - es->bh = kmalloc_array(num_bh, sizeof(*es->bh), GFP_KERNEL); + es->bh = kmalloc_array(num_bh, sizeof(*es->bh), GFP_NOFS); if (!es->bh) { brelse(bh); return -ENOMEM; diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c index 1bfd6ab11038..b8cfab8f98b9 100644 --- a/fs/ext2/balloc.c +++ b/fs/ext2/balloc.c @@ -77,26 +77,33 @@ static int ext2_valid_block_bitmap(struct super_block *sb, ext2_grpblk_t next_zero_bit; ext2_fsblk_t bitmap_blk; ext2_fsblk_t group_first_block; + ext2_grpblk_t max_bit; group_first_block = ext2_group_first_block_no(sb, block_group); + max_bit = ext2_group_last_block_no(sb, block_group) - group_first_block; /* check whether block bitmap block number is set */ bitmap_blk = le32_to_cpu(desc->bg_block_bitmap); offset = bitmap_blk - group_first_block; - if (!ext2_test_bit(offset, bh->b_data)) + if (offset < 0 || offset > max_bit || + !ext2_test_bit(offset, bh->b_data)) /* bad block bitmap */ goto err_out; /* check whether the inode bitmap block number is set */ bitmap_blk = le32_to_cpu(desc->bg_inode_bitmap); offset = bitmap_blk - group_first_block; - if (!ext2_test_bit(offset, bh->b_data)) + if (offset < 0 || offset > max_bit || + !ext2_test_bit(offset, bh->b_data)) /* bad block bitmap */ goto err_out; /* check whether the inode table block number is set */ bitmap_blk = le32_to_cpu(desc->bg_inode_table); offset = bitmap_blk - group_first_block; + if (offset < 0 || offset > max_bit || + offset + EXT2_SB(sb)->s_itb_per_group - 1 > max_bit) + goto err_out; next_zero_bit = ext2_find_next_zero_bit(bh->b_data, offset + EXT2_SB(sb)->s_itb_per_group, offset); diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c index 4a00e2f019d9..3a53dbb85e15 100644 --- a/fs/ext4/extents_status.c +++ b/fs/ext4/extents_status.c @@ -310,6 +310,8 @@ void ext4_es_find_extent_range(struct inode *inode, ext4_lblk_t lblk, ext4_lblk_t end, struct extent_status *es) { + es->es_lblk = es->es_len = es->es_pblk = 0; + if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY) return; diff --git a/fs/ext4/fast_commit.c b/fs/ext4/fast_commit.c index 87c009e0c59a..d3a67bc06d10 100644 --- a/fs/ext4/fast_commit.c +++ b/fs/ext4/fast_commit.c @@ -649,6 +649,12 @@ void ext4_fc_track_range(handle_t *handle, struct inode *inode, ext4_lblk_t star if (ext4_test_mount_flag(inode->i_sb, EXT4_MF_FC_INELIGIBLE)) return; + if (ext4_has_inline_data(inode)) { + ext4_fc_mark_ineligible(inode->i_sb, EXT4_FC_REASON_XATTR, + handle); + return; + } + args.start = start; args.end = end; diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c index a630b27a4cc6..1311ad0464b2 100644 --- a/fs/ext4/namei.c +++ b/fs/ext4/namei.c @@ -151,10 +151,11 @@ static struct buffer_head *__ext4_read_dirblock(struct inode *inode, return bh; } - if (!bh && (type == INDEX || type == DIRENT_HTREE)) { + /* The first directory block must not be a hole. */ + if (!bh && (type == INDEX || type == DIRENT_HTREE || block == 0)) { ext4_error_inode(inode, func, line, block, - "Directory hole found for htree %s block", - (type == INDEX) ? "index" : "leaf"); + "Directory hole found for htree %s block %u", + (type == INDEX) ? 
"index" : "leaf", block); return ERR_PTR(-EFSCORRUPTED); } if (!bh) @@ -2217,6 +2218,52 @@ static int add_dirent_to_buf(handle_t *handle, struct ext4_filename *fname, return err ? err : err2; } +static bool ext4_check_dx_root(struct inode *dir, struct dx_root *root) +{ + struct fake_dirent *fde; + const char *error_msg; + unsigned int rlen; + unsigned int blocksize = dir->i_sb->s_blocksize; + char *blockend = (char *)root + dir->i_sb->s_blocksize; + + fde = &root->dot; + if (unlikely(fde->name_len != 1)) { + error_msg = "invalid name_len for '.'"; + goto corrupted; + } + if (unlikely(strncmp(root->dot_name, ".", fde->name_len))) { + error_msg = "invalid name for '.'"; + goto corrupted; + } + rlen = ext4_rec_len_from_disk(fde->rec_len, blocksize); + if (unlikely((char *)fde + rlen >= blockend)) { + error_msg = "invalid rec_len for '.'"; + goto corrupted; + } + + fde = &root->dotdot; + if (unlikely(fde->name_len != 2)) { + error_msg = "invalid name_len for '..'"; + goto corrupted; + } + if (unlikely(strncmp(root->dotdot_name, "..", fde->name_len))) { + error_msg = "invalid name for '..'"; + goto corrupted; + } + rlen = ext4_rec_len_from_disk(fde->rec_len, blocksize); + if (unlikely((char *)fde + rlen >= blockend)) { + error_msg = "invalid rec_len for '..'"; + goto corrupted; + } + + return true; + +corrupted: + EXT4_ERROR_INODE(dir, "Corrupt dir, %s, running e2fsck is recommended", + error_msg); + return false; +} + /* * This converts a one block unindexed directory to a 3 block indexed * directory, and adds the dentry to the indexed directory. @@ -2251,17 +2298,17 @@ static int make_indexed_dir(handle_t *handle, struct ext4_filename *fname, brelse(bh); return retval; } + root = (struct dx_root *) bh->b_data; + if (!ext4_check_dx_root(dir, root)) { + brelse(bh); + return -EFSCORRUPTED; + } /* The 0th block becomes the root, move the dirents out */ fde = &root->dotdot; de = (struct ext4_dir_entry_2 *)((char *)fde + ext4_rec_len_from_disk(fde->rec_len, blocksize)); - if ((char *) de >= (((char *) root) + blocksize)) { - EXT4_ERROR_INODE(dir, "invalid rec_len for '..'"); - brelse(bh); - return -EFSCORRUPTED; - } len = ((char *) root) + (blocksize - csum_size) - (char *) de; /* Allocate new block for the 0th block's dirents */ @@ -3083,10 +3130,7 @@ bool ext4_empty_dir(struct inode *inode) EXT4_ERROR_INODE(inode, "invalid size"); return false; } - /* The first directory block must not be a hole, - * so treat it as DIRENT_HTREE - */ - bh = ext4_read_dirblock(inode, 0, DIRENT_HTREE); + bh = ext4_read_dirblock(inode, 0, EITHER); if (IS_ERR(bh)) return false; @@ -3531,10 +3575,7 @@ static struct buffer_head *ext4_get_first_dir_block(handle_t *handle, struct ext4_dir_entry_2 *de; unsigned int offset; - /* The first directory block must not be a hole, so - * treat it as DIRENT_HTREE - */ - bh = ext4_read_dirblock(inode, 0, DIRENT_HTREE); + bh = ext4_read_dirblock(inode, 0, EITHER); if (IS_ERR(bh)) { *retval = PTR_ERR(bh); return NULL; diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c index 6460879b9fcb..46ce2f21fef9 100644 --- a/fs/ext4/xattr.c +++ b/fs/ext4/xattr.c @@ -1433,6 +1433,12 @@ static int ext4_xattr_inode_write(handle_t *handle, struct inode *ea_inode, goto out; memcpy(bh->b_data, buf, csize); + /* + * Zero out block tail to avoid writing uninitialized memory + * to disk. 
+ */ + if (csize < blocksize) + memset(bh->b_data + csize, 0, blocksize - csize); set_buffer_uptodate(bh); ext4_handle_dirty_metadata(handle, ea_inode, bh); diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c index 55d444bec5c0..b52d10b457fe 100644 --- a/fs/f2fs/checkpoint.c +++ b/fs/f2fs/checkpoint.c @@ -1186,6 +1186,11 @@ static void __prepare_cp_block(struct f2fs_sb_info *sbi) ckpt->valid_node_count = cpu_to_le32(valid_node_count(sbi)); ckpt->valid_inode_count = cpu_to_le32(valid_inode_count(sbi)); ckpt->next_free_nid = cpu_to_le32(last_nid); + + /* update user_block_counts */ + sbi->last_valid_block_count = sbi->total_valid_block_count; + percpu_counter_set(&sbi->alloc_valid_block_count, 0); + percpu_counter_set(&sbi->rf_node_block_count, 0); } static bool __need_flush_quota(struct f2fs_sb_info *sbi) @@ -1575,11 +1580,6 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc) start_blk += NR_CURSEG_NODE_TYPE; } - /* update user_block_counts */ - sbi->last_valid_block_count = sbi->total_valid_block_count; - percpu_counter_set(&sbi->alloc_valid_block_count, 0); - percpu_counter_set(&sbi->rf_node_block_count, 0); - /* Here, we have one bio having CP pack except cp pack 2 page */ f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_CP_META_IO); /* Wait for all dirty meta pages to be submitted for IO */ diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index b9b0debc6b3d..467f67cf2b38 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c @@ -925,6 +925,7 @@ int f2fs_merge_page_bio(struct f2fs_io_info *fio) #ifdef CONFIG_BLK_DEV_ZONED static bool is_end_zone_blkaddr(struct f2fs_sb_info *sbi, block_t blkaddr) { + struct block_device *bdev = sbi->sb->s_bdev; int devi = 0; if (f2fs_is_multi_device(sbi)) { @@ -935,8 +936,9 @@ static bool is_end_zone_blkaddr(struct f2fs_sb_info *sbi, block_t blkaddr) return false; } blkaddr -= FDEV(devi).start_blk; + bdev = FDEV(devi).bdev; } - return bdev_is_zoned(FDEV(devi).bdev) && + return bdev_is_zoned(bdev) && f2fs_blkz_is_seq(sbi, devi, blkaddr) && (blkaddr % sbi->blocks_per_blkz == sbi->blocks_per_blkz - 1); } @@ -2601,7 +2603,7 @@ bool f2fs_should_update_outplace(struct inode *inode, struct f2fs_io_info *fio) return true; if (IS_NOQUOTA(inode)) return true; - if (f2fs_is_atomic_file(inode)) + if (f2fs_used_in_atomic_write(inode)) return true; /* rewrite low ratio compress data w/ OPU mode to avoid fragmentation */ if (f2fs_compressed_file(inode) && @@ -2688,7 +2690,7 @@ int f2fs_do_write_data_page(struct f2fs_io_info *fio) } /* wait for GCed page writeback via META_MAPPING */ - if (fio->post_read) + if (fio->meta_gc) f2fs_wait_on_block_writeback(inode, fio->old_blkaddr); /* @@ -2783,7 +2785,7 @@ int f2fs_write_single_data_page(struct page *page, int *submitted, .submitted = 0, .compr_blocks = compr_blocks, .need_lock = compr_blocks ? LOCK_DONE : LOCK_RETRY, - .post_read = f2fs_post_read_required(inode) ? 1 : 0, + .meta_gc = f2fs_meta_inode_gc_required(inode) ? 
1 : 0, .io_type = io_type, .io_wbc = wbc, .bio = bio, diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index 1974b6aff397..66680159a296 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -803,6 +803,7 @@ enum { FI_COW_FILE, /* indicate COW file */ FI_ATOMIC_COMMITTED, /* indicate atomic commit completed except disk sync */ FI_ATOMIC_REPLACE, /* indicate atomic replace */ + FI_OPENED_FILE, /* indicate file has been opened */ FI_MAX, /* max flag, never be used */ }; @@ -842,7 +843,11 @@ struct f2fs_inode_info { struct task_struct *atomic_write_task; /* store atomic write task */ struct extent_tree *extent_tree[NR_EXTENT_CACHES]; /* cached extent_tree entry */ - struct inode *cow_inode; /* copy-on-write inode for atomic write */ + union { + struct inode *cow_inode; /* copy-on-write inode for atomic write */ + struct inode *atomic_inode; + /* point to atomic_inode, available only for cow_inode */ + }; /* avoid racing between foreground op and gc */ struct f2fs_rwsem i_gc_rwsem[2]; @@ -1210,7 +1215,7 @@ struct f2fs_io_info { unsigned int in_list:1; /* indicate fio is in io_list */ unsigned int is_por:1; /* indicate IO is from recovery or not */ unsigned int encrypted:1; /* indicate file is encrypted */ - unsigned int post_read:1; /* require post read */ + unsigned int meta_gc:1; /* require meta inode GC */ enum iostat_type io_type; /* io type */ struct writeback_control *io_wbc; /* writeback control */ struct bio **bio; /* bio for ipu */ @@ -4261,6 +4266,16 @@ static inline bool f2fs_post_read_required(struct inode *inode) f2fs_compressed_file(inode); } +static inline bool f2fs_used_in_atomic_write(struct inode *inode) +{ + return f2fs_is_atomic_file(inode) || f2fs_is_cow_file(inode); +} + +static inline bool f2fs_meta_inode_gc_required(struct inode *inode) +{ + return f2fs_post_read_required(inode) || f2fs_used_in_atomic_write(inode); +} + /* * compress.c */ diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c index 5c0b281a70f3..387ce167dda1 100644 --- a/fs/f2fs/file.c +++ b/fs/f2fs/file.c @@ -554,6 +554,42 @@ static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma) return 0; } +static int finish_preallocate_blocks(struct inode *inode) +{ + int ret; + + inode_lock(inode); + if (is_inode_flag_set(inode, FI_OPENED_FILE)) { + inode_unlock(inode); + return 0; + } + + if (!file_should_truncate(inode)) { + set_inode_flag(inode, FI_OPENED_FILE); + inode_unlock(inode); + return 0; + } + + f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); + filemap_invalidate_lock(inode->i_mapping); + + truncate_setsize(inode, i_size_read(inode)); + ret = f2fs_truncate(inode); + + filemap_invalidate_unlock(inode->i_mapping); + f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); + + if (!ret) + set_inode_flag(inode, FI_OPENED_FILE); + + inode_unlock(inode); + if (ret) + return ret; + + file_dont_truncate(inode); + return 0; +} + static int f2fs_file_open(struct inode *inode, struct file *filp) { int err = fscrypt_file_open(inode, filp); @@ -571,7 +607,11 @@ static int f2fs_file_open(struct inode *inode, struct file *filp) filp->f_mode |= FMODE_NOWAIT; filp->f_mode |= FMODE_CAN_ODIRECT; - return dquot_file_open(inode, filp); + err = dquot_file_open(inode, filp); + if (err) + return err; + + return finish_preallocate_blocks(inode); } void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count) @@ -825,6 +865,8 @@ static bool f2fs_force_buffered_io(struct inode *inode, int rw) return true; if (f2fs_compressed_file(inode)) return true; + if (f2fs_has_inline_data(inode)) + return true; /* disallow direct 
IO if any of devices has unaligned blksize */ if (f2fs_is_multi_device(sbi) && !sbi->aligned_blksize) @@ -2141,6 +2183,9 @@ static int f2fs_ioc_start_atomic_write(struct file *filp, bool truncate) set_inode_flag(fi->cow_inode, FI_COW_FILE); clear_inode_flag(fi->cow_inode, FI_INLINE_DATA); + + /* Set the COW inode's atomic_inode to the atomic inode */ + F2FS_I(fi->cow_inode)->atomic_inode = inode; } else { /* Reuse the already created COW inode */ ret = f2fs_do_truncate_blocks(fi->cow_inode, 0, true); diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c index 6066c6eecf41..b2951cd930d8 100644 --- a/fs/f2fs/gc.c +++ b/fs/f2fs/gc.c @@ -1171,7 +1171,8 @@ static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum, static int ra_data_block(struct inode *inode, pgoff_t index) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); - struct address_space *mapping = inode->i_mapping; + struct address_space *mapping = f2fs_is_cow_file(inode) ? + F2FS_I(inode)->atomic_inode->i_mapping : inode->i_mapping; struct dnode_of_data dn; struct page *page; struct f2fs_io_info fio = { @@ -1260,6 +1261,8 @@ static int ra_data_block(struct inode *inode, pgoff_t index) static int move_data_block(struct inode *inode, block_t bidx, int gc_type, unsigned int segno, int off) { + struct address_space *mapping = f2fs_is_cow_file(inode) ? + F2FS_I(inode)->atomic_inode->i_mapping : inode->i_mapping; struct f2fs_io_info fio = { .sbi = F2FS_I_SB(inode), .ino = inode->i_ino, @@ -1282,7 +1285,7 @@ static int move_data_block(struct inode *inode, block_t bidx, CURSEG_ALL_DATA_ATGC : CURSEG_COLD_DATA; /* do not read out */ - page = f2fs_grab_cache_page(inode->i_mapping, bidx, false); + page = f2fs_grab_cache_page(mapping, bidx, false); if (!page) return -ENOMEM; @@ -1579,7 +1582,7 @@ static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum, start_bidx = f2fs_start_bidx_of_node(nofs, inode) + ofs_in_node; - if (f2fs_post_read_required(inode)) { + if (f2fs_meta_inode_gc_required(inode)) { int err = ra_data_block(inode, start_bidx); f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); @@ -1630,7 +1633,7 @@ static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum, start_bidx = f2fs_start_bidx_of_node(nofs, inode) + ofs_in_node; - if (f2fs_post_read_required(inode)) + if (f2fs_meta_inode_gc_required(inode)) err = move_data_block(inode, start_bidx, gc_type, segno, off); else @@ -1638,7 +1641,7 @@ static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum, segno, off); if (!err && (gc_type == FG_GC || - f2fs_post_read_required(inode))) + f2fs_meta_inode_gc_required(inode))) submitted++; if (locked) { diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c index 7638d0d7b7ee..215daa71dc18 100644 --- a/fs/f2fs/inline.c +++ b/fs/f2fs/inline.c @@ -16,7 +16,7 @@ static bool support_inline_data(struct inode *inode) { - if (f2fs_is_atomic_file(inode)) + if (f2fs_used_in_atomic_write(inode)) return false; if (!S_ISREG(inode->i_mode) && !S_ISLNK(inode->i_mode)) return false; @@ -203,8 +203,10 @@ int f2fs_convert_inline_inode(struct inode *inode) struct page *ipage, *page; int err = 0; - if (!f2fs_has_inline_data(inode) || - f2fs_hw_is_readonly(sbi) || f2fs_readonly(sbi->sb)) + if (f2fs_hw_is_readonly(sbi) || f2fs_readonly(sbi->sb)) + return -EROFS; + + if (!f2fs_has_inline_data(inode)) return 0; err = f2fs_dquot_initialize(inode); diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c index 005dde72aff3..c6b55aedc276 100644 --- a/fs/f2fs/inode.c +++ b/fs/f2fs/inode.c @@ -29,6 +29,9 @@ void 
f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync) if (is_inode_flag_set(inode, FI_NEW_INODE)) return; + if (f2fs_readonly(F2FS_I_SB(inode)->sb)) + return; + if (f2fs_inode_dirtied(inode, sync)) return; @@ -610,14 +613,6 @@ struct inode *f2fs_iget(struct super_block *sb, unsigned long ino) } f2fs_set_inode_flags(inode); - if (file_should_truncate(inode) && - !is_sbi_flag_set(sbi, SBI_POR_DOING)) { - ret = f2fs_truncate(inode); - if (ret) - goto bad_inode; - file_dont_truncate(inode); - } - unlock_new_inode(inode); trace_f2fs_iget(inode); return inode; @@ -813,8 +808,9 @@ void f2fs_evict_inode(struct inode *inode) f2fs_abort_atomic_write(inode, true); - if (fi->cow_inode) { + if (fi->cow_inode && f2fs_is_cow_file(fi->cow_inode)) { clear_inode_flag(fi->cow_inode, FI_COW_FILE); + F2FS_I(fi->cow_inode)->atomic_inode = NULL; iput(fi->cow_inode); fi->cow_inode = NULL; } diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c index a0ce3d080f80..259e235becc5 100644 --- a/fs/f2fs/segment.c +++ b/fs/f2fs/segment.c @@ -3828,7 +3828,7 @@ int f2fs_inplace_write_data(struct f2fs_io_info *fio) goto drop_bio; } - if (fio->post_read) + if (fio->meta_gc) f2fs_truncate_meta_inode_pages(sbi, fio->new_blkaddr, 1); stat_inc_inplace_blocks(fio->sbi); @@ -3998,7 +3998,7 @@ void f2fs_wait_on_block_writeback(struct inode *inode, block_t blkaddr) struct f2fs_sb_info *sbi = F2FS_I_SB(inode); struct page *cpage; - if (!f2fs_post_read_required(inode)) + if (!f2fs_meta_inode_gc_required(inode)) return; if (!__is_valid_data_blkaddr(blkaddr)) @@ -4017,7 +4017,7 @@ void f2fs_wait_on_block_writeback_range(struct inode *inode, block_t blkaddr, struct f2fs_sb_info *sbi = F2FS_I_SB(inode); block_t i; - if (!f2fs_post_read_required(inode)) + if (!f2fs_meta_inode_gc_required(inode)) return; for (i = 0; i < len; i++) diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h index e1c0f418aa11..bfc01a521cb9 100644 --- a/fs/f2fs/segment.h +++ b/fs/f2fs/segment.h @@ -347,7 +347,8 @@ static inline unsigned int get_ckpt_valid_blocks(struct f2fs_sb_info *sbi, unsigned int segno, bool use_section) { if (use_section && __is_large_section(sbi)) { - unsigned int start_segno = START_SEGNO(segno); + unsigned int secno = GET_SEC_FROM_SEG(sbi, segno); + unsigned int start_segno = GET_SEG_FROM_SEC(sbi, secno); unsigned int blocks = 0; int i; diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c index 99e44ea7d875..32fe6fa72f46 100644 --- a/fs/fuse/inode.c +++ b/fs/fuse/inode.c @@ -755,6 +755,8 @@ static int fuse_parse_param(struct fs_context *fsc, struct fs_parameter *param) struct fs_parse_result result; struct fuse_fs_context *ctx = fsc->fs_private; int opt; + kuid_t kuid; + kgid_t kgid; if (fsc->purpose == FS_CONTEXT_FOR_RECONFIGURE) { /* @@ -799,16 +801,30 @@ static int fuse_parse_param(struct fs_context *fsc, struct fs_parameter *param) break; case OPT_USER_ID: - ctx->user_id = make_kuid(fsc->user_ns, result.uint_32); - if (!uid_valid(ctx->user_id)) + kuid = make_kuid(fsc->user_ns, result.uint_32); + if (!uid_valid(kuid)) return invalfc(fsc, "Invalid user_id"); + /* + * The requested uid must be representable in the + * filesystem's idmapping. 
+ */ + if (!kuid_has_mapping(fsc->user_ns, kuid)) + return invalfc(fsc, "Invalid user_id"); + ctx->user_id = kuid; ctx->user_id_present = true; break; case OPT_GROUP_ID: - ctx->group_id = make_kgid(fsc->user_ns, result.uint_32); - if (!gid_valid(ctx->group_id)) + kgid = make_kgid(fsc->user_ns, result.uint_32);; + if (!gid_valid(kgid)) + return invalfc(fsc, "Invalid group_id"); + /* + * The requested gid must be representable in the + * filesystem's idmapping. + */ + if (!kgid_has_mapping(fsc->user_ns, kgid)) return invalfc(fsc, "Invalid group_id"); + ctx->group_id = kgid; ctx->group_id_present = true; break; diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c index 8c34798a0715..744e10b46904 100644 --- a/fs/hfs/inode.c +++ b/fs/hfs/inode.c @@ -200,6 +200,7 @@ struct inode *hfs_new_inode(struct inode *dir, const struct qstr *name, umode_t HFS_I(inode)->flags = 0; HFS_I(inode)->rsrc_inode = NULL; HFS_I(inode)->fs_blocks = 0; + HFS_I(inode)->tz_secondswest = sys_tz.tz_minuteswest * 60; if (S_ISDIR(mode)) { inode->i_size = 2; HFS_SB(sb)->folder_count++; @@ -275,6 +276,8 @@ void hfs_inode_read_fork(struct inode *inode, struct hfs_extent *ext, for (count = 0, i = 0; i < 3; i++) count += be16_to_cpu(ext[i].count); HFS_I(inode)->first_blocks = count; + HFS_I(inode)->cached_start = 0; + HFS_I(inode)->cached_blocks = 0; inode->i_size = HFS_I(inode)->phys_size = log_size; HFS_I(inode)->fs_blocks = (log_size + sb->s_blocksize - 1) >> sb->s_blocksize_bits; diff --git a/fs/hfsplus/bfind.c b/fs/hfsplus/bfind.c index ca2ba8c9f82e..901e83d65d20 100644 --- a/fs/hfsplus/bfind.c +++ b/fs/hfsplus/bfind.c @@ -25,19 +25,8 @@ int hfs_find_init(struct hfs_btree *tree, struct hfs_find_data *fd) fd->key = ptr + tree->max_key_len + 2; hfs_dbg(BNODE_REFS, "find_init: %d (%p)\n", tree->cnid, __builtin_return_address(0)); - switch (tree->cnid) { - case HFSPLUS_CAT_CNID: - mutex_lock_nested(&tree->tree_lock, CATALOG_BTREE_MUTEX); - break; - case HFSPLUS_EXT_CNID: - mutex_lock_nested(&tree->tree_lock, EXTENTS_BTREE_MUTEX); - break; - case HFSPLUS_ATTR_CNID: - mutex_lock_nested(&tree->tree_lock, ATTR_BTREE_MUTEX); - break; - default: - BUG(); - } + mutex_lock_nested(&tree->tree_lock, + hfsplus_btree_lock_class(tree)); return 0; } diff --git a/fs/hfsplus/extents.c b/fs/hfsplus/extents.c index 3c572e44f2ad..9c51867dddc5 100644 --- a/fs/hfsplus/extents.c +++ b/fs/hfsplus/extents.c @@ -430,7 +430,8 @@ int hfsplus_free_fork(struct super_block *sb, u32 cnid, hfsplus_free_extents(sb, ext_entry, total_blocks - start, total_blocks); total_blocks = start; - mutex_lock(&fd.tree->tree_lock); + mutex_lock_nested(&fd.tree->tree_lock, + hfsplus_btree_lock_class(fd.tree)); } while (total_blocks > blocks); hfs_find_exit(&fd); @@ -592,7 +593,8 @@ void hfsplus_file_truncate(struct inode *inode) alloc_cnt, alloc_cnt - blk_cnt); hfsplus_dump_extent(hip->first_extents); hip->first_blocks = blk_cnt; - mutex_lock(&fd.tree->tree_lock); + mutex_lock_nested(&fd.tree->tree_lock, + hfsplus_btree_lock_class(fd.tree)); break; } res = __hfsplus_ext_cache_extent(&fd, inode, alloc_cnt); @@ -606,7 +608,8 @@ void hfsplus_file_truncate(struct inode *inode) hfsplus_free_extents(sb, hip->cached_extents, alloc_cnt - start, alloc_cnt - blk_cnt); hfsplus_dump_extent(hip->cached_extents); - mutex_lock(&fd.tree->tree_lock); + mutex_lock_nested(&fd.tree->tree_lock, + hfsplus_btree_lock_class(fd.tree)); if (blk_cnt > start) { hip->extent_state |= HFSPLUS_EXT_DIRTY; break; diff --git a/fs/hfsplus/hfsplus_fs.h b/fs/hfsplus/hfsplus_fs.h index 012a3d003fbe..9e78f181c24f 100644 
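The hfsplus hunks above replace per-call-site switch statements with mutex_lock_nested(&fd.tree->tree_lock, hfsplus_btree_lock_class(fd.tree)); the helper itself is added to hfsplus_fs.h just below. The subclass argument is purely a lockdep annotation: the catalog, extents and attributes B-trees share one mutex class, so without distinct subclasses nesting one tree's lock inside another's would be reported as recursive locking. A compact sketch of the pattern with invented names and CNID values (my_tree, my_tree_lock_class), not the hfsplus code:

	#include <linux/mutex.h>
	#include <linux/types.h>

	/* Distinct lockdep subclasses per on-disk tree; values must stay below 8. */
	enum my_tree_class { MY_CAT_TREE, MY_EXT_TREE, MY_ATTR_TREE };

	struct my_tree {
		u32 cnid;		/* which B-tree this is */
		struct mutex tree_lock;	/* mutex_init()ed when the tree is set up */
	};

	static enum my_tree_class my_tree_lock_class(const struct my_tree *tree)
	{
		switch (tree->cnid) {
		case 3:			/* e.g. an extents tree (made-up mapping) */
			return MY_EXT_TREE;
		case 4:			/* e.g. the catalog tree */
			return MY_CAT_TREE;
		default:
			return MY_ATTR_TREE;
		}
	}

	static void my_tree_lock(struct my_tree *tree)
	{
		/* Same lock class everywhere, distinct subclass per tree, so
		 * lockdep accepts e.g. catalog-then-extents nesting. */
		mutex_lock_nested(&tree->tree_lock, my_tree_lock_class(tree));
	}

With lockdep disabled, mutex_lock_nested() compiles down to a plain mutex_lock(), so runtime locking behaviour is unchanged.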
--- a/fs/hfsplus/hfsplus_fs.h +++ b/fs/hfsplus/hfsplus_fs.h @@ -553,6 +553,27 @@ static inline __be32 __hfsp_ut2mt(time64_t ut) return cpu_to_be32(lower_32_bits(ut) + HFSPLUS_UTC_OFFSET); } +static inline enum hfsplus_btree_mutex_classes +hfsplus_btree_lock_class(struct hfs_btree *tree) +{ + enum hfsplus_btree_mutex_classes class; + + switch (tree->cnid) { + case HFSPLUS_CAT_CNID: + class = CATALOG_BTREE_MUTEX; + break; + case HFSPLUS_EXT_CNID: + class = EXTENTS_BTREE_MUTEX; + break; + case HFSPLUS_ATTR_CNID: + class = ATTR_BTREE_MUTEX; + break; + default: + BUG(); + } + return class; +} + /* compatibility */ #define hfsp_mt2ut(t) (struct timespec64){ .tv_sec = __hfsp_mt2ut(t) } #define hfsp_ut2mt(t) __hfsp_ut2mt((t).tv_sec) diff --git a/fs/hostfs/hostfs.h b/fs/hostfs/hostfs.h index 0239e3af3945..8b39c15c408c 100644 --- a/fs/hostfs/hostfs.h +++ b/fs/hostfs/hostfs.h @@ -63,9 +63,10 @@ struct hostfs_stat { struct hostfs_timespec atime, mtime, ctime; unsigned int blksize; unsigned long long blocks; - unsigned int maj; - unsigned int min; - dev_t dev; + struct { + unsigned int maj; + unsigned int min; + } rdev, dev; }; extern int stat_file(const char *path, struct hostfs_stat *p, int fd); diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c index a73d27c4dd58..2c4d503a62e0 100644 --- a/fs/hostfs/hostfs_kern.c +++ b/fs/hostfs/hostfs_kern.c @@ -530,10 +530,11 @@ static int hostfs_inode_update(struct inode *ino, const struct hostfs_stat *st) static int hostfs_inode_set(struct inode *ino, void *data) { struct hostfs_stat *st = data; - dev_t rdev; + dev_t dev, rdev; /* Reencode maj and min with the kernel encoding.*/ - rdev = MKDEV(st->maj, st->min); + rdev = MKDEV(st->rdev.maj, st->rdev.min); + dev = MKDEV(st->dev.maj, st->dev.min); switch (st->mode & S_IFMT) { case S_IFLNK: @@ -559,7 +560,7 @@ static int hostfs_inode_set(struct inode *ino, void *data) return -EIO; } - HOSTFS_I(ino)->dev = st->dev; + HOSTFS_I(ino)->dev = dev; ino->i_ino = st->ino; ino->i_mode = st->mode; return hostfs_inode_update(ino, st); @@ -568,8 +569,9 @@ static int hostfs_inode_set(struct inode *ino, void *data) static int hostfs_inode_test(struct inode *inode, void *data) { const struct hostfs_stat *st = data; + dev_t dev = MKDEV(st->dev.maj, st->dev.min); - return inode->i_ino == st->ino && HOSTFS_I(inode)->dev == st->dev; + return inode->i_ino == st->ino && HOSTFS_I(inode)->dev == dev; } static struct inode *hostfs_iget(struct super_block *sb, char *name) diff --git a/fs/hostfs/hostfs_user.c b/fs/hostfs/hostfs_user.c index 840619e39a1a..97e9c40a9448 100644 --- a/fs/hostfs/hostfs_user.c +++ b/fs/hostfs/hostfs_user.c @@ -34,9 +34,10 @@ static void stat64_to_hostfs(const struct stat64 *buf, struct hostfs_stat *p) p->mtime.tv_nsec = 0; p->blksize = buf->st_blksize; p->blocks = buf->st_blocks; - p->maj = os_major(buf->st_rdev); - p->min = os_minor(buf->st_rdev); - p->dev = buf->st_dev; + p->rdev.maj = os_major(buf->st_rdev); + p->rdev.min = os_minor(buf->st_rdev); + p->dev.maj = os_major(buf->st_dev); + p->dev.min = os_minor(buf->st_dev); } int stat_file(const char *path, struct hostfs_stat *p, int fd) diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c index 75ea4e9a5cab..e7fc912693bd 100644 --- a/fs/jbd2/commit.c +++ b/fs/jbd2/commit.c @@ -766,7 +766,7 @@ void jbd2_journal_commit_transaction(journal_t *journal) if (first_block < journal->j_tail) freed += journal->j_last - journal->j_first; /* Update tail only if we free significant amount of space */ - if (freed < jbd2_journal_get_max_txn_bufs(journal)) + if (freed < 
journal->j_max_transaction_buffers) update_tail = 0; } J_ASSERT(commit_transaction->t_state == T_COMMIT); diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c index 03c4b9214f56..ae5b544ed0cc 100644 --- a/fs/jbd2/journal.c +++ b/fs/jbd2/journal.c @@ -1451,6 +1451,48 @@ static int journal_revoke_records_per_block(journal_t *journal) return space / record_size; } +static int jbd2_journal_get_max_txn_bufs(journal_t *journal) +{ + return (journal->j_total_len - journal->j_fc_wbufsize) / 4; +} + +/* + * Base amount of descriptor blocks we reserve for each transaction. + */ +static int jbd2_descriptor_blocks_per_trans(journal_t *journal) +{ + int tag_space = journal->j_blocksize - sizeof(journal_header_t); + int tags_per_block; + + /* Subtract UUID */ + tag_space -= 16; + if (jbd2_journal_has_csum_v2or3(journal)) + tag_space -= sizeof(struct jbd2_journal_block_tail); + /* Commit code leaves a slack space of 16 bytes at the end of block */ + tags_per_block = (tag_space - 16) / journal_tag_bytes(journal); + /* + * Revoke descriptors are accounted separately so we need to reserve + * space for commit block and normal transaction descriptor blocks. + */ + return 1 + DIV_ROUND_UP(jbd2_journal_get_max_txn_bufs(journal), + tags_per_block); +} + +/* + * Initialize number of blocks each transaction reserves for its bookkeeping + * and maximum number of blocks a transaction can use. This needs to be called + * after the journal size and the fastcommit area size are initialized. + */ +static void jbd2_journal_init_transaction_limits(journal_t *journal) +{ + journal->j_revoke_records_per_block = + journal_revoke_records_per_block(journal); + journal->j_transaction_overhead_buffers = + jbd2_descriptor_blocks_per_trans(journal); + journal->j_max_transaction_buffers = + jbd2_journal_get_max_txn_bufs(journal); +} + /* * Load the on-disk journal superblock and read the key fields into the * journal_t. @@ -1492,8 +1534,8 @@ static int journal_load_superblock(journal_t *journal) if (jbd2_journal_has_csum_v2or3(journal)) journal->j_csum_seed = jbd2_chksum(journal, ~0, sb->s_uuid, sizeof(sb->s_uuid)); - journal->j_revoke_records_per_block = - journal_revoke_records_per_block(journal); + /* After journal features are set, we can compute transaction limits */ + jbd2_journal_init_transaction_limits(journal); if (jbd2_has_feature_fast_commit(journal)) { journal->j_fc_last = be32_to_cpu(sb->s_maxlen); @@ -1743,8 +1785,6 @@ static int journal_reset(journal_t *journal) journal->j_commit_sequence = journal->j_transaction_sequence - 1; journal->j_commit_request = journal->j_commit_sequence; - journal->j_max_transaction_buffers = jbd2_journal_get_max_txn_bufs(journal); - /* * Now that journal recovery is done, turn fast commits off here. 
This * way, if fast commit was enabled before the crash but if now FS has @@ -2285,8 +2325,6 @@ jbd2_journal_initialize_fast_commit(journal_t *journal) journal->j_fc_first = journal->j_last + 1; journal->j_fc_off = 0; journal->j_free = journal->j_last - journal->j_first; - journal->j_max_transaction_buffers = - jbd2_journal_get_max_txn_bufs(journal); return 0; } @@ -2374,8 +2412,7 @@ int jbd2_journal_set_features(journal_t *journal, unsigned long compat, sb->s_feature_ro_compat |= cpu_to_be32(ro); sb->s_feature_incompat |= cpu_to_be32(incompat); unlock_buffer(journal->j_sb_buffer); - journal->j_revoke_records_per_block = - journal_revoke_records_per_block(journal); + jbd2_journal_init_transaction_limits(journal); return 1; #undef COMPAT_FEATURE_ON @@ -2406,8 +2443,7 @@ void jbd2_journal_clear_features(journal_t *journal, unsigned long compat, sb->s_feature_compat &= ~cpu_to_be32(compat); sb->s_feature_ro_compat &= ~cpu_to_be32(ro); sb->s_feature_incompat &= ~cpu_to_be32(incompat); - journal->j_revoke_records_per_block = - journal_revoke_records_per_block(journal); + jbd2_journal_init_transaction_limits(journal); } EXPORT_SYMBOL(jbd2_journal_clear_features); diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c index cb0b8d6fc0c6..66513c18ca29 100644 --- a/fs/jbd2/transaction.c +++ b/fs/jbd2/transaction.c @@ -62,28 +62,6 @@ void jbd2_journal_free_transaction(transaction_t *transaction) kmem_cache_free(transaction_cache, transaction); } -/* - * Base amount of descriptor blocks we reserve for each transaction. - */ -static int jbd2_descriptor_blocks_per_trans(journal_t *journal) -{ - int tag_space = journal->j_blocksize - sizeof(journal_header_t); - int tags_per_block; - - /* Subtract UUID */ - tag_space -= 16; - if (jbd2_journal_has_csum_v2or3(journal)) - tag_space -= sizeof(struct jbd2_journal_block_tail); - /* Commit code leaves a slack space of 16 bytes at the end of block */ - tags_per_block = (tag_space - 16) / journal_tag_bytes(journal); - /* - * Revoke descriptors are accounted separately so we need to reserve - * space for commit block and normal transaction descriptor blocks. - */ - return 1 + DIV_ROUND_UP(journal->j_max_transaction_buffers, - tags_per_block); -} - /* * jbd2_get_transaction: obtain a new transaction_t object. * @@ -109,7 +87,7 @@ static void jbd2_get_transaction(journal_t *journal, transaction->t_expires = jiffies + journal->j_commit_interval; atomic_set(&transaction->t_updates, 0); atomic_set(&transaction->t_outstanding_credits, - jbd2_descriptor_blocks_per_trans(journal) + + journal->j_transaction_overhead_buffers + atomic_read(&journal->j_reserved_credits)); atomic_set(&transaction->t_outstanding_revokes, 0); atomic_set(&transaction->t_handle_count, 0); @@ -213,6 +191,13 @@ static void sub_reserved_credits(journal_t *journal, int blocks) wake_up(&journal->j_wait_reserved); } +/* Maximum number of blocks for user transaction payload */ +static int jbd2_max_user_trans_buffers(journal_t *journal) +{ + return journal->j_max_transaction_buffers - + journal->j_transaction_overhead_buffers; +} + /* * Wait until we can add credits for handle to the running transaction. Called * with j_state_lock held for reading. Returns 0 if handle joined the running @@ -262,12 +247,12 @@ __must_hold(&journal->j_state_lock) * big to fit this handle? Wait until reserved credits are freed. 
*/ if (atomic_read(&journal->j_reserved_credits) + total > - journal->j_max_transaction_buffers) { + jbd2_max_user_trans_buffers(journal)) { read_unlock(&journal->j_state_lock); jbd2_might_wait_for_commit(journal); wait_event(journal->j_wait_reserved, atomic_read(&journal->j_reserved_credits) + total <= - journal->j_max_transaction_buffers); + jbd2_max_user_trans_buffers(journal)); __acquire(&journal->j_state_lock); /* fake out sparse */ return 1; } @@ -307,14 +292,14 @@ __must_hold(&journal->j_state_lock) needed = atomic_add_return(rsv_blocks, &journal->j_reserved_credits); /* We allow at most half of a transaction to be reserved */ - if (needed > journal->j_max_transaction_buffers / 2) { + if (needed > jbd2_max_user_trans_buffers(journal) / 2) { sub_reserved_credits(journal, rsv_blocks); atomic_sub(total, &t->t_outstanding_credits); read_unlock(&journal->j_state_lock); jbd2_might_wait_for_commit(journal); wait_event(journal->j_wait_reserved, atomic_read(&journal->j_reserved_credits) + rsv_blocks - <= journal->j_max_transaction_buffers / 2); + <= jbd2_max_user_trans_buffers(journal) / 2); __acquire(&journal->j_state_lock); /* fake out sparse */ return 1; } @@ -344,12 +329,12 @@ static int start_this_handle(journal_t *journal, handle_t *handle, * size and limit the number of total credits to not exceed maximum * transaction size per operation. */ - if ((rsv_blocks > journal->j_max_transaction_buffers / 2) || - (rsv_blocks + blocks > journal->j_max_transaction_buffers)) { + if (rsv_blocks > jbd2_max_user_trans_buffers(journal) / 2 || + rsv_blocks + blocks > jbd2_max_user_trans_buffers(journal)) { printk(KERN_ERR "JBD2: %s wants too many credits " "credits:%d rsv_credits:%d max:%d\n", current->comm, blocks, rsv_blocks, - journal->j_max_transaction_buffers); + jbd2_max_user_trans_buffers(journal)); WARN_ON(1); return -ENOSPC; } diff --git a/fs/jfs/jfs_imap.c b/fs/jfs/jfs_imap.c index 2ec35889ad24..1407feccbc2d 100644 --- a/fs/jfs/jfs_imap.c +++ b/fs/jfs/jfs_imap.c @@ -290,7 +290,7 @@ int diSync(struct inode *ipimap) int diRead(struct inode *ip) { struct jfs_sb_info *sbi = JFS_SBI(ip->i_sb); - int iagno, ino, extno, rc; + int iagno, ino, extno, rc, agno; struct inode *ipimap; struct dinode *dp; struct iag *iagp; @@ -339,8 +339,11 @@ int diRead(struct inode *ip) /* get the ag for the iag */ agstart = le64_to_cpu(iagp->agstart); + agno = BLKTOAG(agstart, JFS_SBI(ip->i_sb)); release_metapage(mp); + if (agno >= MAXAG || agno < 0) + return -EIO; rel_inode = (ino & (INOSPERPAGE - 1)); pageno = blkno >> sbi->l2nbperpage; diff --git a/fs/netfs/write_issue.c b/fs/netfs/write_issue.c index d7c971df8866..32bc88bee5d1 100644 --- a/fs/netfs/write_issue.c +++ b/fs/netfs/write_issue.c @@ -122,6 +122,7 @@ struct netfs_io_request *netfs_create_write_req(struct address_space *mapping, wreq->io_streams[1].transferred = LONG_MAX; if (fscache_resources_valid(&wreq->cache_resources)) { wreq->io_streams[1].avail = true; + wreq->io_streams[1].active = true; wreq->io_streams[1].prepare_write = wreq->cache_resources.ops->prepare_write_subreq; wreq->io_streams[1].issue_write = wreq->cache_resources.ops->issue_write; } diff --git a/fs/nfs/file.c b/fs/nfs/file.c index 6bd127e6683d..445db17f1b6c 100644 --- a/fs/nfs/file.c +++ b/fs/nfs/file.c @@ -434,7 +434,7 @@ static void nfs_invalidate_folio(struct folio *folio, size_t offset, /* Cancel any unstarted writes on this page */ nfs_wb_folio_cancel(inode, folio); folio_wait_private_2(folio); /* [DEPRECATED] */ - trace_nfs_invalidate_folio(inode, folio); + 
trace_nfs_invalidate_folio(inode, folio_pos(folio) + offset, length); } /* @@ -502,7 +502,8 @@ static int nfs_launder_folio(struct folio *folio) folio_wait_private_2(folio); /* [DEPRECATED] */ ret = nfs_wb_folio(inode, folio); - trace_nfs_launder_folio_done(inode, folio, ret); + trace_nfs_launder_folio_done(inode, folio_pos(folio), + folio_size(folio), ret); return ret; } diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c index 84573df5cf5a..83378f69b35e 100644 --- a/fs/nfs/nfs4client.c +++ b/fs/nfs/nfs4client.c @@ -231,9 +231,8 @@ struct nfs_client *nfs4_alloc_client(const struct nfs_client_initdata *cl_init) __set_bit(NFS_CS_INFINITE_SLOTS, &clp->cl_flags); __set_bit(NFS_CS_DISCRTRY, &clp->cl_flags); __set_bit(NFS_CS_NO_RETRANS_TIMEOUT, &clp->cl_flags); - - if (test_bit(NFS_CS_DS, &cl_init->init_flags)) - __set_bit(NFS_CS_DS, &clp->cl_flags); + if (test_bit(NFS_CS_PNFS, &cl_init->init_flags)) + __set_bit(NFS_CS_PNFS, &clp->cl_flags); /* * Set up the connection to the server before we add add to the * global list. @@ -1013,7 +1012,6 @@ struct nfs_client *nfs4_set_ds_client(struct nfs_server *mds_srv, if (mds_srv->flags & NFS_MOUNT_NORESVPORT) __set_bit(NFS_CS_NORESVPORT, &cl_init.init_flags); - __set_bit(NFS_CS_DS, &cl_init.init_flags); __set_bit(NFS_CS_PNFS, &cl_init.init_flags); cl_init.max_connect = NFS_MAX_TRANSPORTS; /* diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index a691fa10b3e9..bff9d6600741 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -8840,7 +8840,7 @@ nfs4_run_exchange_id(struct nfs_client *clp, const struct cred *cred, #ifdef CONFIG_NFS_V4_1_MIGRATION calldata->args.flags |= EXCHGID4_FLAG_SUPP_MOVED_MIGR; #endif - if (test_bit(NFS_CS_DS, &clp->cl_flags)) + if (test_bit(NFS_CS_PNFS, &clp->cl_flags)) calldata->args.flags |= EXCHGID4_FLAG_USE_PNFS_DS; msg.rpc_argp = &calldata->args; msg.rpc_resp = &calldata->res; diff --git a/fs/nfs/nfstrace.h b/fs/nfs/nfstrace.h index 1e710654af11..352fdaed4075 100644 --- a/fs/nfs/nfstrace.h +++ b/fs/nfs/nfstrace.h @@ -939,10 +939,11 @@ TRACE_EVENT(nfs_sillyrename_unlink, DECLARE_EVENT_CLASS(nfs_folio_event, TP_PROTO( const struct inode *inode, - struct folio *folio + loff_t offset, + size_t count ), - TP_ARGS(inode, folio), + TP_ARGS(inode, offset, count), TP_STRUCT__entry( __field(dev_t, dev) @@ -950,7 +951,7 @@ DECLARE_EVENT_CLASS(nfs_folio_event, __field(u64, fileid) __field(u64, version) __field(loff_t, offset) - __field(u32, count) + __field(size_t, count) ), TP_fast_assign( @@ -960,13 +961,13 @@ DECLARE_EVENT_CLASS(nfs_folio_event, __entry->fileid = nfsi->fileid; __entry->fhandle = nfs_fhandle_hash(&nfsi->fh); __entry->version = inode_peek_iversion_raw(inode); - __entry->offset = folio_file_pos(folio); - __entry->count = nfs_folio_length(folio); + __entry->offset = offset, + __entry->count = count; ), TP_printk( "fileid=%02x:%02x:%llu fhandle=0x%08x version=%llu " - "offset=%lld count=%u", + "offset=%lld count=%zu", MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long long)__entry->fileid, __entry->fhandle, __entry->version, @@ -978,18 +979,20 @@ DECLARE_EVENT_CLASS(nfs_folio_event, DEFINE_EVENT(nfs_folio_event, name, \ TP_PROTO( \ const struct inode *inode, \ - struct folio *folio \ + loff_t offset, \ + size_t count \ ), \ - TP_ARGS(inode, folio)) + TP_ARGS(inode, offset, count)) DECLARE_EVENT_CLASS(nfs_folio_event_done, TP_PROTO( const struct inode *inode, - struct folio *folio, + loff_t offset, + size_t count, int ret ), - TP_ARGS(inode, folio, ret), + TP_ARGS(inode, offset, count, ret), TP_STRUCT__entry( 
__field(dev_t, dev) @@ -998,7 +1001,7 @@ DECLARE_EVENT_CLASS(nfs_folio_event_done, __field(u64, fileid) __field(u64, version) __field(loff_t, offset) - __field(u32, count) + __field(size_t, count) ), TP_fast_assign( @@ -1008,14 +1011,14 @@ DECLARE_EVENT_CLASS(nfs_folio_event_done, __entry->fileid = nfsi->fileid; __entry->fhandle = nfs_fhandle_hash(&nfsi->fh); __entry->version = inode_peek_iversion_raw(inode); - __entry->offset = folio_file_pos(folio); - __entry->count = nfs_folio_length(folio); + __entry->offset = offset, + __entry->count = count, __entry->ret = ret; ), TP_printk( "fileid=%02x:%02x:%llu fhandle=0x%08x version=%llu " - "offset=%lld count=%u ret=%d", + "offset=%lld count=%zu ret=%d", MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long long)__entry->fileid, __entry->fhandle, __entry->version, @@ -1027,10 +1030,11 @@ DECLARE_EVENT_CLASS(nfs_folio_event_done, DEFINE_EVENT(nfs_folio_event_done, name, \ TP_PROTO( \ const struct inode *inode, \ - struct folio *folio, \ + loff_t offset, \ + size_t count, \ int ret \ ), \ - TP_ARGS(inode, folio, ret)) + TP_ARGS(inode, offset, count, ret)) DEFINE_NFS_FOLIO_EVENT(nfs_aop_readpage); DEFINE_NFS_FOLIO_EVENT_DONE(nfs_aop_readpage_done); diff --git a/fs/nfs/read.c b/fs/nfs/read.c index a142287d86f6..88e6a78d37fb 100644 --- a/fs/nfs/read.c +++ b/fs/nfs/read.c @@ -332,13 +332,15 @@ int nfs_read_add_folio(struct nfs_pageio_descriptor *pgio, int nfs_read_folio(struct file *file, struct folio *folio) { struct inode *inode = file_inode(file); + loff_t pos = folio_pos(folio); + size_t len = folio_size(folio); struct nfs_pageio_descriptor pgio; struct nfs_open_context *ctx; int ret; - trace_nfs_aop_readpage(inode, folio); + trace_nfs_aop_readpage(inode, pos, len); nfs_inc_stats(inode, NFSIOS_VFSREADPAGE); - task_io_account_read(folio_size(folio)); + task_io_account_read(len); /* * Try to flush any pending writes to the file.. 
@@ -381,7 +383,7 @@ int nfs_read_folio(struct file *file, struct folio *folio) out_put: put_nfs_open_context(ctx); out: - trace_nfs_aop_readpage_done(inode, folio, ret); + trace_nfs_aop_readpage_done(inode, pos, len, ret); return ret; out_unlock: folio_unlock(folio); diff --git a/fs/nfs/write.c b/fs/nfs/write.c index 2329cbb0e446..75c1b3c7faea 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -2073,17 +2073,17 @@ int nfs_wb_folio_cancel(struct inode *inode, struct folio *folio) */ int nfs_wb_folio(struct inode *inode, struct folio *folio) { - loff_t range_start = folio_file_pos(folio); - loff_t range_end = range_start + (loff_t)folio_size(folio) - 1; + loff_t range_start = folio_pos(folio); + size_t len = folio_size(folio); struct writeback_control wbc = { .sync_mode = WB_SYNC_ALL, .nr_to_write = 0, .range_start = range_start, - .range_end = range_end, + .range_end = range_start + len - 1, }; int ret; - trace_nfs_writeback_folio(inode, folio); + trace_nfs_writeback_folio(inode, range_start, len); for (;;) { folio_wait_writeback(folio); @@ -2101,7 +2101,7 @@ int nfs_wb_folio(struct inode *inode, struct folio *folio) goto out_error; } out_error: - trace_nfs_writeback_folio_done(inode, folio, ret); + trace_nfs_writeback_folio_done(inode, range_start, len, ret); return ret; } diff --git a/fs/nfsd/Kconfig b/fs/nfsd/Kconfig index 272ab8d5c4d7..ec2ab6429e00 100644 --- a/fs/nfsd/Kconfig +++ b/fs/nfsd/Kconfig @@ -162,7 +162,7 @@ config NFSD_V4_SECURITY_LABEL config NFSD_LEGACY_CLIENT_TRACKING bool "Support legacy NFSv4 client tracking methods (DEPRECATED)" depends on NFSD_V4 - default n + default y help The NFSv4 server needs to store a small amount of information on stable storage in order to handle state recovery after reboot. Most diff --git a/fs/nfsd/filecache.c b/fs/nfsd/filecache.c index ad9083ca144b..f4704f5d4086 100644 --- a/fs/nfsd/filecache.c +++ b/fs/nfsd/filecache.c @@ -664,7 +664,7 @@ static int nfsd_file_lease_notifier_call(struct notifier_block *nb, unsigned long arg, void *data) { - struct file_lock *fl = data; + struct file_lease *fl = data; /* Only close files for F_SETLEASE leases */ if (fl->c.flc_flags & FL_LEASE) diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c index 46bd20fe5c0f..2e39cf2e502a 100644 --- a/fs/nfsd/nfs4proc.c +++ b/fs/nfsd/nfs4proc.c @@ -2269,7 +2269,7 @@ nfsd4_layoutget(struct svc_rqst *rqstp, const struct nfsd4_layout_ops *ops; struct nfs4_layout_stateid *ls; __be32 nfserr; - int accmode = NFSD_MAY_READ_IF_EXEC; + int accmode = NFSD_MAY_READ_IF_EXEC | NFSD_MAY_OWNER_OVERRIDE; switch (lgp->lg_seg.iomode) { case IOMODE_READ: @@ -2359,7 +2359,8 @@ nfsd4_layoutcommit(struct svc_rqst *rqstp, struct nfs4_layout_stateid *ls; __be32 nfserr; - nfserr = fh_verify(rqstp, current_fh, 0, NFSD_MAY_WRITE); + nfserr = fh_verify(rqstp, current_fh, 0, + NFSD_MAY_WRITE | NFSD_MAY_OWNER_OVERRIDE); if (nfserr) goto out; diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c index 2c060e0b1604..67d8673a9391 100644 --- a/fs/nfsd/nfs4recover.c +++ b/fs/nfsd/nfs4recover.c @@ -2086,8 +2086,8 @@ nfsd4_client_tracking_init(struct net *net) status = nn->client_tracking_ops->init(net); out: if (status) { - printk(KERN_WARNING "NFSD: Unable to initialize client " - "recovery tracking! (%d)\n", status); + pr_warn("NFSD: Unable to initialize client recovery tracking! (%d)\n", status); + pr_warn("NFSD: Is nfsdcld running? 
If not, enable CONFIG_NFSD_LEGACY_CLIENT_TRACKING.\n"); nn->client_tracking_ops = NULL; } return status; diff --git a/fs/nilfs2/btnode.c b/fs/nilfs2/btnode.c index 0131d83b912d..c034080c334b 100644 --- a/fs/nilfs2/btnode.c +++ b/fs/nilfs2/btnode.c @@ -51,12 +51,21 @@ nilfs_btnode_create_block(struct address_space *btnc, __u64 blocknr) bh = nilfs_grab_buffer(inode, btnc, blocknr, BIT(BH_NILFS_Node)); if (unlikely(!bh)) - return NULL; + return ERR_PTR(-ENOMEM); if (unlikely(buffer_mapped(bh) || buffer_uptodate(bh) || buffer_dirty(bh))) { - brelse(bh); - BUG(); + /* + * The block buffer at the specified new address was already + * in use. This can happen if it is a virtual block number + * and has been reallocated due to corruption of the bitmap + * used to manage its allocation state (if not, the buffer + * clearing of an abandoned b-tree node is missing somewhere). + */ + nilfs_error(inode->i_sb, + "state inconsistency probably due to duplicate use of b-tree node block address %llu (ino=%lu)", + (unsigned long long)blocknr, inode->i_ino); + goto failed; } memset(bh->b_data, 0, i_blocksize(inode)); bh->b_bdev = inode->i_sb->s_bdev; @@ -67,6 +76,12 @@ nilfs_btnode_create_block(struct address_space *btnc, __u64 blocknr) folio_unlock(bh->b_folio); folio_put(bh->b_folio); return bh; + +failed: + folio_unlock(bh->b_folio); + folio_put(bh->b_folio); + brelse(bh); + return ERR_PTR(-EIO); } int nilfs_btnode_submit_block(struct address_space *btnc, __u64 blocknr, @@ -217,8 +232,8 @@ int nilfs_btnode_prepare_change_key(struct address_space *btnc, } nbh = nilfs_btnode_create_block(btnc, newkey); - if (!nbh) - return -ENOMEM; + if (IS_ERR(nbh)) + return PTR_ERR(nbh); BUG_ON(nbh == obh); ctxt->newbh = nbh; diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c index a139970e4804..862bdf23120e 100644 --- a/fs/nilfs2/btree.c +++ b/fs/nilfs2/btree.c @@ -63,8 +63,8 @@ static int nilfs_btree_get_new_block(const struct nilfs_bmap *btree, struct buffer_head *bh; bh = nilfs_btnode_create_block(btnc, ptr); - if (!bh) - return -ENOMEM; + if (IS_ERR(bh)) + return PTR_ERR(bh); set_buffer_nilfs_volatile(bh); *bhp = bh; diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c index 6ea81f1d5094..d02fd92cdb43 100644 --- a/fs/nilfs2/segment.c +++ b/fs/nilfs2/segment.c @@ -136,7 +136,7 @@ static void nilfs_dispose_list(struct the_nilfs *, struct list_head *, int); #define nilfs_cnt32_ge(a, b) \ (typecheck(__u32, a) && typecheck(__u32, b) && \ - ((__s32)(a) - (__s32)(b) >= 0)) + ((__s32)((a) - (b)) >= 0)) static int nilfs_prepare_segment_lock(struct super_block *sb, struct nilfs_transaction_info *ti) diff --git a/fs/ntfs3/attrib.c b/fs/ntfs3/attrib.c index 8e6bcdf99770..a810ef547501 100644 --- a/fs/ntfs3/attrib.c +++ b/fs/ntfs3/attrib.c @@ -231,7 +231,7 @@ int attr_make_nonresident(struct ntfs_inode *ni, struct ATTRIB *attr, struct ntfs_sb_info *sbi; struct ATTRIB *attr_s; struct MFT_REC *rec; - u32 used, asize, rsize, aoff, align; + u32 used, asize, rsize, aoff; bool is_data; CLST len, alen; char *next; @@ -252,10 +252,13 @@ int attr_make_nonresident(struct ntfs_inode *ni, struct ATTRIB *attr, rsize = le32_to_cpu(attr->res.data_size); is_data = attr->type == ATTR_DATA && !attr->name_len; - align = sbi->cluster_size; - if (is_attr_compressed(attr)) - align <<= COMPRESSION_UNIT; - len = (rsize + align - 1) >> sbi->cluster_bits; + /* len - how many clusters required to store 'rsize' bytes */ + if (is_attr_compressed(attr)) { + u8 shift = sbi->cluster_bits + NTFS_LZNT_CUNIT; + len = ((rsize + (1u << shift) - 1) >> shift) << 
NTFS_LZNT_CUNIT; + } else { + len = bytes_to_cluster(sbi, rsize); + } run_init(run); @@ -670,7 +673,8 @@ int attr_set_size(struct ntfs_inode *ni, enum ATTR_TYPE type, goto undo_2; } - if (!is_mft) + /* keep runs for $MFT::$ATTR_DATA and $MFT::$ATTR_BITMAP. */ + if (ni->mi.rno != MFT_REC_MFT) run_truncate_head(run, evcn + 1); svcn = le64_to_cpu(attr->nres.svcn); @@ -972,6 +976,19 @@ int attr_data_get_block(struct ntfs_inode *ni, CLST vcn, CLST clen, CLST *lcn, if (err) goto out; + /* Check for compressed frame. */ + err = attr_is_frame_compressed(ni, attr, vcn >> NTFS_LZNT_CUNIT, &hint); + if (err) + goto out; + + if (hint) { + /* if frame is compressed - don't touch it. */ + *lcn = COMPRESSED_LCN; + *len = hint; + err = -EOPNOTSUPP; + goto out; + } + if (!*len) { if (run_lookup_entry(run, vcn, lcn, len, NULL)) { if (*lcn != SPARSE_LCN || !new) @@ -1722,6 +1739,7 @@ int attr_allocate_frame(struct ntfs_inode *ni, CLST frame, size_t compr_size, attr_b->nres.total_size = cpu_to_le64(total_size); inode_set_bytes(&ni->vfs_inode, total_size); + ni->ni_flags |= NI_FLAG_UPDATE_PARENT; mi_b->dirty = true; mark_inode_dirty(&ni->vfs_inode); diff --git a/fs/ntfs3/bitmap.c b/fs/ntfs3/bitmap.c index c9eb01ccee51..cf4fe21a5039 100644 --- a/fs/ntfs3/bitmap.c +++ b/fs/ntfs3/bitmap.c @@ -1382,7 +1382,7 @@ int wnd_extend(struct wnd_bitmap *wnd, size_t new_bits) err = ntfs_vbo_to_lbo(sbi, &wnd->run, vbo, &lbo, &bytes); if (err) - break; + return err; bh = ntfs_bread(sb, lbo >> sb->s_blocksize_bits); if (!bh) diff --git a/fs/ntfs3/dir.c b/fs/ntfs3/dir.c index 1937e8e612f8..858efe255f6f 100644 --- a/fs/ntfs3/dir.c +++ b/fs/ntfs3/dir.c @@ -326,7 +326,8 @@ static inline int ntfs_filldir(struct ntfs_sb_info *sbi, struct ntfs_inode *ni, * It does additional locks/reads just to get the type of name. * Should we use additional mount option to enable branch below? 
*/ - if ((fname->dup.fa & FILE_ATTRIBUTE_REPARSE_POINT) && + if (((fname->dup.fa & FILE_ATTRIBUTE_REPARSE_POINT) || + fname->dup.ea_size) && ino != ni->mi.rno) { struct inode *inode = ntfs_iget5(sbi->sb, &e->ref, NULL); if (!IS_ERR_OR_NULL(inode)) { diff --git a/fs/ntfs3/file.c b/fs/ntfs3/file.c index 2f903b6ce157..9ae202901f3c 100644 --- a/fs/ntfs3/file.c +++ b/fs/ntfs3/file.c @@ -299,10 +299,7 @@ static int ntfs_file_mmap(struct file *file, struct vm_area_struct *vma) } if (ni->i_valid < to) { - if (!inode_trylock(inode)) { - err = -EAGAIN; - goto out; - } + inode_lock(inode); err = ntfs_extend_initialized_size(file, ni, ni->i_valid, to); inode_unlock(inode); diff --git a/fs/ntfs3/frecord.c b/fs/ntfs3/frecord.c index 0008670939a4..4822cfd6351c 100644 --- a/fs/ntfs3/frecord.c +++ b/fs/ntfs3/frecord.c @@ -1501,7 +1501,7 @@ int ni_insert_nonresident(struct ntfs_inode *ni, enum ATTR_TYPE type, if (is_ext) { if (flags & ATTR_FLAG_COMPRESSED) - attr->nres.c_unit = COMPRESSION_UNIT; + attr->nres.c_unit = NTFS_LZNT_CUNIT; attr->nres.total_size = attr->nres.alloc_size; } diff --git a/fs/ntfs3/fslog.c b/fs/ntfs3/fslog.c index 40926bf392d2..fcb3e49911ad 100644 --- a/fs/ntfs3/fslog.c +++ b/fs/ntfs3/fslog.c @@ -2996,7 +2996,7 @@ static struct ATTRIB *attr_create_nonres_log(struct ntfs_sb_info *sbi, if (is_ext) { attr->name_off = SIZEOF_NONRESIDENT_EX_LE; if (is_attr_compressed(attr)) - attr->nres.c_unit = COMPRESSION_UNIT; + attr->nres.c_unit = NTFS_LZNT_CUNIT; attr->nres.run_off = cpu_to_le16(SIZEOF_NONRESIDENT_EX + name_size); @@ -3922,6 +3922,9 @@ int log_replay(struct ntfs_inode *ni, bool *initialized) goto out; } + log->page_mask = log->page_size - 1; + log->page_bits = blksize_bits(log->page_size); + /* If the file size has shrunk then we won't mount it. */ if (log->l_size < le64_to_cpu(ra2->l_size)) { err = -EINVAL; diff --git a/fs/ntfs3/index.c b/fs/ntfs3/index.c index d0f15bbf78f6..9089c58a005c 100644 --- a/fs/ntfs3/index.c +++ b/fs/ntfs3/index.c @@ -978,7 +978,7 @@ static struct indx_node *indx_new(struct ntfs_index *indx, hdr->used = cpu_to_le32(eo + sizeof(struct NTFS_DE) + sizeof(u64)); de_set_vbn_le(e, *sub_vbn); - hdr->flags = 1; + hdr->flags = NTFS_INDEX_HDR_HAS_SUBNODES; } else { e->size = cpu_to_le16(sizeof(struct NTFS_DE)); hdr->used = cpu_to_le32(eo + sizeof(struct NTFS_DE)); @@ -1683,7 +1683,7 @@ static int indx_insert_into_root(struct ntfs_index *indx, struct ntfs_inode *ni, e->size = cpu_to_le16(sizeof(struct NTFS_DE) + sizeof(u64)); e->flags = NTFS_IE_HAS_SUBNODES | NTFS_IE_LAST; - hdr->flags = 1; + hdr->flags = NTFS_INDEX_HDR_HAS_SUBNODES; hdr->used = hdr->total = cpu_to_le32(new_root_size - offsetof(struct INDEX_ROOT, ihdr)); diff --git a/fs/ntfs3/inode.c b/fs/ntfs3/inode.c index 0f1664db94ad..9559d72f8660 100644 --- a/fs/ntfs3/inode.c +++ b/fs/ntfs3/inode.c @@ -1508,7 +1508,7 @@ int ntfs_create_inode(struct mnt_idmap *idmap, struct inode *dir, attr->size = cpu_to_le32(SIZEOF_NONRESIDENT_EX + 8); attr->name_off = SIZEOF_NONRESIDENT_EX_LE; attr->flags = ATTR_FLAG_COMPRESSED; - attr->nres.c_unit = COMPRESSION_UNIT; + attr->nres.c_unit = NTFS_LZNT_CUNIT; asize = SIZEOF_NONRESIDENT_EX + 8; } else { attr->size = cpu_to_le32(SIZEOF_NONRESIDENT + 8); @@ -1668,7 +1668,9 @@ int ntfs_create_inode(struct mnt_idmap *idmap, struct inode *dir, * The packed size of extended attribute is stored in direntry too. * 'fname' here points to inside new_de. 
*/ - ntfs_save_wsl_perm(inode, &fname->dup.ea_size); + err = ntfs_save_wsl_perm(inode, &fname->dup.ea_size); + if (err) + goto out6; /* * update ea_size in file_name attribute too. @@ -1712,6 +1714,12 @@ int ntfs_create_inode(struct mnt_idmap *idmap, struct inode *dir, goto out2; out6: + attr = ni_find_attr(ni, NULL, NULL, ATTR_EA, NULL, 0, NULL, NULL); + if (attr && attr->non_res) { + /* Delete ATTR_EA, if non-resident. */ + attr_set_size(ni, ATTR_EA, NULL, 0, NULL, 0, NULL, false, NULL); + } + if (rp_inserted) ntfs_remove_reparse(sbi, IO_REPARSE_TAG_SYMLINK, &new_de->ref); @@ -2133,5 +2141,6 @@ const struct address_space_operations ntfs_aops = { const struct address_space_operations ntfs_aops_cmpr = { .read_folio = ntfs_read_folio, .readahead = ntfs_readahead, + .dirty_folio = block_dirty_folio, }; // clang-format on diff --git a/fs/ntfs3/ntfs.h b/fs/ntfs3/ntfs.h index 3d6143c7abc0..e1889ad09230 100644 --- a/fs/ntfs3/ntfs.h +++ b/fs/ntfs3/ntfs.h @@ -82,9 +82,6 @@ typedef u32 CLST; #define RESIDENT_LCN ((CLST)-2) #define COMPRESSED_LCN ((CLST)-3) -#define COMPRESSION_UNIT 4 -#define COMPRESS_MAX_CLUSTER 0x1000 - enum RECORD_NUM { MFT_REC_MFT = 0, MFT_REC_MIRR = 1, @@ -696,14 +693,15 @@ static inline bool de_has_vcn_ex(const struct NTFS_DE *e) offsetof(struct ATTR_FILE_NAME, name) + \ NTFS_NAME_LEN * sizeof(short), 8) +#define NTFS_INDEX_HDR_HAS_SUBNODES cpu_to_le32(1) + struct INDEX_HDR { __le32 de_off; // 0x00: The offset from the start of this structure // to the first NTFS_DE. __le32 used; // 0x04: The size of this structure plus all // entries (quad-word aligned). __le32 total; // 0x08: The allocated size of for this structure plus all entries. - u8 flags; // 0x0C: 0x00 = Small directory, 0x01 = Large directory. - u8 res[3]; + __le32 flags; // 0x0C: 0x00 = Small directory, 0x01 = Large directory. // // de_off + used <= total @@ -751,7 +749,7 @@ static inline struct NTFS_DE *hdr_next_de(const struct INDEX_HDR *hdr, static inline bool hdr_has_subnode(const struct INDEX_HDR *hdr) { - return hdr->flags & 1; + return hdr->flags & NTFS_INDEX_HDR_HAS_SUBNODES; } struct INDEX_BUFFER { @@ -771,7 +769,7 @@ static inline bool ib_is_empty(const struct INDEX_BUFFER *ib) static inline bool ib_is_leaf(const struct INDEX_BUFFER *ib) { - return !(ib->ihdr.flags & 1); + return !(ib->ihdr.flags & NTFS_INDEX_HDR_HAS_SUBNODES); } /* Index root structure ( 0x90 ). 
*/ diff --git a/fs/ntfs3/super.c b/fs/ntfs3/super.c index 27fbde2701b6..02b6f51ce650 100644 --- a/fs/ntfs3/super.c +++ b/fs/ntfs3/super.c @@ -275,7 +275,7 @@ static const struct fs_parameter_spec ntfs_fs_parameters[] = { fsparam_flag_no("acl", Opt_acl), fsparam_string("iocharset", Opt_iocharset), fsparam_flag_no("prealloc", Opt_prealloc), - fsparam_flag_no("nocase", Opt_nocase), + fsparam_flag_no("case", Opt_nocase), {} }; // clang-format on @@ -468,7 +468,7 @@ static int ntfs3_volinfo(struct seq_file *m, void *o) struct super_block *sb = m->private; struct ntfs_sb_info *sbi = sb->s_fs_info; - seq_printf(m, "ntfs%d.%d\n%u\n%zu\n\%zu\n%zu\n%s\n%s\n", + seq_printf(m, "ntfs%d.%d\n%u\n%zu\n%zu\n%zu\n%s\n%s\n", sbi->volume.major_ver, sbi->volume.minor_ver, sbi->cluster_size, sbi->used.bitmap.nbits, sbi->mft.bitmap.nbits, diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c index b1c2c0b82116..dd7b462387a0 100644 --- a/fs/proc/proc_sysctl.c +++ b/fs/proc/proc_sysctl.c @@ -476,12 +476,10 @@ static struct inode *proc_sys_make_inode(struct super_block *sb, make_empty_dir_inode(inode); } + inode->i_uid = GLOBAL_ROOT_UID; + inode->i_gid = GLOBAL_ROOT_GID; if (root->set_ownership) root->set_ownership(head, &inode->i_uid, &inode->i_gid); - else { - inode->i_uid = GLOBAL_ROOT_UID; - inode->i_gid = GLOBAL_ROOT_GID; - } return inode; } diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 71e5039d940d..a45f2da0ada0 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -1418,7 +1418,6 @@ static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm, { u64 frame = 0, flags = 0; struct page *page = NULL; - bool migration = false; if (pte_present(pte)) { if (pm->show_pfn) @@ -1450,7 +1449,6 @@ static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm, (offset << MAX_SWAPFILES_SHIFT); } flags |= PM_SWAP; - migration = is_migration_entry(entry); if (is_pfn_swap_entry(entry)) page = pfn_swap_entry_to_page(entry); if (pte_marker_entry_uffd_wp(entry)) @@ -1459,7 +1457,7 @@ static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm, if (page && !PageAnon(page)) flags |= PM_FILE; - if (page && !migration && page_mapcount(page) == 1) + if (page && (flags & PM_PRESENT) && page_mapcount(page) == 1) flags |= PM_MMAP_EXCLUSIVE; if (vma->vm_flags & VM_SOFTDIRTY) flags |= PM_SOFT_DIRTY; @@ -1476,10 +1474,10 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end, pte_t *pte, *orig_pte; int err = 0; #ifdef CONFIG_TRANSPARENT_HUGEPAGE - bool migration = false; ptl = pmd_trans_huge_lock(pmdp, vma); if (ptl) { + unsigned int idx = (addr & ~PMD_MASK) >> PAGE_SHIFT; u64 flags = 0, frame = 0; pmd_t pmd = *pmdp; struct page *page = NULL; @@ -1496,8 +1494,7 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end, if (pmd_uffd_wp(pmd)) flags |= PM_UFFD_WP; if (pm->show_pfn) - frame = pmd_pfn(pmd) + - ((addr & ~PMD_MASK) >> PAGE_SHIFT); + frame = pmd_pfn(pmd) + idx; } #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION else if (is_swap_pmd(pmd)) { @@ -1506,11 +1503,9 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end, if (pm->show_pfn) { if (is_pfn_swap_entry(entry)) - offset = swp_offset_pfn(entry); + offset = swp_offset_pfn(entry) + idx; else - offset = swp_offset(entry); - offset = offset + - ((addr & ~PMD_MASK) >> PAGE_SHIFT); + offset = swp_offset(entry) + idx; frame = swp_type(entry) | (offset << MAX_SWAPFILES_SHIFT); } @@ -1520,17 +1515,22 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned 
long end, if (pmd_swp_uffd_wp(pmd)) flags |= PM_UFFD_WP; VM_BUG_ON(!is_pmd_migration_entry(pmd)); - migration = is_migration_entry(entry); page = pfn_swap_entry_to_page(entry); } #endif - if (page && !migration && page_mapcount(page) == 1) - flags |= PM_MMAP_EXCLUSIVE; + if (page && !PageAnon(page)) + flags |= PM_FILE; + + for (; addr != end; addr += PAGE_SIZE, idx++) { + unsigned long cur_flags = flags; + pagemap_entry_t pme; - for (; addr != end; addr += PAGE_SIZE) { - pagemap_entry_t pme = make_pme(frame, flags); + if (page && (flags & PM_PRESENT) && + page_mapcount(page + idx) == 1) + cur_flags |= PM_MMAP_EXCLUSIVE; + pme = make_pme(frame, cur_flags); err = add_to_pagemap(&pme, pm); if (err) break; diff --git a/fs/smb/client/cifsfs.c b/fs/smb/client/cifsfs.c index c92937bed133..2c4b357d85e2 100644 --- a/fs/smb/client/cifsfs.c +++ b/fs/smb/client/cifsfs.c @@ -1894,12 +1894,12 @@ init_cifs(void) WQ_FREEZABLE|WQ_MEM_RECLAIM, 0); if (!serverclose_wq) { rc = -ENOMEM; - goto out_destroy_serverclose_wq; + goto out_destroy_deferredclose_wq; } rc = cifs_init_inodecache(); if (rc) - goto out_destroy_deferredclose_wq; + goto out_destroy_serverclose_wq; rc = cifs_init_netfs(); if (rc) @@ -1967,6 +1967,8 @@ init_cifs(void) cifs_destroy_netfs(); out_destroy_inodecache: cifs_destroy_inodecache(); +out_destroy_serverclose_wq: + destroy_workqueue(serverclose_wq); out_destroy_deferredclose_wq: destroy_workqueue(deferredclose_wq); out_destroy_cifsoplockd_wq: @@ -1977,8 +1979,6 @@ init_cifs(void) destroy_workqueue(decrypt_wq); out_destroy_cifsiod_wq: destroy_workqueue(cifsiod_wq); -out_destroy_serverclose_wq: - destroy_workqueue(serverclose_wq); out_clean_proc: cifs_proc_clean(); return rc; diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c index 7a16e12f5da8..d2307162a2de 100644 --- a/fs/smb/client/connect.c +++ b/fs/smb/client/connect.c @@ -2614,6 +2614,13 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb3_fs_context *ctx) cifs_dbg(VFS, "Server does not support mounting with posix SMB3.11 extensions\n"); rc = -EOPNOTSUPP; goto out_fail; + } else if (ses->server->vals->protocol_id == SMB10_PROT_ID) + if (cap_unix(ses)) + cifs_dbg(FYI, "Unix Extensions requested on SMB1 mount\n"); + else { + cifs_dbg(VFS, "SMB1 Unix Extensions not supported by server\n"); + rc = -EOPNOTSUPP; + goto out_fail; } else { cifs_dbg(VFS, "Check vers= mount option. SMB3.11 disabled but required for POSIX extensions\n"); @@ -3686,6 +3693,7 @@ int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx) } #endif +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY /* * Issue a TREE_CONNECT request. */ @@ -3807,11 +3815,25 @@ CIFSTCon(const unsigned int xid, struct cifs_ses *ses, else tcon->Flags = 0; cifs_dbg(FYI, "Tcon flags: 0x%x\n", tcon->Flags); - } + /* + * reset_cifs_unix_caps calls QFSInfo which requires + * need_reconnect to be false, but we would not need to call + * reset_caps if this were not a reconnect case so must check + * need_reconnect flag here. 
The caller will also clear + * need_reconnect when tcon was successful but needed to be + * cleared earlier in the case of unix extensions reconnect + */ + if (tcon->need_reconnect && tcon->unix_ext) { + cifs_dbg(FYI, "resetting caps for %s\n", tcon->tree_name); + tcon->need_reconnect = false; + reset_cifs_unix_caps(xid, tcon, NULL, NULL); + } + } cifs_buf_release(smb_buffer); return rc; } +#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ static void delayed_free(struct rcu_head *p) { diff --git a/fs/super.c b/fs/super.c index 095ba793e10c..38d72a3cf6fc 100644 --- a/fs/super.c +++ b/fs/super.c @@ -736,6 +736,17 @@ struct super_block *sget_fc(struct fs_context *fc, struct user_namespace *user_ns = fc->global ? &init_user_ns : fc->user_ns; int err; + /* + * Never allow s_user_ns != &init_user_ns when FS_USERNS_MOUNT is + * not set, as the filesystem is likely unprepared to handle it. + * This can happen when fsconfig() is called from init_user_ns with + * an fs_fd opened in another user namespace. + */ + if (user_ns != &init_user_ns && !(fc->fs_type->fs_flags & FS_USERNS_MOUNT)) { + errorfc(fc, "VFS: Mounting from non-initial user namespace is not allowed"); + return ERR_PTR(-EPERM); + } + retry: spin_lock(&sb_lock); if (test) { diff --git a/fs/udf/balloc.c b/fs/udf/balloc.c index ab3ffc355949..558ad046972a 100644 --- a/fs/udf/balloc.c +++ b/fs/udf/balloc.c @@ -64,8 +64,12 @@ static int read_block_bitmap(struct super_block *sb, } for (i = 0; i < count; i++) - if (udf_test_bit(i + off, bh->b_data)) + if (udf_test_bit(i + off, bh->b_data)) { + bitmap->s_block_bitmap[bitmap_nr] = + ERR_PTR(-EFSCORRUPTED); + brelse(bh); return -EFSCORRUPTED; + } return 0; } @@ -81,8 +85,15 @@ static int __load_block_bitmap(struct super_block *sb, block_group, nr_groups); } - if (bitmap->s_block_bitmap[block_group]) + if (bitmap->s_block_bitmap[block_group]) { + /* + * The bitmap failed verification in the past. No point in + * trying again. 
+ */ + if (IS_ERR(bitmap->s_block_bitmap[block_group])) + return PTR_ERR(bitmap->s_block_bitmap[block_group]); return block_group; + } retval = read_block_bitmap(sb, bitmap, block_group, block_group); if (retval < 0) diff --git a/fs/udf/file.c b/fs/udf/file.c index 97c59585208c..3a4179de316b 100644 --- a/fs/udf/file.c +++ b/fs/udf/file.c @@ -232,7 +232,9 @@ static int udf_setattr(struct mnt_idmap *idmap, struct dentry *dentry, if ((attr->ia_valid & ATTR_SIZE) && attr->ia_size != i_size_read(inode)) { + filemap_invalidate_lock(inode->i_mapping); error = udf_setsize(inode, attr->ia_size); + filemap_invalidate_unlock(inode->i_mapping); if (error) return error; } diff --git a/fs/udf/inode.c b/fs/udf/inode.c index 2fb21c5ffccf..08767bd21eb7 100644 --- a/fs/udf/inode.c +++ b/fs/udf/inode.c @@ -1250,7 +1250,6 @@ int udf_setsize(struct inode *inode, loff_t newsize) if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) return -EPERM; - filemap_invalidate_lock(inode->i_mapping); iinfo = UDF_I(inode); if (newsize > inode->i_size) { if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) { @@ -1263,11 +1262,11 @@ int udf_setsize(struct inode *inode, loff_t newsize) } err = udf_expand_file_adinicb(inode); if (err) - goto out_unlock; + return err; } err = udf_extend_file(inode, newsize); if (err) - goto out_unlock; + return err; set_size: truncate_setsize(inode, newsize); } else { @@ -1285,14 +1284,14 @@ int udf_setsize(struct inode *inode, loff_t newsize) err = block_truncate_page(inode->i_mapping, newsize, udf_get_block); if (err) - goto out_unlock; + return err; truncate_setsize(inode, newsize); down_write(&iinfo->i_data_sem); udf_clear_extent_cache(inode); err = udf_truncate_extents(inode); up_write(&iinfo->i_data_sem); if (err) - goto out_unlock; + return err; } update_time: inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode)); @@ -1300,8 +1299,6 @@ int udf_setsize(struct inode *inode, loff_t newsize) udf_sync_inode(inode); else mark_inode_dirty(inode); -out_unlock: - filemap_invalidate_unlock(inode->i_mapping); return err; } diff --git a/fs/udf/namei.c b/fs/udf/namei.c index 1308109fd42d..78a603129dd5 100644 --- a/fs/udf/namei.c +++ b/fs/udf/namei.c @@ -876,8 +876,6 @@ static int udf_rename(struct mnt_idmap *idmap, struct inode *old_dir, if (has_diriter) { diriter.fi.icb.extLocation = cpu_to_lelb(UDF_I(new_dir)->i_location); - udf_update_tag((char *)&diriter.fi, - udf_dir_entry_len(&diriter.fi)); udf_fiiter_write_fi(&diriter, NULL); udf_fiiter_release(&diriter); } diff --git a/fs/udf/super.c b/fs/udf/super.c index 9381a66c6ce5..92d477053905 100644 --- a/fs/udf/super.c +++ b/fs/udf/super.c @@ -336,7 +336,8 @@ static void udf_sb_free_bitmap(struct udf_bitmap *bitmap) int nr_groups = bitmap->s_nr_groups; for (i = 0; i < nr_groups; i++) - brelse(bitmap->s_block_bitmap[i]); + if (!IS_ERR_OR_NULL(bitmap->s_block_bitmap[i])) + brelse(bitmap->s_block_bitmap[i]); kvfree(bitmap); } diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index 5703526d6ebf..70bf1004076b 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -103,7 +103,7 @@ #define DATA_MAIN .data .data.[0-9a-zA-Z_]* .data..L* .data..compoundliteral* .data.$__unnamed_* .data.$L* #define SDATA_MAIN .sdata .sdata.[0-9a-zA-Z_]* #define RODATA_MAIN .rodata .rodata.[0-9a-zA-Z_]* .rodata..L* -#define BSS_MAIN .bss .bss.[0-9a-zA-Z_]* .bss..compoundliteral* +#define BSS_MAIN .bss .bss.[0-9a-zA-Z_]* .bss..L* .bss..compoundliteral* #define SBSS_MAIN .sbss .sbss.[0-9a-zA-Z_]* #else #define 
TEXT_MAIN .text diff --git a/include/drm/drm_mipi_dsi.h b/include/drm/drm_mipi_dsi.h index 82b1cc434ea3..e0f56564bf97 100644 --- a/include/drm/drm_mipi_dsi.h +++ b/include/drm/drm_mipi_dsi.h @@ -314,17 +314,17 @@ int mipi_dsi_dcs_get_display_brightness_large(struct mipi_dsi_device *dsi, * @dsi: DSI peripheral device * @seq: buffer containing the payload */ -#define mipi_dsi_generic_write_seq(dsi, seq...) \ - do { \ - static const u8 d[] = { seq }; \ - struct device *dev = &dsi->dev; \ - int ret; \ - ret = mipi_dsi_generic_write(dsi, d, ARRAY_SIZE(d)); \ - if (ret < 0) { \ - dev_err_ratelimited(dev, "transmit data failed: %d\n", \ - ret); \ - return ret; \ - } \ +#define mipi_dsi_generic_write_seq(dsi, seq...) \ + do { \ + static const u8 d[] = { seq }; \ + struct device *dev = &dsi->dev; \ + ssize_t ret; \ + ret = mipi_dsi_generic_write(dsi, d, ARRAY_SIZE(d)); \ + if (ret < 0) { \ + dev_err_ratelimited(dev, "transmit data failed: %zd\n", \ + ret); \ + return ret; \ + } \ } while (0) /** @@ -333,18 +333,18 @@ int mipi_dsi_dcs_get_display_brightness_large(struct mipi_dsi_device *dsi, * @cmd: Command * @seq: buffer containing data to be transmitted */ -#define mipi_dsi_dcs_write_seq(dsi, cmd, seq...) \ - do { \ - static const u8 d[] = { cmd, seq }; \ - struct device *dev = &dsi->dev; \ - int ret; \ - ret = mipi_dsi_dcs_write_buffer(dsi, d, ARRAY_SIZE(d)); \ - if (ret < 0) { \ - dev_err_ratelimited( \ - dev, "sending command %#02x failed: %d\n", \ - cmd, ret); \ - return ret; \ - } \ +#define mipi_dsi_dcs_write_seq(dsi, cmd, seq...) \ + do { \ + static const u8 d[] = { cmd, seq }; \ + struct device *dev = &dsi->dev; \ + ssize_t ret; \ + ret = mipi_dsi_dcs_write_buffer(dsi, d, ARRAY_SIZE(d)); \ + if (ret < 0) { \ + dev_err_ratelimited( \ + dev, "sending command %#02x failed: %zd\n", \ + cmd, ret); \ + return ret; \ + } \ } while (0) /** diff --git a/include/linux/alloc_tag.h b/include/linux/alloc_tag.h index abd24016a900..8c61ccd161ba 100644 --- a/include/linux/alloc_tag.h +++ b/include/linux/alloc_tag.h @@ -122,7 +122,7 @@ static inline void alloc_tag_add_check(union codetag_ref *ref, struct alloc_tag "alloc_tag was not cleared (got tag for %s:%u)\n", ref->ct->filename, ref->ct->lineno); - WARN_ONCE(!tag, "current->alloc_tag not set"); + WARN_ONCE(!tag, "current->alloc_tag not set\n"); } static inline void alloc_tag_sub_check(union codetag_ref *ref) diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index e4070fb02b11..ff2a6cdb1fa3 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -846,7 +846,7 @@ static inline u32 type_flag(u32 type) /* only use after check_attach_btf_id() */ static inline enum bpf_prog_type resolve_prog_type(const struct bpf_prog *prog) { - return prog->type == BPF_PROG_TYPE_EXT ? + return (prog->type == BPF_PROG_TYPE_EXT && prog->aux->dst_prog) ? prog->aux->dst_prog->type : prog->type; } diff --git a/include/linux/firewire.h b/include/linux/firewire.h index 00abe0e5d602..1cca14cf5652 100644 --- a/include/linux/firewire.h +++ b/include/linux/firewire.h @@ -462,9 +462,8 @@ struct fw_iso_packet { /* rx: Sync bit, wait for matching sy */ u32 tag:2; /* tx: Tag in packet header */ u32 sy:4; /* tx: Sy in packet header */ - u32 header_length:8; /* Length of immediate header */ - /* tx: Top of 1394 isoch. data_block */ - u32 header[] __counted_by(header_length); + u32 header_length:8; /* Size of immediate header */ + u32 header[]; /* tx: Top of 1394 isoch. 
data_block */ }; #define FW_ISO_CONTEXT_TRANSMIT 0 diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index 2aa986a5cd1b..c73ad77fa33d 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -72,14 +72,20 @@ extern struct kobj_attribute shmem_enabled_attr; #define THP_ORDERS_ALL_ANON ((BIT(PMD_ORDER + 1) - 1) & ~(BIT(0) | BIT(1))) /* - * Mask of all large folio orders supported for file THP. + * Mask of all large folio orders supported for file THP. Folios in a DAX + * file is never split and the MAX_PAGECACHE_ORDER limit does not apply to + * it. */ -#define THP_ORDERS_ALL_FILE (BIT(PMD_ORDER) | BIT(PUD_ORDER)) +#define THP_ORDERS_ALL_FILE_DAX \ + (BIT(PMD_ORDER) | BIT(PUD_ORDER)) +#define THP_ORDERS_ALL_FILE_DEFAULT \ + ((BIT(MAX_PAGECACHE_ORDER + 1) - 1) & ~BIT(0)) /* * Mask of all large folio orders supported for THP. */ -#define THP_ORDERS_ALL (THP_ORDERS_ALL_ANON | THP_ORDERS_ALL_FILE) +#define THP_ORDERS_ALL \ + (THP_ORDERS_ALL_ANON | THP_ORDERS_ALL_FILE_DAX | THP_ORDERS_ALL_FILE_DEFAULT) #define TVA_SMAPS (1 << 0) /* Will be used for procfs */ #define TVA_IN_PF (1 << 1) /* Page fault handler */ diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 2b3c3a404769..8120d1976188 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -681,6 +681,7 @@ HPAGEFLAG(RawHwpUnreliable, raw_hwp_unreliable) /* Defines one hugetlb page size */ struct hstate { struct mutex resize_lock; + struct lock_class_key resize_key; int next_nid_to_alloc; int next_nid_to_free; unsigned int order; diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 5c9bdd3ffccc..dac7466de5f3 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -168,7 +168,7 @@ static inline int __must_check request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags, const char *name, void *dev) { - return request_threaded_irq(irq, handler, NULL, flags, name, dev); + return request_threaded_irq(irq, handler, NULL, flags | IRQF_COND_ONESHOT, name, dev); } extern int __must_check diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h index ab04c1c27fae..b900c642210c 100644 --- a/include/linux/jbd2.h +++ b/include/linux/jbd2.h @@ -1085,6 +1085,13 @@ struct journal_s */ int j_revoke_records_per_block; + /** + * @j_transaction_overhead: + * + * Number of blocks each transaction needs for its own bookkeeping + */ + int j_transaction_overhead_buffers; + /** * @j_commit_interval: * @@ -1660,11 +1667,6 @@ int jbd2_wait_inode_data(journal_t *journal, struct jbd2_inode *jinode); int jbd2_fc_wait_bufs(journal_t *journal, int num_blks); int jbd2_fc_release_bufs(journal_t *journal); -static inline int jbd2_journal_get_max_txn_bufs(journal_t *journal) -{ - return (journal->j_total_len - journal->j_fc_wbufsize) / 4; -} - /* * is_journal_abort * diff --git a/include/linux/lsm_hook_defs.h b/include/linux/lsm_hook_defs.h index 44488b1ab9a9..855db460e08b 100644 --- a/include/linux/lsm_hook_defs.h +++ b/include/linux/lsm_hook_defs.h @@ -144,6 +144,7 @@ LSM_HOOK(int, 0, inode_setattr, struct mnt_idmap *idmap, struct dentry *dentry, LSM_HOOK(void, LSM_RET_VOID, inode_post_setattr, struct mnt_idmap *idmap, struct dentry *dentry, int ia_valid) LSM_HOOK(int, 0, inode_getattr, const struct path *path) +LSM_HOOK(int, 0, inode_xattr_skipcap, const char *name) LSM_HOOK(int, 0, inode_setxattr, struct mnt_idmap *idmap, struct dentry *dentry, const char *name, const void *value, size_t size, int flags) diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h 
index f0e55bf3ec8b..ad1ce650146c 100644 --- a/include/linux/mlx5/qp.h +++ b/include/linux/mlx5/qp.h @@ -576,9 +576,12 @@ static inline const char *mlx5_qp_state_str(int state) static inline int mlx5_get_qp_default_ts(struct mlx5_core_dev *dev) { - return !MLX5_CAP_ROCE(dev, qp_ts_format) ? - MLX5_TIMESTAMP_FORMAT_FREE_RUNNING : - MLX5_TIMESTAMP_FORMAT_DEFAULT; + u8 supported_ts_cap = mlx5_get_roce_state(dev) ? + MLX5_CAP_ROCE(dev, qp_ts_format) : + MLX5_CAP_GEN(dev, sq_ts_format); + + return supported_ts_cap ? MLX5_TIMESTAMP_FORMAT_DEFAULT : + MLX5_TIMESTAMP_FORMAT_FREE_RUNNING; } #endif /* MLX5_QP_H */ diff --git a/include/linux/mm.h b/include/linux/mm.h index eb7c96d24ac0..b58bad248eef 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -3177,21 +3177,7 @@ extern void reserve_bootmem_region(phys_addr_t start, phys_addr_t end, int nid); /* Free the reserved page into the buddy system, so it gets managed. */ -static inline void free_reserved_page(struct page *page) -{ - if (mem_alloc_profiling_enabled()) { - union codetag_ref *ref = get_page_tag_ref(page); - - if (ref) { - set_codetag_empty(ref); - put_page_tag_ref(ref); - } - } - ClearPageReserved(page); - init_page_count(page); - __free_page(page); - adjust_managed_page_count(page, 1); -} +void free_reserved_page(struct page *page); #define free_highmem_page(page) free_reserved_page(page) static inline void mark_page_reserved(struct page *page) diff --git a/include/linux/objagg.h b/include/linux/objagg.h index 78021777df46..6df5b887dc54 100644 --- a/include/linux/objagg.h +++ b/include/linux/objagg.h @@ -8,7 +8,6 @@ struct objagg_ops { size_t obj_size; bool (*delta_check)(void *priv, const void *parent_obj, const void *obj); - int (*hints_obj_cmp)(const void *obj1, const void *obj2); void * (*delta_create)(void *priv, void *parent_obj, void *obj); void (*delta_destroy)(void *priv, void *delta_priv); void * (*root_create)(void *priv, void *obj, unsigned int root_id); diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index a5304ae8c654..393fb13733b0 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -786,6 +786,7 @@ struct perf_event { struct irq_work pending_irq; struct callback_head pending_task; unsigned int pending_work; + struct rcuwait pending_work_wait; atomic_t event_limit; diff --git a/include/linux/pgalloc_tag.h b/include/linux/pgalloc_tag.h index 9cacadbd61f8..18cd0c0c73d9 100644 --- a/include/linux/pgalloc_tag.h +++ b/include/linux/pgalloc_tag.h @@ -15,7 +15,7 @@ extern struct page_ext_operations page_alloc_tagging_ops; static inline union codetag_ref *codetag_ref_from_page_ext(struct page_ext *page_ext) { - return (void *)page_ext + page_alloc_tagging_ops.offset; + return (union codetag_ref *)page_ext_data(page_ext, &page_alloc_tagging_ops); } static inline struct page_ext *page_ext_from_codetag_ref(union codetag_ref *ref) @@ -71,6 +71,7 @@ static inline void pgalloc_tag_sub(struct page *page, unsigned int nr) static inline void pgalloc_tag_split(struct page *page, unsigned int nr) { int i; + struct page_ext *first_page_ext; struct page_ext *page_ext; union codetag_ref *ref; struct alloc_tag *tag; @@ -78,7 +79,7 @@ static inline void pgalloc_tag_split(struct page *page, unsigned int nr) if (!mem_alloc_profiling_enabled()) return; - page_ext = page_ext_get(page); + first_page_ext = page_ext = page_ext_get(page); if (unlikely(!page_ext)) return; @@ -94,7 +95,7 @@ static inline void pgalloc_tag_split(struct page *page, unsigned int nr) page_ext = page_ext_next(page_ext); } out: 
- page_ext_put(page_ext); + page_ext_put(first_page_ext); } static inline struct alloc_tag *pgalloc_tag_get(struct page *page) diff --git a/include/linux/preempt.h b/include/linux/preempt.h index 7233e9cf1bab..ce76f1a45722 100644 --- a/include/linux/preempt.h +++ b/include/linux/preempt.h @@ -481,4 +481,45 @@ DEFINE_LOCK_GUARD_0(preempt, preempt_disable(), preempt_enable()) DEFINE_LOCK_GUARD_0(preempt_notrace, preempt_disable_notrace(), preempt_enable_notrace()) DEFINE_LOCK_GUARD_0(migrate, migrate_disable(), migrate_enable()) +#ifdef CONFIG_PREEMPT_DYNAMIC + +extern bool preempt_model_none(void); +extern bool preempt_model_voluntary(void); +extern bool preempt_model_full(void); + +#else + +static inline bool preempt_model_none(void) +{ + return IS_ENABLED(CONFIG_PREEMPT_NONE); +} +static inline bool preempt_model_voluntary(void) +{ + return IS_ENABLED(CONFIG_PREEMPT_VOLUNTARY); +} +static inline bool preempt_model_full(void) +{ + return IS_ENABLED(CONFIG_PREEMPT); +} + +#endif + +static inline bool preempt_model_rt(void) +{ + return IS_ENABLED(CONFIG_PREEMPT_RT); +} + +/* + * Does the preemption model allow non-cooperative preemption? + * + * For !CONFIG_PREEMPT_DYNAMIC kernels this is an exact match with + * CONFIG_PREEMPTION; for CONFIG_PREEMPT_DYNAMIC this doesn't work as the + * kernel is *built* with CONFIG_PREEMPTION=y but may run with e.g. the + * PREEMPT_NONE model. + */ +static inline bool preempt_model_preemptible(void) +{ + return preempt_model_full() || preempt_model_rt(); +} + #endif /* __LINUX_PREEMPT_H */ diff --git a/include/linux/sbitmap.h b/include/linux/sbitmap.h index d662cf136021..c09cdcc99471 100644 --- a/include/linux/sbitmap.h +++ b/include/linux/sbitmap.h @@ -36,6 +36,11 @@ struct sbitmap_word { * @cleared: word holding cleared bits */ unsigned long cleared ____cacheline_aligned_in_smp; + + /** + * @swap_lock: serializes simultaneous updates of ->word and ->cleared + */ + spinlock_t swap_lock; } ____cacheline_aligned_in_smp; /** diff --git a/include/linux/sched.h b/include/linux/sched.h index a5f4b48fca18..76214d7c819d 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -2064,47 +2064,6 @@ extern int __cond_resched_rwlock_write(rwlock_t *lock); __cond_resched_rwlock_write(lock); \ }) -#ifdef CONFIG_PREEMPT_DYNAMIC - -extern bool preempt_model_none(void); -extern bool preempt_model_voluntary(void); -extern bool preempt_model_full(void); - -#else - -static inline bool preempt_model_none(void) -{ - return IS_ENABLED(CONFIG_PREEMPT_NONE); -} -static inline bool preempt_model_voluntary(void) -{ - return IS_ENABLED(CONFIG_PREEMPT_VOLUNTARY); -} -static inline bool preempt_model_full(void) -{ - return IS_ENABLED(CONFIG_PREEMPT); -} - -#endif - -static inline bool preempt_model_rt(void) -{ - return IS_ENABLED(CONFIG_PREEMPT_RT); -} - -/* - * Does the preemption model allow non-cooperative preemption? - * - * For !CONFIG_PREEMPT_DYNAMIC kernels this is an exact match with - * CONFIG_PREEMPTION; for CONFIG_PREEMPT_DYNAMIC this doesn't work as the - * kernel is *built* with CONFIG_PREEMPTION=y but may run with e.g. the - * PREEMPT_NONE model. 
- */ -static inline bool preempt_model_preemptible(void) -{ - return preempt_model_full() || preempt_model_rt(); -} - static __always_inline bool need_resched(void) { return unlikely(tif_need_resched()); diff --git a/include/linux/screen_info.h b/include/linux/screen_info.h index 75303c126285..6a4a3cec4638 100644 --- a/include/linux/screen_info.h +++ b/include/linux/screen_info.h @@ -49,6 +49,16 @@ static inline u64 __screen_info_lfb_size(const struct screen_info *si, unsigned return lfb_size; } +static inline bool __screen_info_vbe_mode_nonvga(const struct screen_info *si) +{ + /* + * VESA modes typically run on VGA hardware. Set bit 5 signals that this + * is not the case. Drivers can then not make use of VGA resources. See + * Sec 4.4 of the VBE 2.0 spec. + */ + return si->vesa_attributes & BIT(5); +} + static inline unsigned int __screen_info_video_type(unsigned int type) { switch (type) { diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h index 3fcd20de6ca8..63dd8cf3c3c2 100644 --- a/include/linux/spinlock.h +++ b/include/linux/spinlock.h @@ -462,11 +462,10 @@ static __always_inline int spin_is_contended(spinlock_t *lock) */ static inline int spin_needbreak(spinlock_t *lock) { -#ifdef CONFIG_PREEMPTION + if (!preempt_model_preemptible()) + return 0; + return spin_is_contended(lock); -#else - return 0; -#endif } /* @@ -479,11 +478,10 @@ static inline int spin_needbreak(spinlock_t *lock) */ static inline int rwlock_needbreak(rwlock_t *lock) { -#ifdef CONFIG_PREEMPTION + if (!preempt_model_preemptible()) + return 0; + return rwlock_is_contended(lock); -#else - return 0; -#endif } /* diff --git a/include/linux/task_work.h b/include/linux/task_work.h index 795ef5a68429..26b8a47f41fc 100644 --- a/include/linux/task_work.h +++ b/include/linux/task_work.h @@ -30,7 +30,8 @@ int task_work_add(struct task_struct *task, struct callback_head *twork, struct callback_head *task_work_cancel_match(struct task_struct *task, bool (*match)(struct callback_head *, void *data), void *data); -struct callback_head *task_work_cancel(struct task_struct *, task_work_func_t); +struct callback_head *task_work_cancel_func(struct task_struct *, task_work_func_t); +bool task_work_cancel(struct task_struct *task, struct callback_head *cb); void task_work_run(void); static inline void exit_task_work(struct task_struct *task) diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h index 4dfa9b69ca8d..d1d7825318c3 100644 --- a/include/linux/virtio_net.h +++ b/include/linux/virtio_net.h @@ -56,6 +56,7 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb, unsigned int thlen = 0; unsigned int p_off = 0; unsigned int ip_proto; + u64 ret, remainder, gso_size; if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) { switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) { @@ -98,6 +99,16 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb, u32 off = __virtio16_to_cpu(little_endian, hdr->csum_offset); u32 needed = start + max_t(u32, thlen, off + sizeof(__sum16)); + if (hdr->gso_size) { + gso_size = __virtio16_to_cpu(little_endian, hdr->gso_size); + ret = div64_u64_rem(skb->len, gso_size, &remainder); + if (!(ret && (hdr->gso_size > needed) && + ((remainder > needed) || (remainder == 0)))) { + return -EINVAL; + } + skb_shinfo(skb)->tx_flags |= SKBFL_SHARED_FRAG; + } + if (!pskb_may_pull(skb, needed)) return -EINVAL; diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index c43716edf205..b15f51ae3bfd 100644 --- a/include/net/bluetooth/hci_core.h +++ 
b/include/net/bluetooth/hci_core.h @@ -91,8 +91,6 @@ struct discovery_state { s8 rssi; u16 uuid_count; u8 (*uuids)[16]; - unsigned long scan_start; - unsigned long scan_duration; unsigned long name_resolve_timeout; }; @@ -890,8 +888,6 @@ static inline void hci_discovery_filter_clear(struct hci_dev *hdev) hdev->discovery.uuid_count = 0; kfree(hdev->discovery.uuids); hdev->discovery.uuids = NULL; - hdev->discovery.scan_start = 0; - hdev->discovery.scan_duration = 0; } bool hci_discovery_active(struct hci_dev *hdev); diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h index a18ed24fed94..6dbdf60b342f 100644 --- a/include/net/ip6_route.h +++ b/include/net/ip6_route.h @@ -127,18 +127,26 @@ void rt6_age_exceptions(struct fib6_info *f6i, struct fib6_gc_args *gc_args, static inline int ip6_route_get_saddr(struct net *net, struct fib6_info *f6i, const struct in6_addr *daddr, - unsigned int prefs, + unsigned int prefs, int l3mdev_index, struct in6_addr *saddr) { + struct net_device *l3mdev; + struct net_device *dev; + bool same_vrf; int err = 0; - if (f6i && f6i->fib6_prefsrc.plen) { + rcu_read_lock(); + + l3mdev = dev_get_by_index_rcu(net, l3mdev_index); + if (!f6i || !f6i->fib6_prefsrc.plen || l3mdev) + dev = f6i ? fib6_info_nh_dev(f6i) : NULL; + same_vrf = !l3mdev || l3mdev_master_dev_rcu(dev) == l3mdev; + if (f6i && f6i->fib6_prefsrc.plen && same_vrf) *saddr = f6i->fib6_prefsrc.addr; - } else { - struct net_device *dev = f6i ? fib6_info_nh_dev(f6i) : NULL; + else + err = ipv6_dev_get_saddr(net, same_vrf ? dev : l3mdev, daddr, prefs, saddr); - err = ipv6_dev_get_saddr(net, dev, daddr, prefs, saddr); - } + rcu_read_unlock(); return err; } diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h index 9b2f69ba5e49..c29639b4323f 100644 --- a/include/net/ip_fib.h +++ b/include/net/ip_fib.h @@ -173,6 +173,7 @@ struct fib_result { unsigned char type; unsigned char scope; u32 tclassid; + dscp_t dscp; struct fib_nh_common *nhc; struct fib_info *fi; struct fib_table *table; diff --git a/include/net/mana/mana.h b/include/net/mana/mana.h index 561f6719fb4e..f207a6e1042a 100644 --- a/include/net/mana/mana.h +++ b/include/net/mana/mana.h @@ -796,4 +796,6 @@ void mana_destroy_wq_obj(struct mana_port_context *apc, u32 wq_type, int mana_cfg_vport(struct mana_port_context *apc, u32 protection_dom_id, u32 doorbell_pg_id); void mana_uncfg_vport(struct mana_port_context *apc); + +struct net_device *mana_get_primary_netdev_rcu(struct mana_context *ac, u32 port_index); #endif /* _MANA_H */ diff --git a/include/net/tcp.h b/include/net/tcp.h index 060e95b331a2..32815a40dea1 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -677,6 +677,7 @@ void tcp_skb_collapse_tstamp(struct sk_buff *skb, /* tcp_input.c */ void tcp_rearm_rto(struct sock *sk); void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req); +void tcp_done_with_error(struct sock *sk, int err); void tcp_reset(struct sock *sk, struct sk_buff *skb); void tcp_fin(struct sock *sk); void tcp_check_space(struct sock *sk); diff --git a/include/net/xfrm.h b/include/net/xfrm.h index 77ebf5bcf0b9..7d4c2235252c 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -178,7 +178,10 @@ struct xfrm_state { struct hlist_node gclist; struct hlist_node bydst; }; - struct hlist_node bysrc; + union { + struct hlist_node dev_gclist; + struct hlist_node bysrc; + }; struct hlist_node byspi; struct hlist_node byseq; @@ -1588,7 +1591,7 @@ void xfrm_state_update_stats(struct net *net); static inline void xfrm_dev_state_update_stats(struct xfrm_state *x) 
{ struct xfrm_dev_offload *xdo = &x->xso; - struct net_device *dev = xdo->dev; + struct net_device *dev = READ_ONCE(xdo->dev); if (dev && dev->xfrmdev_ops && dev->xfrmdev_ops->xdo_dev_state_update_stats) @@ -1946,13 +1949,16 @@ int xfrm_dev_policy_add(struct net *net, struct xfrm_policy *xp, struct xfrm_user_offload *xuo, u8 dir, struct netlink_ext_ack *extack); bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x); +void xfrm_dev_state_delete(struct xfrm_state *x); +void xfrm_dev_state_free(struct xfrm_state *x); static inline void xfrm_dev_state_advance_esn(struct xfrm_state *x) { struct xfrm_dev_offload *xso = &x->xso; + struct net_device *dev = READ_ONCE(xso->dev); - if (xso->dev && xso->dev->xfrmdev_ops->xdo_dev_state_advance_esn) - xso->dev->xfrmdev_ops->xdo_dev_state_advance_esn(x); + if (dev && dev->xfrmdev_ops->xdo_dev_state_advance_esn) + dev->xfrmdev_ops->xdo_dev_state_advance_esn(x); } static inline bool xfrm_dst_offload_ok(struct dst_entry *dst) @@ -1973,28 +1979,6 @@ static inline bool xfrm_dst_offload_ok(struct dst_entry *dst) return false; } -static inline void xfrm_dev_state_delete(struct xfrm_state *x) -{ - struct xfrm_dev_offload *xso = &x->xso; - - if (xso->dev) - xso->dev->xfrmdev_ops->xdo_dev_state_delete(x); -} - -static inline void xfrm_dev_state_free(struct xfrm_state *x) -{ - struct xfrm_dev_offload *xso = &x->xso; - struct net_device *dev = xso->dev; - - if (dev && dev->xfrmdev_ops) { - if (dev->xfrmdev_ops->xdo_dev_state_free) - dev->xfrmdev_ops->xdo_dev_state_free(x); - xso->dev = NULL; - xso->type = XFRM_DEV_OFFLOAD_UNSPECIFIED; - netdev_put(dev, &xso->dev_tracker); - } -} - static inline void xfrm_dev_policy_delete(struct xfrm_policy *x) { struct xfrm_dev_offload *xdo = &x->xdo; diff --git a/include/sound/tas2781-dsp.h b/include/sound/tas2781-dsp.h index 7fba7ea26a4b..3cda9da14f6d 100644 --- a/include/sound/tas2781-dsp.h +++ b/include/sound/tas2781-dsp.h @@ -117,10 +117,17 @@ struct tasdevice_fw { struct device *dev; }; -enum tasdevice_dsp_fw_state { - TASDEVICE_DSP_FW_NONE = 0, +enum tasdevice_fw_state { + /* Driver in startup mode, not load any firmware. */ TASDEVICE_DSP_FW_PENDING, + /* DSP firmware in the system, but parsing error. */ TASDEVICE_DSP_FW_FAIL, + /* + * Only RCA (Reconfigurable Architecture) firmware load + * successfully. + */ + TASDEVICE_RCA_FW_OK, + /* Both RCA and DSP firmware load successfully. */ TASDEVICE_DSP_FW_ALL_OK, }; diff --git a/include/trace/events/rpcgss.h b/include/trace/events/rpcgss.h index 7f0c1ceae726..b0b6300a0cab 100644 --- a/include/trace/events/rpcgss.h +++ b/include/trace/events/rpcgss.h @@ -54,7 +54,7 @@ TRACE_DEFINE_ENUM(GSS_S_UNSEQ_TOKEN); TRACE_DEFINE_ENUM(GSS_S_GAP_TOKEN); #define show_gss_status(x) \ - __print_flags(x, "|", \ + __print_symbolic(x, \ { GSS_S_BAD_MECH, "GSS_S_BAD_MECH" }, \ { GSS_S_BAD_NAME, "GSS_S_BAD_NAME" }, \ { GSS_S_BAD_NAMETYPE, "GSS_S_BAD_NAMETYPE" }, \ diff --git a/include/uapi/drm/xe_drm.h b/include/uapi/drm/xe_drm.h index 1446c3bae515..d425b83181df 100644 --- a/include/uapi/drm/xe_drm.h +++ b/include/uapi/drm/xe_drm.h @@ -776,7 +776,13 @@ struct drm_xe_gem_create { #define DRM_XE_GEM_CPU_CACHING_WC 2 /** * @cpu_caching: The CPU caching mode to select for this object. If - * mmaping the object the mode selected here will also be used. + * mmaping the object the mode selected here will also be used. The + * exception is when mapping system memory (including data evicted + * to system) on discrete GPUs. 
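/*
 * Editor's note: illustrative sketch of the READ_ONCE() pointer-snapshot
 * pattern used in the xfrm offload hunks above; not part of the patch. The
 * pointer is loaded exactly once, so the NULL check and the dereference
 * cannot observe two different values if the field is cleared concurrently.
 * struct example_obj/example_ops and example_notify() are hypothetical.
 */
#include <linux/compiler.h>
#include <linux/types.h>

struct example_ops {
        void (*notify)(void *obj);
};

struct example_obj {
        const struct example_ops *ops;  /* may be cleared concurrently */
};

static void example_notify(struct example_obj *obj)
{
        const struct example_ops *ops = READ_ONCE(obj->ops);

        if (ops && ops->notify)
                ops->notify(obj);
}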
The caching mode selected will + * then be overridden to DRM_XE_GEM_CPU_CACHING_WB, and coherency + * between GPU- and CPU is guaranteed. The caching mode of + * existing CPU-mappings will be updated transparently to + * user-space clients. */ __u16 cpu_caching; /** @pad: MBZ */ diff --git a/include/uapi/linux/if_xdp.h b/include/uapi/linux/if_xdp.h index d31698410410..42ec5ddaab8d 100644 --- a/include/uapi/linux/if_xdp.h +++ b/include/uapi/linux/if_xdp.h @@ -41,6 +41,10 @@ */ #define XDP_UMEM_TX_SW_CSUM (1 << 1) +/* Request to reserve tx_metadata_len bytes of per-chunk metadata. + */ +#define XDP_UMEM_TX_METADATA_LEN (1 << 2) + struct sockaddr_xdp { __u16 sxdp_family; __u16 sxdp_flags; diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h index aa4094ca2444..639894ed1b97 100644 --- a/include/uapi/linux/netfilter/nf_tables.h +++ b/include/uapi/linux/netfilter/nf_tables.h @@ -1376,7 +1376,7 @@ enum nft_secmark_attributes { #define NFTA_SECMARK_MAX (__NFTA_SECMARK_MAX - 1) /* Max security context length */ -#define NFT_SECMARK_CTX_MAXLEN 256 +#define NFT_SECMARK_CTX_MAXLEN 4096 /** * enum nft_reject_types - nf_tables reject expression reject types diff --git a/include/uapi/linux/zorro_ids.h b/include/uapi/linux/zorro_ids.h index 6e574d7b7d79..393f2ee9c042 100644 --- a/include/uapi/linux/zorro_ids.h +++ b/include/uapi/linux/zorro_ids.h @@ -449,6 +449,9 @@ #define ZORRO_PROD_VMC_ISDN_BLASTER_Z2 ZORRO_ID(VMC, 0x01, 0) #define ZORRO_PROD_VMC_HYPERCOM_4 ZORRO_ID(VMC, 0x02, 0) +#define ZORRO_MANUF_CSLAB 0x1400 +#define ZORRO_PROD_CSLAB_WARP_1260 ZORRO_ID(CSLAB, 0x65, 0) + #define ZORRO_MANUF_INFORMATION 0x157C #define ZORRO_PROD_INFORMATION_ISDN_ENGINE_I ZORRO_ID(INFORMATION, 0x64, 0) diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h index bad88bd91995..d965e4d1277e 100644 --- a/include/ufs/ufshcd.h +++ b/include/ufs/ufshcd.h @@ -1131,6 +1131,12 @@ static inline bool is_mcq_enabled(struct ufs_hba *hba) return hba->mcq_enabled; } +static inline unsigned int ufshcd_mcq_opr_offset(struct ufs_hba *hba, + enum ufshcd_mcq_opr opr, int idx) +{ + return hba->mcq_opr[opr].offset + hba->mcq_opr[opr].stride * idx; +} + #ifdef CONFIG_SCSI_UFS_VARIABLE_SG_ENTRY_SIZE static inline size_t ufshcd_sg_entry_size(const struct ufs_hba *hba) { diff --git a/io_uring/io-wq.c b/io_uring/io-wq.c index 7d3316fe9bfc..22dac5850327 100644 --- a/io_uring/io-wq.c +++ b/io_uring/io-wq.c @@ -23,6 +23,7 @@ #include "io_uring.h" #define WORKER_IDLE_TIMEOUT (5 * HZ) +#define WORKER_INIT_LIMIT 3 enum { IO_WORKER_F_UP = 0, /* up and active */ @@ -58,6 +59,7 @@ struct io_worker { unsigned long create_state; struct callback_head create_work; + int init_retries; union { struct rcu_head rcu; @@ -744,7 +746,7 @@ static bool io_wq_work_match_all(struct io_wq_work *work, void *data) return true; } -static inline bool io_should_retry_thread(long err) +static inline bool io_should_retry_thread(struct io_worker *worker, long err) { /* * Prevent perpetual task_work retry, if the task (or its group) is @@ -752,6 +754,8 @@ static inline bool io_should_retry_thread(long err) */ if (fatal_signal_pending(current)) return false; + if (worker->init_retries++ >= WORKER_INIT_LIMIT) + return false; switch (err) { case -EAGAIN: @@ -778,7 +782,7 @@ static void create_worker_cont(struct callback_head *cb) io_init_new_worker(wq, worker, tsk); io_worker_release(worker); return; - } else if (!io_should_retry_thread(PTR_ERR(tsk))) { + } else if (!io_should_retry_thread(worker, PTR_ERR(tsk))) { struct io_wq_acct 
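/*
 * Editor's note: illustrative user-space sketch for the new
 * XDP_UMEM_TX_METADATA_LEN flag above; not part of the patch. Error
 * handling and the rest of the AF_XDP setup are omitted, and the chunk
 * size of 4096 is only an assumption. SOL_XDP may need a fallback define
 * on older libcs.
 */
#include <stdint.h>
#include <string.h>
#include <sys/socket.h>
#include <linux/if_xdp.h>

#ifndef SOL_XDP
#define SOL_XDP 283
#endif

static int example_register_umem(int xsk_fd, void *umem_area, uint64_t umem_size)
{
        struct xdp_umem_reg mr;

        memset(&mr, 0, sizeof(mr));
        mr.addr = (uintptr_t)umem_area;
        mr.len = umem_size;
        mr.chunk_size = 4096;
        mr.headroom = 0;
        mr.flags = XDP_UMEM_TX_METADATA_LEN;
        mr.tx_metadata_len = sizeof(union xsk_tx_metadata);

        /* Returns 0 on success, -1 with errno set otherwise. */
        return setsockopt(xsk_fd, SOL_XDP, XDP_UMEM_REG, &mr, sizeof(mr));
}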
*acct = io_wq_get_acct(worker); atomic_dec(&acct->nr_running); @@ -845,7 +849,7 @@ static bool create_io_worker(struct io_wq *wq, int index) tsk = create_io_thread(io_wq_worker, worker, NUMA_NO_NODE); if (!IS_ERR(tsk)) { io_init_new_worker(wq, worker, tsk); - } else if (!io_should_retry_thread(PTR_ERR(tsk))) { + } else if (!io_should_retry_thread(worker, PTR_ERR(tsk))) { kfree(worker); goto fail; } else { diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c index c326e2127dd4..896e707e0618 100644 --- a/io_uring/io_uring.c +++ b/io_uring/io_uring.c @@ -3071,8 +3071,11 @@ __cold void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd) bool loop = false; io_uring_drop_tctx_refs(current); + if (!tctx_inflight(tctx, !cancel_all)) + break; + /* read completions before cancelations */ - inflight = tctx_inflight(tctx, !cancel_all); + inflight = tctx_inflight(tctx, false); if (!inflight) break; diff --git a/io_uring/napi.c b/io_uring/napi.c index 8c18ede595c4..080d10e0e0af 100644 --- a/io_uring/napi.c +++ b/io_uring/napi.c @@ -222,6 +222,8 @@ int io_register_napi(struct io_ring_ctx *ctx, void __user *arg) }; struct io_uring_napi napi; + if (ctx->flags & IORING_SETUP_IOPOLL) + return -EINVAL; if (copy_from_user(&napi, arg, sizeof(napi))) return -EFAULT; if (napi.pad[0] || napi.pad[1] || napi.pad[2] || napi.resv) diff --git a/io_uring/opdef.c b/io_uring/opdef.c index 2e3b7b16effb..760006ccc408 100644 --- a/io_uring/opdef.c +++ b/io_uring/opdef.c @@ -725,6 +725,14 @@ const char *io_uring_get_opcode(u8 opcode) return "INVALID"; } +bool io_uring_op_supported(u8 opcode) +{ + if (opcode < IORING_OP_LAST && + io_issue_defs[opcode].prep != io_eopnotsupp_prep) + return true; + return false; +} + void __init io_uring_optable_init(void) { int i; diff --git a/io_uring/opdef.h b/io_uring/opdef.h index 7ee6f5aa90aa..14456436ff74 100644 --- a/io_uring/opdef.h +++ b/io_uring/opdef.h @@ -17,8 +17,6 @@ struct io_issue_def { unsigned poll_exclusive : 1; /* op supports buffer selection */ unsigned buffer_select : 1; - /* opcode is not supported by this kernel */ - unsigned not_supported : 1; /* skip auditing */ unsigned audit_skip : 1; /* supports ioprio */ @@ -47,5 +45,7 @@ struct io_cold_def { extern const struct io_issue_def io_issue_defs[]; extern const struct io_cold_def io_cold_defs[]; +bool io_uring_op_supported(u8 opcode); + void io_uring_optable_init(void); #endif diff --git a/io_uring/register.c b/io_uring/register.c index c0010a66a6f2..11517b34cfc8 100644 --- a/io_uring/register.c +++ b/io_uring/register.c @@ -113,7 +113,7 @@ static __cold int io_probe(struct io_ring_ctx *ctx, void __user *arg, for (i = 0; i < nr_args; i++) { p->ops[i].op = i; - if (!io_issue_defs[i].not_supported) + if (io_uring_op_supported(i)) p->ops[i].flags = IO_URING_OP_SUPPORTED; } p->ops_len = i; diff --git a/io_uring/timeout.c b/io_uring/timeout.c index 1c9bf07499b1..9973876d91b0 100644 --- a/io_uring/timeout.c +++ b/io_uring/timeout.c @@ -639,7 +639,7 @@ void io_queue_linked_timeout(struct io_kiocb *req) static bool io_match_task(struct io_kiocb *head, struct task_struct *task, bool cancel_all) - __must_hold(&req->ctx->timeout_lock) + __must_hold(&head->ctx->timeout_lock) { struct io_kiocb *req; diff --git a/io_uring/uring_cmd.c b/io_uring/uring_cmd.c index 21ac5fb2d5f0..a54163a83968 100644 --- a/io_uring/uring_cmd.c +++ b/io_uring/uring_cmd.c @@ -265,7 +265,7 @@ int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags) req_set_fail(req); io_req_uring_cleanup(req, issue_flags); io_req_set_res(req, ret, 0); - 
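/*
 * Editor's note: illustrative user-space sketch (assumes liburing); not
 * part of the patch. The io_uring_op_supported() change above only affects
 * how the kernel fills in the probe flags; consuming them is unchanged.
 */
#include <stdio.h>
#include <liburing.h>

int main(void)
{
        struct io_uring_probe *probe = io_uring_get_probe();

        if (!probe)
                return 1;

        printf("IORING_OP_URING_CMD supported: %s\n",
               io_uring_opcode_supported(probe, IORING_OP_URING_CMD) ? "yes" : "no");

        io_uring_free_probe(probe);
        return 0;
}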
return ret; + return ret < 0 ? ret : IOU_OK; } int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw, diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index 821063660d9f..fe360b5b211d 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -414,7 +414,7 @@ const char *btf_type_str(const struct btf_type *t) struct btf_show { u64 flags; void *target; /* target of show operation (seq file, buffer) */ - void (*showfn)(struct btf_show *show, const char *fmt, va_list args); + __printf(2, 0) void (*showfn)(struct btf_show *show, const char *fmt, va_list args); const struct btf *btf; /* below are used during iteration */ struct { @@ -7370,8 +7370,8 @@ static void btf_type_show(const struct btf *btf, u32 type_id, void *obj, btf_type_ops(t)->show(btf, t, type_id, obj, 0, show); } -static void btf_seq_show(struct btf_show *show, const char *fmt, - va_list args) +__printf(2, 0) static void btf_seq_show(struct btf_show *show, const char *fmt, + va_list args) { seq_vprintf((struct seq_file *)show->target, fmt, args); } @@ -7404,8 +7404,8 @@ struct btf_show_snprintf { int len; /* length we would have written */ }; -static void btf_snprintf_show(struct btf_show *show, const char *fmt, - va_list args) +__printf(2, 0) static void btf_snprintf_show(struct btf_show *show, const char *fmt, + va_list args) { struct btf_show_snprintf *ssnprintf = (struct btf_show_snprintf *)show; int len; diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c index 3243c83ef3e3..7268370600f6 100644 --- a/kernel/bpf/helpers.c +++ b/kernel/bpf/helpers.c @@ -2786,7 +2786,7 @@ __bpf_kfunc int bpf_wq_start(struct bpf_wq *wq, unsigned int flags) } __bpf_kfunc int bpf_wq_set_callback_impl(struct bpf_wq *wq, - int (callback_fn)(void *map, int *key, struct bpf_wq *wq), + int (callback_fn)(void *map, int *key, void *value), unsigned int flags, void *aux__ign) { diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 214a9fa8c6fb..6b422c275f78 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -3215,7 +3215,8 @@ static int insn_def_regno(const struct bpf_insn *insn) case BPF_ST: return -1; case BPF_STX: - if (BPF_MODE(insn->code) == BPF_ATOMIC && + if ((BPF_MODE(insn->code) == BPF_ATOMIC || + BPF_MODE(insn->code) == BPF_PROBE_ATOMIC) && (insn->imm & BPF_FETCH)) { if (insn->imm == BPF_CMPXCHG) return BPF_REG_0; @@ -18767,7 +18768,7 @@ static int adjust_jmp_off(struct bpf_prog *prog, u32 tgt_idx, u32 delta) } else { if (i + 1 + insn->off != tgt_idx) continue; - if (signed_add16_overflows(insn->imm, delta)) + if (signed_add16_overflows(insn->off, delta)) return -ERANGE; insn->off += delta; } diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c index c12b9fdb22a4..5e468db95810 100644 --- a/kernel/cgroup/cpuset.c +++ b/kernel/cgroup/cpuset.c @@ -21,6 +21,7 @@ * License. See the file COPYING in the main directory of the Linux * distribution for more details. 
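/*
 * Editor's note: illustrative sketch of the __printf(n, 0) annotation added
 * to the BTF show callbacks above; not part of the patch. The second index
 * of 0 tells the compiler the variadic arguments arrive as a va_list, so
 * format/argument mismatches are still diagnosed at the varargs caller.
 * example_vlog() and example_log() are hypothetical.
 */
#include <linux/printk.h>
#include <linux/stdarg.h>

__printf(1, 0)
static void example_vlog(const char *fmt, va_list args)
{
        vprintk(fmt, args);
}

__printf(1, 2)
static void example_log(const char *fmt, ...)
{
        va_list args;

        va_start(args, fmt);
        example_vlog(fmt, args);
        va_end(args);
}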
*/ +#include "cgroup-internal.h" #include <linux/cpu.h> #include <linux/cpumask.h> @@ -169,7 +170,7 @@ struct cpuset { /* for custom sched domain */ int relax_domain_level; - /* number of valid sub-partitions */ + /* number of valid local child partitions */ int nr_subparts; /* partition root state */ @@ -957,13 +958,15 @@ static int generate_sched_domains(cpumask_var_t **domains, int nslot; /* next empty doms[] struct cpumask slot */ struct cgroup_subsys_state *pos_css; bool root_load_balance = is_sched_load_balance(&top_cpuset); + bool cgrpv2 = cgroup_subsys_on_dfl(cpuset_cgrp_subsys); doms = NULL; dattr = NULL; csa = NULL; /* Special case for the 99% of systems with one, full, sched domain */ - if (root_load_balance && !top_cpuset.nr_subparts) { + if (root_load_balance && cpumask_empty(subpartitions_cpus)) { +single_root_domain: ndoms = 1; doms = alloc_sched_domains(ndoms); if (!doms) @@ -991,16 +994,18 @@ static int generate_sched_domains(cpumask_var_t **domains, cpuset_for_each_descendant_pre(cp, pos_css, &top_cpuset) { if (cp == &top_cpuset) continue; + + if (cgrpv2) + goto v2; + /* + * v1: * Continue traversing beyond @cp iff @cp has some CPUs and * isn't load balancing. The former is obvious. The * latter: All child cpusets contain a subset of the * parent's cpus, so just skip them, and then we call * update_domain_attr_tree() to calc relax_domain_level of * the corresponding sched domain. - * - * If root is load-balancing, we can skip @cp if it - * is a subset of the root's effective_cpus. */ if (!cpumask_empty(cp->cpus_allowed) && !(is_sched_load_balance(cp) && @@ -1008,20 +1013,39 @@ static int generate_sched_domains(cpumask_var_t **domains, housekeeping_cpumask(HK_TYPE_DOMAIN)))) continue; - if (root_load_balance && - cpumask_subset(cp->cpus_allowed, top_cpuset.effective_cpus)) - continue; - if (is_sched_load_balance(cp) && !cpumask_empty(cp->effective_cpus)) csa[csn++] = cp; - /* skip @cp's subtree if not a partition root */ - if (!is_partition_valid(cp)) + /* skip @cp's subtree */ + pos_css = css_rightmost_descendant(pos_css); + continue; + +v2: + /* + * Only valid partition roots that are not isolated and with + * non-empty effective_cpus will be saved into csn[]. + */ + if ((cp->partition_root_state == PRS_ROOT) && + !cpumask_empty(cp->effective_cpus)) + csa[csn++] = cp; + + /* + * Skip @cp's subtree if not a partition root and has no + * exclusive CPUs to be granted to child cpusets. + */ + if (!is_partition_valid(cp) && cpumask_empty(cp->exclusive_cpus)) pos_css = css_rightmost_descendant(pos_css); } rcu_read_unlock(); + /* + * If there are only isolated partitions underneath the cgroup root, + * we can optimize out unneeded sched domains scanning. + */ + if (root_load_balance && (csn == 1)) + goto single_root_domain; + for (i = 0; i < csn; i++) csa[i]->pn = i; ndoms = csn; @@ -1064,6 +1088,20 @@ static int generate_sched_domains(cpumask_var_t **domains, dattr = kmalloc_array(ndoms, sizeof(struct sched_domain_attr), GFP_KERNEL); + /* + * Cgroup v2 doesn't support domain attributes, just set all of them + * to SD_ATTR_INIT. Also non-isolating partition root CPUs are a + * subset of HK_TYPE_DOMAIN housekeeping CPUs. 
+ */ + if (cgrpv2) { + for (i = 0; i < ndoms; i++) { + cpumask_copy(doms[i], csa[i]->effective_cpus); + if (dattr) + dattr[i] = SD_ATTR_INIT; + } + goto done; + } + for (nslot = 0, i = 0; i < csn; i++) { struct cpuset *a = csa[i]; struct cpumask *dp; @@ -1223,7 +1261,7 @@ static void rebuild_sched_domains_locked(void) * root should be only a subset of the active CPUs. Since a CPU in any * partition root could be offlined, all must be checked. */ - if (top_cpuset.nr_subparts) { + if (!cpumask_empty(subpartitions_cpus)) { rcu_read_lock(); cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) { if (!is_partition_valid(cs)) { @@ -4571,7 +4609,7 @@ static void cpuset_handle_hotplug(void) * In the rare case that hotplug removes all the cpus in * subpartitions_cpus, we assumed that cpus are updated. */ - if (!cpus_updated && top_cpuset.nr_subparts) + if (!cpus_updated && !cpumask_empty(subpartitions_cpus)) cpus_updated = true; /* For v1, synchronize cpus_allowed to cpu_active_mask */ @@ -5051,10 +5089,14 @@ int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns, if (!buf) goto out; - css = task_get_css(tsk, cpuset_cgrp_id); - retval = cgroup_path_ns(css->cgroup, buf, PATH_MAX, - current->nsproxy->cgroup_ns); - css_put(css); + rcu_read_lock(); + spin_lock_irq(&css_set_lock); + css = task_css(tsk, cpuset_cgrp_id); + retval = cgroup_path_ns_locked(css->cgroup, buf, PATH_MAX, + current->nsproxy->cgroup_ns); + spin_unlock_irq(&css_set_lock); + rcu_read_unlock(); + if (retval == -E2BIG) retval = -ENAMETOOLONG; if (retval < 0) diff --git a/kernel/debug/kdb/kdb_io.c b/kernel/debug/kdb/kdb_io.c index 3131334d7a81..6a77f1c779c4 100644 --- a/kernel/debug/kdb/kdb_io.c +++ b/kernel/debug/kdb/kdb_io.c @@ -206,7 +206,7 @@ char kdb_getchar(void) */ static void kdb_position_cursor(char *prompt, char *buffer, char *cp) { - kdb_printf("\r%s", kdb_prompt_str); + kdb_printf("\r%s", prompt); if (cp > buffer) kdb_printf("%.*s", (int)(cp - buffer), buffer); } @@ -362,7 +362,7 @@ static char *kdb_read(char *buffer, size_t bufsize) if (i >= dtab_count) kdb_printf("..."); kdb_printf("\n"); - kdb_printf(kdb_prompt_str); + kdb_printf("%s", kdb_prompt_str); kdb_printf("%s", buffer); if (cp != lastchar) kdb_position_cursor(kdb_prompt_str, buffer, cp); @@ -453,7 +453,7 @@ char *kdb_getstr(char *buffer, size_t bufsize, const char *prompt) { if (prompt && kdb_prompt_str != prompt) strscpy(kdb_prompt_str, prompt, CMD_BUFLEN); - kdb_printf(kdb_prompt_str); + kdb_printf("%s", kdb_prompt_str); kdb_nextline = 1; /* Prompt and input resets line number */ return kdb_read(buffer, bufsize); } diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c index 81de84318ccc..b1c18058d55f 100644 --- a/kernel/dma/mapping.c +++ b/kernel/dma/mapping.c @@ -67,8 +67,8 @@ void dmam_free_coherent(struct device *dev, size_t size, void *vaddr, { struct dma_devres match_data = { size, vaddr, dma_handle }; - dma_free_coherent(dev, size, vaddr, dma_handle); WARN_ON(devres_destroy(dev, dmam_release, dmam_match, &match_data)); + dma_free_coherent(dev, size, vaddr, dma_handle); } EXPORT_SYMBOL(dmam_free_coherent); diff --git a/kernel/events/core.c b/kernel/events/core.c index 8f908f077935..b2a6aec118f3 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -2284,18 +2284,14 @@ event_sched_out(struct perf_event *event, struct perf_event_context *ctx) } if (event->pending_sigtrap) { - bool dec = true; - event->pending_sigtrap = 0; if (state != PERF_EVENT_STATE_OFF && - !event->pending_work) { + !event->pending_work && + 
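/*
 * Editor's note: illustrative sketch of the format-string issue fixed in
 * the kdb hunks above; not part of the patch. A string that is not a
 * literal must never be passed as the format argument, because any '%' it
 * contains would be interpreted as a conversion specifier. example_echo()
 * is hypothetical.
 */
#include <linux/printk.h>

static void example_echo(const char *line)
{
        /* Wrong: printk(line); would treat '%' in @line as a format. */
        printk("%s", line);
}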
!task_work_add(current, &event->pending_task, TWA_RESUME)) { event->pending_work = 1; - dec = false; - WARN_ON_ONCE(!atomic_long_inc_not_zero(&event->refcount)); - task_work_add(current, &event->pending_task, TWA_RESUME); - } - if (dec) + } else { local_dec(&event->ctx->nr_pending); + } } perf_event_set_state(event, state); @@ -5206,9 +5202,35 @@ static bool exclusive_event_installable(struct perf_event *event, static void perf_addr_filters_splice(struct perf_event *event, struct list_head *head); +static void perf_pending_task_sync(struct perf_event *event) +{ + struct callback_head *head = &event->pending_task; + + if (!event->pending_work) + return; + /* + * If the task is queued to the current task's queue, we + * obviously can't wait for it to complete. Simply cancel it. + */ + if (task_work_cancel(current, head)) { + event->pending_work = 0; + local_dec(&event->ctx->nr_pending); + return; + } + + /* + * All accesses related to the event are within the same + * non-preemptible section in perf_pending_task(). The RCU + * grace period before the event is freed will make sure all + * those accesses are complete by then. + */ + rcuwait_wait_event(&event->pending_work_wait, !event->pending_work, TASK_UNINTERRUPTIBLE); +} + static void _free_event(struct perf_event *event) { irq_work_sync(&event->pending_irq); + perf_pending_task_sync(event); unaccount_event(event); @@ -6509,6 +6531,8 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma) return -EINVAL; nr_pages = vma_size / PAGE_SIZE; + if (nr_pages > INT_MAX) + return -ENOMEM; mutex_lock(&event->mmap_mutex); ret = -EINVAL; @@ -6831,24 +6855,28 @@ static void perf_pending_task(struct callback_head *head) struct perf_event *event = container_of(head, struct perf_event, pending_task); int rctx; + /* + * All accesses to the event must belong to the same implicit RCU read-side + * critical section as the ->pending_work reset. See comment in + * perf_pending_task_sync(). + */ + preempt_disable_notrace(); /* * If we 'fail' here, that's OK, it means recursion is already disabled * and we won't recurse 'further'. 
*/ - preempt_disable_notrace(); rctx = perf_swevent_get_recursion_context(); if (event->pending_work) { event->pending_work = 0; perf_sigtrap(event); local_dec(&event->ctx->nr_pending); + rcuwait_wake_up(&event->pending_work_wait); } if (rctx >= 0) perf_swevent_put_recursion_context(rctx); preempt_enable_notrace(); - - put_event(event); } #ifdef CONFIG_GUEST_PERF_EVENTS @@ -9302,21 +9330,19 @@ static void perf_event_bpf_emit_ksymbols(struct bpf_prog *prog, bool unregister = type == PERF_BPF_EVENT_PROG_UNLOAD; int i; - if (prog->aux->func_cnt == 0) { - perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_BPF, - (u64)(unsigned long)prog->bpf_func, - prog->jited_len, unregister, - prog->aux->ksym.name); - } else { - for (i = 0; i < prog->aux->func_cnt; i++) { - struct bpf_prog *subprog = prog->aux->func[i]; - - perf_event_ksymbol( - PERF_RECORD_KSYMBOL_TYPE_BPF, - (u64)(unsigned long)subprog->bpf_func, - subprog->jited_len, unregister, - subprog->aux->ksym.name); - } + perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_BPF, + (u64)(unsigned long)prog->bpf_func, + prog->jited_len, unregister, + prog->aux->ksym.name); + + for (i = 1; i < prog->aux->func_cnt; i++) { + struct bpf_prog *subprog = prog->aux->func[i]; + + perf_event_ksymbol( + PERF_RECORD_KSYMBOL_TYPE_BPF, + (u64)(unsigned long)subprog->bpf_func, + subprog->jited_len, unregister, + subprog->aux->ksym.name); } } @@ -11962,6 +11988,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu, init_waitqueue_head(&event->waitq); init_irq_work(&event->pending_irq, perf_pending_irq); init_task_work(&event->pending_task, perf_pending_task); + rcuwait_init(&event->pending_work_wait); mutex_init(&event->mmap_mutex); raw_spin_lock_init(&event->addr_filters.lock); diff --git a/kernel/events/internal.h b/kernel/events/internal.h index 5150d5f84c03..386d21c7edfa 100644 --- a/kernel/events/internal.h +++ b/kernel/events/internal.h @@ -128,7 +128,7 @@ static inline unsigned long perf_data_size(struct perf_buffer *rb) static inline unsigned long perf_aux_size(struct perf_buffer *rb) { - return rb->aux_nr_pages << PAGE_SHIFT; + return (unsigned long)rb->aux_nr_pages << PAGE_SHIFT; } #define __DEFINE_OUTPUT_COPY_BODY(advance_buf, memcpy_func, ...) \ diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c index 4013408ce012..485cf0a66631 100644 --- a/kernel/events/ring_buffer.c +++ b/kernel/events/ring_buffer.c @@ -688,7 +688,9 @@ int rb_alloc_aux(struct perf_buffer *rb, struct perf_event *event, * max_order, to aid PMU drivers in double buffering. 
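/*
 * Editor's note: illustrative sketch of the rcuwait handshake the perf
 * hunks above rely on; not part of the patch. The completing side clears a
 * flag and calls rcuwait_wake_up(); the teardown side sleeps until it
 * observes the flag clear. struct example_work and the helpers are
 * hypothetical.
 */
#include <linux/rcuwait.h>
#include <linux/sched.h>

struct example_work {
        int pending;
        struct rcuwait done_wait;
};

static void example_work_init(struct example_work *w)
{
        w->pending = 1;
        rcuwait_init(&w->done_wait);
}

/* Called from the context that executes the deferred work. */
static void example_work_complete(struct example_work *w)
{
        WRITE_ONCE(w->pending, 0);
        rcuwait_wake_up(&w->done_wait);
}

/* Called from the context that tears the object down. */
static void example_work_sync(struct example_work *w)
{
        rcuwait_wait_event(&w->done_wait, !READ_ONCE(w->pending),
                           TASK_UNINTERRUPTIBLE);
}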
*/ if (!watermark) - watermark = nr_pages << (PAGE_SHIFT - 1); + watermark = min_t(unsigned long, + U32_MAX, + (unsigned long)nr_pages << (PAGE_SHIFT - 1)); /* * Use aux_watermark as the basis for chunking to diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c index aadc8891cc16..efeacf17c239 100644 --- a/kernel/irq/irqdomain.c +++ b/kernel/irq/irqdomain.c @@ -155,7 +155,6 @@ static struct irq_domain *__irq_domain_create(struct fwnode_handle *fwnode, switch (fwid->type) { case IRQCHIP_FWNODE_NAMED: case IRQCHIP_FWNODE_NAMED_ID: - domain->fwnode = fwnode; domain->name = kstrdup(fwid->name, GFP_KERNEL); if (!domain->name) { kfree(domain); @@ -164,7 +163,6 @@ static struct irq_domain *__irq_domain_create(struct fwnode_handle *fwnode, domain->flags |= IRQ_DOMAIN_NAME_ALLOCATED; break; default: - domain->fwnode = fwnode; domain->name = fwid->name; break; } @@ -184,7 +182,6 @@ static struct irq_domain *__irq_domain_create(struct fwnode_handle *fwnode, } domain->name = strreplace(name, '/', ':'); - domain->fwnode = fwnode; domain->flags |= IRQ_DOMAIN_NAME_ALLOCATED; } @@ -200,8 +197,8 @@ static struct irq_domain *__irq_domain_create(struct fwnode_handle *fwnode, domain->flags |= IRQ_DOMAIN_NAME_ALLOCATED; } - fwnode_handle_get(fwnode); - fwnode_dev_initialized(fwnode, true); + domain->fwnode = fwnode_handle_get(fwnode); + fwnode_dev_initialized(domain->fwnode, true); /* Fill structure */ INIT_RADIX_TREE(&domain->revmap_tree, GFP_KERNEL); diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index 71b0fc2d0aea..dd53298ef1a5 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -1337,7 +1337,7 @@ static int irq_thread(void *data) * synchronize_hardirq(). So neither IRQTF_RUNTHREAD nor the * oneshot mask bit can be set. */ - task_work_cancel(current, irq_thread_dtor); + task_work_cancel_func(current, irq_thread_dtor); return 0; } diff --git a/kernel/jump_label.c b/kernel/jump_label.c index 3218fa5688b9..1f05a19918f4 100644 --- a/kernel/jump_label.c +++ b/kernel/jump_label.c @@ -131,7 +131,7 @@ bool static_key_fast_inc_not_disabled(struct static_key *key) STATIC_KEY_CHECK_USE(key); /* * Negative key->enabled has a special meaning: it sends - * static_key_slow_inc() down the slow path, and it is non-zero + * static_key_slow_inc/dec() down the slow path, and it is non-zero * so it counts as "enabled" in jump_label_update(). Note that * atomic_inc_unless_negative() checks >= 0, so roll our own. */ @@ -150,7 +150,7 @@ bool static_key_slow_inc_cpuslocked(struct static_key *key) lockdep_assert_cpus_held(); /* - * Careful if we get concurrent static_key_slow_inc() calls; + * Careful if we get concurrent static_key_slow_inc/dec() calls; * later calls must wait for the first one to _finish_ the * jump_label_update() process. At the same time, however, * the jump_label_update() call below wants to see @@ -247,20 +247,32 @@ EXPORT_SYMBOL_GPL(static_key_disable); static bool static_key_slow_try_dec(struct static_key *key) { - int val; - - val = atomic_fetch_add_unless(&key->enabled, -1, 1); - if (val == 1) - return false; + int v; /* - * The negative count check is valid even when a negative - * key->enabled is in use by static_key_slow_inc(); a - * __static_key_slow_dec() before the first static_key_slow_inc() - * returns is unbalanced, because all other static_key_slow_inc() - * instances block while the update is in progress. + * Go into the slow path if key::enabled is less than or equal than + * one. 
One is valid to shut down the key, anything less than one + * is an imbalance, which is handled at the call site. + * + * That includes the special case of '-1' which is set in + * static_key_slow_inc_cpuslocked(), but that's harmless as it is + * fully serialized in the slow path below. By the time this task + * acquires the jump label lock the value is back to one and the + * retry under the lock must succeed. */ - WARN(val < 0, "jump label: negative count!\n"); + v = atomic_read(&key->enabled); + do { + /* + * Warn about the '-1' case though; since that means a + * decrement is concurrent with a first (0->1) increment. IOW + * people are trying to disable something that wasn't yet fully + * enabled. This suggests an ordering problem on the user side. + */ + WARN_ON_ONCE(v < 0); + if (v <= 1) + return false; + } while (!likely(atomic_try_cmpxchg(&key->enabled, &v, v - 1))); + return true; } @@ -271,10 +283,11 @@ static void __static_key_slow_dec_cpuslocked(struct static_key *key) if (static_key_slow_try_dec(key)) return; - jump_label_lock(); - if (atomic_dec_and_test(&key->enabled)) + guard(mutex)(&jump_label_mutex); + if (atomic_cmpxchg(&key->enabled, 1, 0)) jump_label_update(key); - jump_label_unlock(); + else + WARN_ON_ONCE(!static_key_slow_try_dec(key)); } static void __static_key_slow_dec(struct static_key *key) diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c index c6d17aee4209..33cac79e3994 100644 --- a/kernel/locking/rwsem.c +++ b/kernel/locking/rwsem.c @@ -1297,7 +1297,7 @@ static inline int __down_read_trylock(struct rw_semaphore *sem) /* * lock for writing */ -static inline int __down_write_common(struct rw_semaphore *sem, int state) +static __always_inline int __down_write_common(struct rw_semaphore *sem, int state) { int ret = 0; @@ -1310,12 +1310,12 @@ static inline int __down_write_common(struct rw_semaphore *sem, int state) return ret; } -static inline void __down_write(struct rw_semaphore *sem) +static __always_inline void __down_write(struct rw_semaphore *sem) { __down_write_common(sem, TASK_UNINTERRUPTIBLE); } -static inline int __down_write_killable(struct rw_semaphore *sem) +static __always_inline int __down_write_killable(struct rw_semaphore *sem) { return __down_write_common(sem, TASK_KILLABLE); } diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h index e1bf33018e6d..098e82bcc427 100644 --- a/kernel/rcu/tasks.h +++ b/kernel/rcu/tasks.h @@ -1757,6 +1757,16 @@ static void rcu_tasks_trace_pregp_step(struct list_head *hop) // allow safe access to the hop list. for_each_online_cpu(cpu) { rcu_read_lock(); + // Note that cpu_curr_snapshot() picks up the target + // CPU's current task while its runqueue is locked with + // an smp_mb__after_spinlock(). This ensures that either + // the grace-period kthread will see that task's read-side + // critical section or the task will see the updater's pre-GP + // accesses. The trailing smp_mb() in cpu_curr_snapshot() + // does not currently play a role other than simplify + // that function's ordering semantics. If these simplified + // ordering semantics continue to be redundant, that smp_mb() + // might be removed. 
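/*
 * Editor's note: illustrative sketch of the atomic_try_cmpxchg() retry loop
 * introduced in static_key_slow_try_dec() above; not part of the patch. The
 * counter is only decremented while the observed value is greater than one;
 * otherwise the caller must fall back to its slow path.
 * example_dec_unless_last() is hypothetical.
 */
#include <linux/atomic.h>
#include <linux/types.h>

static bool example_dec_unless_last(atomic_t *cnt)
{
        int v = atomic_read(cnt);

        do {
                if (v <= 1)
                        return false;   /* slow path: last (or imbalanced) reference */
        } while (!atomic_try_cmpxchg(cnt, &v, v - 1));

        return true;
}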
t = cpu_curr_snapshot(cpu); if (rcu_tasks_trace_pertask_prep(t, true)) trc_add_holdout(t, hop); diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 59ce0841eb1f..ebf21373f663 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -1326,27 +1326,24 @@ int tg_nop(struct task_group *tg, void *data) static void set_load_weight(struct task_struct *p, bool update_load) { int prio = p->static_prio - MAX_RT_PRIO; - struct load_weight *load = &p->se.load; + struct load_weight lw; - /* - * SCHED_IDLE tasks get minimal weight: - */ if (task_has_idle_policy(p)) { - load->weight = scale_load(WEIGHT_IDLEPRIO); - load->inv_weight = WMULT_IDLEPRIO; - return; + lw.weight = scale_load(WEIGHT_IDLEPRIO); + lw.inv_weight = WMULT_IDLEPRIO; + } else { + lw.weight = scale_load(sched_prio_to_weight[prio]); + lw.inv_weight = sched_prio_to_wmult[prio]; } /* * SCHED_OTHER tasks have to update their load when changing their * weight */ - if (update_load && p->sched_class == &fair_sched_class) { - reweight_task(p, prio); - } else { - load->weight = scale_load(sched_prio_to_weight[prio]); - load->inv_weight = sched_prio_to_wmult[prio]; - } + if (update_load && p->sched_class == &fair_sched_class) + reweight_task(p, &lw); + else + p->se.load = lw; } #ifdef CONFIG_UCLAMP_TASK @@ -4466,12 +4463,7 @@ int task_call_func(struct task_struct *p, task_call_f func, void *arg) * @cpu: The CPU on which to snapshot the task. * * Returns the task_struct pointer of the task "currently" running on - * the specified CPU. If the same task is running on that CPU throughout, - * the return value will be a pointer to that task's task_struct structure. - * If the CPU did any context switches even vaguely concurrently with the - * execution of this function, the return value will be a pointer to the - * task_struct structure of a randomly chosen task that was running on - * that CPU somewhere around the time that this function was executing. + * the specified CPU. * * If the specified CPU was offline, the return value is whatever it * is, perhaps a pointer to the task_struct structure of that CPU's idle @@ -4485,11 +4477,16 @@ int task_call_func(struct task_struct *p, task_call_f func, void *arg) */ struct task_struct *cpu_curr_snapshot(int cpu) { + struct rq *rq = cpu_rq(cpu); struct task_struct *t; + struct rq_flags rf; - smp_mb(); /* Pairing determined by caller's synchronization design. */ + rq_lock_irqsave(rq, &rf); + smp_mb__after_spinlock(); /* Pairing determined by caller's synchronization design. */ t = rcu_dereference(cpu_curr(cpu)); + rq_unlock_irqrestore(rq, &rf); smp_mb(); /* Pairing determined by caller's synchronization design. 
*/ + return t; } diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 24dda708b699..483c137b9d3d 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -3835,15 +3835,14 @@ static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, } } -void reweight_task(struct task_struct *p, int prio) +void reweight_task(struct task_struct *p, const struct load_weight *lw) { struct sched_entity *se = &p->se; struct cfs_rq *cfs_rq = cfs_rq_of(se); struct load_weight *load = &se->load; - unsigned long weight = scale_load(sched_prio_to_weight[prio]); - reweight_entity(cfs_rq, se, weight); - load->inv_weight = sched_prio_to_wmult[prio]; + reweight_entity(cfs_rq, se, lw->weight); + load->inv_weight = lw->inv_weight; } static inline int throttled_hierarchy(struct cfs_rq *cfs_rq); diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index ef20c61004eb..38aeedd8a6cc 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -2464,7 +2464,7 @@ extern void init_sched_dl_class(void); extern void init_sched_rt_class(void); extern void init_sched_fair_class(void); -extern void reweight_task(struct task_struct *p, int prio); +extern void reweight_task(struct task_struct *p, const struct load_weight *lw); extern void resched_curr(struct rq *rq); extern void resched_cpu(int cpu); diff --git a/kernel/signal.c b/kernel/signal.c index 1f9dd41c04be..60c737e423a1 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -2600,6 +2600,14 @@ static void do_freezer_trap(void) spin_unlock_irq(¤t->sighand->siglock); cgroup_enter_frozen(); schedule(); + + /* + * We could've been woken by task_work, run it to clear + * TIF_NOTIFY_SIGNAL. The caller will retry if necessary. + */ + clear_notify_signal(); + if (unlikely(task_work_pending(current))) + task_work_run(); } static int ptrace_signal(int signr, kernel_siginfo_t *info, enum pid_type type) diff --git a/kernel/task_work.c b/kernel/task_work.c index 95a7e1b7f1da..2134ac8057a9 100644 --- a/kernel/task_work.c +++ b/kernel/task_work.c @@ -120,9 +120,9 @@ static bool task_work_func_match(struct callback_head *cb, void *data) } /** - * task_work_cancel - cancel a pending work added by task_work_add() - * @task: the task which should execute the work - * @func: identifies the work to remove + * task_work_cancel_func - cancel a pending work matching a function added by task_work_add() + * @task: the task which should execute the func's work + * @func: identifies the func to match with a work to remove * * Find the last queued pending work with ->func == @func and remove * it from queue. @@ -131,11 +131,35 @@ static bool task_work_func_match(struct callback_head *cb, void *data) * The found work or NULL if not found. */ struct callback_head * -task_work_cancel(struct task_struct *task, task_work_func_t func) +task_work_cancel_func(struct task_struct *task, task_work_func_t func) { return task_work_cancel_match(task, task_work_func_match, func); } +static bool task_work_match(struct callback_head *cb, void *data) +{ + return cb == data; +} + +/** + * task_work_cancel - cancel a pending work added by task_work_add() + * @task: the task which should execute the work + * @cb: the callback to remove if queued + * + * Remove a callback from a task's queue if queued. + * + * RETURNS: + * True if the callback was queued and got cancelled, false otherwise. 
+ */ +bool task_work_cancel(struct task_struct *task, struct callback_head *cb) +{ + struct callback_head *ret; + + ret = task_work_cancel_match(task, task_work_match, cb); + + return ret == cb; +} + /** * task_work_run - execute the works added by task_work_add() * @@ -168,7 +192,7 @@ void task_work_run(void) if (!work) break; /* - * Synchronize with task_work_cancel(). It can not remove + * Synchronize with task_work_cancel_match(). It can not remove * the first entry == work, cmpxchg(task_works) must fail. * But it can remove another entry from the ->next list. */ diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c index 771d1e040303..b4843099a8da 100644 --- a/kernel/time/tick-broadcast.c +++ b/kernel/time/tick-broadcast.c @@ -1141,6 +1141,7 @@ void tick_broadcast_switch_to_oneshot(void) #ifdef CONFIG_HOTPLUG_CPU void hotplug_cpu__broadcast_tick_pull(int deadcpu) { + struct tick_device *td = this_cpu_ptr(&tick_cpu_device); struct clock_event_device *bc; unsigned long flags; @@ -1148,6 +1149,28 @@ void hotplug_cpu__broadcast_tick_pull(int deadcpu) bc = tick_broadcast_device.evtdev; if (bc && broadcast_needs_cpu(bc, deadcpu)) { + /* + * If the broadcast force bit of the current CPU is set, + * then the current CPU has not yet reprogrammed the local + * timer device to avoid a ping-pong race. See + * ___tick_broadcast_oneshot_control(). + * + * If the broadcast device is hrtimer based then + * programming the broadcast event below does not have any + * effect because the local clockevent device is not + * running and not programmed because the broadcast event + * is not earlier than the pending event of the local clock + * event device. As a consequence all CPUs waiting for a + * broadcast event are stuck forever. + * + * Detect this condition and reprogram the cpu local timer + * device to avoid the starvation. + */ + if (tick_check_broadcast_expired()) { + cpumask_clear_cpu(smp_processor_id(), tick_broadcast_force_mask); + tick_program_event(td->evtdev->next_event, 1); + } + /* This moves the broadcast assignment to this CPU: */ clockevents_program_event(bc, bc->next_event, 1); } diff --git a/kernel/time/timer_migration.c b/kernel/time/timer_migration.c index 84413114db5c..d91efe1dc3bf 100644 --- a/kernel/time/timer_migration.c +++ b/kernel/time/timer_migration.c @@ -507,7 +507,14 @@ static void walk_groups(up_f up, void *data, struct tmigr_cpu *tmc) * (get_next_timer_interrupt()) * @firstexp: Contains the first event expiry information when last * active CPU of hierarchy is on the way to idle to make - * sure CPU will be back in time. + * sure CPU will be back in time. It is updated in top + * level group only. Be aware, there could occur a new top + * level of the hierarchy between the 'top level call' in + * tmigr_update_events() and the check for the parent group + * in walk_groups(). Then @firstexp might contain a value + * != KTIME_MAX even if it was not the final top + * level. This is not a problem, as the worst outcome is a + * CPU which might wake up a little early. 
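/*
 * Editor's note: illustrative sketch of the reworked cancel API above; not
 * part of the patch. A callback_head is queued with task_work_add() and, if
 * it has not run yet, cancelled by pointer with the new bool-returning
 * task_work_cancel(). struct example_ctx and the helpers are hypothetical.
 */
#include <linux/task_work.h>
#include <linux/sched.h>

struct example_ctx {
        struct callback_head twork;
        int armed;
};

static void example_twork_fn(struct callback_head *cb)
{
        struct example_ctx *ctx = container_of(cb, struct example_ctx, twork);

        ctx->armed = 0;
}

static int example_arm(struct example_ctx *ctx)
{
        init_task_work(&ctx->twork, example_twork_fn);
        ctx->armed = 1;
        return task_work_add(current, &ctx->twork, TWA_RESUME);
}

static void example_disarm(struct example_ctx *ctx)
{
        /* True only if the work was still queued and therefore never ran. */
        if (task_work_cancel(current, &ctx->twork))
                ctx->armed = 0;
}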
* @evt: Pointer to tmigr_event which needs to be queued (of idle * child group) * @childmask: childmask of child group @@ -649,7 +656,7 @@ static bool tmigr_active_up(struct tmigr_group *group, } while (!atomic_try_cmpxchg(&group->migr_state, &curstate.state, newstate.state)); - if ((walk_done == false) && group->parent) + if (walk_done == false) data->childmask = group->childmask; /* @@ -1317,20 +1324,9 @@ static bool tmigr_inactive_up(struct tmigr_group *group, /* Event Handling */ tmigr_update_events(group, child, data); - if (group->parent && (walk_done == false)) + if (walk_done == false) data->childmask = group->childmask; - /* - * data->firstexp was set by tmigr_update_events() and contains the - * expiry of the first global event which needs to be handled. It - * differs from KTIME_MAX if: - * - group is the top level group and - * - group is idle (which means CPU was the last active CPU in the - * hierarchy) and - * - there is a pending event in the hierarchy - */ - WARN_ON_ONCE(data->firstexp != KTIME_MAX && group->parent); - trace_tmigr_group_set_cpu_inactive(group, newstate, childmask); return walk_done; @@ -1552,10 +1548,11 @@ static void tmigr_connect_child_parent(struct tmigr_group *child, data.childmask = child->childmask; /* - * There is only one new level per time. When connecting the - * child and the parent and set the child active when the parent - * is inactive, the parent needs to be the uppermost - * level. Otherwise there went something wrong! + * There is only one new level per time (which is protected by + * tmigr_mutex). When connecting the child and the parent and + * set the child active when the parent is inactive, the parent + * needs to be the uppermost level. Otherwise there went + * something wrong! */ WARN_ON(!tmigr_active_up(parent, child, &data) && parent->parent); } diff --git a/kernel/time/timer_migration.h b/kernel/time/timer_migration.h index 6c37d94a37d9..494f68cc13f4 100644 --- a/kernel/time/timer_migration.h +++ b/kernel/time/timer_migration.h @@ -22,7 +22,17 @@ struct tmigr_event { * struct tmigr_group - timer migration hierarchy group * @lock: Lock protecting the event information and group hierarchy * information during setup - * @parent: Pointer to the parent group + * @parent: Pointer to the parent group. Pointer is updated when a + * new hierarchy level is added because of a CPU coming + * online the first time. Once it is set, the pointer will + * not be removed or updated. When accessing parent pointer + * lock less to decide whether to abort a propagation or + * not, it is not a problem. The worst outcome is an + * unnecessary/early CPU wake up. But do not access parent + * pointer several times in the same 'action' (like + * activation, deactivation, check for remote expiry,...) + * without holding the lock as it is not ensured that value + * will not change. * @groupevt: Next event of the group which is only used when the * group is !active. The group event is then queued into * the parent timer queue. 
diff --git a/kernel/trace/pid_list.c b/kernel/trace/pid_list.c index 95106d02b32d..85de221c0b6f 100644 --- a/kernel/trace/pid_list.c +++ b/kernel/trace/pid_list.c @@ -354,7 +354,7 @@ static void pid_list_refill_irq(struct irq_work *iwork) while (upper_count-- > 0) { union upper_chunk *chunk; - chunk = kzalloc(sizeof(*chunk), GFP_KERNEL); + chunk = kzalloc(sizeof(*chunk), GFP_NOWAIT); if (!chunk) break; *upper_next = chunk; @@ -365,7 +365,7 @@ static void pid_list_refill_irq(struct irq_work *iwork) while (lower_count-- > 0) { union lower_chunk *chunk; - chunk = kzalloc(sizeof(*chunk), GFP_KERNEL); + chunk = kzalloc(sizeof(*chunk), GFP_NOWAIT); if (!chunk) break; *lower_next = chunk; diff --git a/kernel/watchdog_perf.c b/kernel/watchdog_perf.c index d577c4a8321e..59c1d86a73a2 100644 --- a/kernel/watchdog_perf.c +++ b/kernel/watchdog_perf.c @@ -75,11 +75,15 @@ static bool watchdog_check_timestamp(void) __this_cpu_write(last_timestamp, now); return true; } -#else -static inline bool watchdog_check_timestamp(void) + +static void watchdog_init_timestamp(void) { - return true; + __this_cpu_write(nmi_rearmed, 0); + __this_cpu_write(last_timestamp, ktime_get_mono_fast_ns()); } +#else +static inline bool watchdog_check_timestamp(void) { return true; } +static inline void watchdog_init_timestamp(void) { } #endif static struct perf_event_attr wd_hw_attr = { @@ -161,6 +165,7 @@ void watchdog_hardlockup_enable(unsigned int cpu) if (!atomic_fetch_inc(&watchdog_cpus)) pr_info("Enabled. Permanently consumes one hw-PMU counter.\n"); + watchdog_init_timestamp(); perf_event_enable(this_cpu_read(watchdog_ev)); } diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 3fbaecfc88c2..f98247ec99c2 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -2298,9 +2298,13 @@ static void __queue_work(int cpu, struct workqueue_struct *wq, * If @work was previously on a different pool, it might still be * running there, in which case the work needs to be queued on that * pool to guarantee non-reentrancy. + * + * For ordered workqueue, work items must be queued on the newest pwq + * for accurate order management. Guaranteed order also guarantees + * non-reentrancy. See the comments above unplug_oldest_pwq(). */ last_pool = get_work_pool(work); - if (last_pool && last_pool != pool) { + if (last_pool && last_pool != pool && !(wq->flags & __WQ_ORDERED)) { struct worker *worker; raw_spin_lock(&last_pool->lock); diff --git a/lib/decompress_bunzip2.c b/lib/decompress_bunzip2.c index 3518e7394eca..ca736166f100 100644 --- a/lib/decompress_bunzip2.c +++ b/lib/decompress_bunzip2.c @@ -232,7 +232,8 @@ static int INIT get_next_block(struct bunzip_data *bd) RUNB) */ symCount = symTotal+2; for (j = 0; j < groupCount; j++) { - unsigned char length[MAX_SYMBOLS], temp[MAX_HUFCODE_BITS+1]; + unsigned char length[MAX_SYMBOLS]; + unsigned short temp[MAX_HUFCODE_BITS+1]; int minLen, maxLen, pp; /* Read Huffman code lengths for each symbol. 
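/*
 * Editor's note: illustrative sketch; not part of the patch. The workqueue
 * hunk above only changes behaviour for ordered workqueues, i.e. the kind
 * created as below, where at most one work item executes at a time and
 * items run in queueing order. All example_* names are hypothetical.
 */
#include <linux/workqueue.h>
#include <linux/errno.h>

static struct workqueue_struct *example_ordered_wq;

static void example_fn(struct work_struct *work)
{
        /* Items on an ordered workqueue never run concurrently. */
}

static DECLARE_WORK(example_work, example_fn);

static int example_init(void)
{
        example_ordered_wq = alloc_ordered_workqueue("example_ordered", 0);
        if (!example_ordered_wq)
                return -ENOMEM;

        queue_work(example_ordered_wq, &example_work);
        return 0;
}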
They're stored in a way similar to mtf; record a starting diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c index 03b427e2707e..b7f2fa08d9c8 100644 --- a/lib/kobject_uevent.c +++ b/lib/kobject_uevent.c @@ -433,8 +433,23 @@ static void zap_modalias_env(struct kobj_uevent_env *env) len = strlen(env->envp[i]) + 1; if (i != env->envp_idx - 1) { + /* @env->envp[] contains pointers to @env->buf[] + * with @env->buflen chars, and we are removing + * variable MODALIAS here pointed by @env->envp[i] + * with length @len as shown below: + * + * 0 @env->buf[] @env->buflen + * --------------------------------------------- + * ^ ^ ^ ^ + * | |-> @len <-| target block | + * @env->envp[0] @env->envp[i] @env->envp[i + 1] + * + * so the "target block" indicated above is moved + * backward by @len, and its right size is + * @env->buflen - (@env->envp[i + 1] - @env->envp[0]). + */ memmove(env->envp[i], env->envp[i + 1], - env->buflen - len); + env->buflen - (env->envp[i + 1] - env->envp[0])); for (j = i; j < env->envp_idx - 1; j++) env->envp[j] = env->envp[j + 1] - len; diff --git a/lib/objagg.c b/lib/objagg.c index 1e248629ed64..1608895b009c 100644 --- a/lib/objagg.c +++ b/lib/objagg.c @@ -167,6 +167,9 @@ static int objagg_obj_parent_assign(struct objagg *objagg, { void *delta_priv; + if (WARN_ON(!objagg_obj_is_root(parent))) + return -EINVAL; + delta_priv = objagg->ops->delta_create(objagg->priv, parent->obj, objagg_obj->obj); if (IS_ERR(delta_priv)) @@ -903,20 +906,6 @@ static const struct objagg_opt_algo *objagg_opt_algos[] = { [OBJAGG_OPT_ALGO_SIMPLE_GREEDY] = &objagg_opt_simple_greedy, }; -static int objagg_hints_obj_cmp(struct rhashtable_compare_arg *arg, - const void *obj) -{ - struct rhashtable *ht = arg->ht; - struct objagg_hints *objagg_hints = - container_of(ht, struct objagg_hints, node_ht); - const struct objagg_ops *ops = objagg_hints->ops; - const char *ptr = obj; - - ptr += ht->p.key_offset; - return ops->hints_obj_cmp ? ops->hints_obj_cmp(ptr, arg->key) : - memcmp(ptr, arg->key, ht->p.key_len); -} - /** * objagg_hints_get - obtains hints instance * @objagg: objagg instance @@ -955,7 +944,6 @@ struct objagg_hints *objagg_hints_get(struct objagg *objagg, offsetof(struct objagg_hints_node, obj); objagg_hints->ht_params.head_offset = offsetof(struct objagg_hints_node, ht_node); - objagg_hints->ht_params.obj_cmpfn = objagg_hints_obj_cmp; err = rhashtable_init(&objagg_hints->node_ht, &objagg_hints->ht_params); if (err) diff --git a/lib/sbitmap.c b/lib/sbitmap.c index 1e453f825c05..5e2e93307f0d 100644 --- a/lib/sbitmap.c +++ b/lib/sbitmap.c @@ -60,12 +60,30 @@ static inline void update_alloc_hint_after_get(struct sbitmap *sb, /* * See if we have deferred clears that we can batch move */ -static inline bool sbitmap_deferred_clear(struct sbitmap_word *map) +static inline bool sbitmap_deferred_clear(struct sbitmap_word *map, + unsigned int depth, unsigned int alloc_hint, bool wrap) { - unsigned long mask; + unsigned long mask, word_mask; - if (!READ_ONCE(map->cleared)) - return false; + guard(spinlock_irqsave)(&map->swap_lock); + + if (!map->cleared) { + if (depth == 0) + return false; + + word_mask = (~0UL) >> (BITS_PER_LONG - depth); + /* + * The current behavior is to always retry after moving + * ->cleared to word, and we change it to retry in case + * of any free bits. To avoid an infinite loop, we need + * to take wrap & alloc_hint into account, otherwise a + * soft lockup may occur. 
+ */ + if (!wrap && alloc_hint) + word_mask &= ~((1UL << alloc_hint) - 1); + + return (READ_ONCE(map->word) & word_mask) != word_mask; + } /* * First get a stable cleared mask, setting the old mask to 0. @@ -85,6 +103,7 @@ int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift, bool alloc_hint) { unsigned int bits_per_word; + int i; if (shift < 0) shift = sbitmap_calculate_shift(depth); @@ -116,6 +135,9 @@ int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift, return -ENOMEM; } + for (i = 0; i < sb->map_nr; i++) + spin_lock_init(&sb->map[i].swap_lock); + return 0; } EXPORT_SYMBOL_GPL(sbitmap_init_node); @@ -126,7 +148,7 @@ void sbitmap_resize(struct sbitmap *sb, unsigned int depth) unsigned int i; for (i = 0; i < sb->map_nr; i++) - sbitmap_deferred_clear(&sb->map[i]); + sbitmap_deferred_clear(&sb->map[i], 0, 0, 0); sb->depth = depth; sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word); @@ -179,7 +201,7 @@ static int sbitmap_find_bit_in_word(struct sbitmap_word *map, alloc_hint, wrap); if (nr != -1) break; - if (!sbitmap_deferred_clear(map)) + if (!sbitmap_deferred_clear(map, depth, alloc_hint, wrap)) break; } while (1); @@ -496,7 +518,7 @@ unsigned long __sbitmap_queue_get_batch(struct sbitmap_queue *sbq, int nr_tags, unsigned int map_depth = __map_depth(sb, index); unsigned long val; - sbitmap_deferred_clear(map); + sbitmap_deferred_clear(map, 0, 0, 0); val = READ_ONCE(map->word); if (val == (1UL << (map_depth - 1)) - 1) goto next; diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 2120f7478e55..374a0d54b08d 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -88,9 +88,17 @@ unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma, bool smaps = tva_flags & TVA_SMAPS; bool in_pf = tva_flags & TVA_IN_PF; bool enforce_sysfs = tva_flags & TVA_ENFORCE_SYSFS; + unsigned long supported_orders; + /* Check the intersection of requested and supported orders. */ - orders &= vma_is_anonymous(vma) ? - THP_ORDERS_ALL_ANON : THP_ORDERS_ALL_FILE; + if (vma_is_anonymous(vma)) + supported_orders = THP_ORDERS_ALL_ANON; + else if (vma_is_dax(vma)) + supported_orders = THP_ORDERS_ALL_FILE_DAX; + else + supported_orders = THP_ORDERS_ALL_FILE_DEFAULT; + + orders &= supported_orders; if (!orders) return 0; @@ -857,7 +865,7 @@ static unsigned long __thp_get_unmapped_area(struct file *filp, loff_t off_align = round_up(off, size); unsigned long len_pad, ret, off_sub; - if (IS_ENABLED(CONFIG_32BIT) || in_compat_syscall()) + if (!IS_ENABLED(CONFIG_64BIT) || in_compat_syscall()) return 0; if (off_end <= off_align || (off_end - off_align) < size) diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 43e1af868cfd..92a2e8dcb796 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -2586,6 +2586,23 @@ struct folio *alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid, return alloc_migrate_hugetlb_folio(h, gfp_mask, preferred_nid, nmask); } +static nodemask_t *policy_mbind_nodemask(gfp_t gfp) +{ +#ifdef CONFIG_NUMA + struct mempolicy *mpol = get_task_policy(current); + + /* + * Only enforce MPOL_BIND policy which overlaps with cpuset policy + * (from policy_nodemask) specifically for hugetlb case + */ + if (mpol->mode == MPOL_BIND && + (apply_policy_zone(mpol, gfp_zone(gfp)) && + cpuset_nodemask_valid_mems_allowed(&mpol->nodes))) + return &mpol->nodes; +#endif + return NULL; +} + /* * Increase the hugetlb pool such that it can accommodate a reservation * of size 'delta'. 
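/*
 * Editor's note: illustrative sketch of the scope-based
 * guard(spinlock_irqsave)() locking used in the sbitmap hunk above; not
 * part of the patch. The lock is released automatically whenever the scope
 * is left, including on the early return. struct example_counter and
 * example_dec_if_set() are hypothetical.
 */
#include <linux/spinlock.h>
#include <linux/cleanup.h>

struct example_counter {
        spinlock_t lock;
        unsigned long value;
};

static bool example_dec_if_set(struct example_counter *c)
{
        guard(spinlock_irqsave)(&c->lock);

        if (!c->value)
                return false;   /* lock dropped automatically here ... */

        c->value--;
        return true;            /* ... and here */
}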
@@ -2599,6 +2616,8 @@ static int gather_surplus_pages(struct hstate *h, long delta) long i; long needed, allocated; bool alloc_ok = true; + int node; + nodemask_t *mbind_nodemask = policy_mbind_nodemask(htlb_alloc_mask(h)); lockdep_assert_held(&hugetlb_lock); needed = (h->resv_huge_pages + delta) - h->free_huge_pages; @@ -2613,8 +2632,15 @@ static int gather_surplus_pages(struct hstate *h, long delta) retry: spin_unlock_irq(&hugetlb_lock); for (i = 0; i < needed; i++) { - folio = alloc_surplus_hugetlb_folio(h, htlb_alloc_mask(h), - NUMA_NO_NODE, NULL); + folio = NULL; + for_each_node_mask(node, cpuset_current_mems_allowed) { + if (!mbind_nodemask || node_isset(node, *mbind_nodemask)) { + folio = alloc_surplus_hugetlb_folio(h, htlb_alloc_mask(h), + node, NULL); + if (folio) + break; + } + } if (!folio) { alloc_ok = false; break; @@ -4617,7 +4643,7 @@ void __init hugetlb_add_hstate(unsigned int order) BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE); BUG_ON(order < order_base_2(__NR_USED_SUBPAGE)); h = &hstates[hugetlb_max_hstate++]; - mutex_init(&h->resize_lock); + __mutex_init(&h->resize_lock, "resize mutex", &h->resize_key); h->order = order; h->mask = ~(huge_page_size(h) - 1); for (i = 0; i < MAX_NUMNODES; ++i) @@ -4840,23 +4866,6 @@ static int __init default_hugepagesz_setup(char *s) } __setup("default_hugepagesz=", default_hugepagesz_setup); -static nodemask_t *policy_mbind_nodemask(gfp_t gfp) -{ -#ifdef CONFIG_NUMA - struct mempolicy *mpol = get_task_policy(current); - - /* - * Only enforce MPOL_BIND policy which overlaps with cpuset policy - * (from policy_nodemask) specifically for hugetlb case - */ - if (mpol->mode == MPOL_BIND && - (apply_policy_zone(mpol, gfp_zone(gfp)) && - cpuset_nodemask_valid_mems_allowed(&mpol->nodes))) - return &mpol->nodes; -#endif - return NULL; -} - static unsigned int allowed_mems_nr(struct hstate *h) { int node; diff --git a/mm/memory.c b/mm/memory.c index d10e616d7389..f81760c93801 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -4681,7 +4681,7 @@ void set_pte_range(struct vm_fault *vmf, struct folio *folio, { struct vm_area_struct *vma = vmf->vma; bool write = vmf->flags & FAULT_FLAG_WRITE; - bool prefault = in_range(vmf->address, addr, nr * PAGE_SIZE); + bool prefault = !in_range(vmf->address, addr, nr * PAGE_SIZE); pte_t entry; flush_icache_pages(vma, page, nr); diff --git a/mm/mempolicy.c b/mm/mempolicy.c index aec756ae5637..a1bf9aa15c33 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -3293,8 +3293,9 @@ int mpol_parse_str(char *str, struct mempolicy **mpol) * @pol: pointer to mempolicy to be formatted * * Convert @pol into a string. If @buffer is too short, truncate the string. - * Recommend a @maxlen of at least 32 for the longest mode, "interleave", the - * longest flag, "relative", and to display at least a few node ids. + * Recommend a @maxlen of at least 51 for the longest mode, "weighted + * interleave", plus the longest flag flags, "relative|balancing", and to + * display at least a few node ids. 
*/ void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol) { @@ -3303,7 +3304,10 @@ void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol) unsigned short mode = MPOL_DEFAULT; unsigned short flags = 0; - if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) { + if (pol && + pol != &default_policy && + !(pol >= &preferred_node_policy[0] && + pol <= &preferred_node_policy[ARRAY_SIZE(preferred_node_policy) - 1])) { mode = pol->mode; flags = pol->flags; } @@ -3331,12 +3335,18 @@ void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol) p += snprintf(p, buffer + maxlen - p, "="); /* - * Currently, the only defined flags are mutually exclusive + * Static and relative are mutually exclusive. */ if (flags & MPOL_F_STATIC_NODES) p += snprintf(p, buffer + maxlen - p, "static"); else if (flags & MPOL_F_RELATIVE_NODES) p += snprintf(p, buffer + maxlen - p, "relative"); + + if (flags & MPOL_F_NUMA_BALANCING) { + if (!is_power_of_2(flags & MPOL_MODE_FLAGS)) + p += snprintf(p, buffer + maxlen - p, "|"); + p += snprintf(p, buffer + maxlen - p, "balancing"); + } } if (!nodes_empty(nodes)) diff --git a/mm/mmap_lock.c b/mm/mmap_lock.c index 1854850b4b89..368b840e7508 100644 --- a/mm/mmap_lock.c +++ b/mm/mmap_lock.c @@ -19,14 +19,7 @@ EXPORT_TRACEPOINT_SYMBOL(mmap_lock_released); #ifdef CONFIG_MEMCG -/* - * Our various events all share the same buffer (because we don't want or need - * to allocate a set of buffers *per event type*), so we need to protect against - * concurrent _reg() and _unreg() calls, and count how many _reg() calls have - * been made. - */ -static DEFINE_MUTEX(reg_lock); -static int reg_refcount; /* Protected by reg_lock. */ +static atomic_t reg_refcount; /* * Size of the buffer for memcg path names. Ignoring stack trace support, @@ -34,136 +27,22 @@ static int reg_refcount; /* Protected by reg_lock. */ */ #define MEMCG_PATH_BUF_SIZE MAX_FILTER_STR_VAL -/* - * How many contexts our trace events might be called in: normal, softirq, irq, - * and NMI. - */ -#define CONTEXT_COUNT 4 - -struct memcg_path { - local_lock_t lock; - char __rcu *buf; - local_t buf_idx; -}; -static DEFINE_PER_CPU(struct memcg_path, memcg_paths) = { - .lock = INIT_LOCAL_LOCK(lock), - .buf_idx = LOCAL_INIT(0), -}; - -static char **tmp_bufs; - -/* Called with reg_lock held. */ -static void free_memcg_path_bufs(void) -{ - struct memcg_path *memcg_path; - int cpu; - char **old = tmp_bufs; - - for_each_possible_cpu(cpu) { - memcg_path = per_cpu_ptr(&memcg_paths, cpu); - *(old++) = rcu_dereference_protected(memcg_path->buf, - lockdep_is_held(®_lock)); - rcu_assign_pointer(memcg_path->buf, NULL); - } - - /* Wait for inflight memcg_path_buf users to finish. */ - synchronize_rcu(); - - old = tmp_bufs; - for_each_possible_cpu(cpu) { - kfree(*(old++)); - } - - kfree(tmp_bufs); - tmp_bufs = NULL; -} - int trace_mmap_lock_reg(void) { - int cpu; - char *new; - - mutex_lock(®_lock); - - /* If the refcount is going 0->1, proceed with allocating buffers. */ - if (reg_refcount++) - goto out; - - tmp_bufs = kmalloc_array(num_possible_cpus(), sizeof(*tmp_bufs), - GFP_KERNEL); - if (tmp_bufs == NULL) - goto out_fail; - - for_each_possible_cpu(cpu) { - new = kmalloc(MEMCG_PATH_BUF_SIZE * CONTEXT_COUNT, GFP_KERNEL); - if (new == NULL) - goto out_fail_free; - rcu_assign_pointer(per_cpu_ptr(&memcg_paths, cpu)->buf, new); - /* Don't need to wait for inflights, they'd have gotten NULL. 
*/ - } - -out: - mutex_unlock(®_lock); + atomic_inc(®_refcount); return 0; - -out_fail_free: - free_memcg_path_bufs(); -out_fail: - /* Since we failed, undo the earlier ref increment. */ - --reg_refcount; - - mutex_unlock(®_lock); - return -ENOMEM; } void trace_mmap_lock_unreg(void) { - mutex_lock(®_lock); - - /* If the refcount is going 1->0, proceed with freeing buffers. */ - if (--reg_refcount) - goto out; - - free_memcg_path_bufs(); - -out: - mutex_unlock(®_lock); -} - -static inline char *get_memcg_path_buf(void) -{ - struct memcg_path *memcg_path = this_cpu_ptr(&memcg_paths); - char *buf; - int idx; - - rcu_read_lock(); - buf = rcu_dereference(memcg_path->buf); - if (buf == NULL) { - rcu_read_unlock(); - return NULL; - } - idx = local_add_return(MEMCG_PATH_BUF_SIZE, &memcg_path->buf_idx) - - MEMCG_PATH_BUF_SIZE; - return &buf[idx]; + atomic_dec(®_refcount); } -static inline void put_memcg_path_buf(void) -{ - local_sub(MEMCG_PATH_BUF_SIZE, &this_cpu_ptr(&memcg_paths)->buf_idx); - rcu_read_unlock(); -} - -#define TRACE_MMAP_LOCK_EVENT(type, mm, ...) \ - do { \ - const char *memcg_path; \ - local_lock(&memcg_paths.lock); \ - memcg_path = get_mm_memcg_path(mm); \ - trace_mmap_lock_##type(mm, \ - memcg_path != NULL ? memcg_path : "", \ - ##__VA_ARGS__); \ - if (likely(memcg_path != NULL)) \ - put_memcg_path_buf(); \ - local_unlock(&memcg_paths.lock); \ +#define TRACE_MMAP_LOCK_EVENT(type, mm, ...) \ + do { \ + char buf[MEMCG_PATH_BUF_SIZE]; \ + get_mm_memcg_path(mm, buf, sizeof(buf)); \ + trace_mmap_lock_##type(mm, buf, ##__VA_ARGS__); \ } while (0) #else /* !CONFIG_MEMCG */ @@ -185,37 +64,23 @@ void trace_mmap_lock_unreg(void) #ifdef CONFIG_TRACING #ifdef CONFIG_MEMCG /* - * Write the given mm_struct's memcg path to a percpu buffer, and return a - * pointer to it. If the path cannot be determined, or no buffer was available - * (because the trace event is being unregistered), NULL is returned. - * - * Note: buffers are allocated per-cpu to avoid locking, so preemption must be - * disabled by the caller before calling us, and re-enabled only after the - * caller is done with the pointer. - * - * The caller must call put_memcg_path_buf() once the buffer is no longer - * needed. This must be done while preemption is still disabled. + * Write the given mm_struct's memcg path to a buffer. If the path cannot be + * determined or the trace event is being unregistered, empty string is written. */ -static const char *get_mm_memcg_path(struct mm_struct *mm) +static void get_mm_memcg_path(struct mm_struct *mm, char *buf, size_t buflen) { - char *buf = NULL; - struct mem_cgroup *memcg = get_mem_cgroup_from_mm(mm); + struct mem_cgroup *memcg; + buf[0] = '\0'; + /* No need to get path if no trace event is registered. 
*/ + if (!atomic_read(®_refcount)) + return; + memcg = get_mem_cgroup_from_mm(mm); if (memcg == NULL) - goto out; - if (unlikely(memcg->css.cgroup == NULL)) - goto out_put; - - buf = get_memcg_path_buf(); - if (buf == NULL) - goto out_put; - - cgroup_path(memcg->css.cgroup, buf, MEMCG_PATH_BUF_SIZE); - -out_put: + return; + if (memcg->css.cgroup) + cgroup_path(memcg->css.cgroup, buf, buflen); css_put(&memcg->css); -out: - return buf; } #endif /* CONFIG_MEMCG */ diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 9ecf99190ea2..df2c442f1c47 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -2323,16 +2323,20 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp) static void drain_pages_zone(unsigned int cpu, struct zone *zone) { struct per_cpu_pages *pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); - int count = READ_ONCE(pcp->count); - - while (count) { - int to_drain = min(count, pcp->batch << CONFIG_PCP_BATCH_SCALE_MAX); - count -= to_drain; + int count; + do { spin_lock(&pcp->lock); - free_pcppages_bulk(zone, to_drain, pcp, 0); + count = pcp->count; + if (count) { + int to_drain = min(count, + pcp->batch << CONFIG_PCP_BATCH_SCALE_MAX); + + free_pcppages_bulk(zone, to_drain, pcp, 0); + count -= to_drain; + } spin_unlock(&pcp->lock); - } + } while (count); } /* @@ -5805,6 +5809,23 @@ unsigned long free_reserved_area(void *start, void *end, int poison, const char return pages; } +void free_reserved_page(struct page *page) +{ + if (mem_alloc_profiling_enabled()) { + union codetag_ref *ref = get_page_tag_ref(page); + + if (ref) { + set_codetag_empty(ref); + put_page_tag_ref(ref); + } + } + ClearPageReserved(page); + init_page_count(page); + __free_page(page); + adjust_managed_page_count(page, 1); +} +EXPORT_SYMBOL(free_reserved_page); + static int page_alloc_cpu_dead(unsigned int cpu) { struct zone *zone; diff --git a/mm/vmscan.c b/mm/vmscan.c index 2e34de9cd0d4..68ac33bea3a3 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -3900,6 +3900,32 @@ static bool try_to_inc_max_seq(struct lruvec *lruvec, unsigned long seq, * working set protection ******************************************************************************/ +static void set_initial_priority(struct pglist_data *pgdat, struct scan_control *sc) +{ + int priority; + unsigned long reclaimable; + + if (sc->priority != DEF_PRIORITY || sc->nr_to_reclaim < MIN_LRU_BATCH) + return; + /* + * Determine the initial priority based on + * (total >> priority) * reclaimed_to_scanned_ratio = nr_to_reclaim, + * where reclaimed_to_scanned_ratio = inactive / total. + */ + reclaimable = node_page_state(pgdat, NR_INACTIVE_FILE); + if (can_reclaim_anon_pages(NULL, pgdat->node_id, sc)) + reclaimable += node_page_state(pgdat, NR_INACTIVE_ANON); + + /* round down reclaimable and round up sc->nr_to_reclaim */ + priority = fls_long(reclaimable) - 1 - fls_long(sc->nr_to_reclaim - 1); + + /* + * The estimation is based on LRU pages only, so cap it to prevent + * overshoots of shrinker objects by large margins. 
+ */ + sc->priority = clamp(priority, DEF_PRIORITY / 2, DEF_PRIORITY); +} + static bool lruvec_is_sizable(struct lruvec *lruvec, struct scan_control *sc) { int gen, type, zone; @@ -3933,19 +3959,17 @@ static bool lruvec_is_reclaimable(struct lruvec *lruvec, struct scan_control *sc struct mem_cgroup *memcg = lruvec_memcg(lruvec); DEFINE_MIN_SEQ(lruvec); - /* see the comment on lru_gen_folio */ - gen = lru_gen_from_seq(min_seq[LRU_GEN_FILE]); - birth = READ_ONCE(lruvec->lrugen.timestamps[gen]); - - if (time_is_after_jiffies(birth + min_ttl)) + if (mem_cgroup_below_min(NULL, memcg)) return false; if (!lruvec_is_sizable(lruvec, sc)) return false; - mem_cgroup_calculate_protection(NULL, memcg); + /* see the comment on lru_gen_folio */ + gen = lru_gen_from_seq(min_seq[LRU_GEN_FILE]); + birth = READ_ONCE(lruvec->lrugen.timestamps[gen]); - return !mem_cgroup_below_min(NULL, memcg); + return time_is_before_jiffies(birth + min_ttl); } /* to protect the working set of the last N jiffies */ @@ -3955,23 +3979,20 @@ static void lru_gen_age_node(struct pglist_data *pgdat, struct scan_control *sc) { struct mem_cgroup *memcg; unsigned long min_ttl = READ_ONCE(lru_gen_min_ttl); + bool reclaimable = !min_ttl; VM_WARN_ON_ONCE(!current_is_kswapd()); - /* check the order to exclude compaction-induced reclaim */ - if (!min_ttl || sc->order || sc->priority == DEF_PRIORITY) - return; + set_initial_priority(pgdat, sc); memcg = mem_cgroup_iter(NULL, NULL, NULL); do { struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat); - if (lruvec_is_reclaimable(lruvec, sc, min_ttl)) { - mem_cgroup_iter_break(NULL, memcg); - return; - } + mem_cgroup_calculate_protection(NULL, memcg); - cond_resched(); + if (!reclaimable) + reclaimable = lruvec_is_reclaimable(lruvec, sc, min_ttl); } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL))); /* @@ -3979,7 +4000,7 @@ static void lru_gen_age_node(struct pglist_data *pgdat, struct scan_control *sc) * younger than min_ttl. However, another possibility is all memcgs are * either too small or below min. */ - if (mutex_trylock(&oom_lock)) { + if (!reclaimable && mutex_trylock(&oom_lock)) { struct oom_control oc = { .gfp_mask = sc->gfp_mask, }; @@ -4582,7 +4603,6 @@ static int evict_folios(struct lruvec *lruvec, struct scan_control *sc, int swap /* retry folios that may have missed folio_rotate_reclaimable() */ list_move(&folio->lru, &clean); - sc->nr_scanned -= folio_nr_pages(folio); } spin_lock_irq(&lruvec->lru_lock); @@ -4772,8 +4792,7 @@ static int shrink_one(struct lruvec *lruvec, struct scan_control *sc) struct mem_cgroup *memcg = lruvec_memcg(lruvec); struct pglist_data *pgdat = lruvec_pgdat(lruvec); - mem_cgroup_calculate_protection(NULL, memcg); - + /* lru_gen_age_node() called mem_cgroup_calculate_protection() */ if (mem_cgroup_below_min(NULL, memcg)) return MEMCG_LRU_YOUNG; @@ -4897,28 +4916,6 @@ static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc blk_finish_plug(&plug); } -static void set_initial_priority(struct pglist_data *pgdat, struct scan_control *sc) -{ - int priority; - unsigned long reclaimable; - - if (sc->priority != DEF_PRIORITY || sc->nr_to_reclaim < MIN_LRU_BATCH) - return; - /* - * Determine the initial priority based on - * (total >> priority) * reclaimed_to_scanned_ratio = nr_to_reclaim, - * where reclaimed_to_scanned_ratio = inactive / total. 
- */ - reclaimable = node_page_state(pgdat, NR_INACTIVE_FILE); - if (can_reclaim_anon_pages(NULL, pgdat->node_id, sc)) - reclaimable += node_page_state(pgdat, NR_INACTIVE_ANON); - - /* round down reclaimable and round up sc->nr_to_reclaim */ - priority = fls_long(reclaimable) - 1 - fls_long(sc->nr_to_reclaim - 1); - - sc->priority = clamp(priority, 0, DEF_PRIORITY); -} - static void lru_gen_shrink_node(struct pglist_data *pgdat, struct scan_control *sc) { struct blk_plug plug; @@ -6702,6 +6699,7 @@ static bool kswapd_shrink_node(pg_data_t *pgdat, { struct zone *zone; int z; + unsigned long nr_reclaimed = sc->nr_reclaimed; /* Reclaim a number of pages proportional to the number of zones */ sc->nr_to_reclaim = 0; @@ -6729,7 +6727,8 @@ static bool kswapd_shrink_node(pg_data_t *pgdat, if (sc->order && sc->nr_reclaimed >= compact_gap(sc->order)) sc->order = 0; - return sc->nr_scanned >= sc->nr_to_reclaim; + /* account for progress from mm_account_reclaimed_pages() */ + return max(sc->nr_scanned, sc->nr_reclaimed - nr_reclaimed) >= sc->nr_to_reclaim; } /* Page allocator PCP high watermark is lowered if reclaim is active. */ diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index c644b30977bd..7ae118a6d947 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -718,8 +718,8 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg) switch (cmd) { case HCISETAUTH: - err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_AUTH_ENABLE, - 1, &dr.dev_opt, HCI_CMD_TIMEOUT); + err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_AUTH_ENABLE, + 1, &dr.dev_opt, HCI_CMD_TIMEOUT); break; case HCISETENCRYPT: @@ -730,23 +730,21 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg) if (!test_bit(HCI_AUTH, &hdev->flags)) { /* Auth must be enabled first */ - err = __hci_cmd_sync_status(hdev, - HCI_OP_WRITE_AUTH_ENABLE, - 1, &dr.dev_opt, - HCI_CMD_TIMEOUT); + err = hci_cmd_sync_status(hdev, + HCI_OP_WRITE_AUTH_ENABLE, + 1, &dr.dev_opt, + HCI_CMD_TIMEOUT); if (err) break; } - err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_ENCRYPT_MODE, - 1, &dr.dev_opt, - HCI_CMD_TIMEOUT); + err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_ENCRYPT_MODE, + 1, &dr.dev_opt, HCI_CMD_TIMEOUT); break; case HCISETSCAN: - err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SCAN_ENABLE, - 1, &dr.dev_opt, - HCI_CMD_TIMEOUT); + err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_SCAN_ENABLE, + 1, &dr.dev_opt, HCI_CMD_TIMEOUT); /* Ensure that the connectable and discoverable states * get correctly modified as this was a non-mgmt change. 
@@ -758,9 +756,8 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg) case HCISETLINKPOL: policy = cpu_to_le16(dr.dev_opt); - err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, - 2, &policy, - HCI_CMD_TIMEOUT); + err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, + 2, &policy, HCI_CMD_TIMEOUT); break; case HCISETLINKMODE: diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index 93f7ac905cec..4611a67d7dcc 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -6988,6 +6988,8 @@ static void hci_le_big_info_adv_report_evt(struct hci_dev *hdev, void *data, if (!pa_sync) goto unlock; + pa_sync->iso_qos.bcast.encryption = ev->encryption; + /* Notify iso layer */ hci_connect_cfm(pa_sync, 0); diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c index eea34e6a236f..bb704088559f 100644 --- a/net/bluetooth/hci_sync.c +++ b/net/bluetooth/hci_sync.c @@ -371,8 +371,6 @@ static void le_scan_disable(struct work_struct *work) goto _return; } - hdev->discovery.scan_start = 0; - /* If we were running LE only scan, change discovery state. If * we were running both LE and BR/EDR inquiry simultaneously, * and BR/EDR inquiry is already finished, stop discovery, diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c index d97064d460dc..e19b583ff2c6 100644 --- a/net/bridge/br_forward.c +++ b/net/bridge/br_forward.c @@ -25,8 +25,8 @@ static inline int should_deliver(const struct net_bridge_port *p, vg = nbp_vlan_group_rcu(p); return ((p->flags & BR_HAIRPIN_MODE) || skb->dev != p->dev) && - p->state == BR_STATE_FORWARDING && br_allowed_egress(vg, skb) && - nbp_switchdev_allowed_egress(p, skb) && + (br_mst_is_enabled(p->br) || p->state == BR_STATE_FORWARDING) && + br_allowed_egress(vg, skb) && nbp_switchdev_allowed_egress(p, skb) && !br_skb_isolated(p, skb); } diff --git a/net/core/filter.c b/net/core/filter.c index 9933851c685e..110692c1dd95 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -3544,13 +3544,20 @@ static int bpf_skb_net_grow(struct sk_buff *skb, u32 off, u32 len_diff, if (skb_is_gso(skb)) { struct skb_shared_info *shinfo = skb_shinfo(skb); - /* Due to header grow, MSS needs to be downgraded. */ - if (!(flags & BPF_F_ADJ_ROOM_FIXED_GSO)) - skb_decrease_gso_size(shinfo, len_diff); - /* Header must be checked, and gso_segs recomputed. */ shinfo->gso_type |= gso_type; shinfo->gso_segs = 0; + + /* Due to header growth, MSS needs to be downgraded. + * There is a BUG_ON() when segmenting the frag_list with + * head_frag true, so linearize the skb after downgrading + * the MSS. 
+ */ + if (!(flags & BPF_F_ADJ_ROOM_FIXED_GSO)) { + skb_decrease_gso_size(shinfo, len_diff); + if (shinfo->frag_list) + return skb_linearize(skb); + } } return 0; diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c index f82e9a7d3b37..7b54f44f5372 100644 --- a/net/core/flow_dissector.c +++ b/net/core/flow_dissector.c @@ -1101,7 +1101,7 @@ bool __skb_flow_dissect(const struct net *net, } } - WARN_ON_ONCE(!net); + DEBUG_NET_WARN_ON_ONCE(!net); if (net) { enum netns_bpf_attach_type type = NETNS_BPF_FLOW_DISSECTOR; struct bpf_prog_array *run_array; diff --git a/net/core/page_pool.c b/net/core/page_pool.c index f4444b4e39e6..3772eb63dcad 100644 --- a/net/core/page_pool.c +++ b/net/core/page_pool.c @@ -445,7 +445,7 @@ static bool page_pool_dma_map(struct page_pool *pool, struct page *page) return true; unmap_failed: - WARN_ON_ONCE("unexpected DMA address, please report to netdev@"); + WARN_ONCE(1, "unexpected DMA address, please report to netdev@"); dma_unmap_page_attrs(pool->p.dev, dma, PAGE_SIZE << pool->p.order, pool->p.dma_dir, DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING); diff --git a/net/core/xdp.c b/net/core/xdp.c index 022c12059cf2..bcc5551c6424 100644 --- a/net/core/xdp.c +++ b/net/core/xdp.c @@ -127,10 +127,8 @@ void xdp_unreg_mem_model(struct xdp_mem_info *mem) return; if (type == MEM_TYPE_PAGE_POOL) { - rcu_read_lock(); - xa = rhashtable_lookup(mem_id_ht, &id, mem_id_rht_params); + xa = rhashtable_lookup_fast(mem_id_ht, &id, mem_id_rht_params); page_pool_destroy(xa->page_pool); - rcu_read_unlock(); } } EXPORT_SYMBOL_GPL(xdp_unreg_mem_model); diff --git a/net/ethtool/pse-pd.c b/net/ethtool/pse-pd.c index 2c981d443f27..776ac96cdadc 100644 --- a/net/ethtool/pse-pd.c +++ b/net/ethtool/pse-pd.c @@ -178,12 +178,14 @@ ethnl_set_pse(struct ethnl_req_info *req_info, struct genl_info *info) phydev = dev->phydev; /* These values are already validated by the ethnl_pse_set_policy */ - if (pse_has_podl(phydev->psec)) + if (tb[ETHTOOL_A_PODL_PSE_ADMIN_CONTROL]) config.podl_admin_control = nla_get_u32(tb[ETHTOOL_A_PODL_PSE_ADMIN_CONTROL]); - if (pse_has_c33(phydev->psec)) + if (tb[ETHTOOL_A_C33_PSE_ADMIN_CONTROL]) config.c33_admin_control = nla_get_u32(tb[ETHTOOL_A_C33_PSE_ADMIN_CONTROL]); - /* Return errno directly - PSE has no notification */ + /* Return errno directly - PSE has no notification + * pse_ethtool_set_config() will do nothing if the config is null + */ return pse_ethtool_set_config(phydev->psec, info->extack, &config); } diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c index 3968d3f98e08..619a4df7be1e 100644 --- a/net/ipv4/esp4.c +++ b/net/ipv4/esp4.c @@ -239,8 +239,7 @@ static int esp_output_tail_tcp(struct xfrm_state *x, struct sk_buff *skb) #else static int esp_output_tail_tcp(struct xfrm_state *x, struct sk_buff *skb) { - kfree_skb(skb); - + WARN_ON(1); return -EOPNOTSUPP; } #endif diff --git a/net/ipv4/esp4_offload.c b/net/ipv4/esp4_offload.c index b3271957ad9a..3f28ecbdcaef 100644 --- a/net/ipv4/esp4_offload.c +++ b/net/ipv4/esp4_offload.c @@ -56,6 +56,13 @@ static struct sk_buff *esp4_gro_receive(struct list_head *head, x = xfrm_state_lookup(dev_net(skb->dev), skb->mark, (xfrm_address_t *)&ip_hdr(skb)->daddr, spi, IPPROTO_ESP, AF_INET); + + if (unlikely(x && x->dir && x->dir != XFRM_SA_DIR_IN)) { + /* non-offload path will record the error and audit log */ + xfrm_state_put(x); + x = NULL; + } + if (!x) goto out_reset; diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c index f669da98d11d..8956026bc0a2 100644 --- a/net/ipv4/fib_semantics.c +++ 
b/net/ipv4/fib_semantics.c @@ -2270,6 +2270,15 @@ void fib_select_path(struct net *net, struct fib_result *res, fib_select_default(fl4, res); check_saddr: - if (!fl4->saddr) - fl4->saddr = fib_result_prefsrc(net, res); + if (!fl4->saddr) { + struct net_device *l3mdev; + + l3mdev = dev_get_by_index_rcu(net, fl4->flowi4_l3mdev); + + if (!l3mdev || + l3mdev_master_dev_rcu(FIB_RES_DEV(*res)) == l3mdev) + fl4->saddr = fib_result_prefsrc(net, res); + else + fl4->saddr = inet_select_addr(l3mdev, 0, RT_SCOPE_LINK); + } } diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index f474106464d2..8f30e3f00b7f 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c @@ -1629,6 +1629,7 @@ int fib_table_lookup(struct fib_table *tb, const struct flowi4 *flp, res->nhc = nhc; res->type = fa->fa_type; res->scope = fi->fib_scope; + res->dscp = fa->fa_dscp; res->fi = fi; res->table = tb; res->fa_head = &n->leaf; diff --git a/net/ipv4/nexthop.c b/net/ipv4/nexthop.c index 535856b0f0ed..6b9787ee8601 100644 --- a/net/ipv4/nexthop.c +++ b/net/ipv4/nexthop.c @@ -888,9 +888,10 @@ static int nla_put_nh_group(struct sk_buff *skb, struct nexthop *nh, p = nla_data(nla); for (i = 0; i < nhg->num_nh; ++i) { - p->id = nhg->nh_entries[i].nh->id; - p->weight = nhg->nh_entries[i].weight - 1; - p += 1; + *p++ = (struct nexthop_grp) { + .id = nhg->nh_entries[i].nh->id, + .weight = nhg->nh_entries[i].weight - 1, + }; } if (nhg->resilient && nla_put_nh_group_res(skb, nhg)) diff --git a/net/ipv4/route.c b/net/ipv4/route.c index b3073d1c8f8f..990912fa18e8 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -1263,7 +1263,7 @@ void ip_rt_get_source(u8 *addr, struct sk_buff *skb, struct rtable *rt) struct flowi4 fl4 = { .daddr = iph->daddr, .saddr = iph->saddr, - .flowi4_tos = RT_TOS(iph->tos), + .flowi4_tos = iph->tos & IPTOS_RT_MASK, .flowi4_oif = rt->dst.dev->ifindex, .flowi4_iif = skb->dev->ifindex, .flowi4_mark = skb->mark, @@ -2868,9 +2868,9 @@ EXPORT_SYMBOL_GPL(ip_route_output_flow); /* called with rcu_read_lock held */ static int rt_fill_info(struct net *net, __be32 dst, __be32 src, - struct rtable *rt, u32 table_id, struct flowi4 *fl4, - struct sk_buff *skb, u32 portid, u32 seq, - unsigned int flags) + struct rtable *rt, u32 table_id, dscp_t dscp, + struct flowi4 *fl4, struct sk_buff *skb, u32 portid, + u32 seq, unsigned int flags) { struct rtmsg *r; struct nlmsghdr *nlh; @@ -2886,7 +2886,7 @@ static int rt_fill_info(struct net *net, __be32 dst, __be32 src, r->rtm_family = AF_INET; r->rtm_dst_len = 32; r->rtm_src_len = 0; - r->rtm_tos = fl4 ? fl4->flowi4_tos : 0; + r->rtm_tos = inet_dscp_to_dsfield(dscp); r->rtm_table = table_id < 256 ? 
table_id : RT_TABLE_COMPAT; if (nla_put_u32(skb, RTA_TABLE, table_id)) goto nla_put_failure; @@ -3036,7 +3036,7 @@ static int fnhe_dump_bucket(struct net *net, struct sk_buff *skb, goto next; err = rt_fill_info(net, fnhe->fnhe_daddr, 0, rt, - table_id, NULL, skb, + table_id, 0, NULL, skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, flags); if (err) @@ -3332,7 +3332,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, fri.tb_id = table_id; fri.dst = res.prefix; fri.dst_len = res.prefixlen; - fri.dscp = inet_dsfield_to_dscp(fl4.flowi4_tos); + fri.dscp = res.dscp; fri.type = rt->rt_type; fri.offload = 0; fri.trap = 0; @@ -3359,8 +3359,8 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, err = fib_dump_info(skb, NETLINK_CB(in_skb).portid, nlh->nlmsg_seq, RTM_NEWROUTE, &fri, 0); } else { - err = rt_fill_info(net, dst, src, rt, table_id, &fl4, skb, - NETLINK_CB(in_skb).portid, + err = rt_fill_info(net, dst, src, rt, table_id, res.dscp, &fl4, + skb, NETLINK_CB(in_skb).portid, nlh->nlmsg_seq, 0); } if (err < 0) diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index e6790ea74877..ec6911034138 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -598,7 +598,7 @@ __poll_t tcp_poll(struct file *file, struct socket *sock, poll_table *wait) */ mask |= EPOLLOUT | EPOLLWRNORM; } - /* This barrier is coupled with smp_wmb() in tcp_reset() */ + /* This barrier is coupled with smp_wmb() in tcp_done_with_error() */ smp_rmb(); if (READ_ONCE(sk->sk_err) || !skb_queue_empty_lockless(&sk->sk_error_queue)) @@ -4583,14 +4583,10 @@ int tcp_abort(struct sock *sk, int err) bh_lock_sock(sk); if (!sock_flag(sk, SOCK_DEAD)) { - WRITE_ONCE(sk->sk_err, err); - /* This barrier is coupled with smp_rmb() in tcp_poll() */ - smp_wmb(); - sk_error_report(sk); if (tcp_need_reset(sk->sk_state)) tcp_send_active_reset(sk, GFP_ATOMIC, SK_RST_REASON_NOT_SPECIFIED); - tcp_done(sk); + tcp_done_with_error(sk, err); } bh_unlock_sock(sk); diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 38da23f991d6..570e87ad9a56 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -4469,9 +4469,26 @@ static enum skb_drop_reason tcp_sequence(const struct tcp_sock *tp, return SKB_NOT_DROPPED_YET; } + +void tcp_done_with_error(struct sock *sk, int err) +{ + /* This barrier is coupled with smp_rmb() in tcp_poll() */ + WRITE_ONCE(sk->sk_err, err); + smp_wmb(); + + tcp_write_queue_purge(sk); + tcp_done(sk); + + if (!sock_flag(sk, SOCK_DEAD)) + sk_error_report(sk); +} +EXPORT_SYMBOL(tcp_done_with_error); + /* When we get a reset we do this. */ void tcp_reset(struct sock *sk, struct sk_buff *skb) { + int err; + trace_tcp_receive_reset(sk); /* mptcp can't tell us to ignore reset pkts, @@ -4483,24 +4500,17 @@ void tcp_reset(struct sock *sk, struct sk_buff *skb) /* We want the right error as BSD sees it (and indeed as we do). 
*/ switch (sk->sk_state) { case TCP_SYN_SENT: - WRITE_ONCE(sk->sk_err, ECONNREFUSED); + err = ECONNREFUSED; break; case TCP_CLOSE_WAIT: - WRITE_ONCE(sk->sk_err, EPIPE); + err = EPIPE; break; case TCP_CLOSE: return; default: - WRITE_ONCE(sk->sk_err, ECONNRESET); + err = ECONNRESET; } - /* This barrier is coupled with smp_rmb() in tcp_poll() */ - smp_wmb(); - - tcp_write_queue_purge(sk); - tcp_done(sk); - - if (!sock_flag(sk, SOCK_DEAD)) - sk_error_report(sk); + tcp_done_with_error(sk, err); } /* diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index b710958393e6..a541659b6562 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -611,15 +611,10 @@ int tcp_v4_err(struct sk_buff *skb, u32 info) ip_icmp_error(sk, skb, err, th->dest, info, (u8 *)th); - if (!sock_owned_by_user(sk)) { - WRITE_ONCE(sk->sk_err, err); - - sk_error_report(sk); - - tcp_done(sk); - } else { + if (!sock_owned_by_user(sk)) + tcp_done_with_error(sk, err); + else WRITE_ONCE(sk->sk_err_soft, err); - } goto out; } diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index 538c06f95918..0fbebf6266e9 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c @@ -515,9 +515,6 @@ struct sock *tcp_create_openreq_child(const struct sock *sk, const struct tcp_sock *oldtp; struct tcp_sock *newtp; u32 seq; -#ifdef CONFIG_TCP_AO - struct tcp_ao_key *ao_key; -#endif if (!newsk) return NULL; @@ -608,10 +605,14 @@ struct sock *tcp_create_openreq_child(const struct sock *sk, #endif #ifdef CONFIG_TCP_AO newtp->ao_info = NULL; - ao_key = treq->af_specific->ao_lookup(sk, req, - tcp_rsk(req)->ao_keyid, -1); - if (ao_key) - newtp->tcp_header_len += tcp_ao_len_aligned(ao_key); + + if (tcp_rsk_used_ao(req)) { + struct tcp_ao_key *ao_key; + + ao_key = treq->af_specific->ao_lookup(sk, req, tcp_rsk(req)->ao_keyid, -1); + if (ao_key) + newtp->tcp_header_len += tcp_ao_len_aligned(ao_key); + } #endif if (skb->len >= TCP_MSS_DEFAULT + newtp->tcp_header_len) newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len; diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index 892c86657fbc..4d40615dc8fc 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c @@ -74,11 +74,7 @@ u32 tcp_clamp_probe0_to_user_timeout(const struct sock *sk, u32 when) static void tcp_write_err(struct sock *sk) { - WRITE_ONCE(sk->sk_err, READ_ONCE(sk->sk_err_soft) ? : ETIMEDOUT); - sk_error_report(sk); - - tcp_write_queue_purge(sk); - tcp_done(sk); + tcp_done_with_error(sk, READ_ONCE(sk->sk_err_soft) ? 
: ETIMEDOUT); __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONTIMEOUT); } diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 5c424a0e7232..4f2c5cc31015 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -1873,7 +1873,8 @@ int ipv6_dev_get_saddr(struct net *net, const struct net_device *dst_dev, master, &dst, scores, hiscore_idx); - if (scores[hiscore_idx].ifa) + if (scores[hiscore_idx].ifa && + scores[hiscore_idx].scopedist >= 0) goto out; } diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c index 34a9a5b9ed00..3920e8aa1031 100644 --- a/net/ipv6/esp6.c +++ b/net/ipv6/esp6.c @@ -256,8 +256,7 @@ static int esp_output_tail_tcp(struct xfrm_state *x, struct sk_buff *skb) #else static int esp_output_tail_tcp(struct xfrm_state *x, struct sk_buff *skb) { - kfree_skb(skb); - + WARN_ON(1); return -EOPNOTSUPP; } #endif diff --git a/net/ipv6/esp6_offload.c b/net/ipv6/esp6_offload.c index 527b7caddbc6..919ebfabbe4e 100644 --- a/net/ipv6/esp6_offload.c +++ b/net/ipv6/esp6_offload.c @@ -83,6 +83,13 @@ static struct sk_buff *esp6_gro_receive(struct list_head *head, x = xfrm_state_lookup(dev_net(skb->dev), skb->mark, (xfrm_address_t *)&ipv6_hdr(skb)->daddr, spi, IPPROTO_ESP, AF_INET6); + + if (unlikely(x && x->dir && x->dir != XFRM_SA_DIR_IN)) { + /* non-offload path will record the error and audit log */ + xfrm_state_put(x); + x = NULL; + } + if (!x) goto out_reset; diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 27d8725445e3..784424ac4147 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -1124,6 +1124,7 @@ static int ip6_dst_lookup_tail(struct net *net, const struct sock *sk, from = rt ? rcu_dereference(rt->from) : NULL; err = ip6_route_get_saddr(net, from, &fl6->daddr, sk ? READ_ONCE(inet6_sk(sk)->srcprefs) : 0, + fl6->flowi6_l3mdev, &fl6->saddr); rcu_read_unlock(); diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 8d72ca0b086d..c9a9506b714d 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -5689,7 +5689,7 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb, goto nla_put_failure; } else if (dest) { struct in6_addr saddr_buf; - if (ip6_route_get_saddr(net, rt, dest, 0, &saddr_buf) == 0 && + if (ip6_route_get_saddr(net, rt, dest, 0, 0, &saddr_buf) == 0 && nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf)) goto nla_put_failure; } diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 729faf8bd366..3385faf1d5dc 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -490,14 +490,10 @@ static int tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, ipv6_icmp_error(sk, skb, err, th->dest, ntohl(info), (u8 *)th); - if (!sock_owned_by_user(sk)) { - WRITE_ONCE(sk->sk_err, err); - sk_error_report(sk); /* Wake people up to see the error (see connect in sock.c) */ - - tcp_done(sk); - } else { + if (!sock_owned_by_user(sk)) + tcp_done_with_error(sk, err); + else WRITE_ONCE(sk->sk_err_soft, err); - } goto out; case TCP_LISTEN: break; diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c index 380695fdc32f..e6a7ff6ca679 100644 --- a/net/mac80211/chan.c +++ b/net/mac80211/chan.c @@ -775,13 +775,24 @@ void ieee80211_recalc_chanctx_chantype(struct ieee80211_local *local, /* TDLS peers can sometimes affect the chandef width */ list_for_each_entry(sta, &local->sta_list, list) { + struct ieee80211_sub_if_data *sdata = sta->sdata; struct ieee80211_chan_req tdls_chanreq = {}; + int tdls_link_id; + if (!sta->uploaded || !test_sta_flag(sta, WLAN_STA_TDLS_WIDER_BW) || !test_sta_flag(sta, WLAN_STA_AUTHORIZED) || !sta->tdls_chandef.chan) 
continue; + tdls_link_id = ieee80211_tdls_sta_link_id(sta); + link = sdata_dereference(sdata->link[tdls_link_id], sdata); + if (!link) + continue; + + if (rcu_access_pointer(link->conf->chanctx_conf) != conf) + continue; + tdls_chanreq.oper = sta->tdls_chandef; /* note this always fills and returns &tmp if compat */ diff --git a/net/mac80211/main.c b/net/mac80211/main.c index 0965ad11ec74..7ba329ebdda9 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c @@ -148,7 +148,7 @@ static u32 ieee80211_calc_hw_conf_chan(struct ieee80211_local *local, offchannel_flag ^= local->hw.conf.flags & IEEE80211_CONF_OFFCHANNEL; /* force it also for scanning, since drivers might config differently */ - if (offchannel_flag || local->scanning || + if (offchannel_flag || local->scanning || local->in_reconfig || !cfg80211_chandef_identical(&local->hw.conf.chandef, &chandef)) { local->hw.conf.chandef = chandef; changed |= IEEE80211_CONF_CHANGE_CHANNEL; diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index a5f2d3cfe60d..ad2ce9c92ba8 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -3287,8 +3287,17 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata, sizeof(sdata->u.mgd.ttlm_info)); wiphy_delayed_work_cancel(sdata->local->hw.wiphy, &ifmgd->ttlm_work); + memset(&sdata->vif.neg_ttlm, 0, sizeof(sdata->vif.neg_ttlm)); wiphy_delayed_work_cancel(sdata->local->hw.wiphy, &ifmgd->neg_ttlm_timeout_work); + + sdata->u.mgd.removed_links = 0; + wiphy_delayed_work_cancel(sdata->local->hw.wiphy, + &sdata->u.mgd.ml_reconf_work); + + wiphy_work_cancel(sdata->local->hw.wiphy, + &ifmgd->teardown_ttlm_work); + ieee80211_vif_set_links(sdata, 0, 0); ifmgd->mcast_seq_last = IEEE80211_SN_MODULO; @@ -6834,7 +6843,7 @@ static void ieee80211_teardown_ttlm_work(struct wiphy *wiphy, u16 new_dormant_links; struct ieee80211_sub_if_data *sdata = container_of(work, struct ieee80211_sub_if_data, - u.mgd.neg_ttlm_timeout_work.work); + u.mgd.teardown_ttlm_work); if (!sdata->vif.neg_ttlm.valid) return; @@ -8704,12 +8713,8 @@ void ieee80211_mgd_stop(struct ieee80211_sub_if_data *sdata) &ifmgd->beacon_connection_loss_work); wiphy_work_cancel(sdata->local->hw.wiphy, &ifmgd->csa_connection_drop_work); - wiphy_work_cancel(sdata->local->hw.wiphy, - &ifmgd->teardown_ttlm_work); wiphy_delayed_work_cancel(sdata->local->hw.wiphy, &ifmgd->tdls_peer_del_work); - wiphy_delayed_work_cancel(sdata->local->hw.wiphy, - &ifmgd->ml_reconf_work); wiphy_delayed_work_cancel(sdata->local->hw.wiphy, &ifmgd->ttlm_work); wiphy_delayed_work_cancel(sdata->local->hw.wiphy, &ifmgd->neg_ttlm_timeout_work); diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h index bd5e2f7146f6..9195d5a2de0a 100644 --- a/net/mac80211/sta_info.h +++ b/net/mac80211/sta_info.h @@ -727,6 +727,12 @@ struct sta_info { struct ieee80211_sta sta; }; +static inline int ieee80211_tdls_sta_link_id(struct sta_info *sta) +{ + /* TDLS STA can only have a single link */ + return sta->sta.valid_links ? __ffs(sta->sta.valid_links) : 0; +} + static inline enum nl80211_plink_state sta_plink_state(struct sta_info *sta) { #ifdef CONFIG_MAC80211_MESH diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index f861d99e5f05..72a9ba8bc5fd 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -2774,8 +2774,7 @@ static struct sk_buff *ieee80211_build_hdr(struct ieee80211_sub_if_data *sdata, if (tdls_peer) { /* For TDLS only one link can be valid with peer STA */ - int tdls_link_id = sta->sta.valid_links ? 
- __ffs(sta->sta.valid_links) : 0; + int tdls_link_id = ieee80211_tdls_sta_link_id(sta); struct ieee80211_link_data *link; /* DA SA BSSID */ @@ -3101,8 +3100,7 @@ void ieee80211_check_fast_xmit(struct sta_info *sta) case NL80211_IFTYPE_STATION: if (test_sta_flag(sta, WLAN_STA_TDLS_PEER)) { /* For TDLS only one link can be valid with peer STA */ - int tdls_link_id = sta->sta.valid_links ? - __ffs(sta->sta.valid_links) : 0; + int tdls_link_id = ieee80211_tdls_sta_link_id(sta); struct ieee80211_link_data *link; /* DA SA BSSID */ diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c index b6d0dcf3a5c3..f4384e147ee1 100644 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c @@ -1459,18 +1459,18 @@ ip_vs_add_service(struct netns_ipvs *ipvs, struct ip_vs_service_user_kern *u, if (ret < 0) goto out_err; - /* Bind the ct retriever */ - RCU_INIT_POINTER(svc->pe, pe); - pe = NULL; - /* Update the virtual service counters */ if (svc->port == FTPPORT) atomic_inc(&ipvs->ftpsvc_counter); else if (svc->port == 0) atomic_inc(&ipvs->nullsvc_counter); - if (svc->pe && svc->pe->conn_out) + if (pe && pe->conn_out) atomic_inc(&ipvs->conn_out_counter); + /* Bind the ct retriever */ + RCU_INIT_POINTER(svc->pe, pe); + pe = NULL; + /* Count only IPv4 services for old get/setsockopt interface */ if (svc->af == AF_INET) ipvs->num_services++; diff --git a/net/netfilter/ipvs/ip_vs_proto_sctp.c b/net/netfilter/ipvs/ip_vs_proto_sctp.c index 1e689c714127..83e452916403 100644 --- a/net/netfilter/ipvs/ip_vs_proto_sctp.c +++ b/net/netfilter/ipvs/ip_vs_proto_sctp.c @@ -126,7 +126,7 @@ sctp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp, if (sctph->source != cp->vport || payload_csum || skb->ip_summed == CHECKSUM_PARTIAL) { sctph->source = cp->vport; - if (!skb_is_gso(skb) || !skb_is_gso_sctp(skb)) + if (!skb_is_gso(skb)) sctp_nat_csum(skb, sctph, sctphoff); } else { skb->ip_summed = CHECKSUM_UNNECESSARY; @@ -175,7 +175,7 @@ sctp_dnat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp, (skb->ip_summed == CHECKSUM_PARTIAL && !(skb_dst(skb)->dev->features & NETIF_F_SCTP_CRC))) { sctph->dest = cp->dport; - if (!skb_is_gso(skb) || !skb_is_gso_sctp(skb)) + if (!skb_is_gso(skb)) sctp_nat_csum(skb, sctph, sctphoff); } else if (skb->ip_summed != CHECKSUM_PARTIAL) { skb->ip_summed = CHECKSUM_UNNECESSARY; diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index 3b846cbdc050..4cbf71d0786b 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c @@ -3420,7 +3420,8 @@ static int ctnetlink_del_expect(struct sk_buff *skb, if (cda[CTA_EXPECT_ID]) { __be32 id = nla_get_be32(cda[CTA_EXPECT_ID]); - if (ntohl(id) != (u32)(unsigned long)exp) { + + if (id != nf_expect_get_id(exp)) { nf_ct_expect_put(exp); return -ENOENT; } diff --git a/net/netfilter/nft_set_pipapo.c b/net/netfilter/nft_set_pipapo.c index 15a236bebb46..eb4c4a4ac7ac 100644 --- a/net/netfilter/nft_set_pipapo.c +++ b/net/netfilter/nft_set_pipapo.c @@ -434,7 +434,7 @@ bool nft_pipapo_lookup(const struct net *net, const struct nft_set *set, res_map = scratch->map + (map_index ? m->bsize_max : 0); fill_map = scratch->map + (map_index ? 
0 : m->bsize_max); - memset(res_map, 0xff, m->bsize_max * sizeof(*res_map)); + pipapo_resmap_init(m, res_map); nft_pipapo_for_each_field(f, i, m) { bool last = i == m->field_count - 1; @@ -542,7 +542,7 @@ static struct nft_pipapo_elem *pipapo_get(const struct net *net, goto out; } - memset(res_map, 0xff, m->bsize_max * sizeof(*res_map)); + pipapo_resmap_init(m, res_map); nft_pipapo_for_each_field(f, i, m) { bool last = i == m->field_count - 1; diff --git a/net/netfilter/nft_set_pipapo.h b/net/netfilter/nft_set_pipapo.h index 0d2e40e10f7f..4a2ff85ce1c4 100644 --- a/net/netfilter/nft_set_pipapo.h +++ b/net/netfilter/nft_set_pipapo.h @@ -278,4 +278,25 @@ static u64 pipapo_estimate_size(const struct nft_set_desc *desc) return size; } +/** + * pipapo_resmap_init() - Initialise result map before first use + * @m: Matching data, including mapping table + * @res_map: Result map + * + * Initialize all bits covered by the first field to one, so that after + * the first step, only the matching bits of the first bit group remain. + * + * If other fields have a large bitmap, set remainder of res_map to 0. + */ +static inline void pipapo_resmap_init(const struct nft_pipapo_match *m, unsigned long *res_map) +{ + const struct nft_pipapo_field *f = m->f; + int i; + + for (i = 0; i < f->bsize; i++) + res_map[i] = ULONG_MAX; + + for (i = f->bsize; i < m->bsize_max; i++) + res_map[i] = 0ul; +} #endif /* _NFT_SET_PIPAPO_H */ diff --git a/net/netfilter/nft_set_pipapo_avx2.c b/net/netfilter/nft_set_pipapo_avx2.c index d08407d589ea..b8d3c3213efe 100644 --- a/net/netfilter/nft_set_pipapo_avx2.c +++ b/net/netfilter/nft_set_pipapo_avx2.c @@ -1036,6 +1036,7 @@ static int nft_pipapo_avx2_lookup_8b_16(unsigned long *map, unsigned long *fill, /** * nft_pipapo_avx2_lookup_slow() - Fallback function for uncommon field sizes + * @mdata: Matching data, including mapping table * @map: Previous match result, used as initial bitmap * @fill: Destination bitmap to be filled with current match result * @f: Field, containing lookup and mapping tables @@ -1051,7 +1052,8 @@ static int nft_pipapo_avx2_lookup_8b_16(unsigned long *map, unsigned long *fill, * Return: -1 on no match, rule index of match if @last, otherwise first long * word index to be checked next (i.e. first filled word). 
*/ -static int nft_pipapo_avx2_lookup_slow(unsigned long *map, unsigned long *fill, +static int nft_pipapo_avx2_lookup_slow(const struct nft_pipapo_match *mdata, + unsigned long *map, unsigned long *fill, const struct nft_pipapo_field *f, int offset, const u8 *pkt, bool first, bool last) @@ -1060,7 +1062,7 @@ static int nft_pipapo_avx2_lookup_slow(unsigned long *map, unsigned long *fill, int i, ret = -1, b; if (first) - memset(map, 0xff, bsize * sizeof(*map)); + pipapo_resmap_init(mdata, map); for (i = offset; i < bsize; i++) { if (f->bb == 8) @@ -1137,8 +1139,14 @@ bool nft_pipapo_avx2_lookup(const struct net *net, const struct nft_set *set, bool map_index; int i, ret = 0; - if (unlikely(!irq_fpu_usable())) - return nft_pipapo_lookup(net, set, key, ext); + local_bh_disable(); + + if (unlikely(!irq_fpu_usable())) { + bool fallback_res = nft_pipapo_lookup(net, set, key, ext); + + local_bh_enable(); + return fallback_res; + } m = rcu_dereference(priv->match); @@ -1153,6 +1161,7 @@ bool nft_pipapo_avx2_lookup(const struct net *net, const struct nft_set *set, scratch = *raw_cpu_ptr(m->scratch); if (unlikely(!scratch)) { kernel_fpu_end(); + local_bh_enable(); return false; } @@ -1186,7 +1195,7 @@ bool nft_pipapo_avx2_lookup(const struct net *net, const struct nft_set *set, } else if (f->groups == 16) { NFT_SET_PIPAPO_AVX2_LOOKUP(8, 16); } else { - ret = nft_pipapo_avx2_lookup_slow(res, fill, f, + ret = nft_pipapo_avx2_lookup_slow(m, res, fill, f, ret, rp, first, last); } @@ -1202,7 +1211,7 @@ bool nft_pipapo_avx2_lookup(const struct net *net, const struct nft_set *set, } else if (f->groups == 32) { NFT_SET_PIPAPO_AVX2_LOOKUP(4, 32); } else { - ret = nft_pipapo_avx2_lookup_slow(res, fill, f, + ret = nft_pipapo_avx2_lookup_slow(m, res, fill, f, ret, rp, first, last); } @@ -1233,6 +1242,7 @@ bool nft_pipapo_avx2_lookup(const struct net *net, const struct nft_set *set, if (i % 2) scratch->map_index = !map_index; kernel_fpu_end(); + local_bh_enable(); return ret >= 0; } diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index ea3ebc160e25..4692a9ef110b 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -538,6 +538,61 @@ static void *packet_current_frame(struct packet_sock *po, return packet_lookup_frame(po, rb, rb->head, status); } +static u16 vlan_get_tci(struct sk_buff *skb, struct net_device *dev) +{ + u8 *skb_orig_data = skb->data; + int skb_orig_len = skb->len; + struct vlan_hdr vhdr, *vh; + unsigned int header_len; + + if (!dev) + return 0; + + /* In the SOCK_DGRAM scenario, skb data starts at the network + * protocol, which is after the VLAN headers. The outer VLAN + * header is at the hard_header_len offset in non-variable + * length link layer headers. If it's a VLAN device, the + * min_header_len should be used to exclude the VLAN header + * size. 
+ */ + if (dev->min_header_len == dev->hard_header_len) + header_len = dev->hard_header_len; + else if (is_vlan_dev(dev)) + header_len = dev->min_header_len; + else + return 0; + + skb_push(skb, skb->data - skb_mac_header(skb)); + vh = skb_header_pointer(skb, header_len, sizeof(vhdr), &vhdr); + if (skb_orig_data != skb->data) { + skb->data = skb_orig_data; + skb->len = skb_orig_len; + } + if (unlikely(!vh)) + return 0; + + return ntohs(vh->h_vlan_TCI); +} + +static __be16 vlan_get_protocol_dgram(struct sk_buff *skb) +{ + __be16 proto = skb->protocol; + + if (unlikely(eth_type_vlan(proto))) { + u8 *skb_orig_data = skb->data; + int skb_orig_len = skb->len; + + skb_push(skb, skb->data - skb_mac_header(skb)); + proto = __vlan_get_protocol(skb, proto, NULL); + if (skb_orig_data != skb->data) { + skb->data = skb_orig_data; + skb->len = skb_orig_len; + } + } + + return proto; +} + static void prb_del_retire_blk_timer(struct tpacket_kbdq_core *pkc) { del_timer_sync(&pkc->retire_blk_timer); @@ -1007,10 +1062,16 @@ static void prb_clear_rxhash(struct tpacket_kbdq_core *pkc, static void prb_fill_vlan_info(struct tpacket_kbdq_core *pkc, struct tpacket3_hdr *ppd) { + struct packet_sock *po = container_of(pkc, struct packet_sock, rx_ring.prb_bdqc); + if (skb_vlan_tag_present(pkc->skb)) { ppd->hv1.tp_vlan_tci = skb_vlan_tag_get(pkc->skb); ppd->hv1.tp_vlan_tpid = ntohs(pkc->skb->vlan_proto); ppd->tp_status = TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID; + } else if (unlikely(po->sk.sk_type == SOCK_DGRAM && eth_type_vlan(pkc->skb->protocol))) { + ppd->hv1.tp_vlan_tci = vlan_get_tci(pkc->skb, pkc->skb->dev); + ppd->hv1.tp_vlan_tpid = ntohs(pkc->skb->protocol); + ppd->tp_status = TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID; } else { ppd->hv1.tp_vlan_tci = 0; ppd->hv1.tp_vlan_tpid = 0; @@ -2428,6 +2489,10 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, h.h2->tp_vlan_tci = skb_vlan_tag_get(skb); h.h2->tp_vlan_tpid = ntohs(skb->vlan_proto); status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID; + } else if (unlikely(sk->sk_type == SOCK_DGRAM && eth_type_vlan(skb->protocol))) { + h.h2->tp_vlan_tci = vlan_get_tci(skb, skb->dev); + h.h2->tp_vlan_tpid = ntohs(skb->protocol); + status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID; } else { h.h2->tp_vlan_tci = 0; h.h2->tp_vlan_tpid = 0; @@ -2457,7 +2522,8 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, sll->sll_halen = dev_parse_header(skb, sll->sll_addr); sll->sll_family = AF_PACKET; sll->sll_hatype = dev->type; - sll->sll_protocol = skb->protocol; + sll->sll_protocol = (sk->sk_type == SOCK_DGRAM) ? + vlan_get_protocol_dgram(skb) : skb->protocol; sll->sll_pkttype = skb->pkt_type; if (unlikely(packet_sock_flag(po, PACKET_SOCK_ORIGDEV))) sll->sll_ifindex = orig_dev->ifindex; @@ -3482,7 +3548,8 @@ static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, /* Original length was stored in sockaddr_ll fields */ origlen = PACKET_SKB_CB(skb)->sa.origlen; sll->sll_family = AF_PACKET; - sll->sll_protocol = skb->protocol; + sll->sll_protocol = (sock->type == SOCK_DGRAM) ? 
+ vlan_get_protocol_dgram(skb) : skb->protocol; } sock_recv_cmsgs(msg, sk, skb); @@ -3539,6 +3606,21 @@ static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, aux.tp_vlan_tci = skb_vlan_tag_get(skb); aux.tp_vlan_tpid = ntohs(skb->vlan_proto); aux.tp_status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID; + } else if (unlikely(sock->type == SOCK_DGRAM && eth_type_vlan(skb->protocol))) { + struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll; + struct net_device *dev; + + rcu_read_lock(); + dev = dev_get_by_index_rcu(sock_net(sk), sll->sll_ifindex); + if (dev) { + aux.tp_vlan_tci = vlan_get_tci(skb, dev); + aux.tp_vlan_tpid = ntohs(skb->protocol); + aux.tp_status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID; + } else { + aux.tp_vlan_tci = 0; + aux.tp_vlan_tpid = 0; + } + rcu_read_unlock(); } else { aux.tp_vlan_tci = 0; aux.tp_vlan_tpid = 0; diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c index fafdb97adfad..acca3b1a068f 100644 --- a/net/smc/smc_core.c +++ b/net/smc/smc_core.c @@ -2015,7 +2015,6 @@ int smc_conn_create(struct smc_sock *smc, struct smc_init_info *ini) */ static u8 smc_compress_bufsize(int size, bool is_smcd, bool is_rmb) { - const unsigned int max_scat = SG_MAX_SINGLE_ALLOC * PAGE_SIZE; u8 compressed; if (size <= SMC_BUF_MIN_SIZE) @@ -2025,9 +2024,11 @@ static u8 smc_compress_bufsize(int size, bool is_smcd, bool is_rmb) compressed = min_t(u8, ilog2(size) + 1, is_smcd ? SMCD_DMBE_SIZES : SMCR_RMBE_SIZES); +#ifdef CONFIG_ARCH_NO_SG_CHAIN if (!is_smcd && is_rmb) /* RMBs are backed by & limited to max size of scatterlists */ - compressed = min_t(u8, compressed, ilog2(max_scat >> 14)); + compressed = min_t(u8, compressed, ilog2((SG_MAX_SINGLE_ALLOC * PAGE_SIZE) >> 14)); +#endif return compressed; } diff --git a/net/sunrpc/auth_gss/gss_krb5_keys.c b/net/sunrpc/auth_gss/gss_krb5_keys.c index 06d8ee0db000..4eb19c3a54c7 100644 --- a/net/sunrpc/auth_gss/gss_krb5_keys.c +++ b/net/sunrpc/auth_gss/gss_krb5_keys.c @@ -168,7 +168,7 @@ static int krb5_DK(const struct gss_krb5_enctype *gk5e, goto err_return; blocksize = crypto_sync_skcipher_blocksize(cipher); if (crypto_sync_skcipher_setkey(cipher, inkey->data, inkey->len)) - goto err_return; + goto err_free_cipher; ret = -ENOMEM; inblockdata = kmalloc(blocksize, gfp_mask); diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index cfd1b1bf7e35..09f29a95f2bc 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c @@ -2326,12 +2326,13 @@ call_transmit_status(struct rpc_task *task) task->tk_action = call_transmit; task->tk_status = 0; break; - case -ECONNREFUSED: case -EHOSTDOWN: case -ENETDOWN: case -EHOSTUNREACH: case -ENETUNREACH: case -EPERM: + break; + case -ECONNREFUSED: if (RPC_IS_SOFTCONN(task)) { if (!task->tk_msg.rpc_proc->p_proc) trace_xprt_ping(task->tk_xprt, diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c index ffbf99894970..47f33bb7bff8 100644 --- a/net/sunrpc/xprtrdma/frwr_ops.c +++ b/net/sunrpc/xprtrdma/frwr_ops.c @@ -92,7 +92,8 @@ static void frwr_mr_put(struct rpcrdma_mr *mr) rpcrdma_mr_push(mr, &mr->mr_req->rl_free_mrs); } -/* frwr_reset - Place MRs back on the free list +/** + * frwr_reset - Place MRs back on @req's free list * @req: request to reset * * Used after a failed marshal. 
For FRWR, this means the MRs diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c index 432557a553e7..a0b071089e15 100644 --- a/net/sunrpc/xprtrdma/verbs.c +++ b/net/sunrpc/xprtrdma/verbs.c @@ -897,6 +897,8 @@ static int rpcrdma_reqs_setup(struct rpcrdma_xprt *r_xprt) static void rpcrdma_req_reset(struct rpcrdma_req *req) { + struct rpcrdma_mr *mr; + /* Credits are valid for only one connection */ req->rl_slot.rq_cong = 0; @@ -906,7 +908,19 @@ static void rpcrdma_req_reset(struct rpcrdma_req *req) rpcrdma_regbuf_dma_unmap(req->rl_sendbuf); rpcrdma_regbuf_dma_unmap(req->rl_recvbuf); - frwr_reset(req); + /* The verbs consumer can't know the state of an MR on the + * req->rl_registered list unless a successful completion + * has occurred, so they cannot be re-used. + */ + while ((mr = rpcrdma_mr_pop(&req->rl_registered))) { + struct rpcrdma_buffer *buf = &mr->mr_xprt->rx_buf; + + spin_lock(&buf->rb_lock); + list_del(&mr->mr_all); + spin_unlock(&buf->rb_lock); + + frwr_mr_release(mr); + } } /* ASSUMPTION: the rb_allreqs list is stable for the duration, diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c index b849a3d133a0..439f75539977 100644 --- a/net/tipc/udp_media.c +++ b/net/tipc/udp_media.c @@ -135,8 +135,11 @@ static int tipc_udp_addr2str(struct tipc_media_addr *a, char *buf, int size) snprintf(buf, size, "%pI4:%u", &ua->ipv4, ntohs(ua->port)); else if (ntohs(ua->proto) == ETH_P_IPV6) snprintf(buf, size, "%pI6:%u", &ua->ipv6, ntohs(ua->port)); - else + else { pr_err("Invalid UDP media address\n"); + return 1; + } + return 0; } diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 142f56770b77..11cb5badafb6 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -2667,10 +2667,49 @@ static struct sk_buff *manage_oob(struct sk_buff *skb, struct sock *sk, static int unix_stream_read_skb(struct sock *sk, skb_read_actor_t recv_actor) { + struct unix_sock *u = unix_sk(sk); + struct sk_buff *skb; + int err; + if (unlikely(READ_ONCE(sk->sk_state) != TCP_ESTABLISHED)) return -ENOTCONN; - return unix_read_skb(sk, recv_actor); + mutex_lock(&u->iolock); + skb = skb_recv_datagram(sk, MSG_DONTWAIT, &err); + mutex_unlock(&u->iolock); + if (!skb) + return err; + +#if IS_ENABLED(CONFIG_AF_UNIX_OOB) + if (unlikely(skb == READ_ONCE(u->oob_skb))) { + bool drop = false; + + unix_state_lock(sk); + + if (sock_flag(sk, SOCK_DEAD)) { + unix_state_unlock(sk); + kfree_skb(skb); + return -ECONNRESET; + } + + spin_lock(&sk->sk_receive_queue.lock); + if (likely(skb == u->oob_skb)) { + WRITE_ONCE(u->oob_skb, NULL); + drop = true; + } + spin_unlock(&sk->sk_receive_queue.lock); + + unix_state_unlock(sk); + + if (drop) { + WARN_ON_ONCE(skb_unref(skb)); + kfree_skb(skb); + return -EAGAIN; + } + } +#endif + + return recv_actor(sk, skb); } static int unix_stream_read_generic(struct unix_stream_read_state *state, diff --git a/net/unix/unix_bpf.c b/net/unix/unix_bpf.c index bd84785bf8d6..bca2d86ba97d 100644 --- a/net/unix/unix_bpf.c +++ b/net/unix/unix_bpf.c @@ -54,6 +54,9 @@ static int unix_bpf_recvmsg(struct sock *sk, struct msghdr *msg, struct sk_psock *psock; int copied; + if (flags & MSG_OOB) + return -EOPNOTSUPP; + if (!len) return 0; diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 72c7bf558581..0fd075238fc7 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -1208,6 +1208,9 @@ static int nl80211_msg_put_channel(struct sk_buff *msg, struct wiphy *wiphy, if ((chan->flags & IEEE80211_CHAN_NO_6GHZ_AFC_CLIENT) && nla_put_flag(msg, 
NL80211_FREQUENCY_ATTR_NO_6GHZ_AFC_CLIENT)) goto nla_put_failure; + if ((chan->flags & IEEE80211_CHAN_CAN_MONITOR) && + nla_put_flag(msg, NL80211_FREQUENCY_ATTR_CAN_MONITOR)) + goto nla_put_failure; } if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_MAX_TX_POWER, diff --git a/net/wireless/util.c b/net/wireless/util.c index 082c6f9c5416..af6ec719567f 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c @@ -1504,7 +1504,7 @@ static u32 cfg80211_calculate_bitrate_he(struct rate_info *rate) 5120, /* 0.833333... */ }; u32 rates_160M[3] = { 960777777, 907400000, 816666666 }; - u32 rates_969[3] = { 480388888, 453700000, 408333333 }; + u32 rates_996[3] = { 480388888, 453700000, 408333333 }; u32 rates_484[3] = { 229411111, 216666666, 195000000 }; u32 rates_242[3] = { 114711111, 108333333, 97500000 }; u32 rates_106[3] = { 40000000, 37777777, 34000000 }; @@ -1524,12 +1524,14 @@ static u32 cfg80211_calculate_bitrate_he(struct rate_info *rate) if (WARN_ON_ONCE(rate->nss < 1 || rate->nss > 8)) return 0; - if (rate->bw == RATE_INFO_BW_160) + if (rate->bw == RATE_INFO_BW_160 || + (rate->bw == RATE_INFO_BW_HE_RU && + rate->he_ru_alloc == NL80211_RATE_INFO_HE_RU_ALLOC_2x996)) result = rates_160M[rate->he_gi]; else if (rate->bw == RATE_INFO_BW_80 || (rate->bw == RATE_INFO_BW_HE_RU && rate->he_ru_alloc == NL80211_RATE_INFO_HE_RU_ALLOC_996)) - result = rates_969[rate->he_gi]; + result = rates_996[rate->he_gi]; else if (rate->bw == RATE_INFO_BW_40 || (rate->bw == RATE_INFO_BW_HE_RU && rate->he_ru_alloc == NL80211_RATE_INFO_HE_RU_ALLOC_484)) diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c index caa340134b0e..9f76ca591d54 100644 --- a/net/xdp/xdp_umem.c +++ b/net/xdp/xdp_umem.c @@ -151,6 +151,7 @@ static int xdp_umem_account_pages(struct xdp_umem *umem) #define XDP_UMEM_FLAGS_VALID ( \ XDP_UMEM_UNALIGNED_CHUNK_FLAG | \ XDP_UMEM_TX_SW_CSUM | \ + XDP_UMEM_TX_METADATA_LEN | \ 0) static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr) @@ -204,8 +205,11 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr) if (headroom >= chunk_size - XDP_PACKET_HEADROOM) return -EINVAL; - if (mr->tx_metadata_len >= 256 || mr->tx_metadata_len % 8) - return -EINVAL; + if (mr->flags & XDP_UMEM_TX_METADATA_LEN) { + if (mr->tx_metadata_len >= 256 || mr->tx_metadata_len % 8) + return -EINVAL; + umem->tx_metadata_len = mr->tx_metadata_len; + } umem->size = size; umem->headroom = headroom; @@ -215,7 +219,6 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr) umem->pgs = NULL; umem->user = NULL; umem->flags = mr->flags; - umem->tx_metadata_len = mr->tx_metadata_len; INIT_LIST_HEAD(&umem->xsk_dma_list); refcount_set(&umem->users, 1); diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c index d2ea18dcb0cb..e95462b982b0 100644 --- a/net/xfrm/xfrm_input.c +++ b/net/xfrm/xfrm_input.c @@ -474,11 +474,6 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type) if (encap_type < 0 || (xo && xo->flags & XFRM_GRO)) { x = xfrm_input_state(skb); - if (unlikely(x->dir && x->dir != XFRM_SA_DIR_IN)) { - XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEDIRERROR); - goto drop; - } - if (unlikely(x->km.state != XFRM_STATE_VALID)) { if (x->km.state == XFRM_STATE_ACQ) XFRM_INC_STATS(net, LINUX_MIB_XFRMACQUIREERROR); @@ -585,8 +580,11 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type) } if (unlikely(x->dir && x->dir != XFRM_SA_DIR_IN)) { + secpath_reset(skb); XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEDIRERROR); + xfrm_audit_state_notfound(skb, family, 
spi, seq); xfrm_state_put(x); + x = NULL; goto drop; } diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 66e07de2de35..56b88ad88db6 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -452,6 +452,8 @@ EXPORT_SYMBOL(xfrm_policy_destroy); static void xfrm_policy_kill(struct xfrm_policy *policy) { + xfrm_dev_policy_delete(policy); + write_lock_bh(&policy->lock); policy->walk.dead = 1; write_unlock_bh(&policy->lock); @@ -1850,7 +1852,6 @@ int xfrm_policy_flush(struct net *net, u8 type, bool task_valid) __xfrm_policy_unlink(pol, dir); spin_unlock_bh(&net->xfrm.xfrm_policy_lock); - xfrm_dev_policy_delete(pol); cnt++; xfrm_audit_policy_delete(pol, 1, task_valid); xfrm_policy_kill(pol); @@ -1891,7 +1892,6 @@ int xfrm_dev_policy_flush(struct net *net, struct net_device *dev, __xfrm_policy_unlink(pol, dir); spin_unlock_bh(&net->xfrm.xfrm_policy_lock); - xfrm_dev_policy_delete(pol); cnt++; xfrm_audit_policy_delete(pol, 1, task_valid); xfrm_policy_kill(pol); @@ -2342,7 +2342,6 @@ int xfrm_policy_delete(struct xfrm_policy *pol, int dir) pol = __xfrm_policy_unlink(pol, dir); spin_unlock_bh(&net->xfrm.xfrm_policy_lock); if (pol) { - xfrm_dev_policy_delete(pol); xfrm_policy_kill(pol); return 0; } diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index 649bb739df0d..67b2a399a48a 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c @@ -49,6 +49,7 @@ static struct kmem_cache *xfrm_state_cache __ro_after_init; static DECLARE_WORK(xfrm_state_gc_work, xfrm_state_gc_task); static HLIST_HEAD(xfrm_state_gc_list); +static HLIST_HEAD(xfrm_state_dev_gc_list); static inline bool xfrm_state_hold_rcu(struct xfrm_state __rcu *x) { @@ -214,6 +215,7 @@ static DEFINE_SPINLOCK(xfrm_state_afinfo_lock); static struct xfrm_state_afinfo __rcu *xfrm_state_afinfo[NPROTO]; static DEFINE_SPINLOCK(xfrm_state_gc_lock); +static DEFINE_SPINLOCK(xfrm_state_dev_gc_lock); int __xfrm_state_delete(struct xfrm_state *x); @@ -683,6 +685,41 @@ struct xfrm_state *xfrm_state_alloc(struct net *net) } EXPORT_SYMBOL(xfrm_state_alloc); +#ifdef CONFIG_XFRM_OFFLOAD +void xfrm_dev_state_delete(struct xfrm_state *x) +{ + struct xfrm_dev_offload *xso = &x->xso; + struct net_device *dev = READ_ONCE(xso->dev); + + if (dev) { + dev->xfrmdev_ops->xdo_dev_state_delete(x); + spin_lock_bh(&xfrm_state_dev_gc_lock); + hlist_add_head(&x->dev_gclist, &xfrm_state_dev_gc_list); + spin_unlock_bh(&xfrm_state_dev_gc_lock); + } +} +EXPORT_SYMBOL_GPL(xfrm_dev_state_delete); + +void xfrm_dev_state_free(struct xfrm_state *x) +{ + struct xfrm_dev_offload *xso = &x->xso; + struct net_device *dev = READ_ONCE(xso->dev); + + if (dev && dev->xfrmdev_ops) { + spin_lock_bh(&xfrm_state_dev_gc_lock); + if (!hlist_unhashed(&x->dev_gclist)) + hlist_del(&x->dev_gclist); + spin_unlock_bh(&xfrm_state_dev_gc_lock); + + if (dev->xfrmdev_ops->xdo_dev_state_free) + dev->xfrmdev_ops->xdo_dev_state_free(x); + WRITE_ONCE(xso->dev, NULL); + xso->type = XFRM_DEV_OFFLOAD_UNSPECIFIED; + netdev_put(dev, &xso->dev_tracker); + } +} +#endif + void __xfrm_state_destroy(struct xfrm_state *x, bool sync) { WARN_ON(x->km.state != XFRM_STATE_DEAD); @@ -848,6 +885,9 @@ EXPORT_SYMBOL(xfrm_state_flush); int xfrm_dev_state_flush(struct net *net, struct net_device *dev, bool task_valid) { + struct xfrm_state *x; + struct hlist_node *tmp; + struct xfrm_dev_offload *xso; int i, err = 0, cnt = 0; spin_lock_bh(&net->xfrm.xfrm_state_lock); @@ -857,8 +897,6 @@ int xfrm_dev_state_flush(struct net *net, struct net_device *dev, bool task_vali err = -ESRCH; for (i = 0; i <= 
net->xfrm.state_hmask; i++) { - struct xfrm_state *x; - struct xfrm_dev_offload *xso; restart: hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) { xso = &x->xso; @@ -868,6 +906,8 @@ int xfrm_dev_state_flush(struct net *net, struct net_device *dev, bool task_vali spin_unlock_bh(&net->xfrm.xfrm_state_lock); err = xfrm_state_delete(x); + xfrm_dev_state_free(x); + xfrm_audit_state_delete(x, err ? 0 : 1, task_valid); xfrm_state_put(x); @@ -884,6 +924,24 @@ int xfrm_dev_state_flush(struct net *net, struct net_device *dev, bool task_vali out: spin_unlock_bh(&net->xfrm.xfrm_state_lock); + + spin_lock_bh(&xfrm_state_dev_gc_lock); +restart_gc: + hlist_for_each_entry_safe(x, tmp, &xfrm_state_dev_gc_list, dev_gclist) { + xso = &x->xso; + + if (xso->dev == dev) { + spin_unlock_bh(&xfrm_state_dev_gc_lock); + xfrm_dev_state_free(x); + spin_lock_bh(&xfrm_state_dev_gc_lock); + goto restart_gc; + } + + } + spin_unlock_bh(&xfrm_state_dev_gc_lock); + + xfrm_flush_gc(); + return err; } EXPORT_SYMBOL(xfrm_dev_state_flush); @@ -1273,8 +1331,7 @@ xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr, xso->dev = xdo->dev; xso->real_dev = xdo->real_dev; xso->flags = XFRM_DEV_OFFLOAD_FLAG_ACQ; - netdev_tracker_alloc(xso->dev, &xso->dev_tracker, - GFP_ATOMIC); + netdev_hold(xso->dev, &xso->dev_tracker, GFP_ATOMIC); error = xso->dev->xfrmdev_ops->xdo_dev_state_add(x, NULL); if (error) { xso->dir = 0; diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index e83c687bd64e..77355422ce82 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c @@ -2455,7 +2455,6 @@ static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh, NETLINK_CB(skb).portid); } } else { - xfrm_dev_policy_delete(xp); xfrm_audit_policy_delete(xp, err ? 0 : 1, true); if (err != 0) diff --git a/scripts/Kconfig.include b/scripts/Kconfig.include index 3ee8ecfb8c04..3500a3d62f0d 100644 --- a/scripts/Kconfig.include +++ b/scripts/Kconfig.include @@ -33,7 +33,8 @@ ld-option = $(success,$(LD) -v $(1)) # $(as-instr,<instr>) # Return y if the assembler supports <instr>, n otherwise -as-instr = $(success,printf "%b\n" "$(1)" | $(CC) $(CLANG_FLAGS) -Wa$(comma)--fatal-warnings -c -x assembler-with-cpp -o /dev/null -) +as-instr = $(success,printf "%b\n" "$(1)" | $(CC) $(CLANG_FLAGS) $(2) -Wa$(comma)--fatal-warnings -c -x assembler-with-cpp -o /dev/null -) +as-instr64 = $(as-instr,$(1),$(m64-flag)) # check if $(CC) and $(LD) exist $(error-if,$(failure,command -v $(CC)),C compiler '$(CC)' not found) diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib index 9f06f6aaf7fc..7f8ec77bf35c 100644 --- a/scripts/Makefile.lib +++ b/scripts/Makefile.lib @@ -407,8 +407,12 @@ cmd_dtc = $(HOSTCC) -E $(dtc_cpp_flags) -x assembler-with-cpp -o $(dtc-tmp) $< ; -d $(depfile).dtc.tmp $(dtc-tmp) ; \ cat $(depfile).pre.tmp $(depfile).dtc.tmp > $(depfile) +# NOTE: +# Do not replace $(filter %.dtb %.dtbo, $^) with $(real-prereqs). When a single +# DTB is turned into a multi-blob DTB, $^ will contain header file dependencies +# recorded in the .*.cmd file. 
quiet_cmd_fdtoverlay = DTOVL $@ - cmd_fdtoverlay = $(objtree)/scripts/dtc/fdtoverlay -o $@ -i $(real-prereqs) + cmd_fdtoverlay = $(objtree)/scripts/dtc/fdtoverlay -o $@ -i $(filter %.dtb %.dtbo, $^) $(multi-dtb-y): FORCE $(call if_changed,fdtoverlay) diff --git a/scripts/gcc-x86_32-has-stack-protector.sh b/scripts/gcc-x86_32-has-stack-protector.sh index 825c75c5b715..9459ca4f0f11 100755 --- a/scripts/gcc-x86_32-has-stack-protector.sh +++ b/scripts/gcc-x86_32-has-stack-protector.sh @@ -5,4 +5,4 @@ # -mstack-protector-guard-reg, added by # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=81708 -echo "int foo(void) { char X[200]; return 3; }" | $* -S -x c -c -m32 -O0 -fstack-protector -mstack-protector-guard-reg=fs -mstack-protector-guard-symbol=__stack_chk_guard - -o - 2> /dev/null | grep -q "%fs" +echo "int foo(void) { char X[200]; return 3; }" | $* -S -x c -m32 -O0 -fstack-protector -mstack-protector-guard-reg=fs -mstack-protector-guard-symbol=__stack_chk_guard - -o - 2> /dev/null | grep -q "%fs" diff --git a/scripts/gcc-x86_64-has-stack-protector.sh b/scripts/gcc-x86_64-has-stack-protector.sh index 75e4e22b986a..f680bb01aeeb 100755 --- a/scripts/gcc-x86_64-has-stack-protector.sh +++ b/scripts/gcc-x86_64-has-stack-protector.sh @@ -1,4 +1,4 @@ #!/bin/sh # SPDX-License-Identifier: GPL-2.0 -echo "int foo(void) { char X[200]; return 3; }" | $* -S -x c -c -m64 -O0 -mcmodel=kernel -fno-PIE -fstack-protector - -o - 2> /dev/null | grep -q "%gs" +echo "int foo(void) { char X[200]; return 3; }" | $* -S -x c -m64 -O0 -mcmodel=kernel -fno-PIE -fstack-protector - -o - 2> /dev/null | grep -q "%gs" diff --git a/scripts/syscalltbl.sh b/scripts/syscalltbl.sh index 6abe143889ef..6a903b87a7c2 100755 --- a/scripts/syscalltbl.sh +++ b/scripts/syscalltbl.sh @@ -54,7 +54,7 @@ nxt=0 grep -E "^[0-9]+[[:space:]]+$abis" "$infile" | { - while read nr abi name native compat ; do + while read nr abi name native compat noreturn; do if [ $nxt -gt $nr ]; then echo "error: $infile: syscall table is not sorted or duplicates the same syscall number" >&2 @@ -66,7 +66,21 @@ grep -E "^[0-9]+[[:space:]]+$abis" "$infile" | { nxt=$((nxt + 1)) done - if [ -n "$compat" ]; then + if [ "$compat" = "-" ]; then + unset compat + fi + + if [ -n "$noreturn" ]; then + if [ "$noreturn" != "noreturn" ]; then + echo "error: $infile: invalid string \"$noreturn\" in 'noreturn' column" + exit 1 + fi + if [ -n "$compat" ]; then + echo "__SYSCALL_COMPAT_NORETURN($nr, $native, $compat)" + else + echo "__SYSCALL_NORETURN($nr, $native)" + fi + elif [ -n "$compat" ]; then echo "__SYSCALL_WITH_COMPAT($nr, $native, $compat)" elif [ -n "$native" ]; then echo "__SYSCALL($nr, $native)" diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c index 6239777090c4..4373b914acf2 100644 --- a/security/apparmor/lsm.c +++ b/security/apparmor/lsm.c @@ -1304,6 +1304,13 @@ static int apparmor_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb) if (!skb->secmark) return 0; + /* + * If reach here before socket_post_create hook is called, in which + * case label is null, drop the packet. 
+ */ + if (!ctx->label) + return -EACCES; + return apparmor_secmark_check(ctx->label, OP_RECVMSG, AA_MAY_RECEIVE, skb->secmark, sk); } diff --git a/security/apparmor/policy.c b/security/apparmor/policy.c index 957654d253dd..14df15e35695 100644 --- a/security/apparmor/policy.c +++ b/security/apparmor/policy.c @@ -225,7 +225,7 @@ static void aa_free_data(void *ptr, void *arg) { struct aa_data *data = ptr; - kfree_sensitive(data->data); + kvfree_sensitive(data->data, data->size); kfree_sensitive(data->key); kfree_sensitive(data); } diff --git a/security/apparmor/policy_unpack.c b/security/apparmor/policy_unpack.c index 5e578ef0ddff..5a570235427d 100644 --- a/security/apparmor/policy_unpack.c +++ b/security/apparmor/policy_unpack.c @@ -747,34 +747,42 @@ static int unpack_pdb(struct aa_ext *e, struct aa_policydb **policy, *info = "missing required dfa"; goto fail; } - goto out; + } else { + /* + * only unpack the following if a dfa is present + * + * sadly start was given different names for file and policydb + * but since it is optional we can try both + */ + if (!aa_unpack_u32(e, &pdb->start[0], "start")) + /* default start state */ + pdb->start[0] = DFA_START; + if (!aa_unpack_u32(e, &pdb->start[AA_CLASS_FILE], "dfa_start")) { + /* default start state for xmatch and file dfa */ + pdb->start[AA_CLASS_FILE] = DFA_START; + } /* setup class index */ + for (i = AA_CLASS_FILE + 1; i <= AA_CLASS_LAST; i++) { + pdb->start[i] = aa_dfa_next(pdb->dfa, pdb->start[0], + i); + } } /* - * only unpack the following if a dfa is present - * - * sadly start was given different names for file and policydb - * but since it is optional we can try both + * Unfortunately due to a bug in earlier userspaces, a + * transition table may be present even when the dfa is + * not. For compatibility reasons unpack and discard. 
*/ - if (!aa_unpack_u32(e, &pdb->start[0], "start")) - /* default start state */ - pdb->start[0] = DFA_START; - if (!aa_unpack_u32(e, &pdb->start[AA_CLASS_FILE], "dfa_start")) { - /* default start state for xmatch and file dfa */ - pdb->start[AA_CLASS_FILE] = DFA_START; - } /* setup class index */ - for (i = AA_CLASS_FILE + 1; i <= AA_CLASS_LAST; i++) { - pdb->start[i] = aa_dfa_next(pdb->dfa, pdb->start[0], - i); - } if (!unpack_trans_table(e, &pdb->trans) && required_trans) { *info = "failed to unpack profile transition table"; goto fail; } + if (!pdb->dfa && pdb->trans.table) + aa_free_str_table(&pdb->trans); + /* TODO: move compat mapping here, requires dfa merging first */ /* TODO: move verify here, it has to be done after compat mappings */ -out: + *policy = pdb; return 0; @@ -1071,6 +1079,7 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name) if (rhashtable_insert_fast(profile->data, &data->head, profile->data->p)) { + kvfree_sensitive(data->data, data->size); kfree_sensitive(data->key); kfree_sensitive(data); info = "failed to insert data to table"; diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c index 4bc3e9398ee3..ab927a142f51 100644 --- a/security/keys/keyctl.c +++ b/security/keys/keyctl.c @@ -1694,7 +1694,7 @@ long keyctl_session_to_parent(void) goto unlock; /* cancel an already pending keyring replacement */ - oldwork = task_work_cancel(parent, key_change_session_keyring); + oldwork = task_work_cancel_func(parent, key_change_session_keyring); /* the replacement session keyring is applied just prior to userspace * restarting */ diff --git a/security/landlock/cred.c b/security/landlock/cred.c index 786af18c4a1c..db9fe7d906ba 100644 --- a/security/landlock/cred.c +++ b/security/landlock/cred.c @@ -14,8 +14,8 @@ #include "ruleset.h" #include "setup.h" -static int hook_cred_prepare(struct cred *const new, - const struct cred *const old, const gfp_t gfp) +static void hook_cred_transfer(struct cred *const new, + const struct cred *const old) { struct landlock_ruleset *const old_dom = landlock_cred(old)->domain; @@ -23,6 +23,12 @@ static int hook_cred_prepare(struct cred *const new, landlock_get_ruleset(old_dom); landlock_cred(new)->domain = old_dom; } +} + +static int hook_cred_prepare(struct cred *const new, + const struct cred *const old, const gfp_t gfp) +{ + hook_cred_transfer(new, old); return 0; } @@ -36,6 +42,7 @@ static void hook_cred_free(struct cred *const cred) static struct security_hook_list landlock_hooks[] __ro_after_init = { LSM_HOOK_INIT(cred_prepare, hook_cred_prepare), + LSM_HOOK_INIT(cred_transfer, hook_cred_transfer), LSM_HOOK_INIT(cred_free, hook_cred_free), }; diff --git a/security/security.c b/security/security.c index e5ca08789f74..8cee5b6c6e6d 100644 --- a/security/security.c +++ b/security/security.c @@ -2278,7 +2278,20 @@ int security_inode_getattr(const struct path *path) * @size: size of xattr value * @flags: flags * - * Check permission before setting the extended attributes. + * This hook performs the desired permission checks before setting the extended + * attributes (xattrs) on @dentry. It is important to note that we have some + * additional logic before the main LSM implementation calls to detect if we + * need to perform an additional capability check at the LSM layer. 
+ * + * Normally we enforce a capability check prior to executing the various LSM + * hook implementations, but if a LSM wants to avoid this capability check, + * it can register a 'inode_xattr_skipcap' hook and return a value of 1 for + * xattrs that it wants to avoid the capability check, leaving the LSM fully + * responsible for enforcing the access control for the specific xattr. If all + * of the enabled LSMs refrain from registering a 'inode_xattr_skipcap' hook, + * or return a 0 (the default return value), the capability check is still + * performed. If no 'inode_xattr_skipcap' hooks are registered the capability + * check is performed. * * Return: Returns 0 if permission is granted. */ @@ -2286,20 +2299,20 @@ int security_inode_setxattr(struct mnt_idmap *idmap, struct dentry *dentry, const char *name, const void *value, size_t size, int flags) { - int ret; + int rc; if (unlikely(IS_PRIVATE(d_backing_inode(dentry)))) return 0; - /* - * SELinux and Smack integrate the cap call, - * so assume that all LSMs supplying this call do so. - */ - ret = call_int_hook(inode_setxattr, idmap, dentry, name, value, size, - flags); - if (ret == 1) - ret = cap_inode_setxattr(dentry, name, value, size, flags); - return ret; + /* enforce the capability checks at the lsm layer, if needed */ + if (!call_int_hook(inode_xattr_skipcap, name)) { + rc = cap_inode_setxattr(dentry, name, value, size, flags); + if (rc) + return rc; + } + + return call_int_hook(inode_setxattr, idmap, dentry, name, value, size, + flags); } /** @@ -2452,26 +2465,39 @@ int security_inode_listxattr(struct dentry *dentry) * @dentry: file * @name: xattr name * - * Check permission before removing the extended attribute identified by @name - * for @dentry. + * This hook performs the desired permission checks before setting the extended + * attributes (xattrs) on @dentry. It is important to note that we have some + * additional logic before the main LSM implementation calls to detect if we + * need to perform an additional capability check at the LSM layer. + * + * Normally we enforce a capability check prior to executing the various LSM + * hook implementations, but if a LSM wants to avoid this capability check, + * it can register a 'inode_xattr_skipcap' hook and return a value of 1 for + * xattrs that it wants to avoid the capability check, leaving the LSM fully + * responsible for enforcing the access control for the specific xattr. If all + * of the enabled LSMs refrain from registering a 'inode_xattr_skipcap' hook, + * or return a 0 (the default return value), the capability check is still + * performed. If no 'inode_xattr_skipcap' hooks are registered the capability + * check is performed. * * Return: Returns 0 if permission is granted. */ int security_inode_removexattr(struct mnt_idmap *idmap, struct dentry *dentry, const char *name) { - int ret; + int rc; if (unlikely(IS_PRIVATE(d_backing_inode(dentry)))) return 0; - /* - * SELinux and Smack integrate the cap call, - * so assume that all LSMs supplying this call do so. 
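To make the new contract concrete: an LSM that wants to own the capability decision for its xattrs registers an inode_xattr_skipcap hook and returns 1 for the names it covers, as the SELinux and Smack hunks further below do. The following is a hypothetical minimal registration, sketched only for illustration; the "foo" LSM and its xattr name are invented, while the hook name and LSM_HOOK_INIT usage come from this patch.

#include <linux/init.h>
#include <linux/lsm_hooks.h>
#include <linux/string.h>
#include <linux/xattr.h>

#define XATTR_NAME_FOO XATTR_SECURITY_PREFIX "foo"  /* invented name */

/* Return 1 so the LSM core skips cap_inode_setxattr()/cap_inode_removexattr()
 * for this xattr and leaves the whole access decision to "foo". */
static int foo_inode_xattr_skipcap(const char *name)
{
        return !strcmp(name, XATTR_NAME_FOO);
}

static struct security_hook_list foo_hooks[] __ro_after_init = {
        LSM_HOOK_INIT(inode_xattr_skipcap, foo_inode_xattr_skipcap),
        /* plus the LSM's own inode_setxattr/inode_removexattr hooks */
};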
- */ - ret = call_int_hook(inode_removexattr, idmap, dentry, name); - if (ret == 1) - ret = cap_inode_removexattr(idmap, dentry, name); - return ret; + + /* enforce the capability checks at the lsm layer, if needed */ + if (!call_int_hook(inode_xattr_skipcap, name)) { + rc = cap_inode_removexattr(idmap, dentry, name); + if (rc) + return rc; + } + + return call_int_hook(inode_removexattr, idmap, dentry, name); } /** diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index 7eed331e90f0..55c78c318ccd 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c @@ -3177,6 +3177,23 @@ static bool has_cap_mac_admin(bool audit) return true; } +/** + * selinux_inode_xattr_skipcap - Skip the xattr capability checks? + * @name: name of the xattr + * + * Returns 1 to indicate that SELinux "owns" the access control rights to xattrs + * named @name; the LSM layer should avoid enforcing any traditional + * capability based access controls on this xattr. Returns 0 to indicate that + * SELinux does not "own" the access control rights to xattrs named @name and is + * deferring to the LSM layer for further access controls, including capability + * based controls. + */ +static int selinux_inode_xattr_skipcap(const char *name) +{ + /* require capability check if not a selinux xattr */ + return !strcmp(name, XATTR_NAME_SELINUX); +} + static int selinux_inode_setxattr(struct mnt_idmap *idmap, struct dentry *dentry, const char *name, const void *value, size_t size, int flags) @@ -3188,15 +3205,9 @@ static int selinux_inode_setxattr(struct mnt_idmap *idmap, u32 newsid, sid = current_sid(); int rc = 0; - if (strcmp(name, XATTR_NAME_SELINUX)) { - rc = cap_inode_setxattr(dentry, name, value, size, flags); - if (rc) - return rc; - - /* Not an attribute we recognize, so just check the - ordinary setattr permission. */ + /* if not a selinux xattr, only check the ordinary setattr perm */ + if (strcmp(name, XATTR_NAME_SELINUX)) return dentry_has_perm(current_cred(), dentry, FILE__SETATTR); - } if (!selinux_initialized()) return (inode_owner_or_capable(idmap, inode) ? 0 : -EPERM); @@ -3345,15 +3356,9 @@ static int selinux_inode_listxattr(struct dentry *dentry) static int selinux_inode_removexattr(struct mnt_idmap *idmap, struct dentry *dentry, const char *name) { - if (strcmp(name, XATTR_NAME_SELINUX)) { - int rc = cap_inode_removexattr(idmap, dentry, name); - if (rc) - return rc; - - /* Not an attribute we recognize, so just check the - ordinary setattr permission. */ + /* if not a selinux xattr, only check the ordinary setattr perm */ + if (strcmp(name, XATTR_NAME_SELINUX)) return dentry_has_perm(current_cred(), dentry, FILE__SETATTR); - } if (!selinux_initialized()) return 0; @@ -7175,6 +7180,7 @@ static struct security_hook_list selinux_hooks[] __ro_after_init = { LSM_HOOK_INIT(inode_permission, selinux_inode_permission), LSM_HOOK_INIT(inode_setattr, selinux_inode_setattr), LSM_HOOK_INIT(inode_getattr, selinux_inode_getattr), + LSM_HOOK_INIT(inode_xattr_skipcap, selinux_inode_xattr_skipcap), LSM_HOOK_INIT(inode_setxattr, selinux_inode_setxattr), LSM_HOOK_INIT(inode_post_setxattr, selinux_inode_post_setxattr), LSM_HOOK_INIT(inode_getxattr, selinux_inode_getxattr), diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c index f5cbec1e6a92..c1fe422cfbe1 100644 --- a/security/smack/smack_lsm.c +++ b/security/smack/smack_lsm.c @@ -1282,6 +1282,33 @@ static int smack_inode_getattr(const struct path *path) return rc; } +/** + * smack_inode_xattr_skipcap - Skip the xattr capability checks? 
+ * @name: name of the xattr + * + * Returns 1 to indicate that Smack "owns" the access control rights to xattrs + * named @name; the LSM layer should avoid enforcing any traditional + * capability based access controls on this xattr. Returns 0 to indicate that + * Smack does not "own" the access control rights to xattrs named @name and is + * deferring to the LSM layer for further access controls, including capability + * based controls. + */ +static int smack_inode_xattr_skipcap(const char *name) +{ + if (strncmp(name, XATTR_SMACK_SUFFIX, strlen(XATTR_SMACK_SUFFIX))) + return 0; + + if (strcmp(name, XATTR_NAME_SMACK) == 0 || + strcmp(name, XATTR_NAME_SMACKIPIN) == 0 || + strcmp(name, XATTR_NAME_SMACKIPOUT) == 0 || + strcmp(name, XATTR_NAME_SMACKEXEC) == 0 || + strcmp(name, XATTR_NAME_SMACKMMAP) == 0 || + strcmp(name, XATTR_NAME_SMACKTRANSMUTE) == 0) + return 1; + + return 0; +} + /** * smack_inode_setxattr - Smack check for setting xattrs * @idmap: idmap of the mount @@ -1325,8 +1352,7 @@ static int smack_inode_setxattr(struct mnt_idmap *idmap, size != TRANS_TRUE_SIZE || strncmp(value, TRANS_TRUE, TRANS_TRUE_SIZE) != 0) rc = -EINVAL; - } else - rc = cap_inode_setxattr(dentry, name, value, size, flags); + } if (check_priv && !smack_privileged(CAP_MAC_ADMIN)) rc = -EPERM; @@ -1435,8 +1461,7 @@ static int smack_inode_removexattr(struct mnt_idmap *idmap, strcmp(name, XATTR_NAME_SMACKMMAP) == 0) { if (!smack_privileged(CAP_MAC_ADMIN)) rc = -EPERM; - } else - rc = cap_inode_removexattr(idmap, dentry, name); + } if (rc != 0) return rc; @@ -5053,6 +5078,7 @@ static struct security_hook_list smack_hooks[] __ro_after_init = { LSM_HOOK_INIT(inode_permission, smack_inode_permission), LSM_HOOK_INIT(inode_setattr, smack_inode_setattr), LSM_HOOK_INIT(inode_getattr, smack_inode_getattr), + LSM_HOOK_INIT(inode_xattr_skipcap, smack_inode_xattr_skipcap), LSM_HOOK_INIT(inode_setxattr, smack_inode_setxattr), LSM_HOOK_INIT(inode_post_setxattr, smack_inode_post_setxattr), LSM_HOOK_INIT(inode_getxattr, smack_inode_getxattr), diff --git a/sound/core/ump.c b/sound/core/ump.c index 3f61220c23b4..0f0d7e895c5a 100644 --- a/sound/core/ump.c +++ b/sound/core/ump.c @@ -733,6 +733,12 @@ static void fill_fb_info(struct snd_ump_endpoint *ump, info->block_id, info->direction, info->active, info->first_group, info->num_groups, info->midi_ci_version, info->sysex8_streams, info->flags); + + if ((info->flags & SNDRV_UMP_BLOCK_IS_MIDI1) && info->num_groups != 1) { + info->num_groups = 1; + ump_dbg(ump, "FB %d: corrected groups to 1 for MIDI1\n", + info->block_id); + } } /* check whether the FB info gets updated by the current message */ @@ -806,6 +812,13 @@ static int ump_handle_fb_name_msg(struct snd_ump_endpoint *ump, if (!fb) return -ENODEV; + if (ump->parsed && + (ump->info.flags & SNDRV_UMP_EP_INFO_STATIC_BLOCKS)) { + ump_dbg(ump, "Skipping static FB name update (blk#%d)\n", + fb->info.block_id); + return 0; + } + ret = ump_append_string(ump, fb->info.name, sizeof(fb->info.name), buf->raw, 3); /* notify the FB name update to sequencer, too */ diff --git a/sound/firewire/amdtp-stream.c b/sound/firewire/amdtp-stream.c index d35d0a420ee0..1a163bbcabd7 100644 --- a/sound/firewire/amdtp-stream.c +++ b/sound/firewire/amdtp-stream.c @@ -1180,8 +1180,7 @@ static void process_rx_packets(struct fw_iso_context *context, u32 tstamp, size_ (void)fw_card_read_cycle_time(fw_parent_device(s->unit)->card, &curr_cycle_time); for (i = 0; i < packets; ++i) { - DEFINE_FLEX(struct fw_iso_packet, template, header, - header_length, 
CIP_HEADER_QUADLETS); + DEFINE_RAW_FLEX(struct fw_iso_packet, template, header, CIP_HEADER_QUADLETS); bool sched_irq = false; build_it_pkt_header(s, desc->cycle, template, pkt_header_length, diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 6f4512b598ea..d749769438ea 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -10360,10 +10360,10 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1043, 0x1f62, "ASUS UX7602ZM", ALC245_FIXUP_CS35L41_SPI_2), SND_PCI_QUIRK(0x1043, 0x1f92, "ASUS ROG Flow X16", ALC289_FIXUP_ASUS_GA401), SND_PCI_QUIRK(0x1043, 0x3030, "ASUS ZN270IE", ALC256_FIXUP_ASUS_AIO_GPIO2), - SND_PCI_QUIRK(0x1043, 0x3a20, "ASUS G614JZR", ALC245_FIXUP_CS35L41_SPI_2), - SND_PCI_QUIRK(0x1043, 0x3a30, "ASUS G814JVR/JIR", ALC245_FIXUP_CS35L41_SPI_2), + SND_PCI_QUIRK(0x1043, 0x3a20, "ASUS G614JZR", ALC285_FIXUP_ASUS_SPI_REAR_SPEAKERS), + SND_PCI_QUIRK(0x1043, 0x3a30, "ASUS G814JVR/JIR", ALC285_FIXUP_ASUS_SPI_REAR_SPEAKERS), SND_PCI_QUIRK(0x1043, 0x3a40, "ASUS G814JZR", ALC285_FIXUP_ASUS_SPI_REAR_SPEAKERS), - SND_PCI_QUIRK(0x1043, 0x3a50, "ASUS G834JYR/JZR", ALC245_FIXUP_CS35L41_SPI_2), + SND_PCI_QUIRK(0x1043, 0x3a50, "ASUS G834JYR/JZR", ALC285_FIXUP_ASUS_SPI_REAR_SPEAKERS), SND_PCI_QUIRK(0x1043, 0x3a60, "ASUS G634JYR/JZR", ALC285_FIXUP_ASUS_SPI_REAR_SPEAKERS), SND_PCI_QUIRK(0x1043, 0x831a, "ASUS P901", ALC269_FIXUP_STEREO_DMIC), SND_PCI_QUIRK(0x1043, 0x834a, "ASUS S101", ALC269_FIXUP_STEREO_DMIC), diff --git a/sound/soc/amd/acp-es8336.c b/sound/soc/amd/acp-es8336.c index e079b3218c6f..3756b8bef17b 100644 --- a/sound/soc/amd/acp-es8336.c +++ b/sound/soc/amd/acp-es8336.c @@ -203,8 +203,10 @@ static int st_es8336_late_probe(struct snd_soc_card *card) codec_dev = acpi_get_first_physical_node(adev); acpi_dev_put(adev); - if (!codec_dev) + if (!codec_dev) { dev_err(card->dev, "can not find codec dev\n"); + return -ENODEV; + } ret = devm_acpi_dev_add_driver_gpios(codec_dev, acpi_es8336_gpios); if (ret) diff --git a/sound/soc/amd/yc/acp6x-mach.c b/sound/soc/amd/yc/acp6x-mach.c index 4e3a8ce690a4..36dddf230c2c 100644 --- a/sound/soc/amd/yc/acp6x-mach.c +++ b/sound/soc/amd/yc/acp6x-mach.c @@ -220,6 +220,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "21J6"), } }, + { + .driver_data = &acp6x_card, + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "21M5"), + } + }, { .driver_data = &acp6x_card, .matches = { diff --git a/sound/soc/codecs/cs35l56-shared.c b/sound/soc/codecs/cs35l56-shared.c index 30497152e02a..f609cade805d 100644 --- a/sound/soc/codecs/cs35l56-shared.c +++ b/sound/soc/codecs/cs35l56-shared.c @@ -397,7 +397,7 @@ int cs35l56_irq_request(struct cs35l56_base *cs35l56_base, int irq) { int ret; - if (!irq) + if (irq < 1) return 0; ret = devm_request_threaded_irq(cs35l56_base->dev, irq, NULL, cs35l56_irq, diff --git a/sound/soc/codecs/max98088.c b/sound/soc/codecs/max98088.c index 8b56ee550c09..8b0645c63462 100644 --- a/sound/soc/codecs/max98088.c +++ b/sound/soc/codecs/max98088.c @@ -1318,6 +1318,7 @@ static int max98088_set_bias_level(struct snd_soc_component *component, enum snd_soc_bias_level level) { struct max98088_priv *max98088 = snd_soc_component_get_drvdata(component); + int ret; switch (level) { case SND_SOC_BIAS_ON: @@ -1333,10 +1334,13 @@ static int max98088_set_bias_level(struct snd_soc_component *component, */ if (!IS_ERR(max98088->mclk)) { if (snd_soc_component_get_bias_level(component) == - SND_SOC_BIAS_ON) + 
SND_SOC_BIAS_ON) { clk_disable_unprepare(max98088->mclk); - else - clk_prepare_enable(max98088->mclk); + } else { + ret = clk_prepare_enable(max98088->mclk); + if (ret) + return ret; + } } break; diff --git a/sound/soc/codecs/pcm6240.c b/sound/soc/codecs/pcm6240.c index 86e126783a1d..8f7057e689fb 100644 --- a/sound/soc/codecs/pcm6240.c +++ b/sound/soc/codecs/pcm6240.c @@ -2087,10 +2087,8 @@ static int pcmdevice_i2c_probe(struct i2c_client *i2c) #endif pcm_dev = devm_kzalloc(&i2c->dev, sizeof(*pcm_dev), GFP_KERNEL); - if (!pcm_dev) { - ret = -ENOMEM; - goto out; - } + if (!pcm_dev) + return -ENOMEM; pcm_dev->chip_id = (id != NULL) ? id->driver_data : 0; diff --git a/sound/soc/codecs/tas2781-fmwlib.c b/sound/soc/codecs/tas2781-fmwlib.c index 265a8ca25cbb..08082806d589 100644 --- a/sound/soc/codecs/tas2781-fmwlib.c +++ b/sound/soc/codecs/tas2781-fmwlib.c @@ -2163,7 +2163,7 @@ static void tasdev_load_calibrated_data(struct tasdevice_priv *priv, int i) return; cal = cal_fmw->calibrations; - if (cal) + if (!cal) return; load_calib_data(priv, &cal->dev_data); @@ -2324,14 +2324,21 @@ void tasdevice_tuning_switch(void *context, int state) struct tasdevice_fw *tas_fmw = tas_priv->fmw; int profile_cfg_id = tas_priv->rcabin.profile_cfg_id; - if (tas_priv->fw_state == TASDEVICE_DSP_FW_FAIL) { - dev_err(tas_priv->dev, "DSP bin file not loaded\n"); + /* + * Only RCA-based Playback can still work with no dsp program running + * inside the chip. + */ + switch (tas_priv->fw_state) { + case TASDEVICE_RCA_FW_OK: + case TASDEVICE_DSP_FW_ALL_OK: + break; + default: return; } if (state == 0) { - if (tas_priv->cur_prog < tas_fmw->nr_programs) { - /*dsp mode or tuning mode*/ + if (tas_fmw && tas_priv->cur_prog < tas_fmw->nr_programs) { + /* dsp mode or tuning mode */ profile_cfg_id = tas_priv->rcabin.profile_cfg_id; tasdevice_select_tuningprm_cfg(tas_priv, tas_priv->cur_prog, tas_priv->cur_conf, @@ -2340,9 +2347,10 @@ void tasdevice_tuning_switch(void *context, int state) tasdevice_select_cfg_blk(tas_priv, profile_cfg_id, TASDEVICE_BIN_BLK_PRE_POWER_UP); - } else + } else { tasdevice_select_cfg_blk(tas_priv, profile_cfg_id, TASDEVICE_BIN_BLK_PRE_SHUTDOWN); + } } EXPORT_SYMBOL_NS_GPL(tasdevice_tuning_switch, SND_SOC_TAS2781_FMWLIB); diff --git a/sound/soc/codecs/tas2781-i2c.c b/sound/soc/codecs/tas2781-i2c.c index 9350972dfefe..c64d458e524e 100644 --- a/sound/soc/codecs/tas2781-i2c.c +++ b/sound/soc/codecs/tas2781-i2c.c @@ -380,23 +380,37 @@ static void tasdevice_fw_ready(const struct firmware *fmw, mutex_lock(&tas_priv->codec_lock); ret = tasdevice_rca_parser(tas_priv, fmw); - if (ret) + if (ret) { + tasdevice_config_info_remove(tas_priv); goto out; + } tasdevice_create_control(tas_priv); tasdevice_dsp_remove(tas_priv); tasdevice_calbin_remove(tas_priv); - tas_priv->fw_state = TASDEVICE_DSP_FW_PENDING; + /* + * The baseline is the RCA-only case, and then the code attempts to + * load DSP firmware but in case of failures just keep going, i.e. + * failing to load DSP firmware is NOT an error. + */ + tas_priv->fw_state = TASDEVICE_RCA_FW_OK; scnprintf(tas_priv->coef_binaryname, 64, "%s_coef.bin", tas_priv->dev_name); ret = tasdevice_dsp_parser(tas_priv); if (ret) { dev_err(tas_priv->dev, "dspfw load %s error\n", tas_priv->coef_binaryname); - tas_priv->fw_state = TASDEVICE_DSP_FW_FAIL; goto out; } - tasdevice_dsp_create_ctrls(tas_priv); + + /* + * If no dsp-related kcontrol created, the dsp resource will be freed. 
+ */ + ret = tasdevice_dsp_create_ctrls(tas_priv); + if (ret) { + dev_err(tas_priv->dev, "dsp controls error\n"); + goto out; + } tas_priv->fw_state = TASDEVICE_DSP_FW_ALL_OK; @@ -417,9 +431,8 @@ static void tasdevice_fw_ready(const struct firmware *fmw, tasdevice_prmg_load(tas_priv, 0); tas_priv->cur_prog = 0; out: - if (tas_priv->fw_state == TASDEVICE_DSP_FW_FAIL) { - /*If DSP FW fail, kcontrol won't be created */ - tasdevice_config_info_remove(tas_priv); + if (tas_priv->fw_state == TASDEVICE_RCA_FW_OK) { + /* If DSP FW fail, DSP kcontrol won't be created. */ tasdevice_dsp_remove(tas_priv); } mutex_unlock(&tas_priv->codec_lock); @@ -466,14 +479,14 @@ static int tasdevice_startup(struct snd_pcm_substream *substream, { struct snd_soc_component *codec = dai->component; struct tasdevice_priv *tas_priv = snd_soc_component_get_drvdata(codec); - int ret = 0; - if (tas_priv->fw_state != TASDEVICE_DSP_FW_ALL_OK) { - dev_err(tas_priv->dev, "DSP bin file not loaded\n"); - ret = -EINVAL; + switch (tas_priv->fw_state) { + case TASDEVICE_RCA_FW_OK: + case TASDEVICE_DSP_FW_ALL_OK: + return 0; + default: + return -EINVAL; } - - return ret; } static int tasdevice_hw_params(struct snd_pcm_substream *substream, diff --git a/sound/soc/codecs/wcd939x.c b/sound/soc/codecs/wcd939x.c index c49894aad8a5..97fd75a935b6 100644 --- a/sound/soc/codecs/wcd939x.c +++ b/sound/soc/codecs/wcd939x.c @@ -182,8 +182,6 @@ struct wcd939x_priv { /* typec handling */ bool typec_analog_mux; #if IS_ENABLED(CONFIG_TYPEC) - struct typec_mux_dev *typec_mux; - struct typec_switch_dev *typec_sw; enum typec_orientation typec_orientation; unsigned long typec_mode; struct typec_switch *typec_switch; @@ -3528,6 +3526,68 @@ static const struct component_master_ops wcd939x_comp_ops = { .unbind = wcd939x_unbind, }; +static void __maybe_unused wcd939x_typec_mux_unregister(void *data) +{ + struct typec_mux_dev *typec_mux = data; + + typec_mux_unregister(typec_mux); +} + +static void __maybe_unused wcd939x_typec_switch_unregister(void *data) +{ + struct typec_switch_dev *typec_sw = data; + + typec_switch_unregister(typec_sw); +} + +static int wcd939x_add_typec(struct wcd939x_priv *wcd939x, struct device *dev) +{ +#if IS_ENABLED(CONFIG_TYPEC) + int ret; + struct typec_mux_dev *typec_mux; + struct typec_switch_dev *typec_sw; + struct typec_mux_desc mux_desc = { + .drvdata = wcd939x, + .fwnode = dev_fwnode(dev), + .set = wcd939x_typec_mux_set, + }; + struct typec_switch_desc sw_desc = { + .drvdata = wcd939x, + .fwnode = dev_fwnode(dev), + .set = wcd939x_typec_switch_set, + }; + + /* + * Is USBSS is used to mux analog lines, + * register a typec mux/switch to get typec events + */ + if (!wcd939x->typec_analog_mux) + return 0; + + typec_mux = typec_mux_register(dev, &mux_desc); + if (IS_ERR(typec_mux)) + return dev_err_probe(dev, PTR_ERR(typec_mux), + "failed to register typec mux\n"); + + ret = devm_add_action_or_reset(dev, wcd939x_typec_mux_unregister, + typec_mux); + if (ret) + return ret; + + typec_sw = typec_switch_register(dev, &sw_desc); + if (IS_ERR(typec_sw)) + return dev_err_probe(dev, PTR_ERR(typec_sw), + "failed to register typec switch\n"); + + ret = devm_add_action_or_reset(dev, wcd939x_typec_switch_unregister, + typec_sw); + if (ret) + return ret; +#endif + + return 0; +} + static int wcd939x_add_slave_components(struct wcd939x_priv *wcd939x, struct device *dev, struct component_match **matchptr) @@ -3576,42 +3636,13 @@ static int wcd939x_probe(struct platform_device *pdev) return -EINVAL; } -#if IS_ENABLED(CONFIG_TYPEC) - /* - * Is 
USBSS is used to mux analog lines, - * register a typec mux/switch to get typec events - */ - if (wcd939x->typec_analog_mux) { - struct typec_mux_desc mux_desc = { - .drvdata = wcd939x, - .fwnode = dev_fwnode(dev), - .set = wcd939x_typec_mux_set, - }; - struct typec_switch_desc sw_desc = { - .drvdata = wcd939x, - .fwnode = dev_fwnode(dev), - .set = wcd939x_typec_switch_set, - }; - - wcd939x->typec_mux = typec_mux_register(dev, &mux_desc); - if (IS_ERR(wcd939x->typec_mux)) { - ret = dev_err_probe(dev, PTR_ERR(wcd939x->typec_mux), - "failed to register typec mux\n"); - goto err_disable_regulators; - } - - wcd939x->typec_sw = typec_switch_register(dev, &sw_desc); - if (IS_ERR(wcd939x->typec_sw)) { - ret = dev_err_probe(dev, PTR_ERR(wcd939x->typec_sw), - "failed to register typec switch\n"); - goto err_unregister_typec_mux; - } - } -#endif /* CONFIG_TYPEC */ + ret = wcd939x_add_typec(wcd939x, dev); + if (ret) + goto err_disable_regulators; ret = wcd939x_add_slave_components(wcd939x, dev, &match); if (ret) - goto err_unregister_typec_switch; + goto err_disable_regulators; wcd939x_reset(wcd939x); @@ -3628,18 +3659,6 @@ static int wcd939x_probe(struct platform_device *pdev) return 0; -#if IS_ENABLED(CONFIG_TYPEC) -err_unregister_typec_mux: - if (wcd939x->typec_analog_mux) - typec_mux_unregister(wcd939x->typec_mux); -#endif /* CONFIG_TYPEC */ - -err_unregister_typec_switch: -#if IS_ENABLED(CONFIG_TYPEC) - if (wcd939x->typec_analog_mux) - typec_switch_unregister(wcd939x->typec_sw); -#endif /* CONFIG_TYPEC */ - err_disable_regulators: regulator_bulk_disable(WCD939X_MAX_SUPPLY, wcd939x->supplies); regulator_bulk_free(WCD939X_MAX_SUPPLY, wcd939x->supplies); diff --git a/sound/soc/fsl/fsl_qmc_audio.c b/sound/soc/fsl/fsl_qmc_audio.c index bfaaa451735b..dd90ef16fa97 100644 --- a/sound/soc/fsl/fsl_qmc_audio.c +++ b/sound/soc/fsl/fsl_qmc_audio.c @@ -604,6 +604,8 @@ static int qmc_audio_dai_parse(struct qmc_audio *qmc_audio, struct device_node * qmc_dai->name = devm_kasprintf(qmc_audio->dev, GFP_KERNEL, "%s.%d", np->parent->name, qmc_dai->id); + if (!qmc_dai->name) + return -ENOMEM; qmc_dai->qmc_chan = devm_qmc_chan_get_byphandle(qmc_audio->dev, np, "fsl,qmc-chan"); diff --git a/sound/soc/intel/common/soc-acpi-intel-ssp-common.c b/sound/soc/intel/common/soc-acpi-intel-ssp-common.c index 75d0b931d895..de7a3f7f47f1 100644 --- a/sound/soc/intel/common/soc-acpi-intel-ssp-common.c +++ b/sound/soc/intel/common/soc-acpi-intel-ssp-common.c @@ -64,6 +64,15 @@ static const struct codec_map amps[] = { CODEC_MAP_ENTRY("RT1015P", "rt1015", RT1015P_ACPI_HID, CODEC_RT1015P), CODEC_MAP_ENTRY("RT1019P", "rt1019", RT1019P_ACPI_HID, CODEC_RT1019P), CODEC_MAP_ENTRY("RT1308", "rt1308", RT1308_ACPI_HID, CODEC_RT1308), + + /* + * Monolithic components + * + * Only put components that can serve as both the amp and the codec below this line. + * This will ensure that if the part is used just as a codec and there is an amp as well + * then the amp will be selected properly. 
+ */ + CODEC_MAP_ENTRY("RT5650", "rt5650", RT5650_ACPI_HID, CODEC_RT5650), }; enum snd_soc_acpi_intel_codec diff --git a/sound/soc/intel/common/soc-intel-quirks.h b/sound/soc/intel/common/soc-intel-quirks.h index de4e550c5b34..42bd51456b94 100644 --- a/sound/soc/intel/common/soc-intel-quirks.h +++ b/sound/soc/intel/common/soc-intel-quirks.h @@ -11,7 +11,7 @@ #include <linux/platform_data/x86/soc.h> -#if IS_ENABLED(CONFIG_X86) +#if IS_REACHABLE(CONFIG_IOSF_MBI) #include <linux/dmi.h> #include <asm/iosf_mbi.h> diff --git a/sound/soc/qcom/lpass-cpu.c b/sound/soc/qcom/lpass-cpu.c index b0f3e02cb043..5a47f661e0c6 100644 --- a/sound/soc/qcom/lpass-cpu.c +++ b/sound/soc/qcom/lpass-cpu.c @@ -1166,9 +1166,13 @@ int asoc_qcom_lpass_cpu_platform_probe(struct platform_device *pdev) } res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lpass-rxtx-cdc-dma-lpm"); + if (!res) + return -EINVAL; drvdata->rxtx_cdc_dma_lpm_buf = res->start; res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lpass-va-cdc-dma-lpm"); + if (!res) + return -EINVAL; drvdata->va_cdc_dma_lpm_buf = res->start; } diff --git a/sound/soc/sof/amd/pci-vangogh.c b/sound/soc/sof/amd/pci-vangogh.c index 16eb2994fbab..eba580840100 100644 --- a/sound/soc/sof/amd/pci-vangogh.c +++ b/sound/soc/sof/amd/pci-vangogh.c @@ -34,7 +34,6 @@ static const struct sof_amd_acp_desc vangogh_chip_info = { .dsp_intr_base = ACP5X_DSP_SW_INTR_BASE, .sram_pte_offset = ACP5X_SRAM_PTE_OFFSET, .hw_semaphore_offset = ACP5X_AXI2DAGB_SEM_0, - .acp_clkmux_sel = ACP5X_CLKMUX_SEL, .probe_reg_offset = ACP5X_FUTURE_REG_ACLK_0, }; diff --git a/sound/soc/sof/imx/imx8m.c b/sound/soc/sof/imx/imx8m.c index 1c7019c3cbd3..cdd1e79ef9f6 100644 --- a/sound/soc/sof/imx/imx8m.c +++ b/sound/soc/sof/imx/imx8m.c @@ -234,7 +234,7 @@ static int imx8m_probe(struct snd_sof_dev *sdev) /* set default mailbox offset for FW ready message */ sdev->dsp_box.offset = MBOX_OFFSET; - priv->regmap = syscon_regmap_lookup_by_compatible("fsl,dsp-ctrl"); + priv->regmap = syscon_regmap_lookup_by_phandle(np, "fsl,dsp-ctrl"); if (IS_ERR(priv->regmap)) { dev_err(sdev->dev, "cannot find dsp-ctrl registers"); ret = PTR_ERR(priv->regmap); diff --git a/sound/soc/sof/intel/hda-loader.c b/sound/soc/sof/intel/hda-loader.c index b8b914eaf7e0..75f6240cf3e1 100644 --- a/sound/soc/sof/intel/hda-loader.c +++ b/sound/soc/sof/intel/hda-loader.c @@ -310,15 +310,19 @@ int hda_cl_copy_fw(struct snd_sof_dev *sdev, struct hdac_ext_stream *hext_stream return ret; } - /* Wait for completion of transfer */ - time_left = wait_for_completion_timeout(&hda_stream->ioc, - msecs_to_jiffies(HDA_CL_DMA_IOC_TIMEOUT_MS)); - - if (!time_left) { - dev_err(sdev->dev, "Code loader DMA did not complete\n"); - return -ETIMEDOUT; + if (sdev->pdata->ipc_type == SOF_IPC_TYPE_4) { + /* Wait for completion of transfer */ + time_left = wait_for_completion_timeout(&hda_stream->ioc, + msecs_to_jiffies(HDA_CL_DMA_IOC_TIMEOUT_MS)); + + if (!time_left) { + dev_err(sdev->dev, "Code loader DMA did not complete\n"); + return -ETIMEDOUT; + } + dev_dbg(sdev->dev, "Code loader DMA done\n"); } - dev_dbg(sdev->dev, "Code loader DMA done, waiting for FW_ENTERED status\n"); + + dev_dbg(sdev->dev, "waiting for FW_ENTERED status\n"); status = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR, chip->rom_status_reg, reg, diff --git a/sound/soc/sof/intel/hda.c b/sound/soc/sof/intel/hda.c index dead1c19558b..81647ddac8cb 100644 --- a/sound/soc/sof/intel/hda.c +++ b/sound/soc/sof/intel/hda.c @@ -1307,9 +1307,10 @@ struct snd_soc_acpi_mach *hda_machine_select(struct 
snd_sof_dev *sdev) const struct sof_dev_desc *desc = sof_pdata->desc; struct hdac_bus *bus = sof_to_bus(sdev); struct snd_soc_acpi_mach *mach = NULL; - enum snd_soc_acpi_intel_codec codec_type; + enum snd_soc_acpi_intel_codec codec_type, amp_type; const char *tplg_filename; const char *tplg_suffix; + bool amp_name_valid; /* Try I2S or DMIC if it is supported */ if (interface_mask & (BIT(SOF_DAI_INTEL_SSP) | BIT(SOF_DAI_INTEL_DMIC))) @@ -1413,15 +1414,16 @@ struct snd_soc_acpi_mach *hda_machine_select(struct snd_sof_dev *sdev) } } - codec_type = snd_soc_acpi_intel_detect_amp_type(sdev->dev); + amp_type = snd_soc_acpi_intel_detect_amp_type(sdev->dev); + codec_type = snd_soc_acpi_intel_detect_codec_type(sdev->dev); + amp_name_valid = amp_type != CODEC_NONE && amp_type != codec_type; - if (tplg_fixup && - mach->tplg_quirk_mask & SND_SOC_ACPI_TPLG_INTEL_AMP_NAME && - codec_type != CODEC_NONE) { - tplg_suffix = snd_soc_acpi_intel_get_amp_tplg_suffix(codec_type); + if (tplg_fixup && amp_name_valid && + mach->tplg_quirk_mask & SND_SOC_ACPI_TPLG_INTEL_AMP_NAME) { + tplg_suffix = snd_soc_acpi_intel_get_amp_tplg_suffix(amp_type); if (!tplg_suffix) { dev_err(sdev->dev, "no tplg suffix found, amp %d\n", - codec_type); + amp_type); return NULL; } @@ -1436,7 +1438,6 @@ struct snd_soc_acpi_mach *hda_machine_select(struct snd_sof_dev *sdev) add_extension = true; } - codec_type = snd_soc_acpi_intel_detect_codec_type(sdev->dev); if (tplg_fixup && mach->tplg_quirk_mask & SND_SOC_ACPI_TPLG_INTEL_CODEC_NAME && diff --git a/sound/soc/sof/ipc4-topology.c b/sound/soc/sof/ipc4-topology.c index 00987039c972..4d261b9e6d7e 100644 --- a/sound/soc/sof/ipc4-topology.c +++ b/sound/soc/sof/ipc4-topology.c @@ -1358,7 +1358,13 @@ static void sof_ipc4_unprepare_copier_module(struct snd_sof_widget *swidget) ipc4_copier = dai->private; if (pipeline->use_chain_dma) { - pipeline->msg.primary = 0; + /* + * Preserve the DMA Link ID and clear other bits since + * the DMA Link ID is only configured once during + * dai_config, other fields are expected to be 0 for + * re-configuration + */ + pipeline->msg.primary &= SOF_IPC4_GLB_CHAIN_DMA_LINK_ID_MASK; pipeline->msg.extension = 0; } @@ -2869,7 +2875,7 @@ static void sof_ipc4_put_queue_id(struct snd_sof_widget *swidget, int queue_id, static int sof_ipc4_set_copier_sink_format(struct snd_sof_dev *sdev, struct snd_sof_widget *src_widget, struct snd_sof_widget *sink_widget, - int sink_id) + struct snd_sof_route *sroute) { struct sof_ipc4_copier_config_set_sink_format format; const struct sof_ipc_ops *iops = sdev->ipc->ops; @@ -2878,9 +2884,6 @@ static int sof_ipc4_set_copier_sink_format(struct snd_sof_dev *sdev, struct sof_ipc4_fw_module *fw_module; struct sof_ipc4_msg msg = {{ 0 }}; - dev_dbg(sdev->dev, "%s set copier sink %d format\n", - src_widget->widget->name, sink_id); - if (WIDGET_IS_DAI(src_widget->id)) { struct snd_sof_dai *dai = src_widget->private; @@ -2891,13 +2894,15 @@ static int sof_ipc4_set_copier_sink_format(struct snd_sof_dev *sdev, fw_module = src_widget->module_info; - format.sink_id = sink_id; + format.sink_id = sroute->src_queue_id; memcpy(&format.source_fmt, &src_config->audio_fmt, sizeof(format.source_fmt)); - pin_fmt = sof_ipc4_get_input_pin_audio_fmt(sink_widget, sink_id); + pin_fmt = sof_ipc4_get_input_pin_audio_fmt(sink_widget, sroute->dst_queue_id); if (!pin_fmt) { - dev_err(sdev->dev, "Unable to get pin %d format for %s", - sink_id, sink_widget->widget->name); + dev_err(sdev->dev, + "Failed to get input audio format of %s:%d for output of %s:%d\n", + 
sink_widget->widget->name, sroute->dst_queue_id, + src_widget->widget->name, sroute->src_queue_id); return -EINVAL; } @@ -2955,7 +2960,8 @@ static int sof_ipc4_route_setup(struct snd_sof_dev *sdev, struct snd_sof_route * sroute->src_queue_id = sof_ipc4_get_queue_id(src_widget, sink_widget, SOF_PIN_TYPE_OUTPUT); if (sroute->src_queue_id < 0) { - dev_err(sdev->dev, "failed to get queue ID for source widget: %s\n", + dev_err(sdev->dev, + "failed to get src_queue_id ID from source widget %s\n", src_widget->widget->name); return sroute->src_queue_id; } @@ -2963,7 +2969,8 @@ static int sof_ipc4_route_setup(struct snd_sof_dev *sdev, struct snd_sof_route * sroute->dst_queue_id = sof_ipc4_get_queue_id(src_widget, sink_widget, SOF_PIN_TYPE_INPUT); if (sroute->dst_queue_id < 0) { - dev_err(sdev->dev, "failed to get queue ID for sink widget: %s\n", + dev_err(sdev->dev, + "failed to get dst_queue_id ID from sink widget %s\n", sink_widget->widget->name); sof_ipc4_put_queue_id(src_widget, sroute->src_queue_id, SOF_PIN_TYPE_OUTPUT); @@ -2972,10 +2979,11 @@ static int sof_ipc4_route_setup(struct snd_sof_dev *sdev, struct snd_sof_route * /* Pin 0 format is already set during copier module init */ if (sroute->src_queue_id > 0 && WIDGET_IS_COPIER(src_widget->id)) { - ret = sof_ipc4_set_copier_sink_format(sdev, src_widget, sink_widget, - sroute->src_queue_id); + ret = sof_ipc4_set_copier_sink_format(sdev, src_widget, + sink_widget, sroute); if (ret < 0) { - dev_err(sdev->dev, "failed to set sink format for %s source queue ID %d\n", + dev_err(sdev->dev, + "failed to set sink format for source %s:%d\n", src_widget->widget->name, sroute->src_queue_id); goto out; } @@ -3093,8 +3101,14 @@ static int sof_ipc4_dai_config(struct snd_sof_dev *sdev, struct snd_sof_widget * return 0; if (pipeline->use_chain_dma) { - pipeline->msg.primary &= ~SOF_IPC4_GLB_CHAIN_DMA_LINK_ID_MASK; - pipeline->msg.primary |= SOF_IPC4_GLB_CHAIN_DMA_LINK_ID(data->dai_data); + /* + * Only configure the DMA Link ID for ChainDMA when this op is + * invoked with SOF_DAI_CONFIG_FLAGS_HW_PARAMS + */ + if (flags & SOF_DAI_CONFIG_FLAGS_HW_PARAMS) { + pipeline->msg.primary &= ~SOF_IPC4_GLB_CHAIN_DMA_LINK_ID_MASK; + pipeline->msg.primary |= SOF_IPC4_GLB_CHAIN_DMA_LINK_ID(data->dai_data); + } return 0; } diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c index 409fc1164694..d1bdb0b93bda 100644 --- a/sound/usb/mixer.c +++ b/sound/usb/mixer.c @@ -1211,6 +1211,13 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval, cval->res = 16; } break; + case USB_ID(0x1bcf, 0x2281): /* HD Webcam */ + if (!strcmp(kctl->id.name, "Mic Capture Volume")) { + usb_audio_info(chip, + "set resolution quirk: cval->res = 16\n"); + cval->res = 16; + } + break; } } diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c index 58156fbca02c..ea063a14cdd8 100644 --- a/sound/usb/quirks.c +++ b/sound/usb/quirks.c @@ -2125,6 +2125,8 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = { QUIRK_FLAG_CTL_MSG_DELAY_1M), DEVICE_FLG(0x0b0e, 0x0349, /* Jabra 550a */ QUIRK_FLAG_CTL_MSG_DELAY_1M), + DEVICE_FLG(0x0c45, 0x6340, /* Sonix HD USB Camera */ + QUIRK_FLAG_GET_SAMPLE_RATE), DEVICE_FLG(0x0ecb, 0x205c, /* JBL Quantum610 Wireless */ QUIRK_FLAG_FIXED_RATE), DEVICE_FLG(0x0ecb, 0x2069, /* JBL Quantum810 Wireless */ @@ -2167,6 +2169,8 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = { QUIRK_FLAG_GET_SAMPLE_RATE), DEVICE_FLG(0x19f7, 0x0035, /* RODE NT-USB+ */ QUIRK_FLAG_GET_SAMPLE_RATE), + DEVICE_FLG(0x1bcf, 0x2281, /* HD Webcam */ + 
QUIRK_FLAG_GET_SAMPLE_RATE), DEVICE_FLG(0x1bcf, 0x2283, /* NexiGo N930AF FHD Webcam */ QUIRK_FLAG_GET_SAMPLE_RATE), DEVICE_FLG(0x2040, 0x7200, /* Hauppauge HVR-950Q */ diff --git a/tools/bpf/bpftool/common.c b/tools/bpf/bpftool/common.c index 958e92acca8e..9b75639434b8 100644 --- a/tools/bpf/bpftool/common.c +++ b/tools/bpf/bpftool/common.c @@ -410,7 +410,7 @@ void get_prog_full_name(const struct bpf_prog_info *prog_info, int prog_fd, { const char *prog_name = prog_info->name; const struct btf_type *func_type; - const struct bpf_func_info finfo = {}; + struct bpf_func_info finfo = {}; struct bpf_prog_info info = {}; __u32 info_len = sizeof(info); struct btf *prog_btf = NULL; diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c index 1a501cf09e78..40ea743d139f 100644 --- a/tools/bpf/bpftool/prog.c +++ b/tools/bpf/bpftool/prog.c @@ -1813,6 +1813,10 @@ static int load_with_options(int argc, char **argv, bool first_prog_only) } if (pinmaps) { + err = create_and_mount_bpffs_dir(pinmaps); + if (err) + goto err_unpin; + err = bpf_object__pin_maps(obj, pinmaps); if (err) { p_err("failed to pin all maps"); diff --git a/tools/bpf/resolve_btfids/main.c b/tools/bpf/resolve_btfids/main.c index af393c7dee1f..b3edc239fe56 100644 --- a/tools/bpf/resolve_btfids/main.c +++ b/tools/bpf/resolve_btfids/main.c @@ -696,7 +696,7 @@ static int sets_patch(struct object *obj) * Make sure id is at the beginning of the pairs * struct, otherwise the below qsort would not work. */ - BUILD_BUG_ON(set8->pairs != &set8->pairs[0].id); + BUILD_BUG_ON((u32 *)set8->pairs != &set8->pairs[0].id); qsort(set8->pairs, set8->cnt, sizeof(set8->pairs[0]), cmp_id); /* diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c index 2d0840ef599a..142060bbce0a 100644 --- a/tools/lib/bpf/btf.c +++ b/tools/lib/bpf/btf.c @@ -598,7 +598,7 @@ static int btf_sanity_check(const struct btf *btf) __u32 i, n = btf__type_cnt(btf); int err; - for (i = 1; i < n; i++) { + for (i = btf->start_id; i < n; i++) { t = btf_type_by_id(btf, i); err = btf_validate_type(btf, t, i); if (err) diff --git a/tools/lib/bpf/btf_dump.c b/tools/lib/bpf/btf_dump.c index 5dbca76b953f..894860111ddb 100644 --- a/tools/lib/bpf/btf_dump.c +++ b/tools/lib/bpf/btf_dump.c @@ -1559,10 +1559,12 @@ static void btf_dump_emit_type_chain(struct btf_dump *d, * Clang for BPF target generates func_proto with no * args as a func_proto with a single void arg (e.g., * `int (*f)(void)` vs just `int (*f)()`). We are - * going to pretend there are no args for such case. + * going to emit valid empty args (void) syntax for + * such case. Similarly and conveniently, valid + * no args case can be special-cased here as well. */ - if (vlen == 1 && p->type == 0) { - btf_dump_printf(d, ")"); + if (vlen == 0 || (vlen == 1 && p->type == 0)) { + btf_dump_printf(d, "void)"); return; } diff --git a/tools/lib/bpf/libbpf_internal.h b/tools/lib/bpf/libbpf_internal.h index a0dcfb82e455..7e7e686008c6 100644 --- a/tools/lib/bpf/libbpf_internal.h +++ b/tools/lib/bpf/libbpf_internal.h @@ -597,13 +597,9 @@ static inline int ensure_good_fd(int fd) return fd; } -static inline int sys_dup2(int oldfd, int newfd) +static inline int sys_dup3(int oldfd, int newfd, int flags) { -#ifdef __NR_dup2 - return syscall(__NR_dup2, oldfd, newfd); -#else - return syscall(__NR_dup3, oldfd, newfd, 0); -#endif + return syscall(__NR_dup3, oldfd, newfd, flags); } /* Point *fixed_fd* to the same file that *tmp_fd* points to. 
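On the libbpf change above: sys_dup2() becomes a sys_dup3() wrapper precisely so reuse_fd(), in the hunk just below, can request O_CLOEXEC atomically, closing the classic race where another thread forks and execs between a plain dup2() and a separate FD_CLOEXEC fcntl(). A userspace sketch of the same pattern with the plain glibc API (not libbpf code):

/* Sketch: atomically repoint fixed_fd at tmp_fd's file with close-on-exec
 * set, then drop the temporary descriptor, mirroring reuse_fd(). */
#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>

static int repoint_fd(int fixed_fd, int tmp_fd)
{
        int err = 0;

        if (dup3(tmp_fd, fixed_fd, O_CLOEXEC) < 0)
                err = -errno;
        close(tmp_fd);  /* temporary fd is not needed on either path */
        return err;
}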
@@ -614,7 +610,7 @@ static inline int reuse_fd(int fixed_fd, int tmp_fd) { int err; - err = sys_dup2(tmp_fd, fixed_fd); + err = sys_dup3(tmp_fd, fixed_fd, O_CLOEXEC); err = err < 0 ? -errno : 0; close(tmp_fd); /* clean up temporary FD */ return err; diff --git a/tools/lib/bpf/linker.c b/tools/lib/bpf/linker.c index 0d4be829551b..5a583053e311 100644 --- a/tools/lib/bpf/linker.c +++ b/tools/lib/bpf/linker.c @@ -2213,10 +2213,17 @@ static int linker_fixup_btf(struct src_obj *obj) vi = btf_var_secinfos(t); for (j = 0, m = btf_vlen(t); j < m; j++, vi++) { const struct btf_type *vt = btf__type_by_id(obj->btf, vi->type); - const char *var_name = btf__str_by_offset(obj->btf, vt->name_off); - int var_linkage = btf_var(vt)->linkage; + const char *var_name; + int var_linkage; Elf64_Sym *sym; + /* could be a variable or function */ + if (!btf_is_var(vt)) + continue; + + var_name = btf__str_by_offset(obj->btf, vt->name_off); + var_linkage = btf_var(vt)->linkage; + /* no need to patch up static or extern vars */ if (var_linkage != BTF_VAR_GLOBAL_ALLOCATED) continue; diff --git a/tools/memory-model/lock.cat b/tools/memory-model/lock.cat index 53b5a492739d..21ba65086938 100644 --- a/tools/memory-model/lock.cat +++ b/tools/memory-model/lock.cat @@ -102,19 +102,19 @@ let rf-lf = rfe-lf | rfi-lf * within one of the lock's critical sections returns False. *) -(* rfi for RU events: an RU may read from the last po-previous UL *) -let rfi-ru = ([UL] ; po-loc ; [RU]) \ ([UL] ; po-loc ; [LKW] ; po-loc) - -(* rfe for RU events: an RU may read from an external UL or the initial write *) -let all-possible-rfe-ru = - let possible-rfe-ru r = +(* + * rf for RU events: an RU may read from an external UL or the initial write, + * or from the last po-previous UL + *) +let all-possible-rf-ru = + let possible-rf-ru r = let pair-to-relation p = p ++ 0 - in map pair-to-relation (((UL | IW) * {r}) & loc & ext) - in map possible-rfe-ru RU + in map pair-to-relation ((((UL | IW) * {r}) & loc & ext) | + (((UL * {r}) & po-loc) \ ([UL] ; po-loc ; [LKW] ; po-loc))) + in map possible-rf-ru RU (* Generate all rf relations for RU events *) -with rfe-ru from cross(all-possible-rfe-ru) -let rf-ru = rfe-ru | rfi-ru +with rf-ru from cross(all-possible-rf-ru) (* Final rf relation *) let rf = rf | rf-lf | rf-ru diff --git a/tools/objtool/noreturns.h b/tools/objtool/noreturns.h index 7ebf29c91184..1e8141ef1b15 100644 --- a/tools/objtool/noreturns.h +++ b/tools/objtool/noreturns.h @@ -7,12 +7,16 @@ * Yes, this is unfortunate. A better solution is in the works. 
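A brief note on why the noreturns.h list being extended here keeps growing: __noreturn is a source-level annotation, and objtool analyzes the compiled object code where that information is no longer visible, so newly noreturn entry points such as the exit syscalls have to be spelled out by name. Illustration only; die() is an invented example, not kernel code.

#include <stdio.h>
#include <stdlib.h>

/* The attribute below only informs the compiler; a binary-analysis tool
 * like objtool cannot recover it from the object file, hence the list. */
__attribute__((__noreturn__)) void die(const char *msg)
{
        fprintf(stderr, "fatal: %s\n", msg);
        exit(1);
}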
*/ NORETURN(__fortify_panic) +NORETURN(__ia32_sys_exit) +NORETURN(__ia32_sys_exit_group) NORETURN(__kunit_abort) NORETURN(__module_put_and_kthread_exit) NORETURN(__reiserfs_panic) NORETURN(__stack_chk_fail) NORETURN(__tdx_hypercall_failed) NORETURN(__ubsan_handle_builtin_unreachable) +NORETURN(__x64_sys_exit) +NORETURN(__x64_sys_exit_group) NORETURN(arch_cpu_idle_dead) NORETURN(bch2_trans_in_restart_error) NORETURN(bch2_trans_restart_error) diff --git a/tools/perf/arch/powerpc/util/skip-callchain-idx.c b/tools/perf/arch/powerpc/util/skip-callchain-idx.c index 5f3edb3004d8..356786432fd3 100644 --- a/tools/perf/arch/powerpc/util/skip-callchain-idx.c +++ b/tools/perf/arch/powerpc/util/skip-callchain-idx.c @@ -159,9 +159,9 @@ static int check_return_addr(struct dso *dso, u64 map_start, Dwarf_Addr pc) Dwarf_Addr start = pc; Dwarf_Addr end = pc; bool signalp; - const char *exec_file = dso->long_name; + const char *exec_file = dso__long_name(dso); - dwfl = dso->dwfl; + dwfl = RC_CHK_ACCESS(dso)->dwfl; if (!dwfl) { dwfl = dwfl_begin(&offline_callbacks); @@ -183,7 +183,7 @@ static int check_return_addr(struct dso *dso, u64 map_start, Dwarf_Addr pc) dwfl_end(dwfl); goto out; } - dso->dwfl = dwfl; + RC_CHK_ACCESS(dso)->dwfl = dwfl; } mod = dwfl_addrmodule(dwfl, pc); @@ -267,7 +267,7 @@ int arch_skip_callchain_idx(struct thread *thread, struct ip_callchain *chain) rc = check_return_addr(dso, map__start(al.map), ip); pr_debug("[DSO %s, sym %s, ip 0x%" PRIx64 "] rc %d\n", - dso->long_name, al.sym->name, ip, rc); + dso__long_name(dso), al.sym->name, ip, rc); if (rc == 0) { /* diff --git a/tools/perf/arch/x86/util/intel-pt.c b/tools/perf/arch/x86/util/intel-pt.c index 6de7e2d21075..4b710e875953 100644 --- a/tools/perf/arch/x86/util/intel-pt.c +++ b/tools/perf/arch/x86/util/intel-pt.c @@ -32,6 +32,7 @@ #include "../../../util/tsc.h" #include <internal/lib.h> // page_size #include "../../../util/intel-pt.h" +#include <api/fs/fs.h> #define KiB(x) ((x) * 1024) #define MiB(x) ((x) * 1024 * 1024) @@ -428,6 +429,16 @@ static int intel_pt_track_switches(struct evlist *evlist) } #endif +static bool intel_pt_exclude_guest(void) +{ + int pt_mode; + + if (sysfs__read_int("module/kvm_intel/parameters/pt_mode", &pt_mode)) + pt_mode = 0; + + return pt_mode == 1; +} + static void intel_pt_valid_str(char *str, size_t len, u64 valid) { unsigned int val, last = 0, state = 1; @@ -620,6 +631,7 @@ static int intel_pt_recording_options(struct auxtrace_record *itr, } evsel->core.attr.freq = 0; evsel->core.attr.sample_period = 1; + evsel->core.attr.exclude_guest = intel_pt_exclude_guest(); evsel->no_aux_samples = true; evsel->needs_auxtrace_mmap = true; intel_pt_evsel = evsel; @@ -758,7 +770,8 @@ static int intel_pt_recording_options(struct auxtrace_record *itr, } if (!opts->auxtrace_snapshot_mode && !opts->auxtrace_sample_mode) { - u32 aux_watermark = opts->auxtrace_mmap_pages * page_size / 4; + size_t aw = opts->auxtrace_mmap_pages * (size_t)page_size / 4; + u32 aux_watermark = aw > UINT_MAX ? 
UINT_MAX : aw; intel_pt_evsel->core.attr.aux_watermark = aux_watermark; } diff --git a/tools/perf/tests/shell/test_arm_callgraph_fp.sh b/tools/perf/tests/shell/test_arm_callgraph_fp.sh index 61898e256616..9caa36130175 100755 --- a/tools/perf/tests/shell/test_arm_callgraph_fp.sh +++ b/tools/perf/tests/shell/test_arm_callgraph_fp.sh @@ -28,28 +28,21 @@ cleanup_files() trap cleanup_files EXIT TERM INT -# Add a 1 second delay to skip samples that are not in the leaf() function # shellcheck disable=SC2086 -perf record -o "$PERF_DATA" --call-graph fp -e cycles//u -D 1000 --user-callchains -- $TEST_PROGRAM 2> /dev/null & -PID=$! +perf record -o "$PERF_DATA" --call-graph fp -e cycles//u --user-callchains -- $TEST_PROGRAM -echo " + Recording (PID=$PID)..." -sleep 2 -echo " + Stopping perf-record..." - -kill $PID -wait $PID +# Try opening the file so any immediate errors are visible in the log +perf script -i "$PERF_DATA" -F comm,ip,sym | head -n4 -# expected perf-script output: +# expected perf-script output if 'leaf' has been inserted correctly: # -# program +# perf # 728 leaf # 753 parent # 76c leafloop -# ... +# ... remaining stack to main() ... -perf script -i "$PERF_DATA" -F comm,ip,sym | head -n4 -perf script -i "$PERF_DATA" -F comm,ip,sym | head -n4 | \ - awk '{ if ($2 != "") sym[i++] = $2 } END { if (sym[0] != "leaf" || - sym[1] != "parent" || - sym[2] != "leafloop") exit 1 }' +# Each frame is separated by a tab, some spaces and an address +SEP="[[:space:]]+ [[:xdigit:]]+" +perf script -i "$PERF_DATA" -F comm,ip,sym | tr '\n' ' ' | \ + grep -E -q "perf $SEP leaf $SEP parent $SEP leafloop" diff --git a/tools/perf/tests/workloads/leafloop.c b/tools/perf/tests/workloads/leafloop.c index 1bf5cc97649b..f7561767e32c 100644 --- a/tools/perf/tests/workloads/leafloop.c +++ b/tools/perf/tests/workloads/leafloop.c @@ -1,6 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0 */ +#include <signal.h> #include <stdlib.h> #include <linux/compiler.h> +#include <unistd.h> #include "../tests.h" /* We want to check these symbols in perf script */ @@ -8,10 +10,16 @@ noinline void leaf(volatile int b); noinline void parent(volatile int b); static volatile int a; +static volatile sig_atomic_t done; + +static void sighandler(int sig __maybe_unused) +{ + done = 1; +} noinline void leaf(volatile int b) { - for (;;) + while (!done) a += b; } @@ -22,12 +30,16 @@ noinline void parent(volatile int b) static int leafloop(int argc, const char **argv) { - int c = 1; + int sec = 1; if (argc > 0) - c = atoi(argv[0]); + sec = atoi(argv[0]); + + signal(SIGINT, sighandler); + signal(SIGALRM, sighandler); + alarm(sec); - parent(c); + parent(sec); return 0; } diff --git a/tools/perf/ui/gtk/annotate.c b/tools/perf/ui/gtk/annotate.c index 93ce3d47e47e..6da24aa039eb 100644 --- a/tools/perf/ui/gtk/annotate.c +++ b/tools/perf/ui/gtk/annotate.c @@ -180,13 +180,14 @@ static int symbol__gtk_annotate(struct map_symbol *ms, struct evsel *evsel, GtkWidget *tab_label; int err; - if (dso->annotate_warned) + if (dso__annotate_warned(dso)) return -1; err = symbol__annotate(ms, evsel, NULL); if (err) { char msg[BUFSIZ]; - dso->annotate_warned = true; + + dso__set_annotate_warned(dso); symbol__strerror_disassemble(ms, err, msg, sizeof(msg)); ui__error("Couldn't annotate %s: %s\n", sym->name, msg); return -1; diff --git a/tools/perf/util/cs-etm.c b/tools/perf/util/cs-etm.c index 32818bd7cd17..5e9fbcfad7d4 100644 --- a/tools/perf/util/cs-etm.c +++ b/tools/perf/util/cs-etm.c @@ -1013,7 +1013,7 @@ static u32 cs_etm__mem_access(struct cs_etm_queue *etmq, u8 
trace_chan_id, if (!dso) goto out; - if (dso->data.status == DSO_DATA_STATUS_ERROR && + if (dso__data(dso)->status == DSO_DATA_STATUS_ERROR && dso__data_status_seen(dso, DSO_DATA_STATUS_SEEN_ITRACE)) goto out; @@ -1027,11 +1027,11 @@ static u32 cs_etm__mem_access(struct cs_etm_queue *etmq, u8 trace_chan_id, if (len <= 0) { ui__warning_once("CS ETM Trace: Missing DSO. Use 'perf archive' or debuginfod to export data from the traced system.\n" " Enable CONFIG_PROC_KCORE or use option '-k /path/to/vmlinux' for kernel symbols.\n"); - if (!dso->auxtrace_warned) { + if (!dso__auxtrace_warned(dso)) { pr_err("CS ETM Trace: Debug data not found for address %#"PRIx64" in %s\n", - address, - dso->long_name ? dso->long_name : "Unknown"); - dso->auxtrace_warned = true; + address, + dso__long_name(dso) ? dso__long_name(dso) : "Unknown"); + dso__set_auxtrace_warned(dso); } goto out; } diff --git a/tools/perf/util/disasm.c b/tools/perf/util/disasm.c index 72aec8f61b94..e10558b79504 100644 --- a/tools/perf/util/disasm.c +++ b/tools/perf/util/disasm.c @@ -1199,7 +1199,7 @@ static int symbol__disassemble_bpf(struct symbol *sym, int ret; FILE *s; - if (dso->binary_type != DSO_BINARY_TYPE__BPF_PROG_INFO) + if (dso__binary_type(dso) != DSO_BINARY_TYPE__BPF_PROG_INFO) return SYMBOL_ANNOTATE_ERRNO__BPF_INVALID_FILE; pr_debug("%s: handling sym %s addr %" PRIx64 " len %" PRIx64 "\n", __func__, @@ -1226,14 +1226,14 @@ static int symbol__disassemble_bpf(struct symbol *sym, info.arch = bfd_get_arch(bfdf); info.mach = bfd_get_mach(bfdf); - info_node = perf_env__find_bpf_prog_info(dso->bpf_prog.env, - dso->bpf_prog.id); + info_node = perf_env__find_bpf_prog_info(dso__bpf_prog(dso)->env, + dso__bpf_prog(dso)->id); if (!info_node) { ret = SYMBOL_ANNOTATE_ERRNO__BPF_MISSING_BTF; goto out; } info_linear = info_node->info_linear; - sub_id = dso->bpf_prog.sub_id; + sub_id = dso__bpf_prog(dso)->sub_id; info.buffer = (void *)(uintptr_t)(info_linear->info.jited_prog_insns); info.buffer_length = info_linear->info.jited_prog_len; @@ -1244,7 +1244,7 @@ static int symbol__disassemble_bpf(struct symbol *sym, if (info_linear->info.btf_id) { struct btf_node *node; - node = perf_env__find_btf(dso->bpf_prog.env, + node = perf_env__find_btf(dso__bpf_prog(dso)->env, info_linear->info.btf_id); if (node) btf = btf__new((__u8 *)(node->data), diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c index dde706b71da7..67414944f245 100644 --- a/tools/perf/util/dso.c +++ b/tools/perf/util/dso.c @@ -1501,7 +1501,7 @@ void dso__delete(struct dso *dso) auxtrace_cache__free(RC_CHK_ACCESS(dso)->auxtrace_cache); dso_cache__free(dso); dso__free_a2l(dso); - zfree(&RC_CHK_ACCESS(dso)->symsrc_filename); + dso__free_symsrc_filename(dso); nsinfo__zput(RC_CHK_ACCESS(dso)->nsinfo); mutex_destroy(dso__lock(dso)); RC_CHK_FREE(dso); @@ -1652,3 +1652,15 @@ int dso__strerror_load(struct dso *dso, char *buf, size_t buflen) scnprintf(buf, buflen, "%s", dso_load__error_str[idx]); return 0; } + +bool perf_pid_map_tid(const char *dso_name, int *tid) +{ + return sscanf(dso_name, "/tmp/perf-%d.map", tid) == 1; +} + +bool is_perf_pid_map_name(const char *dso_name) +{ + int tid; + + return perf_pid_map_tid(dso_name, &tid); +} diff --git a/tools/perf/util/dso.h b/tools/perf/util/dso.h index df2c98402af3..ed0068251c65 100644 --- a/tools/perf/util/dso.h +++ b/tools/perf/util/dso.h @@ -280,6 +280,16 @@ static inline void dso__set_annotate_warned(struct dso *dso) RC_CHK_ACCESS(dso)->annotate_warned = 1; } +static inline bool dso__auxtrace_warned(const struct dso *dso) +{ + 
return RC_CHK_ACCESS(dso)->auxtrace_warned; +} + +static inline void dso__set_auxtrace_warned(struct dso *dso) +{ + RC_CHK_ACCESS(dso)->auxtrace_warned = 1; +} + static inline struct auxtrace_cache *dso__auxtrace_cache(struct dso *dso) { return RC_CHK_ACCESS(dso)->auxtrace_cache; @@ -592,6 +602,11 @@ static inline void dso__set_symsrc_filename(struct dso *dso, char *val) RC_CHK_ACCESS(dso)->symsrc_filename = val; } +static inline void dso__free_symsrc_filename(struct dso *dso) +{ + zfree(&RC_CHK_ACCESS(dso)->symsrc_filename); +} + static inline enum dso_binary_type dso__symtab_type(const struct dso *dso) { return RC_CHK_ACCESS(dso)->symtab_type; @@ -809,4 +824,8 @@ void reset_fd_limit(void); u64 dso__find_global_type(struct dso *dso, u64 addr); u64 dso__findnew_global_type(struct dso *dso, u64 addr, u64 offset); +/* Check if dso name is of format "/tmp/perf-%d.map" */ +bool perf_pid_map_tid(const char *dso_name, int *tid); +bool is_perf_pid_map_name(const char *dso_name); + #endif /* __PERF_DSO */ diff --git a/tools/perf/util/maps.c b/tools/perf/util/maps.c index 16b39db594f4..eaada3e0f5b4 100644 --- a/tools/perf/util/maps.c +++ b/tools/perf/util/maps.c @@ -741,7 +741,6 @@ static unsigned int first_ending_after(struct maps *maps, const struct map *map) */ static int __maps__fixup_overlap_and_insert(struct maps *maps, struct map *new) { - struct map **maps_by_address; int err = 0; FILE *fp = debug_file(); @@ -749,12 +748,12 @@ static int __maps__fixup_overlap_and_insert(struct maps *maps, struct map *new) if (!maps__maps_by_address_sorted(maps)) __maps__sort_by_address(maps); - maps_by_address = maps__maps_by_address(maps); /* * Iterate through entries where the end of the existing entry is * greater-than the new map's start. */ for (unsigned int i = first_ending_after(maps, new); i < maps__nr_maps(maps); ) { + struct map **maps_by_address = maps__maps_by_address(maps); struct map *pos = maps_by_address[i]; struct map *before = NULL, *after = NULL; @@ -821,8 +820,10 @@ static int __maps__fixup_overlap_and_insert(struct maps *maps, struct map *new) /* Maps are still ordered, go to next one. */ i++; if (after) { - __maps__insert(maps, after); + err = __maps__insert(maps, after); map__put(after); + if (err) + goto out_err; if (!maps__maps_by_address_sorted(maps)) { /* * Sorting broken so invariants don't @@ -851,7 +852,7 @@ static int __maps__fixup_overlap_and_insert(struct maps *maps, struct map *new) check_invariants(maps); } /* Add the map. 
*/ - __maps__insert(maps, new); + err = __maps__insert(maps, new); out_err: return err; } diff --git a/tools/perf/util/pmus.c b/tools/perf/util/pmus.c index b9b4c5eb5002..6907e3e7fbd1 100644 --- a/tools/perf/util/pmus.c +++ b/tools/perf/util/pmus.c @@ -477,8 +477,8 @@ void perf_pmus__print_pmu_events(const struct print_callbacks *print_cb, void *p qsort(aliases, len, sizeof(struct sevent), cmp_sevent); for (int j = 0; j < len; j++) { /* Skip duplicates */ - if (j > 0 && pmu_alias_is_duplicate(&aliases[j], &aliases[j - 1])) - continue; + if (j < len - 1 && pmu_alias_is_duplicate(&aliases[j], &aliases[j + 1])) + goto free; print_cb->print_event(print_state, aliases[j].pmu_name, @@ -491,6 +491,7 @@ void perf_pmus__print_pmu_events(const struct print_callbacks *print_cb, void *p aliases[j].desc, aliases[j].long_desc, aliases[j].encoding_desc); +free: zfree(&aliases[j].name); zfree(&aliases[j].alias); zfree(&aliases[j].scale_unit); diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c index cd39ea972193..ab7c7ff35f9b 100644 --- a/tools/perf/util/sort.c +++ b/tools/perf/util/sort.c @@ -334,7 +334,7 @@ sort__sym_cmp(struct hist_entry *left, struct hist_entry *right) * comparing symbol address alone is not enough since it's a * relative address within a dso. */ - if (!hists__has(left->hists, dso) || hists__has(right->hists, dso)) { + if (!hists__has(left->hists, dso)) { ret = sort__dso_cmp(left, right); if (ret != 0) return ret; diff --git a/tools/perf/util/srcline.c b/tools/perf/util/srcline.c index 9d670d8c1c08..4d67c1e095a1 100644 --- a/tools/perf/util/srcline.c +++ b/tools/perf/util/srcline.c @@ -288,7 +288,7 @@ static int inline_list__append_dso_a2l(struct dso *dso, struct inline_node *node, struct symbol *sym) { - struct a2l_data *a2l = dso->a2l; + struct a2l_data *a2l = dso__a2l(dso); struct symbol *inline_sym = new_inline_sym(dso, sym, a2l->funcname); char *srcline = NULL; @@ -304,11 +304,11 @@ static int addr2line(const char *dso_name, u64 addr, struct symbol *sym) { int ret = 0; - struct a2l_data *a2l = dso->a2l; + struct a2l_data *a2l = dso__a2l(dso); if (!a2l) { - dso->a2l = addr2line_init(dso_name); - a2l = dso->a2l; + a2l = addr2line_init(dso_name); + dso__set_a2l(dso, a2l); } if (a2l == NULL) { @@ -360,14 +360,14 @@ static int addr2line(const char *dso_name, u64 addr, void dso__free_a2l(struct dso *dso) { - struct a2l_data *a2l = dso->a2l; + struct a2l_data *a2l = dso__a2l(dso); if (!a2l) return; addr2line_cleanup(a2l); - dso->a2l = NULL; + dso__set_a2l(dso, NULL); } #else /* HAVE_LIBBFD_SUPPORT */ diff --git a/tools/perf/util/stat-display.c b/tools/perf/util/stat-display.c index 91d2f7f65df7..186305fd2d0e 100644 --- a/tools/perf/util/stat-display.c +++ b/tools/perf/util/stat-display.c @@ -38,6 +38,7 @@ static int aggr_header_lens[] = { [AGGR_CORE] = 18, [AGGR_CACHE] = 22, + [AGGR_CLUSTER] = 20, [AGGR_DIE] = 12, [AGGR_SOCKET] = 6, [AGGR_NODE] = 6, @@ -49,6 +50,7 @@ static int aggr_header_lens[] = { static const char *aggr_header_csv[] = { [AGGR_CORE] = "core,cpus,", [AGGR_CACHE] = "cache,cpus,", + [AGGR_CLUSTER] = "cluster,cpus,", [AGGR_DIE] = "die,cpus,", [AGGR_SOCKET] = "socket,cpus,", [AGGR_NONE] = "cpu,", @@ -60,6 +62,7 @@ static const char *aggr_header_csv[] = { static const char *aggr_header_std[] = { [AGGR_CORE] = "core", [AGGR_CACHE] = "cache", + [AGGR_CLUSTER] = "cluster", [AGGR_DIE] = "die", [AGGR_SOCKET] = "socket", [AGGR_NONE] = "cpu", diff --git a/tools/perf/util/stat-shadow.c b/tools/perf/util/stat-shadow.c index 3466aa952442..6bb975e46de3 100644 --- 
a/tools/perf/util/stat-shadow.c +++ b/tools/perf/util/stat-shadow.c @@ -176,6 +176,13 @@ static double find_stat(const struct evsel *evsel, int aggr_idx, enum stat_type if (type != evsel__stat_type(cur)) continue; + /* + * Except the SW CLOCK events, + * ignore if not the PMU we're looking for. + */ + if ((type != STAT_NSECS) && (evsel->pmu != cur->pmu)) + continue; + aggr = &cur->stats->aggr[aggr_idx]; if (type == STAT_NSECS) return aggr->counts.val; diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index 9e5940b5bc59..22646f0cca7d 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -1607,7 +1607,7 @@ int dso__load_bfd_symbols(struct dso *dso, const char *debugfile) if (!bfd_check_format(abfd, bfd_object)) { pr_debug2("%s: cannot read %s bfd file.\n", __func__, - dso->long_name); + dso__long_name(dso)); goto out_close; } @@ -1640,12 +1640,13 @@ int dso__load_bfd_symbols(struct dso *dso, const char *debugfile) } if (i < symbols_count) { /* PE symbols can only have 4 bytes, so use .text high bits */ - dso->text_offset = section->vma - (u32)section->vma; - dso->text_offset += (u32)bfd_asymbol_value(symbols[i]); - dso->text_end = (section->vma - dso->text_offset) + section->size; + u64 text_offset = (section->vma - (u32)section->vma) + + (u32)bfd_asymbol_value(symbols[i]); + dso__set_text_offset(dso, text_offset); + dso__set_text_end(dso, (section->vma - text_offset) + section->size); } else { - dso->text_offset = section->vma - section->filepos; - dso->text_end = section->filepos + section->size; + dso__set_text_offset(dso, section->vma - section->filepos); + dso__set_text_end(dso, section->filepos + section->size); } } @@ -1671,7 +1672,7 @@ int dso__load_bfd_symbols(struct dso *dso, const char *debugfile) else len = section->size - sym->value; - start = bfd_asymbol_value(sym) - dso->text_offset; + start = bfd_asymbol_value(sym) - dso__text_offset(dso); symbol = symbol__new(start, len, bfd2elf_binding(sym), STT_FUNC, bfd_asymbol_name(sym)); if (!symbol) @@ -1799,7 +1800,8 @@ int dso__load(struct dso *dso, struct map *map) const char *map_path = dso__long_name(dso); mutex_lock(dso__lock(dso)); - perfmap = strncmp(dso__name(dso), "/tmp/perf-", 10) == 0; + perfmap = is_perf_pid_map_name(map_path); + if (perfmap) { if (dso__nsinfo(dso) && (dso__find_perf_map(newmapname, sizeof(newmapname), diff --git a/tools/perf/util/unwind-libdw.c b/tools/perf/util/unwind-libdw.c index b38d322734b4..bde216e630d2 100644 --- a/tools/perf/util/unwind-libdw.c +++ b/tools/perf/util/unwind-libdw.c @@ -29,8 +29,8 @@ static int __find_debuginfo(Dwfl_Module *mod __maybe_unused, void **userdata, const struct dso *dso = *userdata; assert(dso); - if (dso->symsrc_filename && strcmp (file_name, dso->symsrc_filename)) - *debuginfo_file_name = strdup(dso->symsrc_filename); + if (dso__symsrc_filename(dso) && strcmp(file_name, dso__symsrc_filename(dso))) + *debuginfo_file_name = strdup(dso__symsrc_filename(dso)); return -1; } @@ -66,7 +66,7 @@ static int __report_module(struct addr_location *al, u64 ip, * a different code in another DSO. So just use the map->start * directly to pick the correct one. 
*/ - if (!strncmp(dso->long_name, "/tmp/jitted-", 12)) + if (!strncmp(dso__long_name(dso), "/tmp/jitted-", 12)) base = map__start(al->map); else base = map__start(al->map) - map__pgoff(al->map); @@ -83,15 +83,15 @@ static int __report_module(struct addr_location *al, u64 ip, if (!mod) { char filename[PATH_MAX]; - __symbol__join_symfs(filename, sizeof(filename), dso->long_name); - mod = dwfl_report_elf(ui->dwfl, dso->short_name, filename, -1, + __symbol__join_symfs(filename, sizeof(filename), dso__long_name(dso)); + mod = dwfl_report_elf(ui->dwfl, dso__short_name(dso), filename, -1, base, false); } if (!mod) { char filename[PATH_MAX]; if (dso__build_id_filename(dso, filename, sizeof(filename), false)) - mod = dwfl_report_elf(ui->dwfl, dso->short_name, filename, -1, + mod = dwfl_report_elf(ui->dwfl, dso__short_name(dso), filename, -1, base, false); } diff --git a/tools/perf/util/unwind-libunwind-local.c b/tools/perf/util/unwind-libunwind-local.c index cde267ea3e99..7460bb96bd22 100644 --- a/tools/perf/util/unwind-libunwind-local.c +++ b/tools/perf/util/unwind-libunwind-local.c @@ -363,7 +363,7 @@ static int read_unwind_spec_debug_frame(struct dso *dso, struct machine *machine, u64 *offset) { int fd; - u64 ofs = dso->data.debug_frame_offset; + u64 ofs = dso__data(dso)->debug_frame_offset; /* debug_frame can reside in: * - dso @@ -379,7 +379,7 @@ static int read_unwind_spec_debug_frame(struct dso *dso, } if (ofs <= 0) { - fd = open(dso->symsrc_filename, O_RDONLY); + fd = open(dso__symsrc_filename(dso), O_RDONLY); if (fd >= 0) { ofs = elf_section_offset(fd, ".debug_frame"); close(fd); @@ -402,21 +402,21 @@ static int read_unwind_spec_debug_frame(struct dso *dso, } } if (ofs > 0) { - if (dso->symsrc_filename != NULL) { + if (dso__symsrc_filename(dso) != NULL) { pr_warning( "%s: overwrite symsrc(%s,%s)\n", __func__, - dso->symsrc_filename, + dso__symsrc_filename(dso), debuglink); - zfree(&dso->symsrc_filename); + dso__free_symsrc_filename(dso); } - dso->symsrc_filename = debuglink; + dso__set_symsrc_filename(dso, debuglink); } else { free(debuglink); } } - dso->data.debug_frame_offset = ofs; + dso__data(dso)->debug_frame_offset = ofs; } *offset = ofs; @@ -481,7 +481,7 @@ find_proc_info(unw_addr_space_t as, unw_word_t ip, unw_proc_info_t *pi, if (ret < 0 && !read_unwind_spec_debug_frame(dso, ui->machine, &segbase)) { int fd = dso__data_get_fd(dso, ui->machine); - int is_exec = elf_is_exec(fd, dso->name); + int is_exec = elf_is_exec(fd, dso__name(dso)); u64 start = map__start(map); unw_word_t base = is_exec ? 
0 : start; const char *symfile; @@ -489,7 +489,7 @@ find_proc_info(unw_addr_space_t as, unw_word_t ip, unw_proc_info_t *pi, if (fd >= 0) dso__data_put_fd(dso); - symfile = dso->symsrc_filename ?: dso->name; + symfile = dso__symsrc_filename(dso) ?: dso__name(dso); memset(&di, 0, sizeof(di)); if (dwarf_find_debug_frame(0, &di, ip, base, symfile, start, map__end(map))) diff --git a/tools/testing/selftests/bpf/bpf_kfuncs.h b/tools/testing/selftests/bpf/bpf_kfuncs.h index be91a6919315..3b6675ab4086 100644 --- a/tools/testing/selftests/bpf/bpf_kfuncs.h +++ b/tools/testing/selftests/bpf/bpf_kfuncs.h @@ -77,5 +77,5 @@ extern int bpf_verify_pkcs7_signature(struct bpf_dynptr *data_ptr, struct bpf_key *trusted_keyring) __ksym; extern bool bpf_session_is_return(void) __ksym __weak; -extern long *bpf_session_cookie(void) __ksym __weak; +extern __u64 *bpf_session_cookie(void) __ksym __weak; #endif diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c b/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c index 0aca02532794..3f0daf660703 100644 --- a/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c +++ b/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c @@ -307,7 +307,8 @@ static void test_update_ca(void) return; link = bpf_map__attach_struct_ops(skel->maps.ca_update_1); - ASSERT_OK_PTR(link, "attach_struct_ops"); + if (!ASSERT_OK_PTR(link, "attach_struct_ops")) + goto out; do_test("tcp_ca_update", NULL); saved_ca1_cnt = skel->bss->ca1_cnt; @@ -321,6 +322,7 @@ static void test_update_ca(void) ASSERT_GT(skel->bss->ca2_cnt, 0, "ca2_ca2_cnt"); bpf_link__destroy(link); +out: tcp_ca_update__destroy(skel); } @@ -336,7 +338,8 @@ static void test_update_wrong(void) return; link = bpf_map__attach_struct_ops(skel->maps.ca_update_1); - ASSERT_OK_PTR(link, "attach_struct_ops"); + if (!ASSERT_OK_PTR(link, "attach_struct_ops")) + goto out; do_test("tcp_ca_update", NULL); saved_ca1_cnt = skel->bss->ca1_cnt; @@ -349,6 +352,7 @@ static void test_update_wrong(void) ASSERT_GT(skel->bss->ca1_cnt, saved_ca1_cnt, "ca2_ca1_cnt"); bpf_link__destroy(link); +out: tcp_ca_update__destroy(skel); } @@ -363,7 +367,8 @@ static void test_mixed_links(void) return; link_nl = bpf_map__attach_struct_ops(skel->maps.ca_no_link); - ASSERT_OK_PTR(link_nl, "attach_struct_ops_nl"); + if (!ASSERT_OK_PTR(link_nl, "attach_struct_ops_nl")) + goto out; link = bpf_map__attach_struct_ops(skel->maps.ca_update_1); ASSERT_OK_PTR(link, "attach_struct_ops"); @@ -376,6 +381,7 @@ static void test_mixed_links(void) bpf_link__destroy(link); bpf_link__destroy(link_nl); +out: tcp_ca_update__destroy(skel); } @@ -418,7 +424,8 @@ static void test_link_replace(void) bpf_link__destroy(link); link = bpf_map__attach_struct_ops(skel->maps.ca_update_2); - ASSERT_OK_PTR(link, "attach_struct_ops_2nd"); + if (!ASSERT_OK_PTR(link, "attach_struct_ops_2nd")) + goto out; /* BPF_F_REPLACE with a wrong old map Fd. It should fail! 
* @@ -441,6 +448,7 @@ static void test_link_replace(void) bpf_link__destroy(link); +out: tcp_ca_update__destroy(skel); } diff --git a/tools/testing/selftests/bpf/prog_tests/fexit_sleep.c b/tools/testing/selftests/bpf/prog_tests/fexit_sleep.c index f949647dbbc2..552a0875ca6d 100644 --- a/tools/testing/selftests/bpf/prog_tests/fexit_sleep.c +++ b/tools/testing/selftests/bpf/prog_tests/fexit_sleep.c @@ -21,13 +21,13 @@ static int do_sleep(void *skel) } #define STACK_SIZE (1024 * 1024) -static char child_stack[STACK_SIZE]; void test_fexit_sleep(void) { struct fexit_sleep_lskel *fexit_skel = NULL; int wstatus, duration = 0; pid_t cpid; + char *child_stack = NULL; int err, fexit_cnt; fexit_skel = fexit_sleep_lskel__open_and_load(); @@ -38,6 +38,11 @@ void test_fexit_sleep(void) if (CHECK(err, "fexit_attach", "fexit attach failed: %d\n", err)) goto cleanup; + child_stack = mmap(NULL, STACK_SIZE, PROT_READ | PROT_WRITE, MAP_PRIVATE | + MAP_ANONYMOUS | MAP_STACK, -1, 0); + if (!ASSERT_NEQ(child_stack, MAP_FAILED, "mmap")) + goto cleanup; + cpid = clone(do_sleep, child_stack + STACK_SIZE, CLONE_FILES | SIGCHLD, fexit_skel); if (CHECK(cpid == -1, "clone", "%s\n", strerror(errno))) goto cleanup; @@ -78,5 +83,6 @@ void test_fexit_sleep(void) goto cleanup; cleanup: + munmap(child_stack, STACK_SIZE); fexit_sleep_lskel__destroy(fexit_skel); } diff --git a/tools/testing/selftests/bpf/prog_tests/sk_lookup.c b/tools/testing/selftests/bpf/prog_tests/sk_lookup.c index 597d0467a926..de2466547efe 100644 --- a/tools/testing/selftests/bpf/prog_tests/sk_lookup.c +++ b/tools/testing/selftests/bpf/prog_tests/sk_lookup.c @@ -994,7 +994,7 @@ static void drop_on_reuseport(const struct test *t) err = update_lookup_map(t->sock_map, SERVER_A, server1); if (err) - goto detach; + goto close_srv1; /* second server on destination address we should never reach */ server2 = make_server(t->sotype, t->connect_to.ip, t->connect_to.port, diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c b/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c index f09505f8b038..53d6ad8c2257 100644 --- a/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c +++ b/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c @@ -222,7 +222,7 @@ static void test_xdp_adjust_frags_tail_grow(void) prog = bpf_object__next_program(obj, NULL); if (bpf_object__load(obj)) - return; + goto out; prog_fd = bpf_program__fd(prog); diff --git a/tools/testing/selftests/bpf/progs/btf_dump_test_case_multidim.c b/tools/testing/selftests/bpf/progs/btf_dump_test_case_multidim.c index ba97165bdb28..a657651eba52 100644 --- a/tools/testing/selftests/bpf/progs/btf_dump_test_case_multidim.c +++ b/tools/testing/selftests/bpf/progs/btf_dump_test_case_multidim.c @@ -14,9 +14,9 @@ typedef int *ptr_arr_t[6]; typedef int *ptr_multiarr_t[7][8][9][10]; -typedef int * (*fn_ptr_arr_t[11])(); +typedef int * (*fn_ptr_arr_t[11])(void); -typedef int * (*fn_ptr_multiarr_t[12][13])(); +typedef int * (*fn_ptr_multiarr_t[12][13])(void); struct root_struct { arr_t _1; diff --git a/tools/testing/selftests/bpf/progs/btf_dump_test_case_syntax.c b/tools/testing/selftests/bpf/progs/btf_dump_test_case_syntax.c index ad21ee8c7e23..29d01fff32bd 100644 --- a/tools/testing/selftests/bpf/progs/btf_dump_test_case_syntax.c +++ b/tools/testing/selftests/bpf/progs/btf_dump_test_case_syntax.c @@ -100,7 +100,7 @@ typedef void (*printf_fn_t)(const char *, ...); * `int -> char *` function and returns pointer to a char. 
Equivalent: * typedef char * (*fn_input_t)(int); * typedef char * (*fn_output_outer_t)(fn_input_t); - * typedef const fn_output_outer_t (* fn_output_inner_t)(); + * typedef const fn_output_outer_t (* fn_output_inner_t)(void); * typedef const fn_output_inner_t fn_ptr_arr2_t[5]; */ /* ----- START-EXPECTED-OUTPUT ----- */ @@ -127,7 +127,7 @@ typedef void (* (*signal_t)(int, void (*)(int)))(int); typedef char * (*fn_ptr_arr1_t[10])(int **); -typedef char * (* (* const fn_ptr_arr2_t[5])())(char * (*)(int)); +typedef char * (* (* const fn_ptr_arr2_t[5])(void))(char * (*)(int)); struct struct_w_typedefs { int_t a; diff --git a/tools/testing/selftests/bpf/progs/kprobe_multi_session_cookie.c b/tools/testing/selftests/bpf/progs/kprobe_multi_session_cookie.c index d49070803e22..0835b5edf685 100644 --- a/tools/testing/selftests/bpf/progs/kprobe_multi_session_cookie.c +++ b/tools/testing/selftests/bpf/progs/kprobe_multi_session_cookie.c @@ -25,7 +25,7 @@ int BPF_PROG(trigger) static int check_cookie(__u64 val, __u64 *result) { - long *cookie; + __u64 *cookie; if (bpf_get_current_pid_tgid() >> 32 != pid) return 1; diff --git a/tools/testing/selftests/bpf/test_sockmap.c b/tools/testing/selftests/bpf/test_sockmap.c index 92752f5eeded..a709911cddd2 100644 --- a/tools/testing/selftests/bpf/test_sockmap.c +++ b/tools/testing/selftests/bpf/test_sockmap.c @@ -63,7 +63,7 @@ int passed; int failed; int map_fd[9]; struct bpf_map *maps[9]; -int prog_fd[11]; +int prog_fd[9]; int txmsg_pass; int txmsg_redir; @@ -680,7 +680,8 @@ static int msg_loop(int fd, int iov_count, int iov_length, int cnt, } } - s->bytes_recvd += recv; + if (recv > 0) + s->bytes_recvd += recv; if (opt->check_recved_len && s->bytes_recvd > total_bytes) { errno = EMSGSIZE; @@ -1793,8 +1794,6 @@ int prog_attach_type[] = { BPF_SK_MSG_VERDICT, BPF_SK_MSG_VERDICT, BPF_SK_MSG_VERDICT, - BPF_SK_MSG_VERDICT, - BPF_SK_MSG_VERDICT, }; int prog_type[] = { @@ -1807,8 +1806,6 @@ int prog_type[] = { BPF_PROG_TYPE_SK_MSG, BPF_PROG_TYPE_SK_MSG, BPF_PROG_TYPE_SK_MSG, - BPF_PROG_TYPE_SK_MSG, - BPF_PROG_TYPE_SK_MSG, }; static int populate_progs(char *bpf_file) diff --git a/tools/testing/selftests/damon/access_memory.c b/tools/testing/selftests/damon/access_memory.c index 585a2fa54329..56b17e8fe1be 100644 --- a/tools/testing/selftests/damon/access_memory.c +++ b/tools/testing/selftests/damon/access_memory.c @@ -35,7 +35,7 @@ int main(int argc, char *argv[]) start_clock = clock(); while ((clock() - start_clock) * 1000 / CLOCKS_PER_SEC < access_time_ms) - memset(regions[i], i, 1024 * 1024 * 10); + memset(regions[i], i, sz_region); } return 0; } diff --git a/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower.sh b/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower.sh index 31252bc8775e..4994bea5daf8 100755 --- a/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower.sh +++ b/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower.sh @@ -11,7 +11,7 @@ ALL_TESTS="single_mask_test identical_filters_test two_masks_test \ multiple_masks_test ctcam_edge_cases_test delta_simple_test \ delta_two_masks_one_key_test delta_simple_rehash_test \ bloom_simple_test bloom_complex_test bloom_delta_test \ - max_erp_entries_test max_group_size_test" + max_erp_entries_test max_group_size_test collision_test" NUM_NETIFS=2 source $lib_dir/lib.sh source $lib_dir/tc_common.sh @@ -457,7 +457,7 @@ delta_two_masks_one_key_test() { # If 2 keys are the same and only differ in mask in a way that # they belong under the same ERP (second is delta of the first), 
- # there should be no C-TCAM spill. + # there should be C-TCAM spill. RET=0 @@ -474,8 +474,8 @@ delta_two_masks_one_key_test() tp_record "mlxsw:*" "tc filter add dev $h2 ingress protocol ip \ pref 2 handle 102 flower $tcflags dst_ip 192.0.2.2 \ action drop" - tp_check_hits "mlxsw:mlxsw_sp_acl_atcam_entry_add_ctcam_spill" 0 - check_err $? "incorrect C-TCAM spill while inserting the second rule" + tp_check_hits "mlxsw:mlxsw_sp_acl_atcam_entry_add_ctcam_spill" 1 + check_err $? "C-TCAM spill did not happen while inserting the second rule" $MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac -A 192.0.2.1 -B 192.0.2.2 \ -t ip -q @@ -1087,6 +1087,53 @@ max_group_size_test() log_test "max ACL group size test ($tcflags). max size $max_size" } +collision_test() +{ + # Filters cannot share an eRP if in the common unmasked part (i.e., + # without the delta bits) they have the same values. If the driver does + # not prevent such configuration (by spilling into the C-TCAM), then + # multiple entries will be present in the device with the same key, + # leading to collisions and a reduced scale. + # + # Create such a scenario and make sure all the filters are successfully + # added. + + RET=0 + + local ret + + if [[ "$tcflags" != "skip_sw" ]]; then + return 0; + fi + + # Add a single dst_ip/24 filter and multiple dst_ip/32 filters that all + # have the same values in the common unmasked part (dst_ip/24). + + tc filter add dev $h2 ingress pref 1 proto ipv4 handle 101 \ + flower $tcflags dst_ip 198.51.100.0/24 \ + action drop + + for i in {0..255}; do + tc filter add dev $h2 ingress pref 2 proto ipv4 \ + handle $((102 + i)) \ + flower $tcflags dst_ip 198.51.100.${i}/32 \ + action drop + ret=$? + [[ $ret -ne 0 ]] && break + done + + check_err $ret "failed to add all the filters" + + for i in {255..0}; do + tc filter del dev $h2 ingress pref 2 proto ipv4 \ + handle $((102 + i)) flower + done + + tc filter del dev $h2 ingress pref 1 proto ipv4 handle 101 flower + + log_test "collision test ($tcflags)" +} + setup_prepare() { h1=${NETIFS[p1]} diff --git a/tools/testing/selftests/iommu/iommufd.c b/tools/testing/selftests/iommu/iommufd.c index edf1c99c9936..5f7d5a5ba89b 100644 --- a/tools/testing/selftests/iommu/iommufd.c +++ b/tools/testing/selftests/iommu/iommufd.c @@ -1722,10 +1722,17 @@ FIXTURE_VARIANT(iommufd_dirty_tracking) FIXTURE_SETUP(iommufd_dirty_tracking) { + unsigned long size; int mmap_flags; void *vrc; int rc; + if (variant->buffer_size < MOCK_PAGE_SIZE) { + SKIP(return, + "Skipping buffer_size=%lu, less than MOCK_PAGE_SIZE=%lu", + variant->buffer_size, MOCK_PAGE_SIZE); + } + self->fd = open("/dev/iommu", O_RDWR); ASSERT_NE(-1, self->fd); @@ -1749,12 +1756,11 @@ FIXTURE_SETUP(iommufd_dirty_tracking) assert(vrc == self->buffer); self->page_size = MOCK_PAGE_SIZE; - self->bitmap_size = - variant->buffer_size / self->page_size / BITS_PER_BYTE; + self->bitmap_size = variant->buffer_size / self->page_size; /* Provision with an extra (PAGE_SIZE) for the unaligned case */ - rc = posix_memalign(&self->bitmap, PAGE_SIZE, - self->bitmap_size + PAGE_SIZE); + size = DIV_ROUND_UP(self->bitmap_size, BITS_PER_BYTE); + rc = posix_memalign(&self->bitmap, PAGE_SIZE, size + PAGE_SIZE); assert(!rc); assert(self->bitmap); assert((uintptr_t)self->bitmap % PAGE_SIZE == 0); @@ -1775,51 +1781,63 @@ FIXTURE_SETUP(iommufd_dirty_tracking) FIXTURE_TEARDOWN(iommufd_dirty_tracking) { munmap(self->buffer, variant->buffer_size); - munmap(self->bitmap, self->bitmap_size); + munmap(self->bitmap, DIV_ROUND_UP(self->bitmap_size, BITS_PER_BYTE)); 
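In the iommufd fixture changes above, self->bitmap_size now counts bits (one per mock page of the buffer) and is converted to bytes with DIV_ROUND_UP only at allocation and unmap time. A small arithmetic sketch of that sizing, assuming the mock IOMMU page size is 2048 bytes (half of a 4 KiB page, which is what variant comments such as "one u32 index bitmap" imply); the constants below are restated locally for illustration only:

#include <assert.h>

#define BITS_PER_BYTE		8UL
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
/* Assumed mock IOMMU page size used by the selftest driver. */
#define MOCK_PAGE_SIZE		2048UL

int main(void)
{
	/* e.g. the domain_dirty320k variant */
	unsigned long buffer_size = 320UL * 1024UL;
	unsigned long nbits = buffer_size / MOCK_PAGE_SIZE;	/* one dirty bit per mock page */
	unsigned long nbytes = DIV_ROUND_UP(nbits, BITS_PER_BYTE);

	assert(nbits == 160);	/* two u64 words plus a trailing 32-bit tail */
	assert(nbytes == 20);	/* bytes actually allocated (plus PAGE_SIZE slack) */
	return 0;
}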
teardown_iommufd(self->fd, _metadata); } -FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty128k) +FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty8k) +{ + /* half of an u8 index bitmap */ + .buffer_size = 8UL * 1024UL, +}; + +FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty16k) +{ + /* one u8 index bitmap */ + .buffer_size = 16UL * 1024UL, +}; + +FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty64k) { /* one u32 index bitmap */ - .buffer_size = 128UL * 1024UL, + .buffer_size = 64UL * 1024UL, }; -FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty256k) +FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty128k) { /* one u64 index bitmap */ - .buffer_size = 256UL * 1024UL, + .buffer_size = 128UL * 1024UL, }; -FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty640k) +FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty320k) { /* two u64 index and trailing end bitmap */ - .buffer_size = 640UL * 1024UL, + .buffer_size = 320UL * 1024UL, }; -FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty128M) +FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty64M) { - /* 4K bitmap (128M IOVA range) */ - .buffer_size = 128UL * 1024UL * 1024UL, + /* 4K bitmap (64M IOVA range) */ + .buffer_size = 64UL * 1024UL * 1024UL, }; -FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty128M_huge) +FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty64M_huge) { - /* 4K bitmap (128M IOVA range) */ - .buffer_size = 128UL * 1024UL * 1024UL, + /* 4K bitmap (64M IOVA range) */ + .buffer_size = 64UL * 1024UL * 1024UL, .hugepages = true, }; -FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty256M) +FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty128M) { - /* 8K bitmap (256M IOVA range) */ - .buffer_size = 256UL * 1024UL * 1024UL, + /* 8K bitmap (128M IOVA range) */ + .buffer_size = 128UL * 1024UL * 1024UL, }; -FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty256M_huge) +FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty128M_huge) { - /* 8K bitmap (256M IOVA range) */ - .buffer_size = 256UL * 1024UL * 1024UL, + /* 8K bitmap (128M IOVA range) */ + .buffer_size = 128UL * 1024UL * 1024UL, .hugepages = true, }; diff --git a/tools/testing/selftests/iommu/iommufd_utils.h b/tools/testing/selftests/iommu/iommufd_utils.h index 8d2b46b2114d..c612fbf0195b 100644 --- a/tools/testing/selftests/iommu/iommufd_utils.h +++ b/tools/testing/selftests/iommu/iommufd_utils.h @@ -22,6 +22,8 @@ #define BIT_MASK(nr) (1UL << ((nr) % __BITS_PER_LONG)) #define BIT_WORD(nr) ((nr) / __BITS_PER_LONG) +#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d)) + static inline void set_bit(unsigned int nr, unsigned long *addr) { unsigned long mask = BIT_MASK(nr); @@ -346,12 +348,12 @@ static int _test_cmd_mock_domain_set_dirty(int fd, __u32 hwpt_id, size_t length, static int _test_mock_dirty_bitmaps(int fd, __u32 hwpt_id, size_t length, __u64 iova, size_t page_size, size_t pte_page_size, __u64 *bitmap, - __u64 bitmap_size, __u32 flags, + __u64 nbits, __u32 flags, struct __test_metadata *_metadata) { unsigned long npte = pte_page_size / page_size, pteset = 2 * npte; - unsigned long nbits = bitmap_size * BITS_PER_BYTE; unsigned long j, i, nr = nbits / pteset ?: 1; + unsigned long bitmap_size = DIV_ROUND_UP(nbits, BITS_PER_BYTE); __u64 out_dirty = 0; /* Mark all even bits as dirty in the mock domain */ diff --git a/tools/testing/selftests/landlock/base_test.c b/tools/testing/selftests/landlock/base_test.c index 3c1e9f35b531..3b26bf3cf5b9 100644 --- 
a/tools/testing/selftests/landlock/base_test.c +++ b/tools/testing/selftests/landlock/base_test.c @@ -9,6 +9,7 @@ #define _GNU_SOURCE #include <errno.h> #include <fcntl.h> +#include <linux/keyctl.h> #include <linux/landlock.h> #include <string.h> #include <sys/prctl.h> @@ -326,4 +327,77 @@ TEST(ruleset_fd_transfer) ASSERT_EQ(EXIT_SUCCESS, WEXITSTATUS(status)); } +TEST(cred_transfer) +{ + struct landlock_ruleset_attr ruleset_attr = { + .handled_access_fs = LANDLOCK_ACCESS_FS_READ_DIR, + }; + int ruleset_fd, dir_fd; + pid_t child; + int status; + + drop_caps(_metadata); + + dir_fd = open("/", O_RDONLY | O_DIRECTORY | O_CLOEXEC); + EXPECT_LE(0, dir_fd); + EXPECT_EQ(0, close(dir_fd)); + + /* Denies opening directories. */ + ruleset_fd = + landlock_create_ruleset(&ruleset_attr, sizeof(ruleset_attr), 0); + ASSERT_LE(0, ruleset_fd); + EXPECT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)); + ASSERT_EQ(0, landlock_restrict_self(ruleset_fd, 0)); + EXPECT_EQ(0, close(ruleset_fd)); + + /* Checks ruleset enforcement. */ + EXPECT_EQ(-1, open("/", O_RDONLY | O_DIRECTORY | O_CLOEXEC)); + EXPECT_EQ(EACCES, errno); + + /* Needed for KEYCTL_SESSION_TO_PARENT permission checks */ + EXPECT_NE(-1, syscall(__NR_keyctl, KEYCTL_JOIN_SESSION_KEYRING, NULL, 0, + 0, 0)) + { + TH_LOG("Failed to join session keyring: %s", strerror(errno)); + } + + child = fork(); + ASSERT_LE(0, child); + if (child == 0) { + /* Checks ruleset enforcement. */ + EXPECT_EQ(-1, open("/", O_RDONLY | O_DIRECTORY | O_CLOEXEC)); + EXPECT_EQ(EACCES, errno); + + /* + * KEYCTL_SESSION_TO_PARENT is a no-op unless we have a + * different session keyring in the child, so make that happen. + */ + EXPECT_NE(-1, syscall(__NR_keyctl, KEYCTL_JOIN_SESSION_KEYRING, + NULL, 0, 0, 0)); + + /* + * KEYCTL_SESSION_TO_PARENT installs credentials on the parent + * that never go through the cred_prepare hook, this path uses + * cred_transfer instead. + */ + EXPECT_EQ(0, syscall(__NR_keyctl, KEYCTL_SESSION_TO_PARENT, 0, + 0, 0, 0)); + + /* Re-checks ruleset enforcement. */ + EXPECT_EQ(-1, open("/", O_RDONLY | O_DIRECTORY | O_CLOEXEC)); + EXPECT_EQ(EACCES, errno); + + _exit(_metadata->exit_code); + return; + } + + EXPECT_EQ(child, waitpid(child, &status, 0)); + EXPECT_EQ(1, WIFEXITED(status)); + EXPECT_EQ(EXIT_SUCCESS, WEXITSTATUS(status)); + + /* Re-checks ruleset enforcement. */ + EXPECT_EQ(-1, open("/", O_RDONLY | O_DIRECTORY | O_CLOEXEC)); + EXPECT_EQ(EACCES, errno); +} + TEST_HARNESS_MAIN diff --git a/tools/testing/selftests/landlock/config b/tools/testing/selftests/landlock/config index 0086efaa7b68..29af19c4e9f9 100644 --- a/tools/testing/selftests/landlock/config +++ b/tools/testing/selftests/landlock/config @@ -2,6 +2,7 @@ CONFIG_CGROUPS=y CONFIG_CGROUP_SCHED=y CONFIG_INET=y CONFIG_IPV6=y +CONFIG_KEYS=y CONFIG_NET=y CONFIG_NET_NS=y CONFIG_OVERLAY_FS=y diff --git a/tools/testing/selftests/net/fib_tests.sh b/tools/testing/selftests/net/fib_tests.sh index 73895711cdf4..5f3c28fc8624 100755 --- a/tools/testing/selftests/net/fib_tests.sh +++ b/tools/testing/selftests/net/fib_tests.sh @@ -1737,53 +1737,53 @@ ipv4_rt_dsfield() # DSCP 0x10 should match the specific route, no matter the ECN bits $IP route get fibmatch 172.16.102.1 dsfield 0x10 | \ - grep -q "via 172.16.103.2" + grep -q "172.16.102.0/24 tos 0x10 via 172.16.103.2" log_test $? 0 "IPv4 route with DSCP and ECN:Not-ECT" $IP route get fibmatch 172.16.102.1 dsfield 0x11 | \ - grep -q "via 172.16.103.2" + grep -q "172.16.102.0/24 tos 0x10 via 172.16.103.2" log_test $? 
0 "IPv4 route with DSCP and ECN:ECT(1)" $IP route get fibmatch 172.16.102.1 dsfield 0x12 | \ - grep -q "via 172.16.103.2" + grep -q "172.16.102.0/24 tos 0x10 via 172.16.103.2" log_test $? 0 "IPv4 route with DSCP and ECN:ECT(0)" $IP route get fibmatch 172.16.102.1 dsfield 0x13 | \ - grep -q "via 172.16.103.2" + grep -q "172.16.102.0/24 tos 0x10 via 172.16.103.2" log_test $? 0 "IPv4 route with DSCP and ECN:CE" # Unknown DSCP should match the generic route, no matter the ECN bits $IP route get fibmatch 172.16.102.1 dsfield 0x14 | \ - grep -q "via 172.16.101.2" + grep -q "172.16.102.0/24 via 172.16.101.2" log_test $? 0 "IPv4 route with unknown DSCP and ECN:Not-ECT" $IP route get fibmatch 172.16.102.1 dsfield 0x15 | \ - grep -q "via 172.16.101.2" + grep -q "172.16.102.0/24 via 172.16.101.2" log_test $? 0 "IPv4 route with unknown DSCP and ECN:ECT(1)" $IP route get fibmatch 172.16.102.1 dsfield 0x16 | \ - grep -q "via 172.16.101.2" + grep -q "172.16.102.0/24 via 172.16.101.2" log_test $? 0 "IPv4 route with unknown DSCP and ECN:ECT(0)" $IP route get fibmatch 172.16.102.1 dsfield 0x17 | \ - grep -q "via 172.16.101.2" + grep -q "172.16.102.0/24 via 172.16.101.2" log_test $? 0 "IPv4 route with unknown DSCP and ECN:CE" # Null DSCP should match the generic route, no matter the ECN bits $IP route get fibmatch 172.16.102.1 dsfield 0x00 | \ - grep -q "via 172.16.101.2" + grep -q "172.16.102.0/24 via 172.16.101.2" log_test $? 0 "IPv4 route with no DSCP and ECN:Not-ECT" $IP route get fibmatch 172.16.102.1 dsfield 0x01 | \ - grep -q "via 172.16.101.2" + grep -q "172.16.102.0/24 via 172.16.101.2" log_test $? 0 "IPv4 route with no DSCP and ECN:ECT(1)" $IP route get fibmatch 172.16.102.1 dsfield 0x02 | \ - grep -q "via 172.16.101.2" + grep -q "172.16.102.0/24 via 172.16.101.2" log_test $? 0 "IPv4 route with no DSCP and ECN:ECT(0)" $IP route get fibmatch 172.16.102.1 dsfield 0x03 | \ - grep -q "via 172.16.101.2" + grep -q "172.16.102.0/24 via 172.16.101.2" log_test $? 0 "IPv4 route with no DSCP and ECN:CE" } diff --git a/tools/testing/selftests/net/forwarding/bridge_fdb_learning_limit.sh b/tools/testing/selftests/net/forwarding/bridge_fdb_learning_limit.sh index 0760a34b7114..a21b7085da2e 100755 --- a/tools/testing/selftests/net/forwarding/bridge_fdb_learning_limit.sh +++ b/tools/testing/selftests/net/forwarding/bridge_fdb_learning_limit.sh @@ -178,6 +178,22 @@ fdb_del() check_err $? "Failed to remove a FDB entry of type ${type}" } +check_fdb_n_learned_support() +{ + if ! ip link help bridge 2>&1 | grep -q "fdb_max_learned"; then + echo "SKIP: iproute2 too old, missing bridge max learned support" + exit $ksft_skip + fi + + ip link add dev br0 type bridge + local learned=$(fdb_get_n_learned) + ip link del dev br0 + if [ "$learned" == "null" ]; then + echo "SKIP: kernel too old; bridge fdb_n_learned feature not supported." 
+ exit $ksft_skip + fi +} + check_accounting_one_type() { local type=$1 is_counted=$2 overrides_learned=$3 @@ -274,6 +290,8 @@ check_limit() done } +check_fdb_n_learned_support + trap cleanup EXIT setup_prepare diff --git a/tools/testing/selftests/net/forwarding/devlink_lib.sh b/tools/testing/selftests/net/forwarding/devlink_lib.sh index f1de525cfa55..62a05bca1e82 100644 --- a/tools/testing/selftests/net/forwarding/devlink_lib.sh +++ b/tools/testing/selftests/net/forwarding/devlink_lib.sh @@ -122,6 +122,8 @@ devlink_reload() still_pending=$(devlink resource show "$DEVLINK_DEV" | \ grep -c "size_new") check_err $still_pending "Failed reload - There are still unset sizes" + + udevadm settle } declare -A DEVLINK_ORIG diff --git a/tools/testing/selftests/nolibc/nolibc-test.c b/tools/testing/selftests/nolibc/nolibc-test.c index 94bb6e11c16f..994477ee87be 100644 --- a/tools/testing/selftests/nolibc/nolibc-test.c +++ b/tools/testing/selftests/nolibc/nolibc-test.c @@ -607,7 +607,7 @@ int expect_strne(const char *expr, int llen, const char *cmp) static __attribute__((unused)) int expect_str_buf_eq(size_t expr, const char *buf, size_t val, int llen, const char *cmp) { - llen += printf(" = %lu <%s> ", expr, buf); + llen += printf(" = %lu <%s> ", (unsigned long)expr, buf); if (strcmp(buf, cmp) != 0) { result(llen, FAIL); return 1; diff --git a/tools/testing/selftests/resctrl/resctrl_val.c b/tools/testing/selftests/resctrl/resctrl_val.c index 445f306d4c2f..f55f5989de72 100644 --- a/tools/testing/selftests/resctrl/resctrl_val.c +++ b/tools/testing/selftests/resctrl/resctrl_val.c @@ -293,6 +293,18 @@ static int initialize_mem_bw_imc(void) return 0; } +static void perf_close_imc_mem_bw(void) +{ + int mc; + + for (mc = 0; mc < imcs; mc++) { + if (imc_counters_config[mc][READ].fd != -1) + close(imc_counters_config[mc][READ].fd); + if (imc_counters_config[mc][WRITE].fd != -1) + close(imc_counters_config[mc][WRITE].fd); + } +} + /* * get_mem_bw_imc: Memory band width as reported by iMC counters * @cpu_no: CPU number that the benchmark PID is binded to @@ -306,26 +318,33 @@ static int initialize_mem_bw_imc(void) static int get_mem_bw_imc(int cpu_no, char *bw_report, float *bw_imc) { float reads, writes, of_mul_read, of_mul_write; - int imc, j, ret; + int imc, ret; + + for (imc = 0; imc < imcs; imc++) { + imc_counters_config[imc][READ].fd = -1; + imc_counters_config[imc][WRITE].fd = -1; + } /* Start all iMC counters to log values (both read and write) */ reads = 0, writes = 0, of_mul_read = 1, of_mul_write = 1; for (imc = 0; imc < imcs; imc++) { - for (j = 0; j < 2; j++) { - ret = open_perf_event(imc, cpu_no, j); - if (ret) - return -1; - } - for (j = 0; j < 2; j++) - membw_ioctl_perf_event_ioc_reset_enable(imc, j); + ret = open_perf_event(imc, cpu_no, READ); + if (ret) + goto close_fds; + ret = open_perf_event(imc, cpu_no, WRITE); + if (ret) + goto close_fds; + + membw_ioctl_perf_event_ioc_reset_enable(imc, READ); + membw_ioctl_perf_event_ioc_reset_enable(imc, WRITE); } sleep(1); /* Stop counters after a second to get results (both read and write) */ for (imc = 0; imc < imcs; imc++) { - for (j = 0; j < 2; j++) - membw_ioctl_perf_event_ioc_disable(imc, j); + membw_ioctl_perf_event_ioc_disable(imc, READ); + membw_ioctl_perf_event_ioc_disable(imc, WRITE); } /* @@ -341,15 +360,13 @@ static int get_mem_bw_imc(int cpu_no, char *bw_report, float *bw_imc) if (read(r->fd, &r->return_value, sizeof(struct membw_read_format)) == -1) { ksft_perror("Couldn't get read b/w through iMC"); - - return -1; + goto close_fds; } if 
(read(w->fd, &w->return_value, sizeof(struct membw_read_format)) == -1) { ksft_perror("Couldn't get write bw through iMC"); - - return -1; + goto close_fds; } __u64 r_time_enabled = r->return_value.time_enabled; @@ -369,10 +386,7 @@ static int get_mem_bw_imc(int cpu_no, char *bw_report, float *bw_imc) writes += w->return_value.value * of_mul_write * SCALE; } - for (imc = 0; imc < imcs; imc++) { - close(imc_counters_config[imc][READ].fd); - close(imc_counters_config[imc][WRITE].fd); - } + perf_close_imc_mem_bw(); if (strcmp(bw_report, "reads") == 0) { *bw_imc = reads; @@ -386,6 +400,10 @@ static int get_mem_bw_imc(int cpu_no, char *bw_report, float *bw_imc) *bw_imc = reads + writes; return 0; + +close_fds: + perf_close_imc_mem_bw(); + return -1; } void set_mbm_path(const char *ctrlgrp, const char *mongrp, int domain_id) diff --git a/tools/testing/selftests/sigaltstack/current_stack_pointer.h b/tools/testing/selftests/sigaltstack/current_stack_pointer.h index ea9bdf3a90b1..09da8f1011ce 100644 --- a/tools/testing/selftests/sigaltstack/current_stack_pointer.h +++ b/tools/testing/selftests/sigaltstack/current_stack_pointer.h @@ -8,7 +8,7 @@ register unsigned long sp asm("sp"); register unsigned long sp asm("esp"); #elif __loongarch64 register unsigned long sp asm("$sp"); -#elif __ppc__ +#elif __powerpc__ register unsigned long sp asm("r1"); #elif __s390x__ register unsigned long sp asm("%15");
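The final hunk keys the stack-pointer register variable off __powerpc__, which compilers reliably predefine for powerpc targets, instead of __ppc__. A standalone sketch of the same technique for a few arches, reusing the register names from the selftest header; other architectures are omitted here:

#include <stdio.h>

#if defined(__x86_64__)
register unsigned long sp asm("rsp");
#elif defined(__powerpc__)
register unsigned long sp asm("r1");
#elif defined(__aarch64__)
register unsigned long sp asm("sp");
#endif

int main(void)
{
#if defined(__x86_64__) || defined(__powerpc__) || defined(__aarch64__)
	printf("current stack pointer: %#lx\n", sp);
#else
	printf("stack pointer register not mapped for this arch in this sketch\n");
#endif
	return 0;
}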