author     Mike Pagano <mpagano@gentoo.org>  2022-09-28 05:55:39 -0400
committer  Mike Pagano <mpagano@gentoo.org>  2022-09-28 05:55:39 -0400
commit     27a162cbb4e6bf6258462f89e5da2c02364e125e (patch)
tree       0c26efacbbe17f362f51b125ebee456253e8da79
parent     Update patch directly from Linus' tree (diff)
download   linux-patches-5.19-14.tar.gz
           linux-patches-5.19-14.tar.bz2
           linux-patches-5.19-14.zip
Linux patch 5.19.12 (tag: 5.19-14)
Signed-off-by: Mike Pagano <mpagano@gentoo.org>
-rw-r--r--  0000_README               |    4
-rw-r--r--  1011_linux-5.19.12.patch  | 9776
2 files changed, 9780 insertions, 0 deletions
diff --git a/0000_README b/0000_README
index 591733a1..05763bb8 100644
--- a/0000_README
+++ b/0000_README
@@ -87,6 +87,10 @@ Patch: 1010_linux-5.19.11.patch
From: http://www.kernel.org
Desc: Linux 5.19.11
+Patch: 1011_linux-5.19.12.patch
+From: http://www.kernel.org
+Desc: Linux 5.19.12
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1011_linux-5.19.12.patch b/1011_linux-5.19.12.patch
new file mode 100644
index 00000000..8c6e32f4
--- /dev/null
+++ b/1011_linux-5.19.12.patch
@@ -0,0 +1,9776 @@
+diff --git a/Makefile b/Makefile
+index 01463a22926d5..7df4c195c8ab2 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 19
+-SUBLEVEL = 11
++SUBLEVEL = 12
+ EXTRAVERSION =
+ NAME = Superb Owl
+
+diff --git a/arch/arm/boot/dts/lan966x.dtsi b/arch/arm/boot/dts/lan966x.dtsi
+index 38e90a31d2dd1..25c19f9d0a12f 100644
+--- a/arch/arm/boot/dts/lan966x.dtsi
++++ b/arch/arm/boot/dts/lan966x.dtsi
+@@ -515,13 +515,13 @@
+
+ phy0: ethernet-phy@1 {
+ reg = <1>;
+- interrupts = <GIC_SPI 81 IRQ_TYPE_LEVEL_HIGH>;
++ interrupts = <GIC_SPI 80 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
+ };
+
+ phy1: ethernet-phy@2 {
+ reg = <2>;
+- interrupts = <GIC_SPI 82 IRQ_TYPE_LEVEL_HIGH>;
++ interrupts = <GIC_SPI 81 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
+ };
+ };
+diff --git a/arch/arm64/boot/dts/freescale/imx8mm-mx8menlo.dts b/arch/arm64/boot/dts/freescale/imx8mm-mx8menlo.dts
+index 92eaf4ef45638..57ecdfa0dfc09 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mm-mx8menlo.dts
++++ b/arch/arm64/boot/dts/freescale/imx8mm-mx8menlo.dts
+@@ -152,11 +152,11 @@
+ * CPLD_reset is RESET_SOFT in schematic
+ */
+ gpio-line-names =
+- "CPLD_D[1]", "CPLD_int", "CPLD_reset", "",
+- "", "CPLD_D[0]", "", "",
+- "", "", "", "CPLD_D[2]",
+- "CPLD_D[3]", "CPLD_D[4]", "CPLD_D[5]", "CPLD_D[6]",
+- "CPLD_D[7]", "", "", "",
++ "CPLD_D[6]", "CPLD_int", "CPLD_reset", "",
++ "", "CPLD_D[7]", "", "",
++ "", "", "", "CPLD_D[5]",
++ "CPLD_D[4]", "CPLD_D[3]", "CPLD_D[2]", "CPLD_D[1]",
++ "CPLD_D[0]", "", "", "",
+ "", "", "", "",
+ "", "", "", "KBD_intK",
+ "", "", "", "";
+diff --git a/arch/arm64/boot/dts/freescale/imx8mm-tqma8mqml-mba8mx.dts b/arch/arm64/boot/dts/freescale/imx8mm-tqma8mqml-mba8mx.dts
+index 286d2df01cfa7..7e0aeb2db3054 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mm-tqma8mqml-mba8mx.dts
++++ b/arch/arm64/boot/dts/freescale/imx8mm-tqma8mqml-mba8mx.dts
+@@ -5,7 +5,6 @@
+
+ /dts-v1/;
+
+-#include <dt-bindings/phy/phy-imx8-pcie.h>
+ #include "imx8mm-tqma8mqml.dtsi"
+ #include "mba8mx.dtsi"
+
+diff --git a/arch/arm64/boot/dts/freescale/imx8mm-tqma8mqml.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-tqma8mqml.dtsi
+index 16ee9b5179e6e..f649dfacb4b69 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mm-tqma8mqml.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mm-tqma8mqml.dtsi
+@@ -3,6 +3,7 @@
+ * Copyright 2020-2021 TQ-Systems GmbH
+ */
+
++#include <dt-bindings/phy/phy-imx8-pcie.h>
+ #include "imx8mm.dtsi"
+
+ / {
+diff --git a/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi
+index c2d4da25482ff..44b473494d0f5 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi
+@@ -359,8 +359,8 @@
+ nxp,dvs-standby-voltage = <850000>;
+ regulator-always-on;
+ regulator-boot-on;
+- regulator-max-microvolt = <950000>;
+- regulator-min-microvolt = <850000>;
++ regulator-max-microvolt = <1050000>;
++ regulator-min-microvolt = <805000>;
+ regulator-name = "On-module +VDD_ARM (BUCK2)";
+ regulator-ramp-delay = <3125>;
+ };
+@@ -368,8 +368,8 @@
+ reg_vdd_dram: BUCK3 {
+ regulator-always-on;
+ regulator-boot-on;
+- regulator-max-microvolt = <950000>;
+- regulator-min-microvolt = <850000>;
++ regulator-max-microvolt = <1000000>;
++ regulator-min-microvolt = <805000>;
+ regulator-name = "On-module +VDD_GPU_VPU_DDR (BUCK3)";
+ };
+
+@@ -408,7 +408,7 @@
+ reg_vdd_snvs: LDO2 {
+ regulator-always-on;
+ regulator-boot-on;
+- regulator-max-microvolt = <900000>;
++ regulator-max-microvolt = <800000>;
+ regulator-min-microvolt = <800000>;
+ regulator-name = "On-module +V0.8_SNVS (LDO2)";
+ };
+diff --git a/arch/arm64/boot/dts/freescale/imx8mn.dtsi b/arch/arm64/boot/dts/freescale/imx8mn.dtsi
+index e41e1d56f980d..7bd4eecd592ef 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mn.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mn.dtsi
+@@ -672,7 +672,6 @@
+ <&clk IMX8MN_CLK_GPU_SHADER>,
+ <&clk IMX8MN_CLK_GPU_BUS_ROOT>,
+ <&clk IMX8MN_CLK_GPU_AHB>;
+- resets = <&src IMX8MQ_RESET_GPU_RESET>;
+ };
+
+ pgc_dispmix: power-domain@3 {
+diff --git a/arch/arm64/boot/dts/freescale/imx8mp-venice-gw74xx.dts b/arch/arm64/boot/dts/freescale/imx8mp-venice-gw74xx.dts
+index 6630ec561dc25..211e6a1b296e1 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mp-venice-gw74xx.dts
++++ b/arch/arm64/boot/dts/freescale/imx8mp-venice-gw74xx.dts
+@@ -123,8 +123,7 @@
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_reg_can>;
+ regulator-name = "can2_stby";
+- gpio = <&gpio3 19 GPIO_ACTIVE_HIGH>;
+- enable-active-high;
++ gpio = <&gpio3 19 GPIO_ACTIVE_LOW>;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+@@ -484,35 +483,40 @@
+ lan1: port@0 {
+ reg = <0>;
+ label = "lan1";
++ phy-mode = "internal";
+ local-mac-address = [00 00 00 00 00 00];
+ };
+
+ lan2: port@1 {
+ reg = <1>;
+ label = "lan2";
++ phy-mode = "internal";
+ local-mac-address = [00 00 00 00 00 00];
+ };
+
+ lan3: port@2 {
+ reg = <2>;
+ label = "lan3";
++ phy-mode = "internal";
+ local-mac-address = [00 00 00 00 00 00];
+ };
+
+ lan4: port@3 {
+ reg = <3>;
+ label = "lan4";
++ phy-mode = "internal";
+ local-mac-address = [00 00 00 00 00 00];
+ };
+
+ lan5: port@4 {
+ reg = <4>;
+ label = "lan5";
++ phy-mode = "internal";
+ local-mac-address = [00 00 00 00 00 00];
+ };
+
+- port@6 {
+- reg = <6>;
++ port@5 {
++ reg = <5>;
+ label = "cpu";
+ ethernet = <&fec>;
+ phy-mode = "rgmii-id";
+diff --git a/arch/arm64/boot/dts/freescale/imx8ulp.dtsi b/arch/arm64/boot/dts/freescale/imx8ulp.dtsi
+index 09f7364dd1d05..1cd389b1b95d6 100644
+--- a/arch/arm64/boot/dts/freescale/imx8ulp.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8ulp.dtsi
+@@ -172,6 +172,7 @@
+ compatible = "fsl,imx8ulp-pcc3";
+ reg = <0x292d0000 0x10000>;
+ #clock-cells = <1>;
++ #reset-cells = <1>;
+ };
+
+ tpm5: tpm@29340000 {
+@@ -270,6 +271,7 @@
+ compatible = "fsl,imx8ulp-pcc4";
+ reg = <0x29800000 0x10000>;
+ #clock-cells = <1>;
++ #reset-cells = <1>;
+ };
+
+ lpi2c6: i2c@29840000 {
+@@ -414,6 +416,7 @@
+ compatible = "fsl,imx8ulp-pcc5";
+ reg = <0x2da70000 0x10000>;
+ #clock-cells = <1>;
++ #reset-cells = <1>;
+ };
+ };
+
+diff --git a/arch/arm64/boot/dts/rockchip/px30-engicam-px30-core.dtsi b/arch/arm64/boot/dts/rockchip/px30-engicam-px30-core.dtsi
+index 7249871530ab9..5eecbefa8a336 100644
+--- a/arch/arm64/boot/dts/rockchip/px30-engicam-px30-core.dtsi
++++ b/arch/arm64/boot/dts/rockchip/px30-engicam-px30-core.dtsi
+@@ -2,8 +2,8 @@
+ /*
+ * Copyright (c) 2020 Fuzhou Rockchip Electronics Co., Ltd
+ * Copyright (c) 2020 Engicam srl
+- * Copyright (c) 2020 Amarula Solutons
+- * Copyright (c) 2020 Amarula Solutons(India)
++ * Copyright (c) 2020 Amarula Solutions
++ * Copyright (c) 2020 Amarula Solutions(India)
+ */
+
+ #include <dt-bindings/gpio/gpio.h>
+diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts b/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts
+index 31ebb4e5fd330..0f9cc042d9bf0 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts
++++ b/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts
+@@ -88,3 +88,8 @@
+ };
+ };
+ };
++
++&wlan_host_wake_l {
++ /* Kevin has an external pull up, but Bob does not. */
++ rockchip,pins = <0 RK_PB0 RK_FUNC_GPIO &pcfg_pull_up>;
++};
+diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi
+index 50d459ee4831c..af5810e5f5b79 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi
+@@ -244,6 +244,14 @@
+ &edp {
+ status = "okay";
+
++ /*
++ * eDP PHY/clk don't sync reliably at anything other than 24 MHz. Only
++ * set this here, because rk3399-gru.dtsi ensures we can generate this
++ * off GPLL=600MHz, whereas some other RK3399 boards may not.
++ */
++ assigned-clocks = <&cru PCLK_EDP>;
++ assigned-clock-rates = <24000000>;
++
+ ports {
+ edp_out: port@1 {
+ reg = <1>;
+@@ -578,6 +586,7 @@ ap_i2c_tp: &i2c5 {
+ };
+
+ wlan_host_wake_l: wlan-host-wake-l {
++ /* Kevin has an external pull up, but Bob does not */
+ rockchip,pins = <0 RK_PB0 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+diff --git a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
+index b1ac3a89f259c..aa3e21bd6c8f4 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
+@@ -62,7 +62,6 @@
+ vcc5v0_host: vcc5v0-host-regulator {
+ compatible = "regulator-fixed";
+ gpio = <&gpio4 RK_PA3 GPIO_ACTIVE_LOW>;
+- enable-active-low;
+ pinctrl-names = "default";
+ pinctrl-0 = <&vcc5v0_host_en>;
+ regulator-name = "vcc5v0_host";
+diff --git a/arch/arm64/boot/dts/rockchip/rk3566-quartz64-a.dts b/arch/arm64/boot/dts/rockchip/rk3566-quartz64-a.dts
+index fa953b7366421..fdbfdf3634e43 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3566-quartz64-a.dts
++++ b/arch/arm64/boot/dts/rockchip/rk3566-quartz64-a.dts
+@@ -163,7 +163,6 @@
+
+ vcc3v3_sd: vcc3v3_sd {
+ compatible = "regulator-fixed";
+- enable-active-low;
+ gpio = <&gpio0 RK_PA5 GPIO_ACTIVE_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&vcc_sd_h>;
+diff --git a/arch/arm64/boot/dts/rockchip/rk3566-quartz64-b.dts b/arch/arm64/boot/dts/rockchip/rk3566-quartz64-b.dts
+index 02d5f5a8ca036..528bb4e8ac776 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3566-quartz64-b.dts
++++ b/arch/arm64/boot/dts/rockchip/rk3566-quartz64-b.dts
+@@ -506,7 +506,7 @@
+ disable-wp;
+ pinctrl-names = "default";
+ pinctrl-0 = <&sdmmc0_bus4 &sdmmc0_clk &sdmmc0_cmd &sdmmc0_det>;
+- sd-uhs-sdr104;
++ sd-uhs-sdr50;
+ vmmc-supply = <&vcc3v3_sd>;
+ vqmmc-supply = <&vccio_sd>;
+ status = "okay";
+diff --git a/arch/arm64/boot/dts/rockchip/rk3568-evb1-v10.dts b/arch/arm64/boot/dts/rockchip/rk3568-evb1-v10.dts
+index 622be8be9813d..282f5c74d5cda 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3568-evb1-v10.dts
++++ b/arch/arm64/boot/dts/rockchip/rk3568-evb1-v10.dts
+@@ -618,7 +618,7 @@
+ };
+
+ &usb2phy0_otg {
+- vbus-supply = <&vcc5v0_usb_otg>;
++ phy-supply = <&vcc5v0_usb_otg>;
+ status = "okay";
+ };
+
+diff --git a/arch/arm64/boot/dts/rockchip/rk3568-rock-3a.dts b/arch/arm64/boot/dts/rockchip/rk3568-rock-3a.dts
+index 0813c0c5abded..26912f02684ce 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3568-rock-3a.dts
++++ b/arch/arm64/boot/dts/rockchip/rk3568-rock-3a.dts
+@@ -543,7 +543,7 @@
+ };
+
+ &usb2phy0_otg {
+- vbus-supply = <&vcc5v0_usb_otg>;
++ phy-supply = <&vcc5v0_usb_otg>;
+ status = "okay";
+ };
+
+diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c
+index 707b5451929d4..d4abb948eb14e 100644
+--- a/arch/arm64/kernel/topology.c
++++ b/arch/arm64/kernel/topology.c
+@@ -251,7 +251,7 @@ static void amu_fie_setup(const struct cpumask *cpus)
+ for_each_cpu(cpu, cpus) {
+ if (!freq_counters_valid(cpu) ||
+ freq_inv_set_max_ratio(cpu,
+- cpufreq_get_hw_max_freq(cpu) * 1000,
++ cpufreq_get_hw_max_freq(cpu) * 1000ULL,
+ arch_timer_get_rate()))
+ return;
+ }
+diff --git a/arch/mips/lantiq/clk.c b/arch/mips/lantiq/clk.c
+index 7a623684d9b5e..2d5a0bcb0cec1 100644
+--- a/arch/mips/lantiq/clk.c
++++ b/arch/mips/lantiq/clk.c
+@@ -50,6 +50,7 @@ struct clk *clk_get_io(void)
+ {
+ return &cpu_clk_generic[2];
+ }
++EXPORT_SYMBOL_GPL(clk_get_io);
+
+ struct clk *clk_get_ppe(void)
+ {
+diff --git a/arch/mips/loongson32/common/platform.c b/arch/mips/loongson32/common/platform.c
+index 794c96c2a4cdd..311dc1580bbde 100644
+--- a/arch/mips/loongson32/common/platform.c
++++ b/arch/mips/loongson32/common/platform.c
+@@ -98,7 +98,7 @@ int ls1x_eth_mux_init(struct platform_device *pdev, void *priv)
+ if (plat_dat->bus_id) {
+ __raw_writel(__raw_readl(LS1X_MUX_CTRL0) | GMAC1_USE_UART1 |
+ GMAC1_USE_UART0, LS1X_MUX_CTRL0);
+- switch (plat_dat->interface) {
++ switch (plat_dat->phy_interface) {
+ case PHY_INTERFACE_MODE_RGMII:
+ val &= ~(GMAC1_USE_TXCLK | GMAC1_USE_PWM23);
+ break;
+@@ -107,12 +107,12 @@ int ls1x_eth_mux_init(struct platform_device *pdev, void *priv)
+ break;
+ default:
+ pr_err("unsupported mii mode %d\n",
+- plat_dat->interface);
++ plat_dat->phy_interface);
+ return -ENOTSUPP;
+ }
+ val &= ~GMAC1_SHUT;
+ } else {
+- switch (plat_dat->interface) {
++ switch (plat_dat->phy_interface) {
+ case PHY_INTERFACE_MODE_RGMII:
+ val &= ~(GMAC0_USE_TXCLK | GMAC0_USE_PWM01);
+ break;
+@@ -121,7 +121,7 @@ int ls1x_eth_mux_init(struct platform_device *pdev, void *priv)
+ break;
+ default:
+ pr_err("unsupported mii mode %d\n",
+- plat_dat->interface);
++ plat_dat->phy_interface);
+ return -ENOTSUPP;
+ }
+ val &= ~GMAC0_SHUT;
+@@ -131,7 +131,7 @@ int ls1x_eth_mux_init(struct platform_device *pdev, void *priv)
+ plat_dat = dev_get_platdata(&pdev->dev);
+
+ val &= ~PHY_INTF_SELI;
+- if (plat_dat->interface == PHY_INTERFACE_MODE_RMII)
++ if (plat_dat->phy_interface == PHY_INTERFACE_MODE_RMII)
+ val |= 0x4 << PHY_INTF_SELI_SHIFT;
+ __raw_writel(val, LS1X_MUX_CTRL1);
+
+@@ -146,9 +146,9 @@ static struct plat_stmmacenet_data ls1x_eth0_pdata = {
+ .bus_id = 0,
+ .phy_addr = -1,
+ #if defined(CONFIG_LOONGSON1_LS1B)
+- .interface = PHY_INTERFACE_MODE_MII,
++ .phy_interface = PHY_INTERFACE_MODE_MII,
+ #elif defined(CONFIG_LOONGSON1_LS1C)
+- .interface = PHY_INTERFACE_MODE_RMII,
++ .phy_interface = PHY_INTERFACE_MODE_RMII,
+ #endif
+ .mdio_bus_data = &ls1x_mdio_bus_data,
+ .dma_cfg = &ls1x_eth_dma_cfg,
+@@ -186,7 +186,7 @@ struct platform_device ls1x_eth0_pdev = {
+ static struct plat_stmmacenet_data ls1x_eth1_pdata = {
+ .bus_id = 1,
+ .phy_addr = -1,
+- .interface = PHY_INTERFACE_MODE_MII,
++ .phy_interface = PHY_INTERFACE_MODE_MII,
+ .mdio_bus_data = &ls1x_mdio_bus_data,
+ .dma_cfg = &ls1x_eth_dma_cfg,
+ .has_gmac = 1,
+diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
+index fcbb81feb7ad8..1f02f15569749 100644
+--- a/arch/riscv/Kconfig
++++ b/arch/riscv/Kconfig
+@@ -361,6 +361,7 @@ config RISCV_ISA_C
+ config RISCV_ISA_SVPBMT
+ bool "SVPBMT extension support"
+ depends on 64BIT && MMU
++ depends on !XIP_KERNEL
+ select RISCV_ALTERNATIVE
+ default y
+ help
+diff --git a/arch/riscv/kernel/signal.c b/arch/riscv/kernel/signal.c
+index 5a2de6b6f8822..5c591123c4409 100644
+--- a/arch/riscv/kernel/signal.c
++++ b/arch/riscv/kernel/signal.c
+@@ -124,6 +124,8 @@ SYSCALL_DEFINE0(rt_sigreturn)
+ if (restore_altstack(&frame->uc.uc_stack))
+ goto badframe;
+
++ regs->cause = -1UL;
++
+ return regs->a0;
+
+ badframe:
+diff --git a/arch/um/kernel/um_arch.c b/arch/um/kernel/um_arch.c
+index e0de60e503b98..d9e023c78f568 100644
+--- a/arch/um/kernel/um_arch.c
++++ b/arch/um/kernel/um_arch.c
+@@ -33,7 +33,7 @@
+ #include "um_arch.h"
+
+ #define DEFAULT_COMMAND_LINE_ROOT "root=98:0"
+-#define DEFAULT_COMMAND_LINE_CONSOLE "console=tty"
++#define DEFAULT_COMMAND_LINE_CONSOLE "console=tty0"
+
+ /* Changed in add_arg and setup_arch, which run before SMP is started */
+ static char __initdata command_line[COMMAND_LINE_SIZE] = { 0 };
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index 4c0e812f2f044..19c04412f6e16 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -713,6 +713,7 @@ struct kvm_vcpu_arch {
+ struct fpu_guest guest_fpu;
+
+ u64 xcr0;
++ u64 guest_supported_xcr0;
+
+ struct kvm_pio_request pio;
+ void *pio_data;
+diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
+index de6d44e07e348..3ab498165639f 100644
+--- a/arch/x86/kvm/cpuid.c
++++ b/arch/x86/kvm/cpuid.c
+@@ -283,7 +283,6 @@ static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
+ {
+ struct kvm_lapic *apic = vcpu->arch.apic;
+ struct kvm_cpuid_entry2 *best;
+- u64 guest_supported_xcr0;
+
+ best = kvm_find_cpuid_entry(vcpu, 1, 0);
+ if (best && apic) {
+@@ -295,10 +294,16 @@ static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
+ kvm_apic_set_version(vcpu);
+ }
+
+- guest_supported_xcr0 =
++ vcpu->arch.guest_supported_xcr0 =
+ cpuid_get_supported_xcr0(vcpu->arch.cpuid_entries, vcpu->arch.cpuid_nent);
+
+- vcpu->arch.guest_fpu.fpstate->user_xfeatures = guest_supported_xcr0;
++ /*
++ * FP+SSE can always be saved/restored via KVM_{G,S}ET_XSAVE, even if
++ * XSAVE/XCRO are not exposed to the guest, and even if XSAVE isn't
++ * supported by the host.
++ */
++ vcpu->arch.guest_fpu.fpstate->user_xfeatures = vcpu->arch.guest_supported_xcr0 |
++ XFEATURE_MASK_FPSSE;
+
+ kvm_update_pv_runtime(vcpu);
+
+diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
+index 09fa8a94807bf..0c4a866813b31 100644
+--- a/arch/x86/kvm/emulate.c
++++ b/arch/x86/kvm/emulate.c
+@@ -4134,6 +4134,9 @@ static int em_xsetbv(struct x86_emulate_ctxt *ctxt)
+ {
+ u32 eax, ecx, edx;
+
++ if (!(ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSXSAVE))
++ return emulate_ud(ctxt);
++
+ eax = reg_read(ctxt, VCPU_REGS_RAX);
+ edx = reg_read(ctxt, VCPU_REGS_RDX);
+ ecx = reg_read(ctxt, VCPU_REGS_RCX);
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 5b36866528568..8c2815151864b 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -1025,15 +1025,10 @@ void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu)
+ }
+ EXPORT_SYMBOL_GPL(kvm_load_host_xsave_state);
+
+-static inline u64 kvm_guest_supported_xcr0(struct kvm_vcpu *vcpu)
+-{
+- return vcpu->arch.guest_fpu.fpstate->user_xfeatures;
+-}
+-
+ #ifdef CONFIG_X86_64
+ static inline u64 kvm_guest_supported_xfd(struct kvm_vcpu *vcpu)
+ {
+- return kvm_guest_supported_xcr0(vcpu) & XFEATURE_MASK_USER_DYNAMIC;
++ return vcpu->arch.guest_supported_xcr0 & XFEATURE_MASK_USER_DYNAMIC;
+ }
+ #endif
+
+@@ -1056,7 +1051,7 @@ static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
+ * saving. However, xcr0 bit 0 is always set, even if the
+ * emulated CPU does not support XSAVE (see kvm_vcpu_reset()).
+ */
+- valid_bits = kvm_guest_supported_xcr0(vcpu) | XFEATURE_MASK_FP;
++ valid_bits = vcpu->arch.guest_supported_xcr0 | XFEATURE_MASK_FP;
+ if (xcr0 & ~valid_bits)
+ return 1;
+
+@@ -1084,6 +1079,7 @@ static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
+
+ int kvm_emulate_xsetbv(struct kvm_vcpu *vcpu)
+ {
++ /* Note, #UD due to CR4.OSXSAVE=0 has priority over the intercept. */
+ if (static_call(kvm_x86_get_cpl)(vcpu) != 0 ||
+ __kvm_set_xcr(vcpu, kvm_rcx_read(vcpu), kvm_read_edx_eax(vcpu))) {
+ kvm_inject_gp(vcpu, 0);
+diff --git a/block/blk-core.c b/block/blk-core.c
+index cc6fbcb6d2521..7743c68177e89 100644
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -284,49 +284,6 @@ void blk_queue_start_drain(struct request_queue *q)
+ wake_up_all(&q->mq_freeze_wq);
+ }
+
+-/**
+- * blk_cleanup_queue - shutdown a request queue
+- * @q: request queue to shutdown
+- *
+- * Mark @q DYING, drain all pending requests, mark @q DEAD, destroy and
+- * put it. All future requests will be failed immediately with -ENODEV.
+- *
+- * Context: can sleep
+- */
+-void blk_cleanup_queue(struct request_queue *q)
+-{
+- /* cannot be called from atomic context */
+- might_sleep();
+-
+- WARN_ON_ONCE(blk_queue_registered(q));
+-
+- /* mark @q DYING, no new request or merges will be allowed afterwards */
+- blk_queue_flag_set(QUEUE_FLAG_DYING, q);
+- blk_queue_start_drain(q);
+-
+- blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
+- blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
+-
+- /*
+- * Drain all requests queued before DYING marking. Set DEAD flag to
+- * prevent that blk_mq_run_hw_queues() accesses the hardware queues
+- * after draining finished.
+- */
+- blk_freeze_queue(q);
+-
+- blk_queue_flag_set(QUEUE_FLAG_DEAD, q);
+-
+- blk_sync_queue(q);
+- if (queue_is_mq(q)) {
+- blk_mq_cancel_work_sync(q);
+- blk_mq_exit_queue(q);
+- }
+-
+- /* @q is and will stay empty, shutdown and put */
+- blk_put_queue(q);
+-}
+-EXPORT_SYMBOL(blk_cleanup_queue);
+-
+ /**
+ * blk_queue_enter() - try to increase q->q_usage_counter
+ * @q: request queue pointer
+diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c
+index 61f179e5f151a..28adb01f64419 100644
+--- a/block/blk-mq-debugfs.c
++++ b/block/blk-mq-debugfs.c
+@@ -116,7 +116,6 @@ static const char *const blk_queue_flag_name[] = {
+ QUEUE_FLAG_NAME(NOXMERGES),
+ QUEUE_FLAG_NAME(ADD_RANDOM),
+ QUEUE_FLAG_NAME(SAME_FORCE),
+- QUEUE_FLAG_NAME(DEAD),
+ QUEUE_FLAG_NAME(INIT_DONE),
+ QUEUE_FLAG_NAME(STABLE_WRITES),
+ QUEUE_FLAG_NAME(POLL),
+@@ -151,11 +150,10 @@ static ssize_t queue_state_write(void *data, const char __user *buf,
+ char opbuf[16] = { }, *op;
+
+ /*
+- * The "state" attribute is removed after blk_cleanup_queue() has called
+- * blk_mq_free_queue(). Return if QUEUE_FLAG_DEAD has been set to avoid
+- * triggering a use-after-free.
++ * The "state" attribute is removed when the queue is removed. Don't
++ * allow setting the state on a dying queue to avoid a use-after-free.
+ */
+- if (blk_queue_dead(q))
++ if (blk_queue_dying(q))
+ return -ENOENT;
+
+ if (count >= sizeof(opbuf)) {
+diff --git a/block/blk-mq.c b/block/blk-mq.c
+index 0a299941c622e..69d0a58f9e2f1 100644
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -3896,7 +3896,7 @@ static struct request_queue *blk_mq_init_queue_data(struct blk_mq_tag_set *set,
+ q->queuedata = queuedata;
+ ret = blk_mq_init_allocated_queue(set, q);
+ if (ret) {
+- blk_cleanup_queue(q);
++ blk_put_queue(q);
+ return ERR_PTR(ret);
+ }
+ return q;
+@@ -3908,6 +3908,35 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
+ }
+ EXPORT_SYMBOL(blk_mq_init_queue);
+
++/**
++ * blk_mq_destroy_queue - shutdown a request queue
++ * @q: request queue to shutdown
++ *
++ * This shuts down a request queue allocated by blk_mq_init_queue() and drops
++ * the initial reference. All future requests will failed with -ENODEV.
++ *
++ * Context: can sleep
++ */
++void blk_mq_destroy_queue(struct request_queue *q)
++{
++ WARN_ON_ONCE(!queue_is_mq(q));
++ WARN_ON_ONCE(blk_queue_registered(q));
++
++ might_sleep();
++
++ blk_queue_flag_set(QUEUE_FLAG_DYING, q);
++ blk_queue_start_drain(q);
++ blk_freeze_queue(q);
++
++ blk_sync_queue(q);
++ blk_mq_cancel_work_sync(q);
++ blk_mq_exit_queue(q);
++
++ /* @q is and will stay empty, shutdown and put */
++ blk_put_queue(q);
++}
++EXPORT_SYMBOL(blk_mq_destroy_queue);
++
+ struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set, void *queuedata,
+ struct lock_class_key *lkclass)
+ {
+@@ -3920,13 +3949,23 @@ struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set, void *queuedata,
+
+ disk = __alloc_disk_node(q, set->numa_node, lkclass);
+ if (!disk) {
+- blk_cleanup_queue(q);
++ blk_mq_destroy_queue(q);
+ return ERR_PTR(-ENOMEM);
+ }
++ set_bit(GD_OWNS_QUEUE, &disk->state);
+ return disk;
+ }
+ EXPORT_SYMBOL(__blk_mq_alloc_disk);
+
++struct gendisk *blk_mq_alloc_disk_for_queue(struct request_queue *q,
++ struct lock_class_key *lkclass)
++{
++ if (!blk_get_queue(q))
++ return NULL;
++ return __alloc_disk_node(q, NUMA_NO_NODE, lkclass);
++}
++EXPORT_SYMBOL(blk_mq_alloc_disk_for_queue);
++
+ static struct blk_mq_hw_ctx *blk_mq_alloc_and_init_hctx(
+ struct blk_mq_tag_set *set, struct request_queue *q,
+ int hctx_idx, int node)
+diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
+index 9b905e9443e49..84d7f87015673 100644
+--- a/block/blk-sysfs.c
++++ b/block/blk-sysfs.c
+@@ -748,11 +748,6 @@ static void blk_free_queue_rcu(struct rcu_head *rcu_head)
+ * decremented with blk_put_queue(). Once the refcount reaches 0 this function
+ * is called.
+ *
+- * For drivers that have a request_queue on a gendisk and added with
+- * __device_add_disk() the refcount to request_queue will reach 0 with
+- * the last put_disk() called by the driver. For drivers which don't use
+- * __device_add_disk() this happens with blk_cleanup_queue().
+- *
+ * Drivers exist which depend on the release of the request_queue to be
+ * synchronous, it should not be deferred.
+ *
+diff --git a/block/blk.h b/block/blk.h
+index 434017701403f..0d6668663ab5d 100644
+--- a/block/blk.h
++++ b/block/blk.h
+@@ -411,6 +411,9 @@ int bdev_resize_partition(struct gendisk *disk, int partno, sector_t start,
+ sector_t length);
+ void blk_drop_partitions(struct gendisk *disk);
+
++struct gendisk *__alloc_disk_node(struct request_queue *q, int node_id,
++ struct lock_class_key *lkclass);
++
+ int bio_add_hw_page(struct request_queue *q, struct bio *bio,
+ struct page *page, unsigned int len, unsigned int offset,
+ unsigned int max_sectors, bool *same_page);
+diff --git a/block/bsg-lib.c b/block/bsg-lib.c
+index acfe1357bf6c4..fd4cd5e682826 100644
+--- a/block/bsg-lib.c
++++ b/block/bsg-lib.c
+@@ -324,7 +324,7 @@ void bsg_remove_queue(struct request_queue *q)
+ container_of(q->tag_set, struct bsg_set, tag_set);
+
+ bsg_unregister_queue(bset->bd);
+- blk_cleanup_queue(q);
++ blk_mq_destroy_queue(q);
+ blk_mq_free_tag_set(&bset->tag_set);
+ kfree(bset);
+ }
+@@ -399,7 +399,7 @@ struct request_queue *bsg_setup_queue(struct device *dev, const char *name,
+
+ return q;
+ out_cleanup_queue:
+- blk_cleanup_queue(q);
++ blk_mq_destroy_queue(q);
+ out_queue:
+ blk_mq_free_tag_set(set);
+ out_tag_set:
+diff --git a/block/genhd.c b/block/genhd.c
+index 278227ba1d531..a39c416d658fd 100644
+--- a/block/genhd.c
++++ b/block/genhd.c
+@@ -617,13 +617,14 @@ void del_gendisk(struct gendisk *disk)
+ * Fail any new I/O.
+ */
+ set_bit(GD_DEAD, &disk->state);
++ if (test_bit(GD_OWNS_QUEUE, &disk->state))
++ blk_queue_flag_set(QUEUE_FLAG_DYING, q);
+ set_capacity(disk, 0);
+
+ /*
+ * Prevent new I/O from crossing bio_queue_enter().
+ */
+ blk_queue_start_drain(q);
+- blk_mq_freeze_queue_wait(q);
+
+ if (!(disk->flags & GENHD_FL_HIDDEN)) {
+ sysfs_remove_link(&disk_to_dev(disk)->kobj, "bdi");
+@@ -647,6 +648,8 @@ void del_gendisk(struct gendisk *disk)
+ pm_runtime_set_memalloc_noio(disk_to_dev(disk), false);
+ device_del(disk_to_dev(disk));
+
++ blk_mq_freeze_queue_wait(q);
++
+ blk_throtl_cancel_bios(disk->queue);
+
+ blk_sync_queue(q);
+@@ -663,11 +666,16 @@ void del_gendisk(struct gendisk *disk)
+ blk_mq_unquiesce_queue(q);
+
+ /*
+- * Allow using passthrough request again after the queue is torn down.
++ * If the disk does not own the queue, allow using passthrough requests
++ * again. Else leave the queue frozen to fail all I/O.
+ */
+- blk_queue_flag_clear(QUEUE_FLAG_INIT_DONE, q);
+- __blk_mq_unfreeze_queue(q, true);
+-
++ if (!test_bit(GD_OWNS_QUEUE, &disk->state)) {
++ blk_queue_flag_clear(QUEUE_FLAG_INIT_DONE, q);
++ __blk_mq_unfreeze_queue(q, true);
++ } else {
++ if (queue_is_mq(q))
++ blk_mq_exit_queue(q);
++ }
+ }
+ EXPORT_SYMBOL(del_gendisk);
+
+@@ -1151,6 +1159,18 @@ static void disk_release(struct device *dev)
+ might_sleep();
+ WARN_ON_ONCE(disk_live(disk));
+
++ /*
++ * To undo the all initialization from blk_mq_init_allocated_queue in
++ * case of a probe failure where add_disk is never called we have to
++ * call blk_mq_exit_queue here. We can't do this for the more common
++ * teardown case (yet) as the tagset can be gone by the time the disk
++ * is released once it was added.
++ */
++ if (queue_is_mq(disk->queue) &&
++ test_bit(GD_OWNS_QUEUE, &disk->state) &&
++ !test_bit(GD_ADDED, &disk->state))
++ blk_mq_exit_queue(disk->queue);
++
+ blkcg_exit_queue(disk->queue);
+
+ disk_release_events(disk);
+@@ -1338,12 +1358,9 @@ struct gendisk *__alloc_disk_node(struct request_queue *q, int node_id,
+ {
+ struct gendisk *disk;
+
+- if (!blk_get_queue(q))
+- return NULL;
+-
+ disk = kzalloc_node(sizeof(struct gendisk), GFP_KERNEL, node_id);
+ if (!disk)
+- goto out_put_queue;
++ return NULL;
+
+ disk->bdi = bdi_alloc(node_id);
+ if (!disk->bdi)
+@@ -1387,11 +1404,8 @@ out_free_bdi:
+ bdi_put(disk->bdi);
+ out_free_disk:
+ kfree(disk);
+-out_put_queue:
+- blk_put_queue(q);
+ return NULL;
+ }
+-EXPORT_SYMBOL(__alloc_disk_node);
+
+ struct gendisk *__blk_alloc_disk(int node, struct lock_class_key *lkclass)
+ {
+@@ -1404,9 +1418,10 @@ struct gendisk *__blk_alloc_disk(int node, struct lock_class_key *lkclass)
+
+ disk = __alloc_disk_node(q, node, lkclass);
+ if (!disk) {
+- blk_cleanup_queue(q);
++ blk_put_queue(q);
+ return NULL;
+ }
++ set_bit(GD_OWNS_QUEUE, &disk->state);
+ return disk;
+ }
+ EXPORT_SYMBOL(__blk_alloc_disk);
+@@ -1418,6 +1433,9 @@ EXPORT_SYMBOL(__blk_alloc_disk);
+ * This decrements the refcount for the struct gendisk. When this reaches 0
+ * we'll have disk_release() called.
+ *
++ * Note: for blk-mq disk put_disk must be called before freeing the tag_set
++ * when handling probe errors (that is before add_disk() is called).
++ *
+ * Context: Any context, but the last reference must not be dropped from
+ * atomic context.
+ */
+@@ -1439,7 +1457,6 @@ EXPORT_SYMBOL(put_disk);
+ */
+ void blk_cleanup_disk(struct gendisk *disk)
+ {
+- blk_cleanup_queue(disk->queue);
+ put_disk(disk);
+ }
+ EXPORT_SYMBOL(blk_cleanup_disk);
+diff --git a/certs/Kconfig b/certs/Kconfig
+index bf9b511573d75..1f109b0708778 100644
+--- a/certs/Kconfig
++++ b/certs/Kconfig
+@@ -43,7 +43,7 @@ config SYSTEM_TRUSTED_KEYRING
+ bool "Provide system-wide ring of trusted keys"
+ depends on KEYS
+ depends on ASYMMETRIC_KEY_TYPE
+- depends on X509_CERTIFICATE_PARSER
++ depends on X509_CERTIFICATE_PARSER = y
+ help
+ Provide a system keyring to which trusted keys can be added. Keys in
+ the keyring are considered to be trusted. Keys may be added at will
+diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c
+index e232cc4fd444b..c6e41ee18aaa2 100644
+--- a/drivers/block/ataflop.c
++++ b/drivers/block/ataflop.c
+@@ -2045,7 +2045,6 @@ static void atari_floppy_cleanup(void)
+ if (!unit[i].disk[type])
+ continue;
+ del_gendisk(unit[i].disk[type]);
+- blk_cleanup_queue(unit[i].disk[type]->queue);
+ put_disk(unit[i].disk[type]);
+ }
+ blk_mq_free_tag_set(&unit[i].tag_set);
+diff --git a/drivers/block/loop.c b/drivers/block/loop.c
+index a59910ef948e9..1c036ef686fbb 100644
+--- a/drivers/block/loop.c
++++ b/drivers/block/loop.c
+@@ -2062,7 +2062,6 @@ static void loop_remove(struct loop_device *lo)
+ {
+ /* Make this loop device unreachable from pathname. */
+ del_gendisk(lo->lo_disk);
+- blk_cleanup_queue(lo->lo_disk->queue);
+ blk_mq_free_tag_set(&lo->tag_set);
+
+ mutex_lock(&loop_ctl_mutex);
+diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
+index 6699e4b2f7f43..06994a35acc7a 100644
+--- a/drivers/block/mtip32xx/mtip32xx.c
++++ b/drivers/block/mtip32xx/mtip32xx.c
+@@ -3677,7 +3677,6 @@ static int mtip_block_shutdown(struct driver_data *dd)
+ if (test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag))
+ del_gendisk(dd->disk);
+
+- blk_cleanup_queue(dd->queue);
+ blk_mq_free_tag_set(&dd->tags);
+ put_disk(dd->disk);
+ return 0;
+@@ -4040,7 +4039,6 @@ static void mtip_pci_remove(struct pci_dev *pdev)
+ dev_info(&dd->pdev->dev, "device %s surprise removal\n",
+ dd->disk->disk_name);
+
+- blk_cleanup_queue(dd->queue);
+ blk_mq_free_tag_set(&dd->tags);
+
+ /* De-initialize the protocol layer. */
+diff --git a/drivers/block/rnbd/rnbd-clt.c b/drivers/block/rnbd/rnbd-clt.c
+index 409c76b81aed4..a4470374f54fc 100644
+--- a/drivers/block/rnbd/rnbd-clt.c
++++ b/drivers/block/rnbd/rnbd-clt.c
+@@ -1755,7 +1755,7 @@ static void rnbd_destroy_sessions(void)
+ list_for_each_entry_safe(dev, tn, &sess->devs_list, list) {
+ /*
+ * Here unmap happens in parallel for only one reason:
+- * blk_cleanup_queue() takes around half a second, so
++ * del_gendisk() takes around half a second, so
+ * on huge amount of devices the whole module unload
+ * procedure takes minutes.
+ */
+diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c
+index 63b4f6431d2e6..75057dbbcfbea 100644
+--- a/drivers/block/sx8.c
++++ b/drivers/block/sx8.c
+@@ -1536,7 +1536,7 @@ err_out_free_majors:
+ clear_bit(0, &carm_major_alloc);
+ else if (host->major == 161)
+ clear_bit(1, &carm_major_alloc);
+- blk_cleanup_queue(host->oob_q);
++ blk_mq_destroy_queue(host->oob_q);
+ blk_mq_free_tag_set(&host->tag_set);
+ err_out_dma_free:
+ dma_free_coherent(&pdev->dev, CARM_SHM_SIZE, host->shm, host->shm_dma);
+@@ -1570,7 +1570,7 @@ static void carm_remove_one (struct pci_dev *pdev)
+ clear_bit(0, &carm_major_alloc);
+ else if (host->major == 161)
+ clear_bit(1, &carm_major_alloc);
+- blk_cleanup_queue(host->oob_q);
++ blk_mq_destroy_queue(host->oob_q);
+ blk_mq_free_tag_set(&host->tag_set);
+ dma_free_coherent(&pdev->dev, CARM_SHM_SIZE, host->shm, host->shm_dma);
+ iounmap(host->mmio);
+diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
+index d756423e0059a..59d6d5faf7396 100644
+--- a/drivers/block/virtio_blk.c
++++ b/drivers/block/virtio_blk.c
+@@ -1107,7 +1107,6 @@ static void virtblk_remove(struct virtio_device *vdev)
+ flush_work(&vblk->config_work);
+
+ del_gendisk(vblk->disk);
+- blk_cleanup_queue(vblk->disk->queue);
+ blk_mq_free_tag_set(&vblk->tag_set);
+
+ mutex_lock(&vblk->vdev_mutex);
+diff --git a/drivers/block/z2ram.c b/drivers/block/z2ram.c
+index 7a6ed83481b8d..18ad43d9933ec 100644
+--- a/drivers/block/z2ram.c
++++ b/drivers/block/z2ram.c
+@@ -384,7 +384,6 @@ static void __exit z2_exit(void)
+
+ for (i = 0; i < Z2MINOR_COUNT; i++) {
+ del_gendisk(z2ram_gendisk[i]);
+- blk_cleanup_queue(z2ram_gendisk[i]->queue);
+ put_disk(z2ram_gendisk[i]);
+ }
+ blk_mq_free_tag_set(&tag_set);
+diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
+index 8e78b37d0f6a4..f4cc90ea6198e 100644
+--- a/drivers/cdrom/gdrom.c
++++ b/drivers/cdrom/gdrom.c
+@@ -831,7 +831,6 @@ probe_fail_no_mem:
+
+ static int remove_gdrom(struct platform_device *devptr)
+ {
+- blk_cleanup_queue(gd.gdrom_rq);
+ blk_mq_free_tag_set(&gd.tag_set);
+ free_irq(HW_EVENT_GDROM_CMD, &gd);
+ free_irq(HW_EVENT_GDROM_DMA, &gd);
+diff --git a/drivers/dax/hmem/device.c b/drivers/dax/hmem/device.c
+index cb6401c9e9a4f..acf31cc1dbcca 100644
+--- a/drivers/dax/hmem/device.c
++++ b/drivers/dax/hmem/device.c
+@@ -15,6 +15,7 @@ void hmem_register_device(int target_nid, struct resource *r)
+ .start = r->start,
+ .end = r->end,
+ .flags = IORESOURCE_MEM,
++ .desc = IORES_DESC_SOFT_RESERVED,
+ };
+ struct platform_device *pdev;
+ struct memregion_info info;
+diff --git a/drivers/dma/ti/k3-udma-private.c b/drivers/dma/ti/k3-udma-private.c
+index d4f1e4e9603a4..85e00701473cb 100644
+--- a/drivers/dma/ti/k3-udma-private.c
++++ b/drivers/dma/ti/k3-udma-private.c
+@@ -31,14 +31,14 @@ struct udma_dev *of_xudma_dev_get(struct device_node *np, const char *property)
+ }
+
+ pdev = of_find_device_by_node(udma_node);
++ if (np != udma_node)
++ of_node_put(udma_node);
++
+ if (!pdev) {
+ pr_debug("UDMA device not found\n");
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+
+- if (np != udma_node)
+- of_node_put(udma_node);
+-
+ ud = platform_get_drvdata(pdev);
+ if (!ud) {
+ pr_debug("UDMA has not been probed\n");
+diff --git a/drivers/firmware/arm_scmi/reset.c b/drivers/firmware/arm_scmi/reset.c
+index 673f3eb498f43..e9afa8cab7309 100644
+--- a/drivers/firmware/arm_scmi/reset.c
++++ b/drivers/firmware/arm_scmi/reset.c
+@@ -166,9 +166,13 @@ static int scmi_domain_reset(const struct scmi_protocol_handle *ph, u32 domain,
+ struct scmi_xfer *t;
+ struct scmi_msg_reset_domain_reset *dom;
+ struct scmi_reset_info *pi = ph->get_priv(ph);
+- struct reset_dom_info *rdom = pi->dom_info + domain;
++ struct reset_dom_info *rdom;
+
+- if (rdom->async_reset)
++ if (domain >= pi->num_domains)
++ return -EINVAL;
++
++ rdom = pi->dom_info + domain;
++ if (rdom->async_reset && flags & AUTONOMOUS_RESET)
+ flags |= ASYNCHRONOUS_RESET;
+
+ ret = ph->xops->xfer_get_init(ph, RESET, sizeof(*dom), 0, &t);
+@@ -180,7 +184,7 @@ static int scmi_domain_reset(const struct scmi_protocol_handle *ph, u32 domain,
+ dom->flags = cpu_to_le32(flags);
+ dom->reset_state = cpu_to_le32(state);
+
+- if (rdom->async_reset)
++ if (flags & ASYNCHRONOUS_RESET)
+ ret = ph->xops->do_xfer_with_response(ph, t);
+ else
+ ret = ph->xops->do_xfer(ph, t);
+diff --git a/drivers/firmware/efi/libstub/secureboot.c b/drivers/firmware/efi/libstub/secureboot.c
+index 8a18930f3eb69..516f4f0069bd2 100644
+--- a/drivers/firmware/efi/libstub/secureboot.c
++++ b/drivers/firmware/efi/libstub/secureboot.c
+@@ -14,7 +14,7 @@
+
+ /* SHIM variables */
+ static const efi_guid_t shim_guid = EFI_SHIM_LOCK_GUID;
+-static const efi_char16_t shim_MokSBState_name[] = L"MokSBState";
++static const efi_char16_t shim_MokSBState_name[] = L"MokSBStateRT";
+
+ static efi_status_t get_var(efi_char16_t *name, efi_guid_t *vendor, u32 *attr,
+ unsigned long *data_size, void *data)
+@@ -43,8 +43,8 @@ enum efi_secureboot_mode efi_get_secureboot(void)
+
+ /*
+ * See if a user has put the shim into insecure mode. If so, and if the
+- * variable doesn't have the runtime attribute set, we might as well
+- * honor that.
++ * variable doesn't have the non-volatile attribute set, we might as
++ * well honor that.
+ */
+ size = sizeof(moksbstate);
+ status = get_efi_var(shim_MokSBState_name, &shim_guid,
+@@ -53,7 +53,7 @@ enum efi_secureboot_mode efi_get_secureboot(void)
+ /* If it fails, we don't care why. Default to secure */
+ if (status != EFI_SUCCESS)
+ goto secure_boot_enabled;
+- if (!(attr & EFI_VARIABLE_RUNTIME_ACCESS) && moksbstate == 1)
++ if (!(attr & EFI_VARIABLE_NON_VOLATILE) && moksbstate == 1)
+ return efi_secureboot_mode_disabled;
+
+ secure_boot_enabled:
+diff --git a/drivers/firmware/efi/libstub/x86-stub.c b/drivers/firmware/efi/libstub/x86-stub.c
+index 05ae8bcc9d671..9780f32a9f243 100644
+--- a/drivers/firmware/efi/libstub/x86-stub.c
++++ b/drivers/firmware/efi/libstub/x86-stub.c
+@@ -517,6 +517,13 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
+ hdr->ramdisk_image = 0;
+ hdr->ramdisk_size = 0;
+
++ /*
++ * Disregard any setup data that was provided by the bootloader:
++ * setup_data could be pointing anywhere, and we have no way of
++ * authenticating or validating the payload.
++ */
++ hdr->setup_data = 0;
++
+ efi_stub_entry(handle, sys_table_arg, boot_params);
+ /* not reached */
+
+diff --git a/drivers/gpio/gpio-ixp4xx.c b/drivers/gpio/gpio-ixp4xx.c
+index 312309be0287d..56656fb519f85 100644
+--- a/drivers/gpio/gpio-ixp4xx.c
++++ b/drivers/gpio/gpio-ixp4xx.c
+@@ -63,6 +63,14 @@ static void ixp4xx_gpio_irq_ack(struct irq_data *d)
+ __raw_writel(BIT(d->hwirq), g->base + IXP4XX_REG_GPIS);
+ }
+
++static void ixp4xx_gpio_mask_irq(struct irq_data *d)
++{
++ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
++
++ irq_chip_mask_parent(d);
++ gpiochip_disable_irq(gc, d->hwirq);
++}
++
+ static void ixp4xx_gpio_irq_unmask(struct irq_data *d)
+ {
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+@@ -72,6 +80,7 @@ static void ixp4xx_gpio_irq_unmask(struct irq_data *d)
+ if (!(g->irq_edge & BIT(d->hwirq)))
+ ixp4xx_gpio_irq_ack(d);
+
++ gpiochip_enable_irq(gc, d->hwirq);
+ irq_chip_unmask_parent(d);
+ }
+
+@@ -149,12 +158,14 @@ static int ixp4xx_gpio_irq_set_type(struct irq_data *d, unsigned int type)
+ return irq_chip_set_type_parent(d, IRQ_TYPE_LEVEL_HIGH);
+ }
+
+-static struct irq_chip ixp4xx_gpio_irqchip = {
++static const struct irq_chip ixp4xx_gpio_irqchip = {
+ .name = "IXP4GPIO",
+ .irq_ack = ixp4xx_gpio_irq_ack,
+- .irq_mask = irq_chip_mask_parent,
++ .irq_mask = ixp4xx_gpio_mask_irq,
+ .irq_unmask = ixp4xx_gpio_irq_unmask,
+ .irq_set_type = ixp4xx_gpio_irq_set_type,
++ .flags = IRQCHIP_IMMUTABLE,
++ GPIOCHIP_IRQ_RESOURCE_HELPERS,
+ };
+
+ static int ixp4xx_gpio_child_to_parent_hwirq(struct gpio_chip *gc,
+@@ -263,7 +274,7 @@ static int ixp4xx_gpio_probe(struct platform_device *pdev)
+ g->gc.owner = THIS_MODULE;
+
+ girq = &g->gc.irq;
+- girq->chip = &ixp4xx_gpio_irqchip;
++ gpio_irq_chip_set_chip(girq, &ixp4xx_gpio_irqchip);
+ girq->fwnode = g->fwnode;
+ girq->parent_domain = parent;
+ girq->child_to_parent_hwirq = ixp4xx_gpio_child_to_parent_hwirq;
+diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c
+index a2e505a7545cd..523dfd17dd922 100644
+--- a/drivers/gpio/gpio-mockup.c
++++ b/drivers/gpio/gpio-mockup.c
+@@ -533,8 +533,10 @@ static int __init gpio_mockup_register_chip(int idx)
+ }
+
+ fwnode = fwnode_create_software_node(properties, NULL);
+- if (IS_ERR(fwnode))
++ if (IS_ERR(fwnode)) {
++ kfree_strarray(line_names, ngpio);
+ return PTR_ERR(fwnode);
++ }
+
+ pdevinfo.name = "gpio-mockup";
+ pdevinfo.id = idx;
+@@ -597,9 +599,9 @@ static int __init gpio_mockup_init(void)
+
+ static void __exit gpio_mockup_exit(void)
+ {
++ gpio_mockup_unregister_pdevs();
+ debugfs_remove_recursive(gpio_mockup_dbg_dir);
+ platform_driver_unregister(&gpio_mockup_driver);
+- gpio_mockup_unregister_pdevs();
+ }
+
+ module_init(gpio_mockup_init);
+diff --git a/drivers/gpio/gpio-mt7621.c b/drivers/gpio/gpio-mt7621.c
+index d8a26e503ca5d..f163f5ca857be 100644
+--- a/drivers/gpio/gpio-mt7621.c
++++ b/drivers/gpio/gpio-mt7621.c
+@@ -112,6 +112,8 @@ mediatek_gpio_irq_unmask(struct irq_data *d)
+ unsigned long flags;
+ u32 rise, fall, high, low;
+
++ gpiochip_enable_irq(gc, d->hwirq);
++
+ spin_lock_irqsave(&rg->lock, flags);
+ rise = mtk_gpio_r32(rg, GPIO_REG_REDGE);
+ fall = mtk_gpio_r32(rg, GPIO_REG_FEDGE);
+@@ -143,6 +145,8 @@ mediatek_gpio_irq_mask(struct irq_data *d)
+ mtk_gpio_w32(rg, GPIO_REG_HLVL, high & ~BIT(pin));
+ mtk_gpio_w32(rg, GPIO_REG_LLVL, low & ~BIT(pin));
+ spin_unlock_irqrestore(&rg->lock, flags);
++
++ gpiochip_disable_irq(gc, d->hwirq);
+ }
+
+ static int
+@@ -204,6 +208,16 @@ mediatek_gpio_xlate(struct gpio_chip *chip,
+ return gpio % MTK_BANK_WIDTH;
+ }
+
++static const struct irq_chip mt7621_irq_chip = {
++ .name = "mt7621-gpio",
++ .irq_mask_ack = mediatek_gpio_irq_mask,
++ .irq_mask = mediatek_gpio_irq_mask,
++ .irq_unmask = mediatek_gpio_irq_unmask,
++ .irq_set_type = mediatek_gpio_irq_type,
++ .flags = IRQCHIP_IMMUTABLE,
++ GPIOCHIP_IRQ_RESOURCE_HELPERS,
++};
++
+ static int
+ mediatek_gpio_bank_probe(struct device *dev, int bank)
+ {
+@@ -238,11 +252,6 @@ mediatek_gpio_bank_probe(struct device *dev, int bank)
+ return -ENOMEM;
+
+ rg->chip.offset = bank * MTK_BANK_WIDTH;
+- rg->irq_chip.name = dev_name(dev);
+- rg->irq_chip.irq_unmask = mediatek_gpio_irq_unmask;
+- rg->irq_chip.irq_mask = mediatek_gpio_irq_mask;
+- rg->irq_chip.irq_mask_ack = mediatek_gpio_irq_mask;
+- rg->irq_chip.irq_set_type = mediatek_gpio_irq_type;
+
+ if (mtk->gpio_irq) {
+ struct gpio_irq_chip *girq;
+@@ -262,7 +271,7 @@ mediatek_gpio_bank_probe(struct device *dev, int bank)
+ }
+
+ girq = &rg->chip.irq;
+- girq->chip = &rg->irq_chip;
++ gpio_irq_chip_set_chip(girq, &mt7621_irq_chip);
+ /* This will let us handle the parent IRQ in the driver */
+ girq->parent_handler = NULL;
+ girq->num_parents = 0;
+diff --git a/drivers/gpio/gpio-tqmx86.c b/drivers/gpio/gpio-tqmx86.c
+index fa4bc7481f9a6..e739dcea61b23 100644
+--- a/drivers/gpio/gpio-tqmx86.c
++++ b/drivers/gpio/gpio-tqmx86.c
+@@ -307,6 +307,8 @@ static int tqmx86_gpio_probe(struct platform_device *pdev)
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_simple_irq;
+ girq->init_valid_mask = tqmx86_init_irq_valid_mask;
++
++ irq_domain_set_pm_device(girq->domain, dev);
+ }
+
+ ret = devm_gpiochip_add_data(dev, chip, gpio);
+@@ -315,8 +317,6 @@ static int tqmx86_gpio_probe(struct platform_device *pdev)
+ goto out_pm_dis;
+ }
+
+- irq_domain_set_pm_device(girq->domain, dev);
+-
+ dev_info(dev, "GPIO functionality initialized with %d pins\n",
+ chip->ngpio);
+
+diff --git a/drivers/gpio/gpiolib-cdev.c b/drivers/gpio/gpiolib-cdev.c
+index b26e643383762..21fee9ed7f0d2 100644
+--- a/drivers/gpio/gpiolib-cdev.c
++++ b/drivers/gpio/gpiolib-cdev.c
+@@ -1975,7 +1975,6 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
+ ret = -ENODEV;
+ goto out_free_le;
+ }
+- le->irq = irq;
+
+ if (eflags & GPIOEVENT_REQUEST_RISING_EDGE)
+ irqflags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
+@@ -1989,7 +1988,7 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
+ init_waitqueue_head(&le->wait);
+
+ /* Request a thread to read the events */
+- ret = request_threaded_irq(le->irq,
++ ret = request_threaded_irq(irq,
+ lineevent_irq_handler,
+ lineevent_irq_thread,
+ irqflags,
+@@ -1998,6 +1997,8 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
+ if (ret)
+ goto out_free_le;
+
++ le->irq = irq;
++
+ fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC);
+ if (fd < 0) {
+ ret = fd;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+index 4dfd6724b3caa..0a8c15c3a04c3 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+@@ -35,6 +35,8 @@
+ #include <linux/pci.h>
+ #include <linux/pm_runtime.h>
+ #include <drm/drm_crtc_helper.h>
++#include <drm/drm_damage_helper.h>
++#include <drm/drm_drv.h>
+ #include <drm/drm_edid.h>
+ #include <drm/drm_gem_framebuffer_helper.h>
+ #include <drm/drm_fb_helper.h>
+@@ -495,6 +497,12 @@ static const struct drm_framebuffer_funcs amdgpu_fb_funcs = {
+ .create_handle = drm_gem_fb_create_handle,
+ };
+
++static const struct drm_framebuffer_funcs amdgpu_fb_funcs_atomic = {
++ .destroy = drm_gem_fb_destroy,
++ .create_handle = drm_gem_fb_create_handle,
++ .dirty = drm_atomic_helper_dirtyfb,
++};
++
+ uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev,
+ uint64_t bo_flags)
+ {
+@@ -1069,7 +1077,10 @@ static int amdgpu_display_gem_fb_verify_and_init(struct drm_device *dev,
+ if (ret)
+ goto err;
+
+- ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
++ if (drm_drv_uses_atomic_modeset(dev))
++ ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs_atomic);
++ else
++ ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
+ if (ret)
+ goto err;
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+index b19bf0c3f3737..79ce654bd3dad 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+@@ -748,7 +748,7 @@ static int psp_tmr_init(struct psp_context *psp)
+ }
+
+ pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
+- ret = amdgpu_bo_create_kernel(psp->adev, tmr_size, PSP_TMR_SIZE(psp->adev),
++ ret = amdgpu_bo_create_kernel(psp->adev, tmr_size, PSP_TMR_ALIGNMENT,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &psp->tmr_bo, &psp->tmr_mc_addr, pptr);
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+index e431f49949319..cd366c7f311fd 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+@@ -36,6 +36,7 @@
+ #define PSP_CMD_BUFFER_SIZE 0x1000
+ #define PSP_1_MEG 0x100000
+ #define PSP_TMR_SIZE(adev) ((adev)->asic_type == CHIP_ALDEBARAN ? 0x800000 : 0x400000)
++#define PSP_TMR_ALIGNMENT 0x100000
+ #define PSP_FW_NAME_LEN 0x24
+
+ enum psp_shared_mem_size {
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+index dac202ae864dd..9193ca5d6fe7a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+@@ -1805,7 +1805,8 @@ static void amdgpu_ras_log_on_err_counter(struct amdgpu_device *adev)
+ amdgpu_ras_query_error_status(adev, &info);
+
+ if (adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2) &&
+- adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4)) {
++ adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4) &&
++ adev->ip_versions[MP0_HWIP][0] != IP_VERSION(13, 0, 0)) {
+ if (amdgpu_ras_reset_error_status(adev, info.head.block))
+ dev_warn(adev->dev, "Failed to reset error counter and error status");
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c
+index cdc0c97798483..6c1fd471a4c7d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c
++++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c
+@@ -28,6 +28,14 @@
+ #include "nbio/nbio_7_7_0_sh_mask.h"
+ #include <uapi/linux/kfd_ioctl.h>
+
++static void nbio_v7_7_remap_hdp_registers(struct amdgpu_device *adev)
++{
++ WREG32_SOC15(NBIO, 0, regBIF_BX0_REMAP_HDP_MEM_FLUSH_CNTL,
++ adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL);
++ WREG32_SOC15(NBIO, 0, regBIF_BX0_REMAP_HDP_REG_FLUSH_CNTL,
++ adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_REG_FLUSH_CNTL);
++}
++
+ static u32 nbio_v7_7_get_rev_id(struct amdgpu_device *adev)
+ {
+ u32 tmp;
+@@ -237,4 +245,5 @@ const struct amdgpu_nbio_funcs nbio_v7_7_funcs = {
+ .ih_doorbell_range = nbio_v7_7_ih_doorbell_range,
+ .ih_control = nbio_v7_7_ih_control,
+ .init_registers = nbio_v7_7_init_registers,
++ .remap_hdp_registers = nbio_v7_7_remap_hdp_registers,
+ };
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
+index f47d82da115c9..42a567e71439b 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
+@@ -6651,8 +6651,7 @@ static double CalculateUrgentLatency(
+ return ret;
+ }
+
+-
+-static void UseMinimumDCFCLK(
++static noinline_for_stack void UseMinimumDCFCLK(
+ struct display_mode_lib *mode_lib,
+ int MaxInterDCNTileRepeaters,
+ int MaxPrefetchMode,
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
+index e4b9fd31223c9..40a672236198e 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
+@@ -261,33 +261,13 @@ static void CalculateRowBandwidth(
+
+ static void CalculateFlipSchedule(
+ struct display_mode_lib *mode_lib,
++ unsigned int k,
+ double HostVMInefficiencyFactor,
+ double UrgentExtraLatency,
+ double UrgentLatency,
+- unsigned int GPUVMMaxPageTableLevels,
+- bool HostVMEnable,
+- unsigned int HostVMMaxNonCachedPageTableLevels,
+- bool GPUVMEnable,
+- double HostVMMinPageSize,
+ double PDEAndMetaPTEBytesPerFrame,
+ double MetaRowBytes,
+- double DPTEBytesPerRow,
+- double BandwidthAvailableForImmediateFlip,
+- unsigned int TotImmediateFlipBytes,
+- enum source_format_class SourcePixelFormat,
+- double LineTime,
+- double VRatio,
+- double VRatioChroma,
+- double Tno_bw,
+- bool DCCEnable,
+- unsigned int dpte_row_height,
+- unsigned int meta_row_height,
+- unsigned int dpte_row_height_chroma,
+- unsigned int meta_row_height_chroma,
+- double *DestinationLinesToRequestVMInImmediateFlip,
+- double *DestinationLinesToRequestRowInImmediateFlip,
+- double *final_flip_bw,
+- bool *ImmediateFlipSupportedForPipe);
++ double DPTEBytesPerRow);
+ static double CalculateWriteBackDelay(
+ enum source_format_class WritebackPixelFormat,
+ double WritebackHRatio,
+@@ -321,64 +301,28 @@ static void CalculateVupdateAndDynamicMetadataParameters(
+ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
+ struct display_mode_lib *mode_lib,
+ unsigned int PrefetchMode,
+- unsigned int NumberOfActivePlanes,
+- unsigned int MaxLineBufferLines,
+- unsigned int LineBufferSize,
+- unsigned int WritebackInterfaceBufferSize,
+ double DCFCLK,
+ double ReturnBW,
+- bool SynchronizedVBlank,
+- unsigned int dpte_group_bytes[],
+- unsigned int MetaChunkSize,
+ double UrgentLatency,
+ double ExtraLatency,
+- double WritebackLatency,
+- double WritebackChunkSize,
+ double SOCCLK,
+- double DRAMClockChangeLatency,
+- double SRExitTime,
+- double SREnterPlusExitTime,
+- double SRExitZ8Time,
+- double SREnterPlusExitZ8Time,
+ double DCFCLKDeepSleep,
+ unsigned int DETBufferSizeY[],
+ unsigned int DETBufferSizeC[],
+ unsigned int SwathHeightY[],
+ unsigned int SwathHeightC[],
+- unsigned int LBBitPerPixel[],
+ double SwathWidthY[],
+ double SwathWidthC[],
+- double HRatio[],
+- double HRatioChroma[],
+- unsigned int vtaps[],
+- unsigned int VTAPsChroma[],
+- double VRatio[],
+- double VRatioChroma[],
+- unsigned int HTotal[],
+- double PixelClock[],
+- unsigned int BlendingAndTiming[],
+ unsigned int DPPPerPlane[],
+ double BytePerPixelDETY[],
+ double BytePerPixelDETC[],
+- double DSTXAfterScaler[],
+- double DSTYAfterScaler[],
+- bool WritebackEnable[],
+- enum source_format_class WritebackPixelFormat[],
+- double WritebackDestinationWidth[],
+- double WritebackDestinationHeight[],
+- double WritebackSourceHeight[],
+ bool UnboundedRequestEnabled,
+ int unsigned CompressedBufferSizeInkByte,
+ enum clock_change_support *DRAMClockChangeSupport,
+- double *UrgentWatermark,
+- double *WritebackUrgentWatermark,
+- double *DRAMClockChangeWatermark,
+- double *WritebackDRAMClockChangeWatermark,
+ double *StutterExitWatermark,
+ double *StutterEnterPlusExitWatermark,
+ double *Z8StutterExitWatermark,
+- double *Z8StutterEnterPlusExitWatermark,
+- double *MinActiveDRAMClockChangeLatencySupported);
++ double *Z8StutterEnterPlusExitWatermark);
+
+ static void CalculateDCFCLKDeepSleep(
+ struct display_mode_lib *mode_lib,
+@@ -2914,33 +2858,13 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
+ CalculateFlipSchedule(
+ mode_lib,
++ k,
+ HostVMInefficiencyFactor,
+ v->UrgentExtraLatency,
+ v->UrgentLatency,
+- v->GPUVMMaxPageTableLevels,
+- v->HostVMEnable,
+- v->HostVMMaxNonCachedPageTableLevels,
+- v->GPUVMEnable,
+- v->HostVMMinPageSize,
+ v->PDEAndMetaPTEBytesFrame[k],
+ v->MetaRowByte[k],
+- v->PixelPTEBytesPerRow[k],
+- v->BandwidthAvailableForImmediateFlip,
+- v->TotImmediateFlipBytes,
+- v->SourcePixelFormat[k],
+- v->HTotal[k] / v->PixelClock[k],
+- v->VRatio[k],
+- v->VRatioChroma[k],
+- v->Tno_bw[k],
+- v->DCCEnable[k],
+- v->dpte_row_height[k],
+- v->meta_row_height[k],
+- v->dpte_row_height_chroma[k],
+- v->meta_row_height_chroma[k],
+- &v->DestinationLinesToRequestVMInImmediateFlip[k],
+- &v->DestinationLinesToRequestRowInImmediateFlip[k],
+- &v->final_flip_bw[k],
+- &v->ImmediateFlipSupportedForPipe[k]);
++ v->PixelPTEBytesPerRow[k]);
+ }
+
+ v->total_dcn_read_bw_with_flip = 0.0;
+@@ -3027,64 +2951,28 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
+ CalculateWatermarksAndDRAMSpeedChangeSupport(
+ mode_lib,
+ PrefetchMode,
+- v->NumberOfActivePlanes,
+- v->MaxLineBufferLines,
+- v->LineBufferSize,
+- v->WritebackInterfaceBufferSize,
+ v->DCFCLK,
+ v->ReturnBW,
+- v->SynchronizedVBlank,
+- v->dpte_group_bytes,
+- v->MetaChunkSize,
+ v->UrgentLatency,
+ v->UrgentExtraLatency,
+- v->WritebackLatency,
+- v->WritebackChunkSize,
+ v->SOCCLK,
+- v->DRAMClockChangeLatency,
+- v->SRExitTime,
+- v->SREnterPlusExitTime,
+- v->SRExitZ8Time,
+- v->SREnterPlusExitZ8Time,
+ v->DCFCLKDeepSleep,
+ v->DETBufferSizeY,
+ v->DETBufferSizeC,
+ v->SwathHeightY,
+ v->SwathHeightC,
+- v->LBBitPerPixel,
+ v->SwathWidthY,
+ v->SwathWidthC,
+- v->HRatio,
+- v->HRatioChroma,
+- v->vtaps,
+- v->VTAPsChroma,
+- v->VRatio,
+- v->VRatioChroma,
+- v->HTotal,
+- v->PixelClock,
+- v->BlendingAndTiming,
+ v->DPPPerPlane,
+ v->BytePerPixelDETY,
+ v->BytePerPixelDETC,
+- v->DSTXAfterScaler,
+- v->DSTYAfterScaler,
+- v->WritebackEnable,
+- v->WritebackPixelFormat,
+- v->WritebackDestinationWidth,
+- v->WritebackDestinationHeight,
+- v->WritebackSourceHeight,
+ v->UnboundedRequestEnabled,
+ v->CompressedBufferSizeInkByte,
+ &DRAMClockChangeSupport,
+- &v->UrgentWatermark,
+- &v->WritebackUrgentWatermark,
+- &v->DRAMClockChangeWatermark,
+- &v->WritebackDRAMClockChangeWatermark,
+ &v->StutterExitWatermark,
+ &v->StutterEnterPlusExitWatermark,
+ &v->Z8StutterExitWatermark,
+- &v->Z8StutterEnterPlusExitWatermark,
+- &v->MinActiveDRAMClockChangeLatencySupported);
++ &v->Z8StutterEnterPlusExitWatermark);
+
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
+ if (v->WritebackEnable[k] == true) {
+@@ -3696,61 +3584,43 @@ static void CalculateRowBandwidth(
+
+ static void CalculateFlipSchedule(
+ struct display_mode_lib *mode_lib,
++ unsigned int k,
+ double HostVMInefficiencyFactor,
+ double UrgentExtraLatency,
+ double UrgentLatency,
+- unsigned int GPUVMMaxPageTableLevels,
+- bool HostVMEnable,
+- unsigned int HostVMMaxNonCachedPageTableLevels,
+- bool GPUVMEnable,
+- double HostVMMinPageSize,
+ double PDEAndMetaPTEBytesPerFrame,
+ double MetaRowBytes,
+- double DPTEBytesPerRow,
+- double BandwidthAvailableForImmediateFlip,
+- unsigned int TotImmediateFlipBytes,
+- enum source_format_class SourcePixelFormat,
+- double LineTime,
+- double VRatio,
+- double VRatioChroma,
+- double Tno_bw,
+- bool DCCEnable,
+- unsigned int dpte_row_height,
+- unsigned int meta_row_height,
+- unsigned int dpte_row_height_chroma,
+- unsigned int meta_row_height_chroma,
+- double *DestinationLinesToRequestVMInImmediateFlip,
+- double *DestinationLinesToRequestRowInImmediateFlip,
+- double *final_flip_bw,
+- bool *ImmediateFlipSupportedForPipe)
++ double DPTEBytesPerRow)
+ {
++ struct vba_vars_st *v = &mode_lib->vba;
+ double min_row_time = 0.0;
+ unsigned int HostVMDynamicLevelsTrips;
+ double TimeForFetchingMetaPTEImmediateFlip;
+ double TimeForFetchingRowInVBlankImmediateFlip;
+ double ImmediateFlipBW;
++ double LineTime = v->HTotal[k] / v->PixelClock[k];
+
+- if (GPUVMEnable == true && HostVMEnable == true) {
+- HostVMDynamicLevelsTrips = HostVMMaxNonCachedPageTableLevels;
++ if (v->GPUVMEnable == true && v->HostVMEnable == true) {
++ HostVMDynamicLevelsTrips = v->HostVMMaxNonCachedPageTableLevels;
+ } else {
+ HostVMDynamicLevelsTrips = 0;
+ }
+
+- if (GPUVMEnable == true || DCCEnable == true) {
+- ImmediateFlipBW = (PDEAndMetaPTEBytesPerFrame + MetaRowBytes + DPTEBytesPerRow) * BandwidthAvailableForImmediateFlip / TotImmediateFlipBytes;
++ if (v->GPUVMEnable == true || v->DCCEnable[k] == true) {
++ ImmediateFlipBW = (PDEAndMetaPTEBytesPerFrame + MetaRowBytes + DPTEBytesPerRow) * v->BandwidthAvailableForImmediateFlip / v->TotImmediateFlipBytes;
+ }
+
+- if (GPUVMEnable == true) {
++ if (v->GPUVMEnable == true) {
+ TimeForFetchingMetaPTEImmediateFlip = dml_max3(
+- Tno_bw + PDEAndMetaPTEBytesPerFrame * HostVMInefficiencyFactor / ImmediateFlipBW,
+- UrgentExtraLatency + UrgentLatency * (GPUVMMaxPageTableLevels * (HostVMDynamicLevelsTrips + 1) - 1),
++ v->Tno_bw[k] + PDEAndMetaPTEBytesPerFrame * HostVMInefficiencyFactor / ImmediateFlipBW,
++ UrgentExtraLatency + UrgentLatency * (v->GPUVMMaxPageTableLevels * (HostVMDynamicLevelsTrips + 1) - 1),
+ LineTime / 4.0);
+ } else {
+ TimeForFetchingMetaPTEImmediateFlip = 0;
+ }
+
+- *DestinationLinesToRequestVMInImmediateFlip = dml_ceil(4.0 * (TimeForFetchingMetaPTEImmediateFlip / LineTime), 1) / 4.0;
+- if ((GPUVMEnable == true || DCCEnable == true)) {
++ v->DestinationLinesToRequestVMInImmediateFlip[k] = dml_ceil(4.0 * (TimeForFetchingMetaPTEImmediateFlip / LineTime), 1) / 4.0;
++ if ((v->GPUVMEnable == true || v->DCCEnable[k] == true)) {
+ TimeForFetchingRowInVBlankImmediateFlip = dml_max3(
+ (MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / ImmediateFlipBW,
+ UrgentLatency * (HostVMDynamicLevelsTrips + 1),
+@@ -3759,54 +3629,54 @@ static void CalculateFlipSchedule(
+ TimeForFetchingRowInVBlankImmediateFlip = 0;
+ }
+
+- *DestinationLinesToRequestRowInImmediateFlip = dml_ceil(4.0 * (TimeForFetchingRowInVBlankImmediateFlip / LineTime), 1) / 4.0;
++ v->DestinationLinesToRequestRowInImmediateFlip[k] = dml_ceil(4.0 * (TimeForFetchingRowInVBlankImmediateFlip / LineTime), 1) / 4.0;
+
+- if (GPUVMEnable == true) {
+- *final_flip_bw = dml_max(
+- PDEAndMetaPTEBytesPerFrame * HostVMInefficiencyFactor / (*DestinationLinesToRequestVMInImmediateFlip * LineTime),
+- (MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / (*DestinationLinesToRequestRowInImmediateFlip * LineTime));
+- } else if ((GPUVMEnable == true || DCCEnable == true)) {
+- *final_flip_bw = (MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / (*DestinationLinesToRequestRowInImmediateFlip * LineTime);
++ if (v->GPUVMEnable == true) {
++ v->final_flip_bw[k] = dml_max(
++ PDEAndMetaPTEBytesPerFrame * HostVMInefficiencyFactor / (v->DestinationLinesToRequestVMInImmediateFlip[k] * LineTime),
++ (MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / (v->DestinationLinesToRequestRowInImmediateFlip[k] * LineTime));
++ } else if ((v->GPUVMEnable == true || v->DCCEnable[k] == true)) {
++ v->final_flip_bw[k] = (MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / (v->DestinationLinesToRequestRowInImmediateFlip[k] * LineTime);
+ } else {
+- *final_flip_bw = 0;
++ v->final_flip_bw[k] = 0;
+ }
+
+- if (SourcePixelFormat == dm_420_8 || SourcePixelFormat == dm_420_10 || SourcePixelFormat == dm_rgbe_alpha) {
+- if (GPUVMEnable == true && DCCEnable != true) {
+- min_row_time = dml_min(dpte_row_height * LineTime / VRatio, dpte_row_height_chroma * LineTime / VRatioChroma);
+- } else if (GPUVMEnable != true && DCCEnable == true) {
+- min_row_time = dml_min(meta_row_height * LineTime / VRatio, meta_row_height_chroma * LineTime / VRatioChroma);
++ if (v->SourcePixelFormat[k] == dm_420_8 || v->SourcePixelFormat[k] == dm_420_10 || v->SourcePixelFormat[k] == dm_rgbe_alpha) {
++ if (v->GPUVMEnable == true && v->DCCEnable[k] != true) {
++ min_row_time = dml_min(v->dpte_row_height[k] * LineTime / v->VRatio[k], v->dpte_row_height_chroma[k] * LineTime / v->VRatioChroma[k]);
++ } else if (v->GPUVMEnable != true && v->DCCEnable[k] == true) {
++ min_row_time = dml_min(v->meta_row_height[k] * LineTime / v->VRatio[k], v->meta_row_height_chroma[k] * LineTime / v->VRatioChroma[k]);
+ } else {
+ min_row_time = dml_min4(
+- dpte_row_height * LineTime / VRatio,
+- meta_row_height * LineTime / VRatio,
+- dpte_row_height_chroma * LineTime / VRatioChroma,
+- meta_row_height_chroma * LineTime / VRatioChroma);
++ v->dpte_row_height[k] * LineTime / v->VRatio[k],
++ v->meta_row_height[k] * LineTime / v->VRatio[k],
++ v->dpte_row_height_chroma[k] * LineTime / v->VRatioChroma[k],
++ v->meta_row_height_chroma[k] * LineTime / v->VRatioChroma[k]);
+ }
+ } else {
+- if (GPUVMEnable == true && DCCEnable != true) {
+- min_row_time = dpte_row_height * LineTime / VRatio;
+- } else if (GPUVMEnable != true && DCCEnable == true) {
+- min_row_time = meta_row_height * LineTime / VRatio;
++ if (v->GPUVMEnable == true && v->DCCEnable[k] != true) {
++ min_row_time = v->dpte_row_height[k] * LineTime / v->VRatio[k];
++ } else if (v->GPUVMEnable != true && v->DCCEnable[k] == true) {
++ min_row_time = v->meta_row_height[k] * LineTime / v->VRatio[k];
+ } else {
+- min_row_time = dml_min(dpte_row_height * LineTime / VRatio, meta_row_height * LineTime / VRatio);
++ min_row_time = dml_min(v->dpte_row_height[k] * LineTime / v->VRatio[k], v->meta_row_height[k] * LineTime / v->VRatio[k]);
+ }
+ }
+
+- if (*DestinationLinesToRequestVMInImmediateFlip >= 32 || *DestinationLinesToRequestRowInImmediateFlip >= 16
++ if (v->DestinationLinesToRequestVMInImmediateFlip[k] >= 32 || v->DestinationLinesToRequestRowInImmediateFlip[k] >= 16
+ || TimeForFetchingMetaPTEImmediateFlip + 2 * TimeForFetchingRowInVBlankImmediateFlip > min_row_time) {
+- *ImmediateFlipSupportedForPipe = false;
++ v->ImmediateFlipSupportedForPipe[k] = false;
+ } else {
+- *ImmediateFlipSupportedForPipe = true;
++ v->ImmediateFlipSupportedForPipe[k] = true;
+ }
+
+ #ifdef __DML_VBA_DEBUG__
+- dml_print("DML::%s: DestinationLinesToRequestVMInImmediateFlip = %f\n", __func__, *DestinationLinesToRequestVMInImmediateFlip);
+- dml_print("DML::%s: DestinationLinesToRequestRowInImmediateFlip = %f\n", __func__, *DestinationLinesToRequestRowInImmediateFlip);
++ dml_print("DML::%s: DestinationLinesToRequestVMInImmediateFlip = %f\n", __func__, v->DestinationLinesToRequestVMInImmediateFlip[k]);
++ dml_print("DML::%s: DestinationLinesToRequestRowInImmediateFlip = %f\n", __func__, v->DestinationLinesToRequestRowInImmediateFlip[k]);
+ dml_print("DML::%s: TimeForFetchingMetaPTEImmediateFlip = %f\n", __func__, TimeForFetchingMetaPTEImmediateFlip);
+ dml_print("DML::%s: TimeForFetchingRowInVBlankImmediateFlip = %f\n", __func__, TimeForFetchingRowInVBlankImmediateFlip);
+ dml_print("DML::%s: min_row_time = %f\n", __func__, min_row_time);
+- dml_print("DML::%s: ImmediateFlipSupportedForPipe = %d\n", __func__, *ImmediateFlipSupportedForPipe);
++ dml_print("DML::%s: ImmediateFlipSupportedForPipe = %d\n", __func__, v->ImmediateFlipSupportedForPipe[k]);
+ #endif
+
+ }
+@@ -5397,33 +5267,13 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
+ for (k = 0; k < v->NumberOfActivePlanes; k++) {
+ CalculateFlipSchedule(
+ mode_lib,
++ k,
+ HostVMInefficiencyFactor,
+ v->ExtraLatency,
+ v->UrgLatency[i],
+- v->GPUVMMaxPageTableLevels,
+- v->HostVMEnable,
+- v->HostVMMaxNonCachedPageTableLevels,
+- v->GPUVMEnable,
+- v->HostVMMinPageSize,
+ v->PDEAndMetaPTEBytesPerFrame[i][j][k],
+ v->MetaRowBytes[i][j][k],
+- v->DPTEBytesPerRow[i][j][k],
+- v->BandwidthAvailableForImmediateFlip,
+- v->TotImmediateFlipBytes,
+- v->SourcePixelFormat[k],
+- v->HTotal[k] / v->PixelClock[k],
+- v->VRatio[k],
+- v->VRatioChroma[k],
+- v->Tno_bw[k],
+- v->DCCEnable[k],
+- v->dpte_row_height[k],
+- v->meta_row_height[k],
+- v->dpte_row_height_chroma[k],
+- v->meta_row_height_chroma[k],
+- &v->DestinationLinesToRequestVMInImmediateFlip[k],
+- &v->DestinationLinesToRequestRowInImmediateFlip[k],
+- &v->final_flip_bw[k],
+- &v->ImmediateFlipSupportedForPipe[k]);
++ v->DPTEBytesPerRow[i][j][k]);
+ }
+ v->total_dcn_read_bw_with_flip = 0.0;
+ for (k = 0; k < v->NumberOfActivePlanes; k++) {
+@@ -5481,64 +5331,28 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
+ CalculateWatermarksAndDRAMSpeedChangeSupport(
+ mode_lib,
+ v->PrefetchModePerState[i][j],
+- v->NumberOfActivePlanes,
+- v->MaxLineBufferLines,
+- v->LineBufferSize,
+- v->WritebackInterfaceBufferSize,
+ v->DCFCLKState[i][j],
+ v->ReturnBWPerState[i][j],
+- v->SynchronizedVBlank,
+- v->dpte_group_bytes,
+- v->MetaChunkSize,
+ v->UrgLatency[i],
+ v->ExtraLatency,
+- v->WritebackLatency,
+- v->WritebackChunkSize,
+ v->SOCCLKPerState[i],
+- v->DRAMClockChangeLatency,
+- v->SRExitTime,
+- v->SREnterPlusExitTime,
+- v->SRExitZ8Time,
+- v->SREnterPlusExitZ8Time,
+ v->ProjectedDCFCLKDeepSleep[i][j],
+ v->DETBufferSizeYThisState,
+ v->DETBufferSizeCThisState,
+ v->SwathHeightYThisState,
+ v->SwathHeightCThisState,
+- v->LBBitPerPixel,
+ v->SwathWidthYThisState,
+ v->SwathWidthCThisState,
+- v->HRatio,
+- v->HRatioChroma,
+- v->vtaps,
+- v->VTAPsChroma,
+- v->VRatio,
+- v->VRatioChroma,
+- v->HTotal,
+- v->PixelClock,
+- v->BlendingAndTiming,
+ v->NoOfDPPThisState,
+ v->BytePerPixelInDETY,
+ v->BytePerPixelInDETC,
+- v->DSTXAfterScaler,
+- v->DSTYAfterScaler,
+- v->WritebackEnable,
+- v->WritebackPixelFormat,
+- v->WritebackDestinationWidth,
+- v->WritebackDestinationHeight,
+- v->WritebackSourceHeight,
+ UnboundedRequestEnabledThisState,
+ CompressedBufferSizeInkByteThisState,
+ &v->DRAMClockChangeSupport[i][j],
+- &v->UrgentWatermark,
+- &v->WritebackUrgentWatermark,
+- &v->DRAMClockChangeWatermark,
+- &v->WritebackDRAMClockChangeWatermark,
+- &dummy,
+ &dummy,
+ &dummy,
+ &dummy,
+- &v->MinActiveDRAMClockChangeLatencySupported);
++ &dummy);
+ }
+ }
+
+@@ -5663,64 +5477,28 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
+ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
+ struct display_mode_lib *mode_lib,
+ unsigned int PrefetchMode,
+- unsigned int NumberOfActivePlanes,
+- unsigned int MaxLineBufferLines,
+- unsigned int LineBufferSize,
+- unsigned int WritebackInterfaceBufferSize,
+ double DCFCLK,
+ double ReturnBW,
+- bool SynchronizedVBlank,
+- unsigned int dpte_group_bytes[],
+- unsigned int MetaChunkSize,
+ double UrgentLatency,
+ double ExtraLatency,
+- double WritebackLatency,
+- double WritebackChunkSize,
+ double SOCCLK,
+- double DRAMClockChangeLatency,
+- double SRExitTime,
+- double SREnterPlusExitTime,
+- double SRExitZ8Time,
+- double SREnterPlusExitZ8Time,
+ double DCFCLKDeepSleep,
+ unsigned int DETBufferSizeY[],
+ unsigned int DETBufferSizeC[],
+ unsigned int SwathHeightY[],
+ unsigned int SwathHeightC[],
+- unsigned int LBBitPerPixel[],
+ double SwathWidthY[],
+ double SwathWidthC[],
+- double HRatio[],
+- double HRatioChroma[],
+- unsigned int vtaps[],
+- unsigned int VTAPsChroma[],
+- double VRatio[],
+- double VRatioChroma[],
+- unsigned int HTotal[],
+- double PixelClock[],
+- unsigned int BlendingAndTiming[],
+ unsigned int DPPPerPlane[],
+ double BytePerPixelDETY[],
+ double BytePerPixelDETC[],
+- double DSTXAfterScaler[],
+- double DSTYAfterScaler[],
+- bool WritebackEnable[],
+- enum source_format_class WritebackPixelFormat[],
+- double WritebackDestinationWidth[],
+- double WritebackDestinationHeight[],
+- double WritebackSourceHeight[],
+ bool UnboundedRequestEnabled,
+ int unsigned CompressedBufferSizeInkByte,
+ enum clock_change_support *DRAMClockChangeSupport,
+- double *UrgentWatermark,
+- double *WritebackUrgentWatermark,
+- double *DRAMClockChangeWatermark,
+- double *WritebackDRAMClockChangeWatermark,
+ double *StutterExitWatermark,
+ double *StutterEnterPlusExitWatermark,
+ double *Z8StutterExitWatermark,
+- double *Z8StutterEnterPlusExitWatermark,
+- double *MinActiveDRAMClockChangeLatencySupported)
++ double *Z8StutterEnterPlusExitWatermark)
+ {
+ struct vba_vars_st *v = &mode_lib->vba;
+ double EffectiveLBLatencyHidingY;
+@@ -5740,103 +5518,103 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
+ double TotalPixelBW = 0.0;
+ int k, j;
+
+- *UrgentWatermark = UrgentLatency + ExtraLatency;
++ v->UrgentWatermark = UrgentLatency + ExtraLatency;
+
+ #ifdef __DML_VBA_DEBUG__
+ dml_print("DML::%s: UrgentLatency = %f\n", __func__, UrgentLatency);
+ dml_print("DML::%s: ExtraLatency = %f\n", __func__, ExtraLatency);
+- dml_print("DML::%s: UrgentWatermark = %f\n", __func__, *UrgentWatermark);
++ dml_print("DML::%s: UrgentWatermark = %f\n", __func__, v->UrgentWatermark);
+ #endif
+
+- *DRAMClockChangeWatermark = DRAMClockChangeLatency + *UrgentWatermark;
++ v->DRAMClockChangeWatermark = v->DRAMClockChangeLatency + v->UrgentWatermark;
+
+ #ifdef __DML_VBA_DEBUG__
+- dml_print("DML::%s: DRAMClockChangeLatency = %f\n", __func__, DRAMClockChangeLatency);
+- dml_print("DML::%s: DRAMClockChangeWatermark = %f\n", __func__, *DRAMClockChangeWatermark);
++ dml_print("DML::%s: v->DRAMClockChangeLatency = %f\n", __func__, v->DRAMClockChangeLatency);
++ dml_print("DML::%s: DRAMClockChangeWatermark = %f\n", __func__, v->DRAMClockChangeWatermark);
+ #endif
+
+ v->TotalActiveWriteback = 0;
+- for (k = 0; k < NumberOfActivePlanes; ++k) {
+- if (WritebackEnable[k] == true) {
++ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
++ if (v->WritebackEnable[k] == true) {
+ v->TotalActiveWriteback = v->TotalActiveWriteback + 1;
+ }
+ }
+
+ if (v->TotalActiveWriteback <= 1) {
+- *WritebackUrgentWatermark = WritebackLatency;
++ v->WritebackUrgentWatermark = v->WritebackLatency;
+ } else {
+- *WritebackUrgentWatermark = WritebackLatency + WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
++ v->WritebackUrgentWatermark = v->WritebackLatency + v->WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
+ }
+
+ if (v->TotalActiveWriteback <= 1) {
+- *WritebackDRAMClockChangeWatermark = DRAMClockChangeLatency + WritebackLatency;
++ v->WritebackDRAMClockChangeWatermark = v->DRAMClockChangeLatency + v->WritebackLatency;
+ } else {
+- *WritebackDRAMClockChangeWatermark = DRAMClockChangeLatency + WritebackLatency + WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
++ v->WritebackDRAMClockChangeWatermark = v->DRAMClockChangeLatency + v->WritebackLatency + v->WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
+ }
+
+- for (k = 0; k < NumberOfActivePlanes; ++k) {
++ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
+ TotalPixelBW = TotalPixelBW
+- + DPPPerPlane[k] * (SwathWidthY[k] * BytePerPixelDETY[k] * VRatio[k] + SwathWidthC[k] * BytePerPixelDETC[k] * VRatioChroma[k])
+- / (HTotal[k] / PixelClock[k]);
++ + DPPPerPlane[k] * (SwathWidthY[k] * BytePerPixelDETY[k] * v->VRatio[k] + SwathWidthC[k] * BytePerPixelDETC[k] * v->VRatioChroma[k])
++ / (v->HTotal[k] / v->PixelClock[k]);
+ }
+
+- for (k = 0; k < NumberOfActivePlanes; ++k) {
++ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
+ double EffectiveDETBufferSizeY = DETBufferSizeY[k];
+
+ v->LBLatencyHidingSourceLinesY = dml_min(
+- (double) MaxLineBufferLines,
+- dml_floor(LineBufferSize / LBBitPerPixel[k] / (SwathWidthY[k] / dml_max(HRatio[k], 1.0)), 1)) - (vtaps[k] - 1);
++ (double) v->MaxLineBufferLines,
++ dml_floor(v->LineBufferSize / v->LBBitPerPixel[k] / (SwathWidthY[k] / dml_max(v->HRatio[k], 1.0)), 1)) - (v->vtaps[k] - 1);
+
+ v->LBLatencyHidingSourceLinesC = dml_min(
+- (double) MaxLineBufferLines,
+- dml_floor(LineBufferSize / LBBitPerPixel[k] / (SwathWidthC[k] / dml_max(HRatioChroma[k], 1.0)), 1)) - (VTAPsChroma[k] - 1);
++ (double) v->MaxLineBufferLines,
++ dml_floor(v->LineBufferSize / v->LBBitPerPixel[k] / (SwathWidthC[k] / dml_max(v->HRatioChroma[k], 1.0)), 1)) - (v->VTAPsChroma[k] - 1);
+
+- EffectiveLBLatencyHidingY = v->LBLatencyHidingSourceLinesY / VRatio[k] * (HTotal[k] / PixelClock[k]);
++ EffectiveLBLatencyHidingY = v->LBLatencyHidingSourceLinesY / v->VRatio[k] * (v->HTotal[k] / v->PixelClock[k]);
+
+- EffectiveLBLatencyHidingC = v->LBLatencyHidingSourceLinesC / VRatioChroma[k] * (HTotal[k] / PixelClock[k]);
++ EffectiveLBLatencyHidingC = v->LBLatencyHidingSourceLinesC / v->VRatioChroma[k] * (v->HTotal[k] / v->PixelClock[k]);
+
+ if (UnboundedRequestEnabled) {
+ EffectiveDETBufferSizeY = EffectiveDETBufferSizeY
+- + CompressedBufferSizeInkByte * 1024 * SwathWidthY[k] * BytePerPixelDETY[k] * VRatio[k] / (HTotal[k] / PixelClock[k]) / TotalPixelBW;
++ + CompressedBufferSizeInkByte * 1024 * SwathWidthY[k] * BytePerPixelDETY[k] * v->VRatio[k] / (v->HTotal[k] / v->PixelClock[k]) / TotalPixelBW;
+ }
+
+ LinesInDETY[k] = (double) EffectiveDETBufferSizeY / BytePerPixelDETY[k] / SwathWidthY[k];
+ LinesInDETYRoundedDownToSwath[k] = dml_floor(LinesInDETY[k], SwathHeightY[k]);
+- FullDETBufferingTimeY = LinesInDETYRoundedDownToSwath[k] * (HTotal[k] / PixelClock[k]) / VRatio[k];
++ FullDETBufferingTimeY = LinesInDETYRoundedDownToSwath[k] * (v->HTotal[k] / v->PixelClock[k]) / v->VRatio[k];
+ if (BytePerPixelDETC[k] > 0) {
+ LinesInDETC = v->DETBufferSizeC[k] / BytePerPixelDETC[k] / SwathWidthC[k];
+ LinesInDETCRoundedDownToSwath = dml_floor(LinesInDETC, SwathHeightC[k]);
+- FullDETBufferingTimeC = LinesInDETCRoundedDownToSwath * (HTotal[k] / PixelClock[k]) / VRatioChroma[k];
++ FullDETBufferingTimeC = LinesInDETCRoundedDownToSwath * (v->HTotal[k] / v->PixelClock[k]) / v->VRatioChroma[k];
+ } else {
+ LinesInDETC = 0;
+ FullDETBufferingTimeC = 999999;
+ }
+
+ ActiveDRAMClockChangeLatencyMarginY = EffectiveLBLatencyHidingY + FullDETBufferingTimeY
+- - ((double) DSTXAfterScaler[k] / HTotal[k] + DSTYAfterScaler[k]) * HTotal[k] / PixelClock[k] - *UrgentWatermark - *DRAMClockChangeWatermark;
++ - ((double) v->DSTXAfterScaler[k] / v->HTotal[k] + v->DSTYAfterScaler[k]) * v->HTotal[k] / v->PixelClock[k] - v->UrgentWatermark - v->DRAMClockChangeWatermark;
+
+- if (NumberOfActivePlanes > 1) {
++ if (v->NumberOfActivePlanes > 1) {
+ ActiveDRAMClockChangeLatencyMarginY = ActiveDRAMClockChangeLatencyMarginY
+- - (1 - 1.0 / NumberOfActivePlanes) * SwathHeightY[k] * HTotal[k] / PixelClock[k] / VRatio[k];
++ - (1 - 1.0 / v->NumberOfActivePlanes) * SwathHeightY[k] * v->HTotal[k] / v->PixelClock[k] / v->VRatio[k];
+ }
+
+ if (BytePerPixelDETC[k] > 0) {
+ ActiveDRAMClockChangeLatencyMarginC = EffectiveLBLatencyHidingC + FullDETBufferingTimeC
+- - ((double) DSTXAfterScaler[k] / HTotal[k] + DSTYAfterScaler[k]) * HTotal[k] / PixelClock[k] - *UrgentWatermark - *DRAMClockChangeWatermark;
++ - ((double) v->DSTXAfterScaler[k] / v->HTotal[k] + v->DSTYAfterScaler[k]) * v->HTotal[k] / v->PixelClock[k] - v->UrgentWatermark - v->DRAMClockChangeWatermark;
+
+- if (NumberOfActivePlanes > 1) {
++ if (v->NumberOfActivePlanes > 1) {
+ ActiveDRAMClockChangeLatencyMarginC = ActiveDRAMClockChangeLatencyMarginC
+- - (1 - 1.0 / NumberOfActivePlanes) * SwathHeightC[k] * HTotal[k] / PixelClock[k] / VRatioChroma[k];
++ - (1 - 1.0 / v->NumberOfActivePlanes) * SwathHeightC[k] * v->HTotal[k] / v->PixelClock[k] / v->VRatioChroma[k];
+ }
+ v->ActiveDRAMClockChangeLatencyMargin[k] = dml_min(ActiveDRAMClockChangeLatencyMarginY, ActiveDRAMClockChangeLatencyMarginC);
+ } else {
+ v->ActiveDRAMClockChangeLatencyMargin[k] = ActiveDRAMClockChangeLatencyMarginY;
+ }
+
+- if (WritebackEnable[k] == true) {
+- WritebackDRAMClockChangeLatencyHiding = WritebackInterfaceBufferSize * 1024
+- / (WritebackDestinationWidth[k] * WritebackDestinationHeight[k] / (WritebackSourceHeight[k] * HTotal[k] / PixelClock[k]) * 4);
+- if (WritebackPixelFormat[k] == dm_444_64) {
++ if (v->WritebackEnable[k] == true) {
++ WritebackDRAMClockChangeLatencyHiding = v->WritebackInterfaceBufferSize * 1024
++ / (v->WritebackDestinationWidth[k] * v->WritebackDestinationHeight[k] / (v->WritebackSourceHeight[k] * v->HTotal[k] / v->PixelClock[k]) * 4);
++ if (v->WritebackPixelFormat[k] == dm_444_64) {
+ WritebackDRAMClockChangeLatencyHiding = WritebackDRAMClockChangeLatencyHiding / 2;
+ }
+ WritebackDRAMClockChangeLatencyMargin = WritebackDRAMClockChangeLatencyHiding - v->WritebackDRAMClockChangeWatermark;
+@@ -5846,14 +5624,14 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
+
+ v->MinActiveDRAMClockChangeMargin = 999999;
+ PlaneWithMinActiveDRAMClockChangeMargin = 0;
+- for (k = 0; k < NumberOfActivePlanes; ++k) {
++ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
+ if (v->ActiveDRAMClockChangeLatencyMargin[k] < v->MinActiveDRAMClockChangeMargin) {
+ v->MinActiveDRAMClockChangeMargin = v->ActiveDRAMClockChangeLatencyMargin[k];
+- if (BlendingAndTiming[k] == k) {
++ if (v->BlendingAndTiming[k] == k) {
+ PlaneWithMinActiveDRAMClockChangeMargin = k;
+ } else {
+- for (j = 0; j < NumberOfActivePlanes; ++j) {
+- if (BlendingAndTiming[k] == j) {
++ for (j = 0; j < v->NumberOfActivePlanes; ++j) {
++ if (v->BlendingAndTiming[k] == j) {
+ PlaneWithMinActiveDRAMClockChangeMargin = j;
+ }
+ }
+@@ -5861,11 +5639,11 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
+ }
+ }
+
+- *MinActiveDRAMClockChangeLatencySupported = v->MinActiveDRAMClockChangeMargin + DRAMClockChangeLatency;
++ v->MinActiveDRAMClockChangeLatencySupported = v->MinActiveDRAMClockChangeMargin + v->DRAMClockChangeLatency ;
+
+ SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank = 999999;
+- for (k = 0; k < NumberOfActivePlanes; ++k) {
+- if (!((k == PlaneWithMinActiveDRAMClockChangeMargin) && (BlendingAndTiming[k] == k)) && !(BlendingAndTiming[k] == PlaneWithMinActiveDRAMClockChangeMargin)
++ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
++ if (!((k == PlaneWithMinActiveDRAMClockChangeMargin) && (v->BlendingAndTiming[k] == k)) && !(v->BlendingAndTiming[k] == PlaneWithMinActiveDRAMClockChangeMargin)
+ && v->ActiveDRAMClockChangeLatencyMargin[k] < SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank) {
+ SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank = v->ActiveDRAMClockChangeLatencyMargin[k];
+ }
+@@ -5873,25 +5651,25 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
+
+ v->TotalNumberOfActiveOTG = 0;
+
+- for (k = 0; k < NumberOfActivePlanes; ++k) {
+- if (BlendingAndTiming[k] == k) {
++ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
++ if (v->BlendingAndTiming[k] == k) {
+ v->TotalNumberOfActiveOTG = v->TotalNumberOfActiveOTG + 1;
+ }
+ }
+
+ if (v->MinActiveDRAMClockChangeMargin > 0 && PrefetchMode == 0) {
+ *DRAMClockChangeSupport = dm_dram_clock_change_vactive;
+- } else if ((SynchronizedVBlank == true || v->TotalNumberOfActiveOTG == 1
++ } else if ((v->SynchronizedVBlank == true || v->TotalNumberOfActiveOTG == 1
+ || SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank > 0) && PrefetchMode == 0) {
+ *DRAMClockChangeSupport = dm_dram_clock_change_vblank;
+ } else {
+ *DRAMClockChangeSupport = dm_dram_clock_change_unsupported;
+ }
+
+- *StutterExitWatermark = SRExitTime + ExtraLatency + 10 / DCFCLKDeepSleep;
+- *StutterEnterPlusExitWatermark = (SREnterPlusExitTime + ExtraLatency + 10 / DCFCLKDeepSleep);
+- *Z8StutterExitWatermark = SRExitZ8Time + ExtraLatency + 10 / DCFCLKDeepSleep;
+- *Z8StutterEnterPlusExitWatermark = SREnterPlusExitZ8Time + ExtraLatency + 10 / DCFCLKDeepSleep;
++ *StutterExitWatermark = v->SRExitTime + ExtraLatency + 10 / DCFCLKDeepSleep;
++ *StutterEnterPlusExitWatermark = (v->SREnterPlusExitTime + ExtraLatency + 10 / DCFCLKDeepSleep);
++ *Z8StutterExitWatermark = v->SRExitZ8Time + ExtraLatency + 10 / DCFCLKDeepSleep;
++ *Z8StutterEnterPlusExitWatermark = v->SREnterPlusExitZ8Time + ExtraLatency + 10 / DCFCLKDeepSleep;
+
+ #ifdef __DML_VBA_DEBUG__
+ dml_print("DML::%s: StutterExitWatermark = %f\n", __func__, *StutterExitWatermark);
+diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+index 64a38f08f4974..5a51be753e87f 100644
+--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
++++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+@@ -1603,6 +1603,7 @@ static void interpolate_user_regamma(uint32_t hw_points_num,
+ struct fixed31_32 lut2;
+ struct fixed31_32 delta_lut;
+ struct fixed31_32 delta_index;
++ const struct fixed31_32 one = dc_fixpt_from_int(1);
+
+ i = 0;
+ /* fixed_pt library has problems handling too small values */
+@@ -1631,6 +1632,9 @@ static void interpolate_user_regamma(uint32_t hw_points_num,
+ } else
+ hw_x = coordinates_x[i].x;
+
++ if (dc_fixpt_le(one, hw_x))
++ hw_x = one;
++
+ norm_x = dc_fixpt_mul(norm_factor, hw_x);
+ index = dc_fixpt_floor(norm_x);
+ if (index < 0 || index > 255)
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+index 32bb6b1d95261..d13e455c8827e 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+@@ -368,6 +368,17 @@ static void sienna_cichlid_check_bxco_support(struct smu_context *smu)
+ smu_baco->platform_support =
+ (val & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK) ? true :
+ false;
++
++ /*
++ * Disable BACO entry/exit completely on below SKUs to
++ * avoid hardware intermittent failures.
++ */
++ if (((adev->pdev->device == 0x73A1) &&
++ (adev->pdev->revision == 0x00)) ||
++ ((adev->pdev->device == 0x73BF) &&
++ (adev->pdev->revision == 0xCF)))
++ smu_baco->platform_support = false;
++
+ }
+ }
+
+diff --git a/drivers/gpu/drm/gma500/cdv_device.c b/drivers/gpu/drm/gma500/cdv_device.c
+index dd32b484dd825..ce96234f3df20 100644
+--- a/drivers/gpu/drm/gma500/cdv_device.c
++++ b/drivers/gpu/drm/gma500/cdv_device.c
+@@ -581,11 +581,9 @@ static const struct psb_offset cdv_regmap[2] = {
+ static int cdv_chip_setup(struct drm_device *dev)
+ {
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
+- struct pci_dev *pdev = to_pci_dev(dev->dev);
+ INIT_WORK(&dev_priv->hotplug_work, cdv_hotplug_work_func);
+
+- if (pci_enable_msi(pdev))
+- dev_warn(dev->dev, "Enabling MSI failed!\n");
++ dev_priv->use_msi = true;
+ dev_priv->regmap = cdv_regmap;
+ gma_get_core_freq(dev);
+ psb_intel_opregion_init(dev);
+diff --git a/drivers/gpu/drm/gma500/gem.c b/drivers/gpu/drm/gma500/gem.c
+index dffe37490206d..4b7627a726378 100644
+--- a/drivers/gpu/drm/gma500/gem.c
++++ b/drivers/gpu/drm/gma500/gem.c
+@@ -112,12 +112,12 @@ static void psb_gem_free_object(struct drm_gem_object *obj)
+ {
+ struct psb_gem_object *pobj = to_psb_gem_object(obj);
+
+- drm_gem_object_release(obj);
+-
+ /* Undo the mmap pin if we are destroying the object */
+ if (pobj->mmapping)
+ psb_gem_unpin(pobj);
+
++ drm_gem_object_release(obj);
++
+ WARN_ON(pobj->in_gart && !pobj->stolen);
+
+ release_resource(&pobj->resource);
+diff --git a/drivers/gpu/drm/gma500/gma_display.c b/drivers/gpu/drm/gma500/gma_display.c
+index 34ec3fca09ba6..12287c9bb4d80 100644
+--- a/drivers/gpu/drm/gma500/gma_display.c
++++ b/drivers/gpu/drm/gma500/gma_display.c
+@@ -531,15 +531,18 @@ int gma_crtc_page_flip(struct drm_crtc *crtc,
+ WARN_ON(drm_crtc_vblank_get(crtc) != 0);
+
+ gma_crtc->page_flip_event = event;
++ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ /* Call this locked if we want an event at vblank interrupt. */
+ ret = crtc_funcs->mode_set_base(crtc, crtc->x, crtc->y, old_fb);
+ if (ret) {
+- gma_crtc->page_flip_event = NULL;
+- drm_crtc_vblank_put(crtc);
++ spin_lock_irqsave(&dev->event_lock, flags);
++ if (gma_crtc->page_flip_event) {
++ gma_crtc->page_flip_event = NULL;
++ drm_crtc_vblank_put(crtc);
++ }
++ spin_unlock_irqrestore(&dev->event_lock, flags);
+ }
+-
+- spin_unlock_irqrestore(&dev->event_lock, flags);
+ } else {
+ ret = crtc_funcs->mode_set_base(crtc, crtc->x, crtc->y, old_fb);
+ }
+diff --git a/drivers/gpu/drm/gma500/oaktrail_device.c b/drivers/gpu/drm/gma500/oaktrail_device.c
+index 5923a9c893122..f90e628cb482c 100644
+--- a/drivers/gpu/drm/gma500/oaktrail_device.c
++++ b/drivers/gpu/drm/gma500/oaktrail_device.c
+@@ -501,12 +501,9 @@ static const struct psb_offset oaktrail_regmap[2] = {
+ static int oaktrail_chip_setup(struct drm_device *dev)
+ {
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
+- struct pci_dev *pdev = to_pci_dev(dev->dev);
+ int ret;
+
+- if (pci_enable_msi(pdev))
+- dev_warn(dev->dev, "Enabling MSI failed!\n");
+-
++ dev_priv->use_msi = true;
+ dev_priv->regmap = oaktrail_regmap;
+
+ ret = mid_chip_setup(dev);
+diff --git a/drivers/gpu/drm/gma500/power.c b/drivers/gpu/drm/gma500/power.c
+index b91de6d36e412..66873085d4505 100644
+--- a/drivers/gpu/drm/gma500/power.c
++++ b/drivers/gpu/drm/gma500/power.c
+@@ -139,8 +139,6 @@ static void gma_suspend_pci(struct pci_dev *pdev)
+ dev_priv->regs.saveBSM = bsm;
+ pci_read_config_dword(pdev, 0xFC, &vbt);
+ dev_priv->regs.saveVBT = vbt;
+- pci_read_config_dword(pdev, PSB_PCIx_MSI_ADDR_LOC, &dev_priv->msi_addr);
+- pci_read_config_dword(pdev, PSB_PCIx_MSI_DATA_LOC, &dev_priv->msi_data);
+
+ pci_disable_device(pdev);
+ pci_set_power_state(pdev, PCI_D3hot);
+@@ -168,9 +166,6 @@ static bool gma_resume_pci(struct pci_dev *pdev)
+ pci_restore_state(pdev);
+ pci_write_config_dword(pdev, 0x5c, dev_priv->regs.saveBSM);
+ pci_write_config_dword(pdev, 0xFC, dev_priv->regs.saveVBT);
+- /* restoring MSI address and data in PCIx space */
+- pci_write_config_dword(pdev, PSB_PCIx_MSI_ADDR_LOC, dev_priv->msi_addr);
+- pci_write_config_dword(pdev, PSB_PCIx_MSI_DATA_LOC, dev_priv->msi_data);
+ ret = pci_enable_device(pdev);
+
+ if (ret != 0)
+@@ -223,8 +218,7 @@ int gma_power_resume(struct device *_dev)
+ mutex_lock(&power_mutex);
+ gma_resume_pci(pdev);
+ gma_resume_display(pdev);
+- gma_irq_preinstall(dev);
+- gma_irq_postinstall(dev);
++ gma_irq_install(dev);
+ mutex_unlock(&power_mutex);
+ return 0;
+ }
+diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
+index 1d8744f3e7020..54e756b486060 100644
+--- a/drivers/gpu/drm/gma500/psb_drv.c
++++ b/drivers/gpu/drm/gma500/psb_drv.c
+@@ -383,7 +383,7 @@ static int psb_driver_load(struct drm_device *dev, unsigned long flags)
+ PSB_WVDC32(0xFFFFFFFF, PSB_INT_MASK_R);
+ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
+
+- gma_irq_install(dev, pdev->irq);
++ gma_irq_install(dev);
+
+ dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
+
+diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
+index 0ddfec1a0851d..4c3fc5eaf6ad5 100644
+--- a/drivers/gpu/drm/gma500/psb_drv.h
++++ b/drivers/gpu/drm/gma500/psb_drv.h
+@@ -490,6 +490,7 @@ struct drm_psb_private {
+ int rpm_enabled;
+
+ /* MID specific */
++ bool use_msi;
+ bool has_gct;
+ struct oaktrail_gct_data gct_data;
+
+@@ -499,10 +500,6 @@ struct drm_psb_private {
+ /* Register state */
+ struct psb_save_area regs;
+
+- /* MSI reg save */
+- uint32_t msi_addr;
+- uint32_t msi_data;
+-
+ /* Hotplug handling */
+ struct work_struct hotplug_work;
+
+diff --git a/drivers/gpu/drm/gma500/psb_irq.c b/drivers/gpu/drm/gma500/psb_irq.c
+index e6e6d61bbeab6..038f18ed0a95e 100644
+--- a/drivers/gpu/drm/gma500/psb_irq.c
++++ b/drivers/gpu/drm/gma500/psb_irq.c
+@@ -316,17 +316,24 @@ void gma_irq_postinstall(struct drm_device *dev)
+ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
+ }
+
+-int gma_irq_install(struct drm_device *dev, unsigned int irq)
++int gma_irq_install(struct drm_device *dev)
+ {
++ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
++ struct pci_dev *pdev = to_pci_dev(dev->dev);
+ int ret;
+
+- if (irq == IRQ_NOTCONNECTED)
++ if (dev_priv->use_msi && pci_enable_msi(pdev)) {
++ dev_warn(dev->dev, "Enabling MSI failed!\n");
++ dev_priv->use_msi = false;
++ }
++
++ if (pdev->irq == IRQ_NOTCONNECTED)
+ return -ENOTCONN;
+
+ gma_irq_preinstall(dev);
+
+ /* PCI devices require shared interrupts. */
+- ret = request_irq(irq, gma_irq_handler, IRQF_SHARED, dev->driver->name, dev);
++ ret = request_irq(pdev->irq, gma_irq_handler, IRQF_SHARED, dev->driver->name, dev);
+ if (ret)
+ return ret;
+
+@@ -369,6 +376,8 @@ void gma_irq_uninstall(struct drm_device *dev)
+ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
+
+ free_irq(pdev->irq, dev);
++ if (dev_priv->use_msi)
++ pci_disable_msi(pdev);
+ }
+
+ int gma_crtc_enable_vblank(struct drm_crtc *crtc)
+diff --git a/drivers/gpu/drm/gma500/psb_irq.h b/drivers/gpu/drm/gma500/psb_irq.h
+index b51e395194fff..7648f69824a5d 100644
+--- a/drivers/gpu/drm/gma500/psb_irq.h
++++ b/drivers/gpu/drm/gma500/psb_irq.h
+@@ -17,7 +17,7 @@ struct drm_device;
+
+ void gma_irq_preinstall(struct drm_device *dev);
+ void gma_irq_postinstall(struct drm_device *dev);
+-int gma_irq_install(struct drm_device *dev, unsigned int irq);
++int gma_irq_install(struct drm_device *dev);
+ void gma_irq_uninstall(struct drm_device *dev);
+
+ int gma_crtc_enable_vblank(struct drm_crtc *crtc);
+diff --git a/drivers/gpu/drm/hisilicon/hibmc/Kconfig b/drivers/gpu/drm/hisilicon/hibmc/Kconfig
+index 073adfe438ddd..4e41c144a2902 100644
+--- a/drivers/gpu/drm/hisilicon/hibmc/Kconfig
++++ b/drivers/gpu/drm/hisilicon/hibmc/Kconfig
+@@ -2,6 +2,7 @@
+ config DRM_HISI_HIBMC
+ tristate "DRM Support for Hisilicon Hibmc"
+ depends on DRM && PCI && (ARM64 || COMPILE_TEST)
++ depends on MMU
+ select DRM_KMS_HELPER
+ select DRM_VRAM_HELPER
+ select DRM_TTM
+diff --git a/drivers/gpu/drm/i915/display/g4x_dp.c b/drivers/gpu/drm/i915/display/g4x_dp.c
+index 5a957acebfd62..82ad8fe7440c0 100644
+--- a/drivers/gpu/drm/i915/display/g4x_dp.c
++++ b/drivers/gpu/drm/i915/display/g4x_dp.c
+@@ -395,26 +395,8 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
+ intel_dotclock_calculate(pipe_config->port_clock,
+ &pipe_config->dp_m_n);
+
+- if (intel_dp_is_edp(intel_dp) && dev_priv->vbt.edp.bpp &&
+- pipe_config->pipe_bpp > dev_priv->vbt.edp.bpp) {
+- /*
+- * This is a big fat ugly hack.
+- *
+- * Some machines in UEFI boot mode provide us a VBT that has 18
+- * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
+- * unknown we fail to light up. Yet the same BIOS boots up with
+- * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
+- * max, not what it tells us to use.
+- *
+- * Note: This will still be broken if the eDP panel is not lit
+- * up by the BIOS, and thus we can't get the mode at module
+- * load.
+- */
+- drm_dbg_kms(&dev_priv->drm,
+- "pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
+- pipe_config->pipe_bpp, dev_priv->vbt.edp.bpp);
+- dev_priv->vbt.edp.bpp = pipe_config->pipe_bpp;
+- }
++ if (intel_dp_is_edp(intel_dp))
++ intel_edp_fixup_vbt_bpp(encoder, pipe_config->pipe_bpp);
+ }
+
+ static void
+diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
+index 5508ebb9eb434..f416499dad6f3 100644
+--- a/drivers/gpu/drm/i915/display/icl_dsi.c
++++ b/drivers/gpu/drm/i915/display/icl_dsi.c
+@@ -1864,7 +1864,8 @@ static void icl_dphy_param_init(struct intel_dsi *intel_dsi)
+ {
+ struct drm_device *dev = intel_dsi->base.base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+- struct mipi_config *mipi_config = dev_priv->vbt.dsi.config;
++ struct intel_connector *connector = intel_dsi->attached_connector;
++ struct mipi_config *mipi_config = connector->panel.vbt.dsi.config;
+ u32 tlpx_ns;
+ u32 prepare_cnt, exit_zero_cnt, clk_zero_cnt, trail_cnt;
+ u32 ths_prepare_ns, tclk_trail_ns;
+@@ -2051,6 +2052,8 @@ void icl_dsi_init(struct drm_i915_private *dev_priv)
+ /* attach connector to encoder */
+ intel_connector_attach_encoder(intel_connector, encoder);
+
++ intel_bios_init_panel(dev_priv, &intel_connector->panel);
++
+ mutex_lock(&dev->mode_config.mutex);
+ intel_panel_add_vbt_lfp_fixed_mode(intel_connector);
+ mutex_unlock(&dev->mode_config.mutex);
+@@ -2064,13 +2067,20 @@ void icl_dsi_init(struct drm_i915_private *dev_priv)
+
+ intel_backlight_setup(intel_connector, INVALID_PIPE);
+
+- if (dev_priv->vbt.dsi.config->dual_link)
++ if (intel_connector->panel.vbt.dsi.config->dual_link)
+ intel_dsi->ports = BIT(PORT_A) | BIT(PORT_B);
+ else
+ intel_dsi->ports = BIT(port);
+
+- intel_dsi->dcs_backlight_ports = dev_priv->vbt.dsi.bl_ports;
+- intel_dsi->dcs_cabc_ports = dev_priv->vbt.dsi.cabc_ports;
++ if (drm_WARN_ON(&dev_priv->drm, intel_connector->panel.vbt.dsi.bl_ports & ~intel_dsi->ports))
++ intel_connector->panel.vbt.dsi.bl_ports &= intel_dsi->ports;
++
++ intel_dsi->dcs_backlight_ports = intel_connector->panel.vbt.dsi.bl_ports;
++
++ if (drm_WARN_ON(&dev_priv->drm, intel_connector->panel.vbt.dsi.cabc_ports & ~intel_dsi->ports))
++ intel_connector->panel.vbt.dsi.cabc_ports &= intel_dsi->ports;
++
++ intel_dsi->dcs_cabc_ports = intel_connector->panel.vbt.dsi.cabc_ports;
+
+ for_each_dsi_port(port, intel_dsi->ports) {
+ struct intel_dsi_host *host;
+diff --git a/drivers/gpu/drm/i915/display/intel_backlight.c b/drivers/gpu/drm/i915/display/intel_backlight.c
+index 3e200a2e4ba29..5182bb66bd289 100644
+--- a/drivers/gpu/drm/i915/display/intel_backlight.c
++++ b/drivers/gpu/drm/i915/display/intel_backlight.c
+@@ -1158,9 +1158,10 @@ static u32 vlv_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
+ return DIV_ROUND_CLOSEST(clock, pwm_freq_hz * mul);
+ }
+
+-static u16 get_vbt_pwm_freq(struct drm_i915_private *dev_priv)
++static u16 get_vbt_pwm_freq(struct intel_connector *connector)
+ {
+- u16 pwm_freq_hz = dev_priv->vbt.backlight.pwm_freq_hz;
++ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
++ u16 pwm_freq_hz = connector->panel.vbt.backlight.pwm_freq_hz;
+
+ if (pwm_freq_hz) {
+ drm_dbg_kms(&dev_priv->drm,
+@@ -1180,7 +1181,7 @@ static u32 get_backlight_max_vbt(struct intel_connector *connector)
+ {
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+- u16 pwm_freq_hz = get_vbt_pwm_freq(dev_priv);
++ u16 pwm_freq_hz = get_vbt_pwm_freq(connector);
+ u32 pwm;
+
+ if (!panel->backlight.pwm_funcs->hz_to_pwm) {
+@@ -1217,11 +1218,11 @@ static u32 get_backlight_min_vbt(struct intel_connector *connector)
+ * against this by letting the minimum be at most (arbitrarily chosen)
+ * 25% of the max.
+ */
+- min = clamp_t(int, dev_priv->vbt.backlight.min_brightness, 0, 64);
+- if (min != dev_priv->vbt.backlight.min_brightness) {
++ min = clamp_t(int, connector->panel.vbt.backlight.min_brightness, 0, 64);
++ if (min != connector->panel.vbt.backlight.min_brightness) {
+ drm_dbg_kms(&dev_priv->drm,
+ "clamping VBT min backlight %d/255 to %d/255\n",
+- dev_priv->vbt.backlight.min_brightness, min);
++ connector->panel.vbt.backlight.min_brightness, min);
+ }
+
+ /* vbt value is a coefficient in range [0..255] */
+@@ -1410,7 +1411,7 @@ bxt_setup_backlight(struct intel_connector *connector, enum pipe unused)
+ struct intel_panel *panel = &connector->panel;
+ u32 pwm_ctl, val;
+
+- panel->backlight.controller = dev_priv->vbt.backlight.controller;
++ panel->backlight.controller = connector->panel.vbt.backlight.controller;
+
+ pwm_ctl = intel_de_read(dev_priv,
+ BXT_BLC_PWM_CTL(panel->backlight.controller));
+@@ -1483,7 +1484,7 @@ static int ext_pwm_setup_backlight(struct intel_connector *connector,
+ u32 level;
+
+ /* Get the right PWM chip for DSI backlight according to VBT */
+- if (dev_priv->vbt.dsi.config->pwm_blc == PPS_BLC_PMIC) {
++ if (connector->panel.vbt.dsi.config->pwm_blc == PPS_BLC_PMIC) {
+ panel->backlight.pwm = pwm_get(dev->dev, "pwm_pmic_backlight");
+ desc = "PMIC";
+ } else {
+@@ -1512,11 +1513,11 @@ static int ext_pwm_setup_backlight(struct intel_connector *connector,
+
+ drm_dbg_kms(&dev_priv->drm, "PWM already enabled at freq %ld, VBT freq %d, level %d\n",
+ NSEC_PER_SEC / (unsigned long)panel->backlight.pwm_state.period,
+- get_vbt_pwm_freq(dev_priv), level);
++ get_vbt_pwm_freq(connector), level);
+ } else {
+ /* Set period from VBT frequency, leave other settings at 0. */
+ panel->backlight.pwm_state.period =
+- NSEC_PER_SEC / get_vbt_pwm_freq(dev_priv);
++ NSEC_PER_SEC / get_vbt_pwm_freq(connector);
+ }
+
+ drm_info(&dev_priv->drm, "Using %s PWM for LCD backlight control\n",
+@@ -1601,7 +1602,7 @@ int intel_backlight_setup(struct intel_connector *connector, enum pipe pipe)
+ struct intel_panel *panel = &connector->panel;
+ int ret;
+
+- if (!dev_priv->vbt.backlight.present) {
++ if (!connector->panel.vbt.backlight.present) {
+ if (dev_priv->quirks & QUIRK_BACKLIGHT_PRESENT) {
+ drm_dbg_kms(&dev_priv->drm,
+ "no backlight present per VBT, but present per quirk\n");
+diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
+index 91caf4523b34d..b5de61fe9cc67 100644
+--- a/drivers/gpu/drm/i915/display/intel_bios.c
++++ b/drivers/gpu/drm/i915/display/intel_bios.c
+@@ -682,7 +682,8 @@ static int get_panel_type(struct drm_i915_private *i915)
+
+ /* Parse general panel options */
+ static void
+-parse_panel_options(struct drm_i915_private *i915)
++parse_panel_options(struct drm_i915_private *i915,
++ struct intel_panel *panel)
+ {
+ const struct bdb_lvds_options *lvds_options;
+ int panel_type;
+@@ -692,11 +693,11 @@ parse_panel_options(struct drm_i915_private *i915)
+ if (!lvds_options)
+ return;
+
+- i915->vbt.lvds_dither = lvds_options->pixel_dither;
++ panel->vbt.lvds_dither = lvds_options->pixel_dither;
+
+ panel_type = get_panel_type(i915);
+
+- i915->vbt.panel_type = panel_type;
++ panel->vbt.panel_type = panel_type;
+
+ drrs_mode = (lvds_options->dps_panel_type_bits
+ >> (panel_type * 2)) & MODE_MASK;
+@@ -707,16 +708,16 @@ parse_panel_options(struct drm_i915_private *i915)
+ */
+ switch (drrs_mode) {
+ case 0:
+- i915->vbt.drrs_type = DRRS_TYPE_STATIC;
++ panel->vbt.drrs_type = DRRS_TYPE_STATIC;
+ drm_dbg_kms(&i915->drm, "DRRS supported mode is static\n");
+ break;
+ case 2:
+- i915->vbt.drrs_type = DRRS_TYPE_SEAMLESS;
++ panel->vbt.drrs_type = DRRS_TYPE_SEAMLESS;
+ drm_dbg_kms(&i915->drm,
+ "DRRS supported mode is seamless\n");
+ break;
+ default:
+- i915->vbt.drrs_type = DRRS_TYPE_NONE;
++ panel->vbt.drrs_type = DRRS_TYPE_NONE;
+ drm_dbg_kms(&i915->drm,
+ "DRRS not supported (VBT input)\n");
+ break;
+@@ -725,13 +726,14 @@ parse_panel_options(struct drm_i915_private *i915)
+
+ static void
+ parse_lfp_panel_dtd(struct drm_i915_private *i915,
++ struct intel_panel *panel,
+ const struct bdb_lvds_lfp_data *lvds_lfp_data,
+ const struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs)
+ {
+ const struct lvds_dvo_timing *panel_dvo_timing;
+ const struct lvds_fp_timing *fp_timing;
+ struct drm_display_mode *panel_fixed_mode;
+- int panel_type = i915->vbt.panel_type;
++ int panel_type = panel->vbt.panel_type;
+
+ panel_dvo_timing = get_lvds_dvo_timing(lvds_lfp_data,
+ lvds_lfp_data_ptrs,
+@@ -743,7 +745,7 @@ parse_lfp_panel_dtd(struct drm_i915_private *i915,
+
+ fill_detail_timing_data(panel_fixed_mode, panel_dvo_timing);
+
+- i915->vbt.lfp_lvds_vbt_mode = panel_fixed_mode;
++ panel->vbt.lfp_lvds_vbt_mode = panel_fixed_mode;
+
+ drm_dbg_kms(&i915->drm,
+ "Found panel mode in BIOS VBT legacy lfp table: " DRM_MODE_FMT "\n",
+@@ -756,20 +758,21 @@ parse_lfp_panel_dtd(struct drm_i915_private *i915,
+ /* check the resolution, just to be sure */
+ if (fp_timing->x_res == panel_fixed_mode->hdisplay &&
+ fp_timing->y_res == panel_fixed_mode->vdisplay) {
+- i915->vbt.bios_lvds_val = fp_timing->lvds_reg_val;
++ panel->vbt.bios_lvds_val = fp_timing->lvds_reg_val;
+ drm_dbg_kms(&i915->drm,
+ "VBT initial LVDS value %x\n",
+- i915->vbt.bios_lvds_val);
++ panel->vbt.bios_lvds_val);
+ }
+ }
+
+ static void
+-parse_lfp_data(struct drm_i915_private *i915)
++parse_lfp_data(struct drm_i915_private *i915,
++ struct intel_panel *panel)
+ {
+ const struct bdb_lvds_lfp_data *data;
+ const struct bdb_lvds_lfp_data_tail *tail;
+ const struct bdb_lvds_lfp_data_ptrs *ptrs;
+- int panel_type = i915->vbt.panel_type;
++ int panel_type = panel->vbt.panel_type;
+
+ ptrs = find_section(i915, BDB_LVDS_LFP_DATA_PTRS);
+ if (!ptrs)
+@@ -779,24 +782,25 @@ parse_lfp_data(struct drm_i915_private *i915)
+ if (!data)
+ return;
+
+- if (!i915->vbt.lfp_lvds_vbt_mode)
+- parse_lfp_panel_dtd(i915, data, ptrs);
++ if (!panel->vbt.lfp_lvds_vbt_mode)
++ parse_lfp_panel_dtd(i915, panel, data, ptrs);
+
+ tail = get_lfp_data_tail(data, ptrs);
+ if (!tail)
+ return;
+
+ if (i915->vbt.version >= 188) {
+- i915->vbt.seamless_drrs_min_refresh_rate =
++ panel->vbt.seamless_drrs_min_refresh_rate =
+ tail->seamless_drrs_min_refresh_rate[panel_type];
+ drm_dbg_kms(&i915->drm,
+ "Seamless DRRS min refresh rate: %d Hz\n",
+- i915->vbt.seamless_drrs_min_refresh_rate);
++ panel->vbt.seamless_drrs_min_refresh_rate);
+ }
+ }
+
+ static void
+-parse_generic_dtd(struct drm_i915_private *i915)
++parse_generic_dtd(struct drm_i915_private *i915,
++ struct intel_panel *panel)
+ {
+ const struct bdb_generic_dtd *generic_dtd;
+ const struct generic_dtd_entry *dtd;
+@@ -831,14 +835,14 @@ parse_generic_dtd(struct drm_i915_private *i915)
+
+ num_dtd = (get_blocksize(generic_dtd) -
+ sizeof(struct bdb_generic_dtd)) / generic_dtd->gdtd_size;
+- if (i915->vbt.panel_type >= num_dtd) {
++ if (panel->vbt.panel_type >= num_dtd) {
+ drm_err(&i915->drm,
+ "Panel type %d not found in table of %d DTD's\n",
+- i915->vbt.panel_type, num_dtd);
++ panel->vbt.panel_type, num_dtd);
+ return;
+ }
+
+- dtd = &generic_dtd->dtd[i915->vbt.panel_type];
++ dtd = &generic_dtd->dtd[panel->vbt.panel_type];
+
+ panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL);
+ if (!panel_fixed_mode)
+@@ -881,15 +885,16 @@ parse_generic_dtd(struct drm_i915_private *i915)
+ "Found panel mode in BIOS VBT generic dtd table: " DRM_MODE_FMT "\n",
+ DRM_MODE_ARG(panel_fixed_mode));
+
+- i915->vbt.lfp_lvds_vbt_mode = panel_fixed_mode;
++ panel->vbt.lfp_lvds_vbt_mode = panel_fixed_mode;
+ }
+
+ static void
+-parse_lfp_backlight(struct drm_i915_private *i915)
++parse_lfp_backlight(struct drm_i915_private *i915,
++ struct intel_panel *panel)
+ {
+ const struct bdb_lfp_backlight_data *backlight_data;
+ const struct lfp_backlight_data_entry *entry;
+- int panel_type = i915->vbt.panel_type;
++ int panel_type = panel->vbt.panel_type;
+ u16 level;
+
+ backlight_data = find_section(i915, BDB_LVDS_BACKLIGHT);
+@@ -905,15 +910,15 @@ parse_lfp_backlight(struct drm_i915_private *i915)
+
+ entry = &backlight_data->data[panel_type];
+
+- i915->vbt.backlight.present = entry->type == BDB_BACKLIGHT_TYPE_PWM;
+- if (!i915->vbt.backlight.present) {
++ panel->vbt.backlight.present = entry->type == BDB_BACKLIGHT_TYPE_PWM;
++ if (!panel->vbt.backlight.present) {
+ drm_dbg_kms(&i915->drm,
+ "PWM backlight not present in VBT (type %u)\n",
+ entry->type);
+ return;
+ }
+
+- i915->vbt.backlight.type = INTEL_BACKLIGHT_DISPLAY_DDI;
++ panel->vbt.backlight.type = INTEL_BACKLIGHT_DISPLAY_DDI;
+ if (i915->vbt.version >= 191) {
+ size_t exp_size;
+
+@@ -928,13 +933,13 @@ parse_lfp_backlight(struct drm_i915_private *i915)
+ const struct lfp_backlight_control_method *method;
+
+ method = &backlight_data->backlight_control[panel_type];
+- i915->vbt.backlight.type = method->type;
+- i915->vbt.backlight.controller = method->controller;
++ panel->vbt.backlight.type = method->type;
++ panel->vbt.backlight.controller = method->controller;
+ }
+ }
+
+- i915->vbt.backlight.pwm_freq_hz = entry->pwm_freq_hz;
+- i915->vbt.backlight.active_low_pwm = entry->active_low_pwm;
++ panel->vbt.backlight.pwm_freq_hz = entry->pwm_freq_hz;
++ panel->vbt.backlight.active_low_pwm = entry->active_low_pwm;
+
+ if (i915->vbt.version >= 234) {
+ u16 min_level;
+@@ -955,28 +960,29 @@ parse_lfp_backlight(struct drm_i915_private *i915)
+ drm_warn(&i915->drm, "Brightness min level > 255\n");
+ level = 255;
+ }
+- i915->vbt.backlight.min_brightness = min_level;
++ panel->vbt.backlight.min_brightness = min_level;
+
+- i915->vbt.backlight.brightness_precision_bits =
++ panel->vbt.backlight.brightness_precision_bits =
+ backlight_data->brightness_precision_bits[panel_type];
+ } else {
+ level = backlight_data->level[panel_type];
+- i915->vbt.backlight.min_brightness = entry->min_brightness;
++ panel->vbt.backlight.min_brightness = entry->min_brightness;
+ }
+
+ drm_dbg_kms(&i915->drm,
+ "VBT backlight PWM modulation frequency %u Hz, "
+ "active %s, min brightness %u, level %u, controller %u\n",
+- i915->vbt.backlight.pwm_freq_hz,
+- i915->vbt.backlight.active_low_pwm ? "low" : "high",
+- i915->vbt.backlight.min_brightness,
++ panel->vbt.backlight.pwm_freq_hz,
++ panel->vbt.backlight.active_low_pwm ? "low" : "high",
++ panel->vbt.backlight.min_brightness,
+ level,
+- i915->vbt.backlight.controller);
++ panel->vbt.backlight.controller);
+ }
+
+ /* Try to find sdvo panel data */
+ static void
+-parse_sdvo_panel_data(struct drm_i915_private *i915)
++parse_sdvo_panel_data(struct drm_i915_private *i915,
++ struct intel_panel *panel)
+ {
+ const struct bdb_sdvo_panel_dtds *dtds;
+ struct drm_display_mode *panel_fixed_mode;
+@@ -1009,7 +1015,7 @@ parse_sdvo_panel_data(struct drm_i915_private *i915)
+
+ fill_detail_timing_data(panel_fixed_mode, &dtds->dtds[index]);
+
+- i915->vbt.sdvo_lvds_vbt_mode = panel_fixed_mode;
++ panel->vbt.sdvo_lvds_vbt_mode = panel_fixed_mode;
+
+ drm_dbg_kms(&i915->drm,
+ "Found SDVO panel mode in BIOS VBT tables: " DRM_MODE_FMT "\n",
+@@ -1188,6 +1194,17 @@ parse_driver_features(struct drm_i915_private *i915)
+ driver->lvds_config != BDB_DRIVER_FEATURE_INT_SDVO_LVDS)
+ i915->vbt.int_lvds_support = 0;
+ }
++}
++
++static void
++parse_panel_driver_features(struct drm_i915_private *i915,
++ struct intel_panel *panel)
++{
++ const struct bdb_driver_features *driver;
++
++ driver = find_section(i915, BDB_DRIVER_FEATURES);
++ if (!driver)
++ return;
+
+ if (i915->vbt.version < 228) {
+ drm_dbg_kms(&i915->drm, "DRRS State Enabled:%d\n",
+@@ -1199,17 +1216,18 @@ parse_driver_features(struct drm_i915_private *i915)
+ * driver->drrs_enabled=false
+ */
+ if (!driver->drrs_enabled)
+- i915->vbt.drrs_type = DRRS_TYPE_NONE;
++ panel->vbt.drrs_type = DRRS_TYPE_NONE;
+
+- i915->vbt.psr.enable = driver->psr_enabled;
++ panel->vbt.psr.enable = driver->psr_enabled;
+ }
+ }
+
+ static void
+-parse_power_conservation_features(struct drm_i915_private *i915)
++parse_power_conservation_features(struct drm_i915_private *i915,
++ struct intel_panel *panel)
+ {
+ const struct bdb_lfp_power *power;
+- u8 panel_type = i915->vbt.panel_type;
++ u8 panel_type = panel->vbt.panel_type;
+
+ if (i915->vbt.version < 228)
+ return;
+@@ -1218,7 +1236,7 @@ parse_power_conservation_features(struct drm_i915_private *i915)
+ if (!power)
+ return;
+
+- i915->vbt.psr.enable = power->psr & BIT(panel_type);
++ panel->vbt.psr.enable = power->psr & BIT(panel_type);
+
+ /*
+ * If DRRS is not supported, drrs_type has to be set to 0.
+@@ -1227,19 +1245,20 @@ parse_power_conservation_features(struct drm_i915_private *i915)
+ * power->drrs & BIT(panel_type)=false
+ */
+ if (!(power->drrs & BIT(panel_type)))
+- i915->vbt.drrs_type = DRRS_TYPE_NONE;
++ panel->vbt.drrs_type = DRRS_TYPE_NONE;
+
+ if (i915->vbt.version >= 232)
+- i915->vbt.edp.hobl = power->hobl & BIT(panel_type);
++ panel->vbt.edp.hobl = power->hobl & BIT(panel_type);
+ }
+
+ static void
+-parse_edp(struct drm_i915_private *i915)
++parse_edp(struct drm_i915_private *i915,
++ struct intel_panel *panel)
+ {
+ const struct bdb_edp *edp;
+ const struct edp_power_seq *edp_pps;
+ const struct edp_fast_link_params *edp_link_params;
+- int panel_type = i915->vbt.panel_type;
++ int panel_type = panel->vbt.panel_type;
+
+ edp = find_section(i915, BDB_EDP);
+ if (!edp)
+@@ -1247,13 +1266,13 @@ parse_edp(struct drm_i915_private *i915)
+
+ switch ((edp->color_depth >> (panel_type * 2)) & 3) {
+ case EDP_18BPP:
+- i915->vbt.edp.bpp = 18;
++ panel->vbt.edp.bpp = 18;
+ break;
+ case EDP_24BPP:
+- i915->vbt.edp.bpp = 24;
++ panel->vbt.edp.bpp = 24;
+ break;
+ case EDP_30BPP:
+- i915->vbt.edp.bpp = 30;
++ panel->vbt.edp.bpp = 30;
+ break;
+ }
+
+@@ -1261,14 +1280,14 @@ parse_edp(struct drm_i915_private *i915)
+ edp_pps = &edp->power_seqs[panel_type];
+ edp_link_params = &edp->fast_link_params[panel_type];
+
+- i915->vbt.edp.pps = *edp_pps;
++ panel->vbt.edp.pps = *edp_pps;
+
+ switch (edp_link_params->rate) {
+ case EDP_RATE_1_62:
+- i915->vbt.edp.rate = DP_LINK_BW_1_62;
++ panel->vbt.edp.rate = DP_LINK_BW_1_62;
+ break;
+ case EDP_RATE_2_7:
+- i915->vbt.edp.rate = DP_LINK_BW_2_7;
++ panel->vbt.edp.rate = DP_LINK_BW_2_7;
+ break;
+ default:
+ drm_dbg_kms(&i915->drm,
+@@ -1279,13 +1298,13 @@ parse_edp(struct drm_i915_private *i915)
+
+ switch (edp_link_params->lanes) {
+ case EDP_LANE_1:
+- i915->vbt.edp.lanes = 1;
++ panel->vbt.edp.lanes = 1;
+ break;
+ case EDP_LANE_2:
+- i915->vbt.edp.lanes = 2;
++ panel->vbt.edp.lanes = 2;
+ break;
+ case EDP_LANE_4:
+- i915->vbt.edp.lanes = 4;
++ panel->vbt.edp.lanes = 4;
+ break;
+ default:
+ drm_dbg_kms(&i915->drm,
+@@ -1296,16 +1315,16 @@ parse_edp(struct drm_i915_private *i915)
+
+ switch (edp_link_params->preemphasis) {
+ case EDP_PREEMPHASIS_NONE:
+- i915->vbt.edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_0;
++ panel->vbt.edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_0;
+ break;
+ case EDP_PREEMPHASIS_3_5dB:
+- i915->vbt.edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_1;
++ panel->vbt.edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_1;
+ break;
+ case EDP_PREEMPHASIS_6dB:
+- i915->vbt.edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_2;
++ panel->vbt.edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_2;
+ break;
+ case EDP_PREEMPHASIS_9_5dB:
+- i915->vbt.edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_3;
++ panel->vbt.edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_3;
+ break;
+ default:
+ drm_dbg_kms(&i915->drm,
+@@ -1316,16 +1335,16 @@ parse_edp(struct drm_i915_private *i915)
+
+ switch (edp_link_params->vswing) {
+ case EDP_VSWING_0_4V:
+- i915->vbt.edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_0;
++ panel->vbt.edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_0;
+ break;
+ case EDP_VSWING_0_6V:
+- i915->vbt.edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_1;
++ panel->vbt.edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_1;
+ break;
+ case EDP_VSWING_0_8V:
+- i915->vbt.edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
++ panel->vbt.edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
+ break;
+ case EDP_VSWING_1_2V:
+- i915->vbt.edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
++ panel->vbt.edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
+ break;
+ default:
+ drm_dbg_kms(&i915->drm,
+@@ -1339,24 +1358,25 @@ parse_edp(struct drm_i915_private *i915)
+
+ /* Don't read from VBT if module parameter has valid value*/
+ if (i915->params.edp_vswing) {
+- i915->vbt.edp.low_vswing =
++ panel->vbt.edp.low_vswing =
+ i915->params.edp_vswing == 1;
+ } else {
+ vswing = (edp->edp_vswing_preemph >> (panel_type * 4)) & 0xF;
+- i915->vbt.edp.low_vswing = vswing == 0;
++ panel->vbt.edp.low_vswing = vswing == 0;
+ }
+ }
+
+- i915->vbt.edp.drrs_msa_timing_delay =
++ panel->vbt.edp.drrs_msa_timing_delay =
+ (edp->sdrrs_msa_timing_delay >> (panel_type * 2)) & 3;
+ }
+
+ static void
+-parse_psr(struct drm_i915_private *i915)
++parse_psr(struct drm_i915_private *i915,
++ struct intel_panel *panel)
+ {
+ const struct bdb_psr *psr;
+ const struct psr_table *psr_table;
+- int panel_type = i915->vbt.panel_type;
++ int panel_type = panel->vbt.panel_type;
+
+ psr = find_section(i915, BDB_PSR);
+ if (!psr) {
+@@ -1366,11 +1386,11 @@ parse_psr(struct drm_i915_private *i915)
+
+ psr_table = &psr->psr_table[panel_type];
+
+- i915->vbt.psr.full_link = psr_table->full_link;
+- i915->vbt.psr.require_aux_wakeup = psr_table->require_aux_to_wakeup;
++ panel->vbt.psr.full_link = psr_table->full_link;
++ panel->vbt.psr.require_aux_wakeup = psr_table->require_aux_to_wakeup;
+
+ /* Allowed VBT values goes from 0 to 15 */
+- i915->vbt.psr.idle_frames = psr_table->idle_frames < 0 ? 0 :
++ panel->vbt.psr.idle_frames = psr_table->idle_frames < 0 ? 0 :
+ psr_table->idle_frames > 15 ? 15 : psr_table->idle_frames;
+
+ /*
+@@ -1381,13 +1401,13 @@ parse_psr(struct drm_i915_private *i915)
+ (DISPLAY_VER(i915) >= 9 && !IS_BROXTON(i915))) {
+ switch (psr_table->tp1_wakeup_time) {
+ case 0:
+- i915->vbt.psr.tp1_wakeup_time_us = 500;
++ panel->vbt.psr.tp1_wakeup_time_us = 500;
+ break;
+ case 1:
+- i915->vbt.psr.tp1_wakeup_time_us = 100;
++ panel->vbt.psr.tp1_wakeup_time_us = 100;
+ break;
+ case 3:
+- i915->vbt.psr.tp1_wakeup_time_us = 0;
++ panel->vbt.psr.tp1_wakeup_time_us = 0;
+ break;
+ default:
+ drm_dbg_kms(&i915->drm,
+@@ -1395,19 +1415,19 @@ parse_psr(struct drm_i915_private *i915)
+ psr_table->tp1_wakeup_time);
+ fallthrough;
+ case 2:
+- i915->vbt.psr.tp1_wakeup_time_us = 2500;
++ panel->vbt.psr.tp1_wakeup_time_us = 2500;
+ break;
+ }
+
+ switch (psr_table->tp2_tp3_wakeup_time) {
+ case 0:
+- i915->vbt.psr.tp2_tp3_wakeup_time_us = 500;
++ panel->vbt.psr.tp2_tp3_wakeup_time_us = 500;
+ break;
+ case 1:
+- i915->vbt.psr.tp2_tp3_wakeup_time_us = 100;
++ panel->vbt.psr.tp2_tp3_wakeup_time_us = 100;
+ break;
+ case 3:
+- i915->vbt.psr.tp2_tp3_wakeup_time_us = 0;
++ panel->vbt.psr.tp2_tp3_wakeup_time_us = 0;
+ break;
+ default:
+ drm_dbg_kms(&i915->drm,
+@@ -1415,12 +1435,12 @@ parse_psr(struct drm_i915_private *i915)
+ psr_table->tp2_tp3_wakeup_time);
+ fallthrough;
+ case 2:
+- i915->vbt.psr.tp2_tp3_wakeup_time_us = 2500;
++ panel->vbt.psr.tp2_tp3_wakeup_time_us = 2500;
+ break;
+ }
+ } else {
+- i915->vbt.psr.tp1_wakeup_time_us = psr_table->tp1_wakeup_time * 100;
+- i915->vbt.psr.tp2_tp3_wakeup_time_us = psr_table->tp2_tp3_wakeup_time * 100;
++ panel->vbt.psr.tp1_wakeup_time_us = psr_table->tp1_wakeup_time * 100;
++ panel->vbt.psr.tp2_tp3_wakeup_time_us = psr_table->tp2_tp3_wakeup_time * 100;
+ }
+
+ if (i915->vbt.version >= 226) {
+@@ -1442,62 +1462,66 @@ parse_psr(struct drm_i915_private *i915)
+ wakeup_time = 2500;
+ break;
+ }
+- i915->vbt.psr.psr2_tp2_tp3_wakeup_time_us = wakeup_time;
++ panel->vbt.psr.psr2_tp2_tp3_wakeup_time_us = wakeup_time;
+ } else {
+ /* Reusing PSR1 wakeup time for PSR2 in older VBTs */
+- i915->vbt.psr.psr2_tp2_tp3_wakeup_time_us = i915->vbt.psr.tp2_tp3_wakeup_time_us;
++ panel->vbt.psr.psr2_tp2_tp3_wakeup_time_us = panel->vbt.psr.tp2_tp3_wakeup_time_us;
+ }
+ }
+
+ static void parse_dsi_backlight_ports(struct drm_i915_private *i915,
+- u16 version, enum port port)
++ struct intel_panel *panel,
++ enum port port)
+ {
+- if (!i915->vbt.dsi.config->dual_link || version < 197) {
+- i915->vbt.dsi.bl_ports = BIT(port);
+- if (i915->vbt.dsi.config->cabc_supported)
+- i915->vbt.dsi.cabc_ports = BIT(port);
++ enum port port_bc = DISPLAY_VER(i915) >= 11 ? PORT_B : PORT_C;
++
++ if (!panel->vbt.dsi.config->dual_link || i915->vbt.version < 197) {
++ panel->vbt.dsi.bl_ports = BIT(port);
++ if (panel->vbt.dsi.config->cabc_supported)
++ panel->vbt.dsi.cabc_ports = BIT(port);
+
+ return;
+ }
+
+- switch (i915->vbt.dsi.config->dl_dcs_backlight_ports) {
++ switch (panel->vbt.dsi.config->dl_dcs_backlight_ports) {
+ case DL_DCS_PORT_A:
+- i915->vbt.dsi.bl_ports = BIT(PORT_A);
++ panel->vbt.dsi.bl_ports = BIT(PORT_A);
+ break;
+ case DL_DCS_PORT_C:
+- i915->vbt.dsi.bl_ports = BIT(PORT_C);
++ panel->vbt.dsi.bl_ports = BIT(port_bc);
+ break;
+ default:
+ case DL_DCS_PORT_A_AND_C:
+- i915->vbt.dsi.bl_ports = BIT(PORT_A) | BIT(PORT_C);
++ panel->vbt.dsi.bl_ports = BIT(PORT_A) | BIT(port_bc);
+ break;
+ }
+
+- if (!i915->vbt.dsi.config->cabc_supported)
++ if (!panel->vbt.dsi.config->cabc_supported)
+ return;
+
+- switch (i915->vbt.dsi.config->dl_dcs_cabc_ports) {
++ switch (panel->vbt.dsi.config->dl_dcs_cabc_ports) {
+ case DL_DCS_PORT_A:
+- i915->vbt.dsi.cabc_ports = BIT(PORT_A);
++ panel->vbt.dsi.cabc_ports = BIT(PORT_A);
+ break;
+ case DL_DCS_PORT_C:
+- i915->vbt.dsi.cabc_ports = BIT(PORT_C);
++ panel->vbt.dsi.cabc_ports = BIT(port_bc);
+ break;
+ default:
+ case DL_DCS_PORT_A_AND_C:
+- i915->vbt.dsi.cabc_ports =
+- BIT(PORT_A) | BIT(PORT_C);
++ panel->vbt.dsi.cabc_ports =
++ BIT(PORT_A) | BIT(port_bc);
+ break;
+ }
+ }
+
+ static void
+-parse_mipi_config(struct drm_i915_private *i915)
++parse_mipi_config(struct drm_i915_private *i915,
++ struct intel_panel *panel)
+ {
+ const struct bdb_mipi_config *start;
+ const struct mipi_config *config;
+ const struct mipi_pps_data *pps;
+- int panel_type = i915->vbt.panel_type;
++ int panel_type = panel->vbt.panel_type;
+ enum port port;
+
+ /* parse MIPI blocks only if LFP type is MIPI */
+@@ -1505,7 +1529,7 @@ parse_mipi_config(struct drm_i915_private *i915)
+ return;
+
+ /* Initialize this to undefined indicating no generic MIPI support */
+- i915->vbt.dsi.panel_id = MIPI_DSI_UNDEFINED_PANEL_ID;
++ panel->vbt.dsi.panel_id = MIPI_DSI_UNDEFINED_PANEL_ID;
+
+ /* Block #40 is already parsed and panel_fixed_mode is
+ * stored in i915->lfp_lvds_vbt_mode
+@@ -1532,17 +1556,17 @@ parse_mipi_config(struct drm_i915_private *i915)
+ pps = &start->pps[panel_type];
+
+ /* store as of now full data. Trim when we realise all is not needed */
+- i915->vbt.dsi.config = kmemdup(config, sizeof(struct mipi_config), GFP_KERNEL);
+- if (!i915->vbt.dsi.config)
++ panel->vbt.dsi.config = kmemdup(config, sizeof(struct mipi_config), GFP_KERNEL);
++ if (!panel->vbt.dsi.config)
+ return;
+
+- i915->vbt.dsi.pps = kmemdup(pps, sizeof(struct mipi_pps_data), GFP_KERNEL);
+- if (!i915->vbt.dsi.pps) {
+- kfree(i915->vbt.dsi.config);
++ panel->vbt.dsi.pps = kmemdup(pps, sizeof(struct mipi_pps_data), GFP_KERNEL);
++ if (!panel->vbt.dsi.pps) {
++ kfree(panel->vbt.dsi.config);
+ return;
+ }
+
+- parse_dsi_backlight_ports(i915, i915->vbt.version, port);
++ parse_dsi_backlight_ports(i915, panel, port);
+
+ /* FIXME is the 90 vs. 270 correct? */
+ switch (config->rotation) {
+@@ -1551,25 +1575,25 @@ parse_mipi_config(struct drm_i915_private *i915)
+ * Most (all?) VBTs claim 0 degrees despite having
+ * an upside down panel, thus we do not trust this.
+ */
+- i915->vbt.dsi.orientation =
++ panel->vbt.dsi.orientation =
+ DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
+ break;
+ case ENABLE_ROTATION_90:
+- i915->vbt.dsi.orientation =
++ panel->vbt.dsi.orientation =
+ DRM_MODE_PANEL_ORIENTATION_RIGHT_UP;
+ break;
+ case ENABLE_ROTATION_180:
+- i915->vbt.dsi.orientation =
++ panel->vbt.dsi.orientation =
+ DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP;
+ break;
+ case ENABLE_ROTATION_270:
+- i915->vbt.dsi.orientation =
++ panel->vbt.dsi.orientation =
+ DRM_MODE_PANEL_ORIENTATION_LEFT_UP;
+ break;
+ }
+
+ /* We have mandatory mipi config blocks. Initialize as generic panel */
+- i915->vbt.dsi.panel_id = MIPI_DSI_GENERIC_PANEL_ID;
++ panel->vbt.dsi.panel_id = MIPI_DSI_GENERIC_PANEL_ID;
+ }
+
+ /* Find the sequence block and size for the given panel. */
+@@ -1732,13 +1756,14 @@ static int goto_next_sequence_v3(const u8 *data, int index, int total)
+ * Get len of pre-fixed deassert fragment from a v1 init OTP sequence,
+ * skip all delay + gpio operands and stop at the first DSI packet op.
+ */
+-static int get_init_otp_deassert_fragment_len(struct drm_i915_private *i915)
++static int get_init_otp_deassert_fragment_len(struct drm_i915_private *i915,
++ struct intel_panel *panel)
+ {
+- const u8 *data = i915->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP];
++ const u8 *data = panel->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP];
+ int index, len;
+
+ if (drm_WARN_ON(&i915->drm,
+- !data || i915->vbt.dsi.seq_version != 1))
++ !data || panel->vbt.dsi.seq_version != 1))
+ return 0;
+
+ /* index = 1 to skip sequence byte */
+@@ -1766,7 +1791,8 @@ static int get_init_otp_deassert_fragment_len(struct drm_i915_private *i915)
+ * these devices we split the init OTP sequence into a deassert sequence and
+ * the actual init OTP part.
+ */
+-static void fixup_mipi_sequences(struct drm_i915_private *i915)
++static void fixup_mipi_sequences(struct drm_i915_private *i915,
++ struct intel_panel *panel)
+ {
+ u8 *init_otp;
+ int len;
+@@ -1776,18 +1802,18 @@ static void fixup_mipi_sequences(struct drm_i915_private *i915)
+ return;
+
+ /* Limit this to v1 vid-mode sequences */
+- if (i915->vbt.dsi.config->is_cmd_mode ||
+- i915->vbt.dsi.seq_version != 1)
++ if (panel->vbt.dsi.config->is_cmd_mode ||
++ panel->vbt.dsi.seq_version != 1)
+ return;
+
+ /* Only do this if there are otp and assert seqs and no deassert seq */
+- if (!i915->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP] ||
+- !i915->vbt.dsi.sequence[MIPI_SEQ_ASSERT_RESET] ||
+- i915->vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET])
++ if (!panel->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP] ||
++ !panel->vbt.dsi.sequence[MIPI_SEQ_ASSERT_RESET] ||
++ panel->vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET])
+ return;
+
+ /* The deassert-sequence ends at the first DSI packet */
+- len = get_init_otp_deassert_fragment_len(i915);
++ len = get_init_otp_deassert_fragment_len(i915, panel);
+ if (!len)
+ return;
+
+@@ -1795,25 +1821,26 @@ static void fixup_mipi_sequences(struct drm_i915_private *i915)
+ "Using init OTP fragment to deassert reset\n");
+
+ /* Copy the fragment, update seq byte and terminate it */
+- init_otp = (u8 *)i915->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP];
+- i915->vbt.dsi.deassert_seq = kmemdup(init_otp, len + 1, GFP_KERNEL);
+- if (!i915->vbt.dsi.deassert_seq)
++ init_otp = (u8 *)panel->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP];
++ panel->vbt.dsi.deassert_seq = kmemdup(init_otp, len + 1, GFP_KERNEL);
++ if (!panel->vbt.dsi.deassert_seq)
+ return;
+- i915->vbt.dsi.deassert_seq[0] = MIPI_SEQ_DEASSERT_RESET;
+- i915->vbt.dsi.deassert_seq[len] = MIPI_SEQ_ELEM_END;
++ panel->vbt.dsi.deassert_seq[0] = MIPI_SEQ_DEASSERT_RESET;
++ panel->vbt.dsi.deassert_seq[len] = MIPI_SEQ_ELEM_END;
+ /* Use the copy for deassert */
+- i915->vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET] =
+- i915->vbt.dsi.deassert_seq;
++ panel->vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET] =
++ panel->vbt.dsi.deassert_seq;
+ /* Replace the last byte of the fragment with init OTP seq byte */
+ init_otp[len - 1] = MIPI_SEQ_INIT_OTP;
+ /* And make MIPI_MIPI_SEQ_INIT_OTP point to it */
+- i915->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP] = init_otp + len - 1;
++ panel->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP] = init_otp + len - 1;
+ }
+
+ static void
+-parse_mipi_sequence(struct drm_i915_private *i915)
++parse_mipi_sequence(struct drm_i915_private *i915,
++ struct intel_panel *panel)
+ {
+- int panel_type = i915->vbt.panel_type;
++ int panel_type = panel->vbt.panel_type;
+ const struct bdb_mipi_sequence *sequence;
+ const u8 *seq_data;
+ u32 seq_size;
+@@ -1821,7 +1848,7 @@ parse_mipi_sequence(struct drm_i915_private *i915)
+ int index = 0;
+
+ /* Only our generic panel driver uses the sequence block. */
+- if (i915->vbt.dsi.panel_id != MIPI_DSI_GENERIC_PANEL_ID)
++ if (panel->vbt.dsi.panel_id != MIPI_DSI_GENERIC_PANEL_ID)
+ return;
+
+ sequence = find_section(i915, BDB_MIPI_SEQUENCE);
+@@ -1867,7 +1894,7 @@ parse_mipi_sequence(struct drm_i915_private *i915)
+ drm_dbg_kms(&i915->drm,
+ "Unsupported sequence %u\n", seq_id);
+
+- i915->vbt.dsi.sequence[seq_id] = data + index;
++ panel->vbt.dsi.sequence[seq_id] = data + index;
+
+ if (sequence->version >= 3)
+ index = goto_next_sequence_v3(data, index, seq_size);
+@@ -1880,18 +1907,18 @@ parse_mipi_sequence(struct drm_i915_private *i915)
+ }
+ }
+
+- i915->vbt.dsi.data = data;
+- i915->vbt.dsi.size = seq_size;
+- i915->vbt.dsi.seq_version = sequence->version;
++ panel->vbt.dsi.data = data;
++ panel->vbt.dsi.size = seq_size;
++ panel->vbt.dsi.seq_version = sequence->version;
+
+- fixup_mipi_sequences(i915);
++ fixup_mipi_sequences(i915, panel);
+
+ drm_dbg(&i915->drm, "MIPI related VBT parsing complete\n");
+ return;
+
+ err:
+ kfree(data);
+- memset(i915->vbt.dsi.sequence, 0, sizeof(i915->vbt.dsi.sequence));
++ memset(panel->vbt.dsi.sequence, 0, sizeof(panel->vbt.dsi.sequence));
+ }
+
+ static void
+@@ -2645,15 +2672,6 @@ init_vbt_defaults(struct drm_i915_private *i915)
+ {
+ i915->vbt.crt_ddc_pin = GMBUS_PIN_VGADDC;
+
+- /* Default to having backlight */
+- i915->vbt.backlight.present = true;
+-
+- /* LFP panel data */
+- i915->vbt.lvds_dither = 1;
+-
+- /* SDVO panel data */
+- i915->vbt.sdvo_lvds_vbt_mode = NULL;
+-
+ /* general features */
+ i915->vbt.int_tv_support = 1;
+ i915->vbt.int_crt_support = 1;
+@@ -2673,6 +2691,17 @@ init_vbt_defaults(struct drm_i915_private *i915)
+ i915->vbt.lvds_ssc_freq);
+ }
+
++/* Common defaults which may be overridden by VBT. */
++static void
++init_vbt_panel_defaults(struct intel_panel *panel)
++{
++ /* Default to having backlight */
++ panel->vbt.backlight.present = true;
++
++ /* LFP panel data */
++ panel->vbt.lvds_dither = true;
++}
++
+ /* Defaults to initialize only if there is no VBT. */
+ static void
+ init_vbt_missing_defaults(struct drm_i915_private *i915)
+@@ -2959,17 +2988,7 @@ void intel_bios_init(struct drm_i915_private *i915)
+ /* Grab useful general definitions */
+ parse_general_features(i915);
+ parse_general_definitions(i915);
+- parse_panel_options(i915);
+- parse_generic_dtd(i915);
+- parse_lfp_data(i915);
+- parse_lfp_backlight(i915);
+- parse_sdvo_panel_data(i915);
+ parse_driver_features(i915);
+- parse_power_conservation_features(i915);
+- parse_edp(i915);
+- parse_psr(i915);
+- parse_mipi_config(i915);
+- parse_mipi_sequence(i915);
+
+ /* Depends on child device list */
+ parse_compression_parameters(i915);
+@@ -2988,6 +3007,24 @@ out:
+ kfree(oprom_vbt);
+ }
+
++void intel_bios_init_panel(struct drm_i915_private *i915,
++ struct intel_panel *panel)
++{
++ init_vbt_panel_defaults(panel);
++
++ parse_panel_options(i915, panel);
++ parse_generic_dtd(i915, panel);
++ parse_lfp_data(i915, panel);
++ parse_lfp_backlight(i915, panel);
++ parse_sdvo_panel_data(i915, panel);
++ parse_panel_driver_features(i915, panel);
++ parse_power_conservation_features(i915, panel);
++ parse_edp(i915, panel);
++ parse_psr(i915, panel);
++ parse_mipi_config(i915, panel);
++ parse_mipi_sequence(i915, panel);
++}
++
+ /**
+ * intel_bios_driver_remove - Free any resources allocated by intel_bios_init()
+ * @i915: i915 device instance
+@@ -3007,19 +3044,22 @@ void intel_bios_driver_remove(struct drm_i915_private *i915)
+ list_del(&entry->node);
+ kfree(entry);
+ }
++}
+
+- kfree(i915->vbt.sdvo_lvds_vbt_mode);
+- i915->vbt.sdvo_lvds_vbt_mode = NULL;
+- kfree(i915->vbt.lfp_lvds_vbt_mode);
+- i915->vbt.lfp_lvds_vbt_mode = NULL;
+- kfree(i915->vbt.dsi.data);
+- i915->vbt.dsi.data = NULL;
+- kfree(i915->vbt.dsi.pps);
+- i915->vbt.dsi.pps = NULL;
+- kfree(i915->vbt.dsi.config);
+- i915->vbt.dsi.config = NULL;
+- kfree(i915->vbt.dsi.deassert_seq);
+- i915->vbt.dsi.deassert_seq = NULL;
++void intel_bios_fini_panel(struct intel_panel *panel)
++{
++ kfree(panel->vbt.sdvo_lvds_vbt_mode);
++ panel->vbt.sdvo_lvds_vbt_mode = NULL;
++ kfree(panel->vbt.lfp_lvds_vbt_mode);
++ panel->vbt.lfp_lvds_vbt_mode = NULL;
++ kfree(panel->vbt.dsi.data);
++ panel->vbt.dsi.data = NULL;
++ kfree(panel->vbt.dsi.pps);
++ panel->vbt.dsi.pps = NULL;
++ kfree(panel->vbt.dsi.config);
++ panel->vbt.dsi.config = NULL;
++ kfree(panel->vbt.dsi.deassert_seq);
++ panel->vbt.dsi.deassert_seq = NULL;
+ }
+
+ /**
+diff --git a/drivers/gpu/drm/i915/display/intel_bios.h b/drivers/gpu/drm/i915/display/intel_bios.h
+index 4709c4d298059..86129f015718d 100644
+--- a/drivers/gpu/drm/i915/display/intel_bios.h
++++ b/drivers/gpu/drm/i915/display/intel_bios.h
+@@ -36,6 +36,7 @@ struct drm_i915_private;
+ struct intel_bios_encoder_data;
+ struct intel_crtc_state;
+ struct intel_encoder;
++struct intel_panel;
+ enum port;
+
+ enum intel_backlight_type {
+@@ -230,6 +231,9 @@ struct mipi_pps_data {
+ } __packed;
+
+ void intel_bios_init(struct drm_i915_private *dev_priv);
++void intel_bios_init_panel(struct drm_i915_private *dev_priv,
++ struct intel_panel *panel);
++void intel_bios_fini_panel(struct intel_panel *panel);
+ void intel_bios_driver_remove(struct drm_i915_private *dev_priv);
+ bool intel_bios_is_valid_vbt(const void *buf, size_t size);
+ bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv);
+diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
+index 9e6fa59eabba7..333871cf3a2c5 100644
+--- a/drivers/gpu/drm/i915/display/intel_ddi.c
++++ b/drivers/gpu/drm/i915/display/intel_ddi.c
+@@ -3433,26 +3433,8 @@ static void intel_ddi_get_config(struct intel_encoder *encoder,
+ pipe_config->has_audio =
+ intel_ddi_is_audio_enabled(dev_priv, cpu_transcoder);
+
+- if (encoder->type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp.bpp &&
+- pipe_config->pipe_bpp > dev_priv->vbt.edp.bpp) {
+- /*
+- * This is a big fat ugly hack.
+- *
+- * Some machines in UEFI boot mode provide us a VBT that has 18
+- * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
+- * unknown we fail to light up. Yet the same BIOS boots up with
+- * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
+- * max, not what it tells us to use.
+- *
+- * Note: This will still be broken if the eDP panel is not lit
+- * up by the BIOS, and thus we can't get the mode at module
+- * load.
+- */
+- drm_dbg_kms(&dev_priv->drm,
+- "pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
+- pipe_config->pipe_bpp, dev_priv->vbt.edp.bpp);
+- dev_priv->vbt.edp.bpp = pipe_config->pipe_bpp;
+- }
++ if (encoder->type == INTEL_OUTPUT_EDP)
++ intel_edp_fixup_vbt_bpp(encoder, pipe_config->pipe_bpp);
+
+ ddi_dotclock_get(pipe_config);
+
+diff --git a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c
+index 85f58dd3df722..b490acd0ab691 100644
+--- a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c
++++ b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c
+@@ -1062,17 +1062,18 @@ bool is_hobl_buf_trans(const struct intel_ddi_buf_trans *table)
+
+ static bool use_edp_hobl(struct intel_encoder *encoder)
+ {
+- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
++ struct intel_connector *connector = intel_dp->attached_connector;
+
+- return i915->vbt.edp.hobl && !intel_dp->hobl_failed;
++ return connector->panel.vbt.edp.hobl && !intel_dp->hobl_failed;
+ }
+
+ static bool use_edp_low_vswing(struct intel_encoder *encoder)
+ {
+- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
++ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
++ struct intel_connector *connector = intel_dp->attached_connector;
+
+- return i915->vbt.edp.low_vswing;
++ return connector->panel.vbt.edp.low_vswing;
+ }
+
+ static const struct intel_ddi_buf_trans *
+diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
+index 408152f9f46a4..e2561c5d4953c 100644
+--- a/drivers/gpu/drm/i915/display/intel_display_types.h
++++ b/drivers/gpu/drm/i915/display/intel_display_types.h
+@@ -279,6 +279,73 @@ struct intel_panel_bl_funcs {
+ u32 (*hz_to_pwm)(struct intel_connector *connector, u32 hz);
+ };
+
++enum drrs_type {
++ DRRS_TYPE_NONE,
++ DRRS_TYPE_STATIC,
++ DRRS_TYPE_SEAMLESS,
++};
++
++struct intel_vbt_panel_data {
++ struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
++ struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
++
++ /* Feature bits */
++ unsigned int panel_type:4;
++ unsigned int lvds_dither:1;
++ unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
++
++ u8 seamless_drrs_min_refresh_rate;
++ enum drrs_type drrs_type;
++
++ struct {
++ int rate;
++ int lanes;
++ int preemphasis;
++ int vswing;
++ int bpp;
++ struct edp_power_seq pps;
++ u8 drrs_msa_timing_delay;
++ bool low_vswing;
++ bool initialized;
++ bool hobl;
++ } edp;
++
++ struct {
++ bool enable;
++ bool full_link;
++ bool require_aux_wakeup;
++ int idle_frames;
++ int tp1_wakeup_time_us;
++ int tp2_tp3_wakeup_time_us;
++ int psr2_tp2_tp3_wakeup_time_us;
++ } psr;
++
++ struct {
++ u16 pwm_freq_hz;
++ u16 brightness_precision_bits;
++ bool present;
++ bool active_low_pwm;
++ u8 min_brightness; /* min_brightness/255 of max */
++ u8 controller; /* brightness controller number */
++ enum intel_backlight_type type;
++ } backlight;
++
++ /* MIPI DSI */
++ struct {
++ u16 panel_id;
++ struct mipi_config *config;
++ struct mipi_pps_data *pps;
++ u16 bl_ports;
++ u16 cabc_ports;
++ u8 seq_version;
++ u32 size;
++ u8 *data;
++ const u8 *sequence[MIPI_SEQ_MAX];
++ u8 *deassert_seq; /* Used by fixup_mipi_sequences() */
++ enum drm_panel_orientation orientation;
++ } dsi;
++};
++
+ struct intel_panel {
+ struct list_head fixed_modes;
+
+@@ -318,6 +385,8 @@ struct intel_panel {
+ const struct intel_panel_bl_funcs *pwm_funcs;
+ void (*power)(struct intel_connector *, bool enable);
+ } backlight;
++
++ struct intel_vbt_panel_data vbt;
+ };
+
+ struct intel_digital_port;
+diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
+index fe8b6b72970a2..0efec6023fbe8 100644
+--- a/drivers/gpu/drm/i915/display/intel_dp.c
++++ b/drivers/gpu/drm/i915/display/intel_dp.c
+@@ -1246,11 +1246,12 @@ static int intel_dp_max_bpp(struct intel_dp *intel_dp,
+ if (intel_dp_is_edp(intel_dp)) {
+ /* Get bpp from vbt only for panels that dont have bpp in edid */
+ if (intel_connector->base.display_info.bpc == 0 &&
+- dev_priv->vbt.edp.bpp && dev_priv->vbt.edp.bpp < bpp) {
++ intel_connector->panel.vbt.edp.bpp &&
++ intel_connector->panel.vbt.edp.bpp < bpp) {
+ drm_dbg_kms(&dev_priv->drm,
+ "clamping bpp for eDP panel to BIOS-provided %i\n",
+- dev_priv->vbt.edp.bpp);
+- bpp = dev_priv->vbt.edp.bpp;
++ intel_connector->panel.vbt.edp.bpp);
++ bpp = intel_connector->panel.vbt.edp.bpp;
+ }
+ }
+
+@@ -1907,7 +1908,7 @@ intel_dp_drrs_compute_config(struct intel_connector *connector,
+ }
+
+ if (IS_IRONLAKE(i915) || IS_SANDYBRIDGE(i915) || IS_IVYBRIDGE(i915))
+- pipe_config->msa_timing_delay = i915->vbt.edp.drrs_msa_timing_delay;
++ pipe_config->msa_timing_delay = connector->panel.vbt.edp.drrs_msa_timing_delay;
+
+ pipe_config->has_drrs = true;
+
+@@ -2737,6 +2738,33 @@ static void intel_edp_mso_mode_fixup(struct intel_connector *connector,
+ DRM_MODE_ARG(mode));
+ }
+
++void intel_edp_fixup_vbt_bpp(struct intel_encoder *encoder, int pipe_bpp)
++{
++ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
++ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
++ struct intel_connector *connector = intel_dp->attached_connector;
++
++ if (connector->panel.vbt.edp.bpp && pipe_bpp > connector->panel.vbt.edp.bpp) {
++ /*
++ * This is a big fat ugly hack.
++ *
++ * Some machines in UEFI boot mode provide us a VBT that has 18
++ * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
++ * unknown we fail to light up. Yet the same BIOS boots up with
++ * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
++ * max, not what it tells us to use.
++ *
++ * Note: This will still be broken if the eDP panel is not lit
++ * up by the BIOS, and thus we can't get the mode at module
++ * load.
++ */
++ drm_dbg_kms(&dev_priv->drm,
++ "pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
++ pipe_bpp, connector->panel.vbt.edp.bpp);
++ connector->panel.vbt.edp.bpp = pipe_bpp;
++ }
++}
++
+ static void intel_edp_mso_init(struct intel_dp *intel_dp)
+ {
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+@@ -5212,8 +5240,10 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
+ }
+ intel_connector->edid = edid;
+
++ intel_bios_init_panel(dev_priv, &intel_connector->panel);
++
+ intel_panel_add_edid_fixed_modes(intel_connector,
+- dev_priv->vbt.drrs_type != DRRS_TYPE_NONE);
++ intel_connector->panel.vbt.drrs_type != DRRS_TYPE_NONE);
+
+ /* MSO requires information from the EDID */
+ intel_edp_mso_init(intel_dp);
+diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h
+index d457e17bdc57e..a54902c713a34 100644
+--- a/drivers/gpu/drm/i915/display/intel_dp.h
++++ b/drivers/gpu/drm/i915/display/intel_dp.h
+@@ -29,6 +29,7 @@ struct link_config_limits {
+ int min_bpp, max_bpp;
+ };
+
++void intel_edp_fixup_vbt_bpp(struct intel_encoder *encoder, int pipe_bpp);
+ void intel_dp_adjust_compliance_config(struct intel_dp *intel_dp,
+ struct intel_crtc_state *pipe_config,
+ struct link_config_limits *limits);
+diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
+index fb6cf30ee6281..c92d5bb2326a3 100644
+--- a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
++++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
+@@ -370,7 +370,7 @@ static int intel_dp_aux_vesa_setup_backlight(struct intel_connector *connector,
+ int ret;
+
+ ret = drm_edp_backlight_init(&intel_dp->aux, &panel->backlight.edp.vesa.info,
+- i915->vbt.backlight.pwm_freq_hz, intel_dp->edp_dpcd,
++ panel->vbt.backlight.pwm_freq_hz, intel_dp->edp_dpcd,
+ &current_level, &current_mode);
+ if (ret < 0)
+ return ret;
+@@ -454,7 +454,7 @@ int intel_dp_aux_init_backlight_funcs(struct intel_connector *connector)
+ case INTEL_DP_AUX_BACKLIGHT_OFF:
+ return -ENODEV;
+ case INTEL_DP_AUX_BACKLIGHT_AUTO:
+- switch (i915->vbt.backlight.type) {
++ switch (panel->vbt.backlight.type) {
+ case INTEL_BACKLIGHT_VESA_EDP_AUX_INTERFACE:
+ try_vesa_interface = true;
+ break;
+@@ -466,7 +466,7 @@ int intel_dp_aux_init_backlight_funcs(struct intel_connector *connector)
+ }
+ break;
+ case INTEL_DP_AUX_BACKLIGHT_ON:
+- if (i915->vbt.backlight.type != INTEL_BACKLIGHT_VESA_EDP_AUX_INTERFACE)
++ if (panel->vbt.backlight.type != INTEL_BACKLIGHT_VESA_EDP_AUX_INTERFACE)
+ try_intel_interface = true;
+
+ try_vesa_interface = true;
+diff --git a/drivers/gpu/drm/i915/display/intel_drrs.c b/drivers/gpu/drm/i915/display/intel_drrs.c
+index 166caf293f7bc..7da4a9cbe4ba4 100644
+--- a/drivers/gpu/drm/i915/display/intel_drrs.c
++++ b/drivers/gpu/drm/i915/display/intel_drrs.c
+@@ -217,9 +217,6 @@ static void intel_drrs_frontbuffer_update(struct drm_i915_private *dev_priv,
+ {
+ struct intel_crtc *crtc;
+
+- if (dev_priv->vbt.drrs_type != DRRS_TYPE_SEAMLESS)
+- return;
+-
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ unsigned int frontbuffer_bits;
+
+diff --git a/drivers/gpu/drm/i915/display/intel_dsi.c b/drivers/gpu/drm/i915/display/intel_dsi.c
+index 389a8c24cdc1e..35e121cd226c5 100644
+--- a/drivers/gpu/drm/i915/display/intel_dsi.c
++++ b/drivers/gpu/drm/i915/display/intel_dsi.c
+@@ -102,7 +102,7 @@ intel_dsi_get_panel_orientation(struct intel_connector *connector)
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ enum drm_panel_orientation orientation;
+
+- orientation = dev_priv->vbt.dsi.orientation;
++ orientation = connector->panel.vbt.dsi.orientation;
+ if (orientation != DRM_MODE_PANEL_ORIENTATION_UNKNOWN)
+ return orientation;
+
+diff --git a/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
+index 7d234429e71ef..1bc7118c56a2a 100644
+--- a/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
++++ b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
+@@ -160,12 +160,10 @@ static void dcs_enable_backlight(const struct intel_crtc_state *crtc_state,
+ static int dcs_setup_backlight(struct intel_connector *connector,
+ enum pipe unused)
+ {
+- struct drm_device *dev = connector->base.dev;
+- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_panel *panel = &connector->panel;
+
+- if (dev_priv->vbt.backlight.brightness_precision_bits > 8)
+- panel->backlight.max = (1 << dev_priv->vbt.backlight.brightness_precision_bits) - 1;
++ if (panel->vbt.backlight.brightness_precision_bits > 8)
++ panel->backlight.max = (1 << panel->vbt.backlight.brightness_precision_bits) - 1;
+ else
+ panel->backlight.max = PANEL_PWM_MAX_VALUE;
+
+@@ -185,11 +183,10 @@ static const struct intel_panel_bl_funcs dcs_bl_funcs = {
+ int intel_dsi_dcs_init_backlight_funcs(struct intel_connector *intel_connector)
+ {
+ struct drm_device *dev = intel_connector->base.dev;
+- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_encoder *encoder = intel_attached_encoder(intel_connector);
+ struct intel_panel *panel = &intel_connector->panel;
+
+- if (dev_priv->vbt.backlight.type != INTEL_BACKLIGHT_DSI_DCS)
++ if (panel->vbt.backlight.type != INTEL_BACKLIGHT_DSI_DCS)
+ return -ENODEV;
+
+ if (drm_WARN_ON(dev, encoder->type != INTEL_OUTPUT_DSI))
+diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
+index dd24aef925f2e..75e8cc4337c93 100644
+--- a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
++++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
+@@ -240,9 +240,10 @@ static const u8 *mipi_exec_delay(struct intel_dsi *intel_dsi, const u8 *data)
+ return data;
+ }
+
+-static void vlv_exec_gpio(struct drm_i915_private *dev_priv,
++static void vlv_exec_gpio(struct intel_connector *connector,
+ u8 gpio_source, u8 gpio_index, bool value)
+ {
++ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct gpio_map *map;
+ u16 pconf0, padval;
+ u32 tmp;
+@@ -256,7 +257,7 @@ static void vlv_exec_gpio(struct drm_i915_private *dev_priv,
+
+ map = &vlv_gpio_table[gpio_index];
+
+- if (dev_priv->vbt.dsi.seq_version >= 3) {
++ if (connector->panel.vbt.dsi.seq_version >= 3) {
+ /* XXX: this assumes vlv_gpio_table only has NC GPIOs. */
+ port = IOSF_PORT_GPIO_NC;
+ } else {
+@@ -287,14 +288,15 @@ static void vlv_exec_gpio(struct drm_i915_private *dev_priv,
+ vlv_iosf_sb_put(dev_priv, BIT(VLV_IOSF_SB_GPIO));
+ }
+
+-static void chv_exec_gpio(struct drm_i915_private *dev_priv,
++static void chv_exec_gpio(struct intel_connector *connector,
+ u8 gpio_source, u8 gpio_index, bool value)
+ {
++ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ u16 cfg0, cfg1;
+ u16 family_num;
+ u8 port;
+
+- if (dev_priv->vbt.dsi.seq_version >= 3) {
++ if (connector->panel.vbt.dsi.seq_version >= 3) {
+ if (gpio_index >= CHV_GPIO_IDX_START_SE) {
+ /* XXX: it's unclear whether 255->57 is part of SE. */
+ gpio_index -= CHV_GPIO_IDX_START_SE;
+@@ -340,9 +342,10 @@ static void chv_exec_gpio(struct drm_i915_private *dev_priv,
+ vlv_iosf_sb_put(dev_priv, BIT(VLV_IOSF_SB_GPIO));
+ }
+
+-static void bxt_exec_gpio(struct drm_i915_private *dev_priv,
++static void bxt_exec_gpio(struct intel_connector *connector,
+ u8 gpio_source, u8 gpio_index, bool value)
+ {
++ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ /* XXX: this table is a quick ugly hack. */
+ static struct gpio_desc *bxt_gpio_table[U8_MAX + 1];
+ struct gpio_desc *gpio_desc = bxt_gpio_table[gpio_index];
+@@ -366,9 +369,11 @@ static void bxt_exec_gpio(struct drm_i915_private *dev_priv,
+ gpiod_set_value(gpio_desc, value);
+ }
+
+-static void icl_exec_gpio(struct drm_i915_private *dev_priv,
++static void icl_exec_gpio(struct intel_connector *connector,
+ u8 gpio_source, u8 gpio_index, bool value)
+ {
++ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
++
+ drm_dbg_kms(&dev_priv->drm, "Skipping ICL GPIO element execution\n");
+ }
+
+@@ -376,18 +381,19 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
+ {
+ struct drm_device *dev = intel_dsi->base.base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
++ struct intel_connector *connector = intel_dsi->attached_connector;
+ u8 gpio_source, gpio_index = 0, gpio_number;
+ bool value;
+
+ drm_dbg_kms(&dev_priv->drm, "\n");
+
+- if (dev_priv->vbt.dsi.seq_version >= 3)
++ if (connector->panel.vbt.dsi.seq_version >= 3)
+ gpio_index = *data++;
+
+ gpio_number = *data++;
+
+ /* gpio source in sequence v2 only */
+- if (dev_priv->vbt.dsi.seq_version == 2)
++ if (connector->panel.vbt.dsi.seq_version == 2)
+ gpio_source = (*data >> 1) & 3;
+ else
+ gpio_source = 0;
+@@ -396,13 +402,13 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
+ value = *data++ & 1;
+
+ if (DISPLAY_VER(dev_priv) >= 11)
+- icl_exec_gpio(dev_priv, gpio_source, gpio_index, value);
++ icl_exec_gpio(connector, gpio_source, gpio_index, value);
+ else if (IS_VALLEYVIEW(dev_priv))
+- vlv_exec_gpio(dev_priv, gpio_source, gpio_number, value);
++ vlv_exec_gpio(connector, gpio_source, gpio_number, value);
+ else if (IS_CHERRYVIEW(dev_priv))
+- chv_exec_gpio(dev_priv, gpio_source, gpio_number, value);
++ chv_exec_gpio(connector, gpio_source, gpio_number, value);
+ else
+- bxt_exec_gpio(dev_priv, gpio_source, gpio_index, value);
++ bxt_exec_gpio(connector, gpio_source, gpio_index, value);
+
+ return data;
+ }
+@@ -585,14 +591,15 @@ static void intel_dsi_vbt_exec(struct intel_dsi *intel_dsi,
+ enum mipi_seq seq_id)
+ {
+ struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev);
++ struct intel_connector *connector = intel_dsi->attached_connector;
+ const u8 *data;
+ fn_mipi_elem_exec mipi_elem_exec;
+
+ if (drm_WARN_ON(&dev_priv->drm,
+- seq_id >= ARRAY_SIZE(dev_priv->vbt.dsi.sequence)))
++ seq_id >= ARRAY_SIZE(connector->panel.vbt.dsi.sequence)))
+ return;
+
+- data = dev_priv->vbt.dsi.sequence[seq_id];
++ data = connector->panel.vbt.dsi.sequence[seq_id];
+ if (!data)
+ return;
+
+@@ -605,7 +612,7 @@ static void intel_dsi_vbt_exec(struct intel_dsi *intel_dsi,
+ data++;
+
+ /* Skip Size of Sequence. */
+- if (dev_priv->vbt.dsi.seq_version >= 3)
++ if (connector->panel.vbt.dsi.seq_version >= 3)
+ data += 4;
+
+ while (1) {
+@@ -621,7 +628,7 @@ static void intel_dsi_vbt_exec(struct intel_dsi *intel_dsi,
+ mipi_elem_exec = NULL;
+
+ /* Size of Operation. */
+- if (dev_priv->vbt.dsi.seq_version >= 3)
++ if (connector->panel.vbt.dsi.seq_version >= 3)
+ operation_size = *data++;
+
+ if (mipi_elem_exec) {
+@@ -669,10 +676,10 @@ void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi,
+
+ void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec)
+ {
+- struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev);
++ struct intel_connector *connector = intel_dsi->attached_connector;
+
+ /* For v3 VBTs in vid-mode the delays are part of the VBT sequences */
+- if (is_vid_mode(intel_dsi) && dev_priv->vbt.dsi.seq_version >= 3)
++ if (is_vid_mode(intel_dsi) && connector->panel.vbt.dsi.seq_version >= 3)
+ return;
+
+ msleep(msec);
+@@ -734,9 +741,10 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
+ {
+ struct drm_device *dev = intel_dsi->base.base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+- struct mipi_config *mipi_config = dev_priv->vbt.dsi.config;
+- struct mipi_pps_data *pps = dev_priv->vbt.dsi.pps;
+- struct drm_display_mode *mode = dev_priv->vbt.lfp_lvds_vbt_mode;
++ struct intel_connector *connector = intel_dsi->attached_connector;
++ struct mipi_config *mipi_config = connector->panel.vbt.dsi.config;
++ struct mipi_pps_data *pps = connector->panel.vbt.dsi.pps;
++ struct drm_display_mode *mode = connector->panel.vbt.lfp_lvds_vbt_mode;
+ u16 burst_mode_ratio;
+ enum port port;
+
+@@ -872,7 +880,8 @@ void intel_dsi_vbt_gpio_init(struct intel_dsi *intel_dsi, bool panel_is_on)
+ {
+ struct drm_device *dev = intel_dsi->base.base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+- struct mipi_config *mipi_config = dev_priv->vbt.dsi.config;
++ struct intel_connector *connector = intel_dsi->attached_connector;
++ struct mipi_config *mipi_config = connector->panel.vbt.dsi.config;
+ enum gpiod_flags flags = panel_is_on ? GPIOD_OUT_HIGH : GPIOD_OUT_LOW;
+ bool want_backlight_gpio = false;
+ bool want_panel_gpio = false;
+@@ -927,7 +936,8 @@ void intel_dsi_vbt_gpio_cleanup(struct intel_dsi *intel_dsi)
+ {
+ struct drm_device *dev = intel_dsi->base.base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+- struct mipi_config *mipi_config = dev_priv->vbt.dsi.config;
++ struct intel_connector *connector = intel_dsi->attached_connector;
++ struct mipi_config *mipi_config = connector->panel.vbt.dsi.config;
+
+ if (intel_dsi->gpio_panel) {
+ gpiod_put(intel_dsi->gpio_panel);
+diff --git a/drivers/gpu/drm/i915/display/intel_lvds.c b/drivers/gpu/drm/i915/display/intel_lvds.c
+index e8478161f8b9b..9f250a70519aa 100644
+--- a/drivers/gpu/drm/i915/display/intel_lvds.c
++++ b/drivers/gpu/drm/i915/display/intel_lvds.c
+@@ -809,7 +809,7 @@ static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder)
+ else
+ val &= ~(LVDS_DETECTED | LVDS_PIPE_SEL_MASK);
+ if (val == 0)
+- val = dev_priv->vbt.bios_lvds_val;
++ val = connector->panel.vbt.bios_lvds_val;
+
+ return (val & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP;
+ }
+@@ -967,9 +967,11 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
+ }
+ intel_connector->edid = edid;
+
++ intel_bios_init_panel(dev_priv, &intel_connector->panel);
++
+ /* Try EDID first */
+ intel_panel_add_edid_fixed_modes(intel_connector,
+- dev_priv->vbt.drrs_type != DRRS_TYPE_NONE);
++ intel_connector->panel.vbt.drrs_type != DRRS_TYPE_NONE);
+
+ /* Failed to get EDID, what about VBT? */
+ if (!intel_panel_preferred_fixed_mode(intel_connector))
+diff --git a/drivers/gpu/drm/i915/display/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c
+index d1d1b59102d69..d055e41185582 100644
+--- a/drivers/gpu/drm/i915/display/intel_panel.c
++++ b/drivers/gpu/drm/i915/display/intel_panel.c
+@@ -75,9 +75,8 @@ const struct drm_display_mode *
+ intel_panel_downclock_mode(struct intel_connector *connector,
+ const struct drm_display_mode *adjusted_mode)
+ {
+- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ const struct drm_display_mode *fixed_mode, *best_mode = NULL;
+- int min_vrefresh = i915->vbt.seamless_drrs_min_refresh_rate;
++ int min_vrefresh = connector->panel.vbt.seamless_drrs_min_refresh_rate;
+ int max_vrefresh = drm_mode_vrefresh(adjusted_mode);
+
+ /* pick the fixed_mode with the lowest refresh rate */
+@@ -113,13 +112,11 @@ int intel_panel_get_modes(struct intel_connector *connector)
+
+ enum drrs_type intel_panel_drrs_type(struct intel_connector *connector)
+ {
+- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+-
+ if (list_empty(&connector->panel.fixed_modes) ||
+ list_is_singular(&connector->panel.fixed_modes))
+ return DRRS_TYPE_NONE;
+
+- return i915->vbt.drrs_type;
++ return connector->panel.vbt.drrs_type;
+ }
+
+ int intel_panel_compute_config(struct intel_connector *connector,
+@@ -260,7 +257,7 @@ void intel_panel_add_vbt_lfp_fixed_mode(struct intel_connector *connector)
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ const struct drm_display_mode *mode;
+
+- mode = i915->vbt.lfp_lvds_vbt_mode;
++ mode = connector->panel.vbt.lfp_lvds_vbt_mode;
+ if (!mode)
+ return;
+
+@@ -274,7 +271,7 @@ void intel_panel_add_vbt_sdvo_fixed_mode(struct intel_connector *connector)
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ const struct drm_display_mode *mode;
+
+- mode = i915->vbt.sdvo_lvds_vbt_mode;
++ mode = connector->panel.vbt.sdvo_lvds_vbt_mode;
+ if (!mode)
+ return;
+
+@@ -639,6 +636,8 @@ void intel_panel_fini(struct intel_connector *connector)
+
+ intel_backlight_destroy(panel);
+
++ intel_bios_fini_panel(panel);
++
+ list_for_each_entry_safe(fixed_mode, next, &panel->fixed_modes, head) {
+ list_del(&fixed_mode->head);
+ drm_mode_destroy(connector->base.dev, fixed_mode);
+diff --git a/drivers/gpu/drm/i915/display/intel_pps.c b/drivers/gpu/drm/i915/display/intel_pps.c
+index 5a598dd060391..a226e4e5c5698 100644
+--- a/drivers/gpu/drm/i915/display/intel_pps.c
++++ b/drivers/gpu/drm/i915/display/intel_pps.c
+@@ -209,7 +209,8 @@ static int
+ bxt_power_sequencer_idx(struct intel_dp *intel_dp)
+ {
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+- int backlight_controller = dev_priv->vbt.backlight.controller;
++ struct intel_connector *connector = intel_dp->attached_connector;
++ int backlight_controller = connector->panel.vbt.backlight.controller;
+
+ lockdep_assert_held(&dev_priv->pps_mutex);
+
+@@ -1159,53 +1160,84 @@ intel_pps_verify_state(struct intel_dp *intel_dp)
+ }
+ }
+
+-static void pps_init_delays(struct intel_dp *intel_dp)
++static void pps_init_delays_cur(struct intel_dp *intel_dp,
++ struct edp_power_seq *cur)
+ {
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+- struct edp_power_seq cur, vbt, spec,
+- *final = &intel_dp->pps.pps_delays;
+
+ lockdep_assert_held(&dev_priv->pps_mutex);
+
+- /* already initialized? */
+- if (final->t11_t12 != 0)
+- return;
++ intel_pps_readout_hw_state(intel_dp, cur);
++
++ intel_pps_dump_state(intel_dp, "cur", cur);
++}
+
+- intel_pps_readout_hw_state(intel_dp, &cur);
++static void pps_init_delays_vbt(struct intel_dp *intel_dp,
++ struct edp_power_seq *vbt)
++{
++ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
++ struct intel_connector *connector = intel_dp->attached_connector;
+
+- intel_pps_dump_state(intel_dp, "cur", &cur);
++ *vbt = connector->panel.vbt.edp.pps;
+
+- vbt = dev_priv->vbt.edp.pps;
+ /* On Toshiba Satellite P50-C-18C system the VBT T12 delay
+ * of 500ms appears to be too short. Ocassionally the panel
+ * just fails to power back on. Increasing the delay to 800ms
+ * seems sufficient to avoid this problem.
+ */
+ if (dev_priv->quirks & QUIRK_INCREASE_T12_DELAY) {
+- vbt.t11_t12 = max_t(u16, vbt.t11_t12, 1300 * 10);
++ vbt->t11_t12 = max_t(u16, vbt->t11_t12, 1300 * 10);
+ drm_dbg_kms(&dev_priv->drm,
+ "Increasing T12 panel delay as per the quirk to %d\n",
+- vbt.t11_t12);
++ vbt->t11_t12);
+ }
++
+ /* T11_T12 delay is special and actually in units of 100ms, but zero
+ * based in the hw (so we need to add 100 ms). But the sw vbt
+ * table multiplies it with 1000 to make it in units of 100usec,
+ * too. */
+- vbt.t11_t12 += 100 * 10;
++ vbt->t11_t12 += 100 * 10;
++
++ intel_pps_dump_state(intel_dp, "vbt", vbt);
++}
++
++static void pps_init_delays_spec(struct intel_dp *intel_dp,
++ struct edp_power_seq *spec)
++{
++ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
++
++ lockdep_assert_held(&dev_priv->pps_mutex);
+
+ /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
+ * our hw here, which are all in 100usec. */
+- spec.t1_t3 = 210 * 10;
+- spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
+- spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
+- spec.t10 = 500 * 10;
++ spec->t1_t3 = 210 * 10;
++ spec->t8 = 50 * 10; /* no limit for t8, use t7 instead */
++ spec->t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
++ spec->t10 = 500 * 10;
+ /* This one is special and actually in units of 100ms, but zero
+ * based in the hw (so we need to add 100 ms). But the sw vbt
+ * table multiplies it with 1000 to make it in units of 100usec,
+ * too. */
+- spec.t11_t12 = (510 + 100) * 10;
++ spec->t11_t12 = (510 + 100) * 10;
++
++ intel_pps_dump_state(intel_dp, "spec", spec);
++}
++
++static void pps_init_delays(struct intel_dp *intel_dp)
++{
++ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
++ struct edp_power_seq cur, vbt, spec,
++ *final = &intel_dp->pps.pps_delays;
++
++ lockdep_assert_held(&dev_priv->pps_mutex);
++
++ /* already initialized? */
++ if (final->t11_t12 != 0)
++ return;
+
+- intel_pps_dump_state(intel_dp, "vbt", &vbt);
++ pps_init_delays_cur(intel_dp, &cur);
++ pps_init_delays_vbt(intel_dp, &vbt);
++ pps_init_delays_spec(intel_dp, &spec);
+
+ /* Use the max of the register settings and vbt. If both are
+ * unset, fall back to the spec limits. */
+diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
+index 06db407e2749f..8f09203e0cf03 100644
+--- a/drivers/gpu/drm/i915/display/intel_psr.c
++++ b/drivers/gpu/drm/i915/display/intel_psr.c
+@@ -86,10 +86,13 @@
+
+ static bool psr_global_enabled(struct intel_dp *intel_dp)
+ {
++ struct intel_connector *connector = intel_dp->attached_connector;
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
+ switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
+ case I915_PSR_DEBUG_DEFAULT:
++ if (i915->params.enable_psr == -1)
++ return connector->panel.vbt.psr.enable;
+ return i915->params.enable_psr;
+ case I915_PSR_DEBUG_DISABLE:
+ return false;
+@@ -399,6 +402,7 @@ static void intel_psr_enable_sink(struct intel_dp *intel_dp)
+
+ static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
+ {
++ struct intel_connector *connector = intel_dp->attached_connector;
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ u32 val = 0;
+
+@@ -411,20 +415,20 @@ static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
+ goto check_tp3_sel;
+ }
+
+- if (dev_priv->vbt.psr.tp1_wakeup_time_us == 0)
++ if (connector->panel.vbt.psr.tp1_wakeup_time_us == 0)
+ val |= EDP_PSR_TP1_TIME_0us;
+- else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 100)
++ else if (connector->panel.vbt.psr.tp1_wakeup_time_us <= 100)
+ val |= EDP_PSR_TP1_TIME_100us;
+- else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 500)
++ else if (connector->panel.vbt.psr.tp1_wakeup_time_us <= 500)
+ val |= EDP_PSR_TP1_TIME_500us;
+ else
+ val |= EDP_PSR_TP1_TIME_2500us;
+
+- if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us == 0)
++ if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us == 0)
+ val |= EDP_PSR_TP2_TP3_TIME_0us;
+- else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 100)
++ else if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us <= 100)
+ val |= EDP_PSR_TP2_TP3_TIME_100us;
+- else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 500)
++ else if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us <= 500)
+ val |= EDP_PSR_TP2_TP3_TIME_500us;
+ else
+ val |= EDP_PSR_TP2_TP3_TIME_2500us;
+@@ -441,13 +445,14 @@ check_tp3_sel:
+
+ static u8 psr_compute_idle_frames(struct intel_dp *intel_dp)
+ {
++ struct intel_connector *connector = intel_dp->attached_connector;
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ int idle_frames;
+
+ /* Let's use 6 as the minimum to cover all known cases including the
+ * off-by-one issue that HW has in some cases.
+ */
+- idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
++ idle_frames = max(6, connector->panel.vbt.psr.idle_frames);
+ idle_frames = max(idle_frames, intel_dp->psr.sink_sync_latency + 1);
+
+ if (drm_WARN_ON(&dev_priv->drm, idle_frames > 0xf))
+@@ -483,18 +488,19 @@ static void hsw_activate_psr1(struct intel_dp *intel_dp)
+
+ static u32 intel_psr2_get_tp_time(struct intel_dp *intel_dp)
+ {
++ struct intel_connector *connector = intel_dp->attached_connector;
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ u32 val = 0;
+
+ if (dev_priv->params.psr_safest_params)
+ return EDP_PSR2_TP2_TIME_2500us;
+
+- if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 &&
+- dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 50)
++ if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 &&
++ connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 50)
+ val |= EDP_PSR2_TP2_TIME_50us;
+- else if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 100)
++ else if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 100)
+ val |= EDP_PSR2_TP2_TIME_100us;
+- else if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 500)
++ else if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 500)
+ val |= EDP_PSR2_TP2_TIME_500us;
+ else
+ val |= EDP_PSR2_TP2_TIME_2500us;
+@@ -2344,6 +2350,7 @@ unlock:
+ */
+ void intel_psr_init(struct intel_dp *intel_dp)
+ {
++ struct intel_connector *connector = intel_dp->attached_connector;
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+@@ -2367,14 +2374,10 @@ void intel_psr_init(struct intel_dp *intel_dp)
+
+ intel_dp->psr.source_support = true;
+
+- if (dev_priv->params.enable_psr == -1)
+- if (!dev_priv->vbt.psr.enable)
+- dev_priv->params.enable_psr = 0;
+-
+ /* Set link_standby x link_off defaults */
+ if (DISPLAY_VER(dev_priv) < 12)
+ /* For new platforms up to TGL let's respect VBT back again */
+- intel_dp->psr.link_standby = dev_priv->vbt.psr.full_link;
++ intel_dp->psr.link_standby = connector->panel.vbt.psr.full_link;
+
+ INIT_WORK(&intel_dp->psr.work, intel_psr_work);
+ INIT_DELAYED_WORK(&intel_dp->psr.dc3co_work, tgl_dc3co_disable_work);
+diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c
+index d81855d57cdc9..14a64bd61176d 100644
+--- a/drivers/gpu/drm/i915/display/intel_sdvo.c
++++ b/drivers/gpu/drm/i915/display/intel_sdvo.c
+@@ -2869,6 +2869,7 @@ static bool
+ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
+ {
+ struct drm_encoder *encoder = &intel_sdvo->base.base;
++ struct drm_i915_private *i915 = to_i915(encoder->dev);
+ struct drm_connector *connector;
+ struct intel_connector *intel_connector;
+ struct intel_sdvo_connector *intel_sdvo_connector;
+@@ -2900,6 +2901,8 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
+ if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector))
+ goto err;
+
++ intel_bios_init_panel(i915, &intel_connector->panel);
++
+ /*
+ * Fetch modes from VBT. For SDVO prefer the VBT mode since some
+ * SDVO->LVDS transcoders can't cope with the EDID mode.
+diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c
+index 1954f07f0d3ec..02f75e95b2ec1 100644
+--- a/drivers/gpu/drm/i915/display/vlv_dsi.c
++++ b/drivers/gpu/drm/i915/display/vlv_dsi.c
+@@ -782,6 +782,7 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state,
+ {
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
++ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+ enum port port;
+@@ -838,7 +839,7 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state,
+ * the delay in that case. If there is no deassert-seq, then an
+ * unconditional msleep is used to give the panel time to power-on.
+ */
+- if (dev_priv->vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET]) {
++ if (connector->panel.vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET]) {
+ intel_dsi_msleep(intel_dsi, intel_dsi->panel_on_delay);
+ intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DEASSERT_RESET);
+ } else {
+@@ -1690,7 +1691,8 @@ static void vlv_dphy_param_init(struct intel_dsi *intel_dsi)
+ {
+ struct drm_device *dev = intel_dsi->base.base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+- struct mipi_config *mipi_config = dev_priv->vbt.dsi.config;
++ struct intel_connector *connector = intel_dsi->attached_connector;
++ struct mipi_config *mipi_config = connector->panel.vbt.dsi.config;
+ u32 tlpx_ns, extra_byte_count, tlpx_ui;
+ u32 ui_num, ui_den;
+ u32 prepare_cnt, exit_zero_cnt, clk_zero_cnt, trail_cnt;
+@@ -1924,13 +1926,22 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
+
+ intel_dsi->panel_power_off_time = ktime_get_boottime();
+
+- if (dev_priv->vbt.dsi.config->dual_link)
++ intel_bios_init_panel(dev_priv, &intel_connector->panel);
++
++ if (intel_connector->panel.vbt.dsi.config->dual_link)
+ intel_dsi->ports = BIT(PORT_A) | BIT(PORT_C);
+ else
+ intel_dsi->ports = BIT(port);
+
+- intel_dsi->dcs_backlight_ports = dev_priv->vbt.dsi.bl_ports;
+- intel_dsi->dcs_cabc_ports = dev_priv->vbt.dsi.cabc_ports;
++ if (drm_WARN_ON(&dev_priv->drm, intel_connector->panel.vbt.dsi.bl_ports & ~intel_dsi->ports))
++ intel_connector->panel.vbt.dsi.bl_ports &= intel_dsi->ports;
++
++ intel_dsi->dcs_backlight_ports = intel_connector->panel.vbt.dsi.bl_ports;
++
++ if (drm_WARN_ON(&dev_priv->drm, intel_connector->panel.vbt.dsi.cabc_ports & ~intel_dsi->ports))
++ intel_connector->panel.vbt.dsi.cabc_ports &= intel_dsi->ports;
++
++ intel_dsi->dcs_cabc_ports = intel_connector->panel.vbt.dsi.cabc_ports;
+
+ /* Create a DSI host (and a device) for each port. */
+ for_each_dsi_port(port, intel_dsi->ports) {
+diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
+index 321af109d484f..8da42af0256ab 100644
+--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
++++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
+@@ -1269,6 +1269,10 @@ static void i915_gem_context_release_work(struct work_struct *work)
+ trace_i915_context_free(ctx);
+ GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
+
++ spin_lock(&ctx->i915->gem.contexts.lock);
++ list_del(&ctx->link);
++ spin_unlock(&ctx->i915->gem.contexts.lock);
++
+ if (ctx->syncobj)
+ drm_syncobj_put(ctx->syncobj);
+
+@@ -1514,10 +1518,6 @@ static void context_close(struct i915_gem_context *ctx)
+
+ ctx->file_priv = ERR_PTR(-EBADF);
+
+- spin_lock(&ctx->i915->gem.contexts.lock);
+- list_del(&ctx->link);
+- spin_unlock(&ctx->i915->gem.contexts.lock);
+-
+ client = ctx->client;
+ if (client) {
+ spin_lock(&client->ctx_lock);
+diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
+index 5184d70d48382..554d79bc0312d 100644
+--- a/drivers/gpu/drm/i915/i915_drv.h
++++ b/drivers/gpu/drm/i915/i915_drv.h
+@@ -194,12 +194,6 @@ struct drm_i915_display_funcs {
+
+ #define I915_COLOR_UNEVICTABLE (-1) /* a non-vma sharing the address space */
+
+-enum drrs_type {
+- DRRS_TYPE_NONE,
+- DRRS_TYPE_STATIC,
+- DRRS_TYPE_SEAMLESS,
+-};
+-
+ #define QUIRK_LVDS_SSC_DISABLE (1<<1)
+ #define QUIRK_INVERT_BRIGHTNESS (1<<2)
+ #define QUIRK_BACKLIGHT_PRESENT (1<<3)
+@@ -308,76 +302,19 @@ struct intel_vbt_data {
+ /* bdb version */
+ u16 version;
+
+- struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
+- struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
+-
+ /* Feature bits */
+ unsigned int int_tv_support:1;
+- unsigned int lvds_dither:1;
+ unsigned int int_crt_support:1;
+ unsigned int lvds_use_ssc:1;
+ unsigned int int_lvds_support:1;
+ unsigned int display_clock_mode:1;
+ unsigned int fdi_rx_polarity_inverted:1;
+- unsigned int panel_type:4;
+ int lvds_ssc_freq;
+- unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
+ enum drm_panel_orientation orientation;
+
+ bool override_afc_startup;
+ u8 override_afc_startup_val;
+
+- u8 seamless_drrs_min_refresh_rate;
+- enum drrs_type drrs_type;
+-
+- struct {
+- int rate;
+- int lanes;
+- int preemphasis;
+- int vswing;
+- int bpp;
+- struct edp_power_seq pps;
+- u8 drrs_msa_timing_delay;
+- bool low_vswing;
+- bool initialized;
+- bool hobl;
+- } edp;
+-
+- struct {
+- bool enable;
+- bool full_link;
+- bool require_aux_wakeup;
+- int idle_frames;
+- int tp1_wakeup_time_us;
+- int tp2_tp3_wakeup_time_us;
+- int psr2_tp2_tp3_wakeup_time_us;
+- } psr;
+-
+- struct {
+- u16 pwm_freq_hz;
+- u16 brightness_precision_bits;
+- bool present;
+- bool active_low_pwm;
+- u8 min_brightness; /* min_brightness/255 of max */
+- u8 controller; /* brightness controller number */
+- enum intel_backlight_type type;
+- } backlight;
+-
+- /* MIPI DSI */
+- struct {
+- u16 panel_id;
+- struct mipi_config *config;
+- struct mipi_pps_data *pps;
+- u16 bl_ports;
+- u16 cabc_ports;
+- u8 seq_version;
+- u32 size;
+- u8 *data;
+- const u8 *sequence[MIPI_SEQ_MAX];
+- u8 *deassert_seq; /* Used by fixup_mipi_sequences() */
+- enum drm_panel_orientation orientation;
+- } dsi;
+-
+ int crt_ddc_pin;
+
+ struct list_head display_devices;
+diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
+index 702e5b89be226..b605d0ceaefad 100644
+--- a/drivers/gpu/drm/i915/i915_gem.c
++++ b/drivers/gpu/drm/i915/i915_gem.c
+@@ -1191,7 +1191,8 @@ void i915_gem_driver_release(struct drm_i915_private *dev_priv)
+
+ intel_uc_cleanup_firmwares(&to_gt(dev_priv)->uc);
+
+- i915_gem_drain_freed_objects(dev_priv);
++ /* Flush any outstanding work, including i915_gem_context.release_work. */
++ i915_gem_drain_workqueue(dev_priv);
+
+ drm_WARN_ON(&dev_priv->drm, !list_empty(&dev_priv->gem.contexts.list));
+ }
+diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
+index 5d7504a72b11c..e244aa408d9d4 100644
+--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
++++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
+@@ -151,7 +151,7 @@ static void mtk_dither_config(struct device *dev, unsigned int w,
+ {
+ struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev);
+
+- mtk_ddp_write(cmdq_pkt, h << 16 | w, &priv->cmdq_reg, priv->regs, DISP_REG_DITHER_SIZE);
++ mtk_ddp_write(cmdq_pkt, w << 16 | h, &priv->cmdq_reg, priv->regs, DISP_REG_DITHER_SIZE);
+ mtk_ddp_write(cmdq_pkt, DITHER_RELAY_MODE, &priv->cmdq_reg, priv->regs,
+ DISP_REG_DITHER_CFG);
+ mtk_dither_set_common(priv->regs, &priv->cmdq_reg, bpc, DISP_REG_DITHER_CFG,
+diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
+index af2f123e9a9a9..9a3b86c29b503 100644
+--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
++++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
+@@ -685,6 +685,16 @@ static void mtk_dsi_poweroff(struct mtk_dsi *dsi)
+ if (--dsi->refcount != 0)
+ return;
+
++ /*
++ * mtk_dsi_stop() and mtk_dsi_start() are asymmetric, since
++ * mtk_dsi_stop() should be called after mtk_drm_crtc_atomic_disable(),
++ * which needs irq for vblank, and mtk_dsi_stop() will disable irq.
++ * mtk_dsi_start() needs to be called in mtk_output_dsi_enable(),
++ * after dsi is fully set up.
++ */
++ mtk_dsi_stop(dsi);
++
++ mtk_dsi_switch_to_cmd_mode(dsi, VM_DONE_INT_FLAG, 500);
+ mtk_dsi_reset_engine(dsi);
+ mtk_dsi_lane0_ulp_mode_enter(dsi);
+ mtk_dsi_clk_ulp_mode_enter(dsi);
+@@ -735,17 +745,6 @@ static void mtk_output_dsi_disable(struct mtk_dsi *dsi)
+ if (!dsi->enabled)
+ return;
+
+- /*
+- * mtk_dsi_stop() and mtk_dsi_start() is asymmetric, since
+- * mtk_dsi_stop() should be called after mtk_drm_crtc_atomic_disable(),
+- * which needs irq for vblank, and mtk_dsi_stop() will disable irq.
+- * mtk_dsi_start() needs to be called in mtk_output_dsi_enable(),
+- * after dsi is fully set.
+- */
+- mtk_dsi_stop(dsi);
+-
+- mtk_dsi_switch_to_cmd_mode(dsi, VM_DONE_INT_FLAG, 500);
+-
+ dsi->enabled = false;
+ }
+
+@@ -808,10 +807,13 @@ static void mtk_dsi_bridge_atomic_post_disable(struct drm_bridge *bridge,
+
+ static const struct drm_bridge_funcs mtk_dsi_bridge_funcs = {
+ .attach = mtk_dsi_bridge_attach,
++ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_disable = mtk_dsi_bridge_atomic_disable,
++ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_enable = mtk_dsi_bridge_atomic_enable,
+ .atomic_pre_enable = mtk_dsi_bridge_atomic_pre_enable,
+ .atomic_post_disable = mtk_dsi_bridge_atomic_post_disable,
++ .atomic_reset = drm_atomic_helper_bridge_reset,
+ .mode_set = mtk_dsi_bridge_mode_set,
+ };
+
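
The two mtk_dsi hunks move mtk_dsi_stop() and mtk_dsi_switch_to_cmd_mode() out of the output-disable path and into the refcounted mtk_dsi_poweroff(), so the vblank interrupt survives until the last user drops its reference. A user-space sketch of that refcount-gated teardown; the dsi_* helpers are hypothetical stand-ins for the driver's functions:

#include <stdio.h>

/* Hypothetical stand-ins for the controller operations; in the driver
 * these are mtk_dsi_stop(), mtk_dsi_reset_engine(), etc. */
static void dsi_stop(void)         { puts("stop (irq now disabled)"); }
static void dsi_reset_engine(void) { puts("reset engine"); }

struct dsi { int refcount; };

/* Teardown runs only when the last reference drops, mirroring how the
 * patch defers mtk_dsi_stop() until mtk_dsi_poweroff(): the CRTC's
 * atomic_disable still has vblank interrupts while it runs. */
static void dsi_poweroff(struct dsi *dsi)
{
	if (--dsi->refcount != 0)
		return;
	dsi_stop();	/* safe: nobody needs the vblank irq anymore */
	dsi_reset_engine();
}

int main(void)
{
	struct dsi dsi = { .refcount = 2 };

	dsi_poweroff(&dsi);	/* CRTC still holds a reference: no-op */
	dsi_poweroff(&dsi);	/* last user gone: full teardown */
	return 0;
}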
+diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
+index 4a2e580a2f7b7..0e001ce8a40fd 100644
+--- a/drivers/gpu/drm/panel/panel-simple.c
++++ b/drivers/gpu/drm/panel/panel-simple.c
+@@ -2136,7 +2136,7 @@ static const struct panel_desc innolux_g121i1_l01 = {
+ .enable = 200,
+ .disable = 20,
+ },
+- .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
++ .bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
+ };
+
+diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c
+index c204e9b95c1f7..518ee13b1d6f4 100644
+--- a/drivers/gpu/drm/rockchip/cdn-dp-core.c
++++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c
+@@ -283,8 +283,9 @@ static int cdn_dp_connector_get_modes(struct drm_connector *connector)
+ return ret;
+ }
+
+-static int cdn_dp_connector_mode_valid(struct drm_connector *connector,
+- struct drm_display_mode *mode)
++static enum drm_mode_status
++cdn_dp_connector_mode_valid(struct drm_connector *connector,
++ struct drm_display_mode *mode)
+ {
+ struct cdn_dp_device *dp = connector_to_dp(connector);
+ struct drm_display_info *display_info = &dp->connector.display_info;
+diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
+index 547ae334e5cd8..027029efb0088 100644
+--- a/drivers/hv/vmbus_drv.c
++++ b/drivers/hv/vmbus_drv.c
+@@ -2309,7 +2309,7 @@ int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
+ bool fb_overlap_ok)
+ {
+ struct resource *iter, *shadow;
+- resource_size_t range_min, range_max, start;
++ resource_size_t range_min, range_max, start, end;
+ const char *dev_n = dev_name(&device_obj->device);
+ int retval;
+
+@@ -2344,6 +2344,14 @@ int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
+ range_max = iter->end;
+ start = (range_min + align - 1) & ~(align - 1);
+ for (; start + size - 1 <= range_max; start += align) {
++ end = start + size - 1;
++
++ /* Skip the whole fb_mmio region if not fb_overlap_ok */
++ if (!fb_overlap_ok && fb_mmio &&
++ (((start >= fb_mmio->start) && (start <= fb_mmio->end)) ||
++ ((end >= fb_mmio->start) && (end <= fb_mmio->end))))
++ continue;
++
+ shadow = __request_region(iter, start, size, NULL,
+ IORESOURCE_BUSY);
+ if (!shadow)
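
The vmbus hunk rejects any candidate window whose start or end lands inside fb_mmio when fb_overlap_ok is not set. A standalone sketch of the same two-endpoint test applied over an aligned allocation scan (region values made up for illustration):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct range { uint64_t start, end; };	/* end is inclusive */

/* Same test as the patch: reject a candidate if either endpoint falls
 * inside the reserved framebuffer window. */
static bool touches(const struct range *r, uint64_t start, uint64_t end)
{
	return (start >= r->start && start <= r->end) ||
	       (end >= r->start && end <= r->end);
}

int main(void)
{
	struct range fb = { 0x1000, 0x1fff };	/* hypothetical fb_mmio */
	uint64_t size = 0x800, align = 0x800;

	for (uint64_t start = 0; start + size - 1 <= 0x3fff; start += align) {
		uint64_t end = start + size - 1;

		printf("%s [%#6llx, %#6llx]\n",
		       touches(&fb, start, end) ? "skip" : "try ",
		       (unsigned long long)start, (unsigned long long)end);
	}
	return 0;
}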
+diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
+index e47fa34656717..3082183bd66a4 100644
+--- a/drivers/i2c/busses/i2c-imx.c
++++ b/drivers/i2c/busses/i2c-imx.c
+@@ -1583,7 +1583,7 @@ static int i2c_imx_remove(struct platform_device *pdev)
+ if (i2c_imx->dma)
+ i2c_imx_dma_free(i2c_imx);
+
+- if (ret == 0) {
++ if (ret >= 0) {
+ /* setup chip registers to defaults */
+ imx_i2c_write_reg(0, i2c_imx, IMX_I2C_IADR);
+ imx_i2c_write_reg(0, i2c_imx, IMX_I2C_IFDR);
+diff --git a/drivers/i2c/busses/i2c-mlxbf.c b/drivers/i2c/busses/i2c-mlxbf.c
+index 8716032f030a0..ad5efd7497d1c 100644
+--- a/drivers/i2c/busses/i2c-mlxbf.c
++++ b/drivers/i2c/busses/i2c-mlxbf.c
+@@ -6,6 +6,7 @@
+ */
+
+ #include <linux/acpi.h>
++#include <linux/bitfield.h>
+ #include <linux/delay.h>
+ #include <linux/err.h>
+ #include <linux/interrupt.h>
+@@ -63,13 +64,14 @@
+ */
+ #define MLXBF_I2C_TYU_PLL_OUT_FREQ (400 * 1000 * 1000)
+ /* Reference clock for Bluefield - 156 MHz. */
+-#define MLXBF_I2C_PLL_IN_FREQ (156 * 1000 * 1000)
++#define MLXBF_I2C_PLL_IN_FREQ 156250000ULL
+
+ /* Constant used to determine the PLL frequency. */
+-#define MLNXBF_I2C_COREPLL_CONST 16384
++#define MLNXBF_I2C_COREPLL_CONST 16384ULL
++
++#define MLXBF_I2C_FREQUENCY_1GHZ 1000000000ULL
+
+ /* PLL registers. */
+-#define MLXBF_I2C_CORE_PLL_REG0 0x0
+ #define MLXBF_I2C_CORE_PLL_REG1 0x4
+ #define MLXBF_I2C_CORE_PLL_REG2 0x8
+
+@@ -181,22 +183,15 @@
+ #define MLXBF_I2C_COREPLL_FREQ MLXBF_I2C_TYU_PLL_OUT_FREQ
+
+ /* Core PLL TYU configuration. */
+-#define MLXBF_I2C_COREPLL_CORE_F_TYU_MASK GENMASK(12, 0)
+-#define MLXBF_I2C_COREPLL_CORE_OD_TYU_MASK GENMASK(3, 0)
+-#define MLXBF_I2C_COREPLL_CORE_R_TYU_MASK GENMASK(5, 0)
+-
+-#define MLXBF_I2C_COREPLL_CORE_F_TYU_SHIFT 3
+-#define MLXBF_I2C_COREPLL_CORE_OD_TYU_SHIFT 16
+-#define MLXBF_I2C_COREPLL_CORE_R_TYU_SHIFT 20
++#define MLXBF_I2C_COREPLL_CORE_F_TYU_MASK GENMASK(15, 3)
++#define MLXBF_I2C_COREPLL_CORE_OD_TYU_MASK GENMASK(19, 16)
++#define MLXBF_I2C_COREPLL_CORE_R_TYU_MASK GENMASK(25, 20)
+
+ /* Core PLL YU configuration. */
+ #define MLXBF_I2C_COREPLL_CORE_F_YU_MASK GENMASK(25, 0)
+ #define MLXBF_I2C_COREPLL_CORE_OD_YU_MASK GENMASK(3, 0)
+-#define MLXBF_I2C_COREPLL_CORE_R_YU_MASK GENMASK(5, 0)
++#define MLXBF_I2C_COREPLL_CORE_R_YU_MASK GENMASK(31, 26)
+
+-#define MLXBF_I2C_COREPLL_CORE_F_YU_SHIFT 0
+-#define MLXBF_I2C_COREPLL_CORE_OD_YU_SHIFT 1
+-#define MLXBF_I2C_COREPLL_CORE_R_YU_SHIFT 26
+
+ /* Core PLL frequency. */
+ static u64 mlxbf_i2c_corepll_frequency;
+@@ -479,8 +474,6 @@ static struct mutex mlxbf_i2c_bus_lock;
+ #define MLXBF_I2C_MASK_8 GENMASK(7, 0)
+ #define MLXBF_I2C_MASK_16 GENMASK(15, 0)
+
+-#define MLXBF_I2C_FREQUENCY_1GHZ 1000000000
+-
+ /*
+ * Function to poll a set of bits at a specific address; it checks whether
+ * the bits are equal to zero when eq_zero is set to 'true', and not equal
+@@ -669,7 +662,7 @@ static int mlxbf_i2c_smbus_enable(struct mlxbf_i2c_priv *priv, u8 slave,
+ /* Clear status bits. */
+ writel(0x0, priv->smbus->io + MLXBF_I2C_SMBUS_MASTER_STATUS);
+ /* Set the cause data. */
+- writel(~0x0, priv->smbus->io + MLXBF_I2C_CAUSE_OR_CLEAR);
++ writel(~0x0, priv->mst_cause->io + MLXBF_I2C_CAUSE_OR_CLEAR);
+ /* Zero PEC byte. */
+ writel(0x0, priv->smbus->io + MLXBF_I2C_SMBUS_MASTER_PEC);
+ /* Zero byte count. */
+@@ -738,6 +731,9 @@ mlxbf_i2c_smbus_start_transaction(struct mlxbf_i2c_priv *priv,
+ if (flags & MLXBF_I2C_F_WRITE) {
+ write_en = 1;
+ write_len += operation->length;
++ if (data_idx + operation->length >
++ MLXBF_I2C_MASTER_DATA_DESC_SIZE)
++ return -ENOBUFS;
+ memcpy(data_desc + data_idx,
+ operation->buffer, operation->length);
+ data_idx += operation->length;
+@@ -1407,24 +1403,19 @@ static int mlxbf_i2c_init_master(struct platform_device *pdev,
+ return 0;
+ }
+
+-static u64 mlxbf_calculate_freq_from_tyu(struct mlxbf_i2c_resource *corepll_res)
++static u64 mlxbf_i2c_calculate_freq_from_tyu(struct mlxbf_i2c_resource *corepll_res)
+ {
+- u64 core_frequency, pad_frequency;
++ u64 core_frequency;
+ u8 core_od, core_r;
+ u32 corepll_val;
+ u16 core_f;
+
+- pad_frequency = MLXBF_I2C_PLL_IN_FREQ;
+-
+ corepll_val = readl(corepll_res->io + MLXBF_I2C_CORE_PLL_REG1);
+
+ /* Get Core PLL configuration bits. */
+- core_f = rol32(corepll_val, MLXBF_I2C_COREPLL_CORE_F_TYU_SHIFT) &
+- MLXBF_I2C_COREPLL_CORE_F_TYU_MASK;
+- core_od = rol32(corepll_val, MLXBF_I2C_COREPLL_CORE_OD_TYU_SHIFT) &
+- MLXBF_I2C_COREPLL_CORE_OD_TYU_MASK;
+- core_r = rol32(corepll_val, MLXBF_I2C_COREPLL_CORE_R_TYU_SHIFT) &
+- MLXBF_I2C_COREPLL_CORE_R_TYU_MASK;
++ core_f = FIELD_GET(MLXBF_I2C_COREPLL_CORE_F_TYU_MASK, corepll_val);
++ core_od = FIELD_GET(MLXBF_I2C_COREPLL_CORE_OD_TYU_MASK, corepll_val);
++ core_r = FIELD_GET(MLXBF_I2C_COREPLL_CORE_R_TYU_MASK, corepll_val);
+
+ /*
+ * Compute PLL output frequency as follows:
+@@ -1436,31 +1427,26 @@ static u64 mlxbf_calculate_freq_from_tyu(struct mlxbf_i2c_resource *corepll_res)
+ * Where PLL_OUT_FREQ and PLL_IN_FREQ refer to CoreFrequency
+ * and PadFrequency, respectively.
+ */
+- core_frequency = pad_frequency * (++core_f);
++ core_frequency = MLXBF_I2C_PLL_IN_FREQ * (++core_f);
+ core_frequency /= (++core_r) * (++core_od);
+
+ return core_frequency;
+ }
+
+-static u64 mlxbf_calculate_freq_from_yu(struct mlxbf_i2c_resource *corepll_res)
++static u64 mlxbf_i2c_calculate_freq_from_yu(struct mlxbf_i2c_resource *corepll_res)
+ {
+ u32 corepll_reg1_val, corepll_reg2_val;
+- u64 corepll_frequency, pad_frequency;
++ u64 corepll_frequency;
+ u8 core_od, core_r;
+ u32 core_f;
+
+- pad_frequency = MLXBF_I2C_PLL_IN_FREQ;
+-
+ corepll_reg1_val = readl(corepll_res->io + MLXBF_I2C_CORE_PLL_REG1);
+ corepll_reg2_val = readl(corepll_res->io + MLXBF_I2C_CORE_PLL_REG2);
+
+ /* Get Core PLL configuration bits */
+- core_f = rol32(corepll_reg1_val, MLXBF_I2C_COREPLL_CORE_F_YU_SHIFT) &
+- MLXBF_I2C_COREPLL_CORE_F_YU_MASK;
+- core_r = rol32(corepll_reg1_val, MLXBF_I2C_COREPLL_CORE_R_YU_SHIFT) &
+- MLXBF_I2C_COREPLL_CORE_R_YU_MASK;
+- core_od = rol32(corepll_reg2_val, MLXBF_I2C_COREPLL_CORE_OD_YU_SHIFT) &
+- MLXBF_I2C_COREPLL_CORE_OD_YU_MASK;
++ core_f = FIELD_GET(MLXBF_I2C_COREPLL_CORE_F_YU_MASK, corepll_reg1_val);
++ core_r = FIELD_GET(MLXBF_I2C_COREPLL_CORE_R_YU_MASK, corepll_reg1_val);
++ core_od = FIELD_GET(MLXBF_I2C_COREPLL_CORE_OD_YU_MASK, corepll_reg2_val);
+
+ /*
+ * Compute PLL output frequency as follows:
+@@ -1472,7 +1458,7 @@ static u64 mlxbf_calculate_freq_from_yu(struct mlxbf_i2c_resource *corepll_res)
+ * Where PLL_OUT_FREQ and PLL_IN_FREQ refer to CoreFrequency
+ * and PadFrequency, respectively.
+ */
+- corepll_frequency = (pad_frequency * core_f) / MLNXBF_I2C_COREPLL_CONST;
++ corepll_frequency = (MLXBF_I2C_PLL_IN_FREQ * core_f) / MLNXBF_I2C_COREPLL_CONST;
+ corepll_frequency /= (++core_r) * (++core_od);
+
+ return corepll_frequency;
+@@ -2180,14 +2166,14 @@ static struct mlxbf_i2c_chip_info mlxbf_i2c_chip[] = {
+ [1] = &mlxbf_i2c_corepll_res[MLXBF_I2C_CHIP_TYPE_1],
+ [2] = &mlxbf_i2c_gpio_res[MLXBF_I2C_CHIP_TYPE_1]
+ },
+- .calculate_freq = mlxbf_calculate_freq_from_tyu
++ .calculate_freq = mlxbf_i2c_calculate_freq_from_tyu
+ },
+ [MLXBF_I2C_CHIP_TYPE_2] = {
+ .type = MLXBF_I2C_CHIP_TYPE_2,
+ .shared_res = {
+ [0] = &mlxbf_i2c_corepll_res[MLXBF_I2C_CHIP_TYPE_2]
+ },
+- .calculate_freq = mlxbf_calculate_freq_from_yu
++ .calculate_freq = mlxbf_i2c_calculate_freq_from_yu
+ }
+ };
+
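
The mlxbf rework replaces open-coded rol32()-plus-mask extraction with correctly positioned GENMASK()s and FIELD_GET(). A standalone rendering of the TYU computation using stand-in helpers; the masks and the 156.25 MHz input come from the patch, while the sample register value is made up so that it decodes to the documented 400 MHz PLL output:

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the kernel's GENMASK()/FIELD_GET() helpers. */
#define GENMASK(h, l)   ((~0u << (l)) & (~0u >> (31 - (h))))
#define FIELD_GET(m, v) (((v) & (m)) >> __builtin_ctz(m))

#define PLL_IN_FREQ  156250000ULL	/* 156.25 MHz pad clock */
#define CORE_F_MASK  GENMASK(15, 3)
#define CORE_OD_MASK GENMASK(19, 16)
#define CORE_R_MASK  GENMASK(25, 20)

int main(void)
{
	uint32_t reg1 = 0x004903f8;	/* made-up sample: f=127, od=9, r=4 */
	uint64_t core_f = FIELD_GET(CORE_F_MASK, reg1);
	uint64_t core_od = FIELD_GET(CORE_OD_MASK, reg1);
	uint64_t core_r = FIELD_GET(CORE_R_MASK, reg1);

	/* PLL_OUT = PLL_IN * (F + 1) / ((R + 1) * (OD + 1)), exactly as
	 * mlxbf_i2c_calculate_freq_from_tyu() computes it. */
	uint64_t out = PLL_IN_FREQ * (core_f + 1) /
		       ((core_r + 1) * (core_od + 1));

	printf("f=%llu od=%llu r=%llu -> %llu Hz\n",
	       (unsigned long long)core_f, (unsigned long long)core_od,
	       (unsigned long long)core_r, (unsigned long long)out);
	return 0;
}

With the sample value this prints 400000000 Hz, matching MLXBF_I2C_TYU_PLL_OUT_FREQ.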
+diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c
+index 774507b54b57b..313904be5f3bd 100644
+--- a/drivers/i2c/i2c-mux.c
++++ b/drivers/i2c/i2c-mux.c
+@@ -243,9 +243,10 @@ struct i2c_mux_core *i2c_mux_alloc(struct i2c_adapter *parent,
+ int (*deselect)(struct i2c_mux_core *, u32))
+ {
+ struct i2c_mux_core *muxc;
++ size_t mux_size;
+
+- muxc = devm_kzalloc(dev, struct_size(muxc, adapter, max_adapters)
+- + sizeof_priv, GFP_KERNEL);
++ mux_size = struct_size(muxc, adapter, max_adapters);
++ muxc = devm_kzalloc(dev, size_add(mux_size, sizeof_priv), GFP_KERNEL);
+ if (!muxc)
+ return NULL;
+ if (sizeof_priv)
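
The i2c-mux change computes struct_size() and size_add() as separate steps so a large sizeof_priv cannot wrap the allocation size. The kernel's size_add() saturates to SIZE_MAX on overflow, which the stand-in below imitates; a saturated size then fails allocation cleanly instead of returning a too-small buffer:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-in for the kernel's size_add(): saturate rather
 * than wrap on overflow. */
static size_t size_add(size_t a, size_t b)
{
	size_t sum;

	if (__builtin_add_overflow(a, b, &sum))
		return SIZE_MAX;
	return sum;
}

struct muxc {
	int nadapters;
	void *adapter[];	/* flexible array, like i2c_mux_core */
};

int main(void)
{
	size_t mux_size = sizeof(struct muxc) + 4 * sizeof(void *);
	size_t huge_priv = SIZE_MAX - 8;	/* hostile sizeof_priv */
	size_t total = size_add(mux_size, huge_priv);

	/* SIZE_MAX can never be allocated, so malloc() refuses instead
	 * of silently truncating the request. */
	printf("total=%zu -> %s\n", total,
	       malloc(total) ? "allocated" : "allocation refused");
	return 0;
}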
+diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
+index 861a239d905a4..3ed15e8ca6775 100644
+--- a/drivers/iommu/intel/iommu.c
++++ b/drivers/iommu/intel/iommu.c
+@@ -419,7 +419,7 @@ static unsigned long __iommu_calculate_sagaw(struct intel_iommu *iommu)
+ {
+ unsigned long fl_sagaw, sl_sagaw;
+
+- fl_sagaw = BIT(2) | (cap_fl1gp_support(iommu->cap) ? BIT(3) : 0);
++ fl_sagaw = BIT(2) | (cap_5lp_support(iommu->cap) ? BIT(3) : 0);
+ sl_sagaw = cap_sagaw(iommu->cap);
+
+ /* Second level only. */
+diff --git a/drivers/media/usb/b2c2/flexcop-usb.c b/drivers/media/usb/b2c2/flexcop-usb.c
+index 7835bb0f32fc3..e012b21c4fd7a 100644
+--- a/drivers/media/usb/b2c2/flexcop-usb.c
++++ b/drivers/media/usb/b2c2/flexcop-usb.c
+@@ -511,7 +511,7 @@ static int flexcop_usb_init(struct flexcop_usb *fc_usb)
+
+ if (fc_usb->uintf->cur_altsetting->desc.bNumEndpoints < 1)
+ return -ENODEV;
+- if (!usb_endpoint_is_isoc_in(&fc_usb->uintf->cur_altsetting->endpoint[1].desc))
++ if (!usb_endpoint_is_isoc_in(&fc_usb->uintf->cur_altsetting->endpoint[0].desc))
+ return -ENODEV;
+
+ switch (fc_usb->udev->speed) {
+diff --git a/drivers/memstick/core/ms_block.c b/drivers/memstick/core/ms_block.c
+index f8fdf88fb240c..ecbc46714e681 100644
+--- a/drivers/memstick/core/ms_block.c
++++ b/drivers/memstick/core/ms_block.c
+@@ -2188,7 +2188,6 @@ static void msb_remove(struct memstick_dev *card)
+
+ /* Remove the disk */
+ del_gendisk(msb->disk);
+- blk_cleanup_queue(msb->queue);
+ blk_mq_free_tag_set(&msb->tag_set);
+ msb->queue = NULL;
+
+diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c
+index 725ba74ded308..72e91c06c618b 100644
+--- a/drivers/memstick/core/mspro_block.c
++++ b/drivers/memstick/core/mspro_block.c
+@@ -1294,7 +1294,6 @@ static void mspro_block_remove(struct memstick_dev *card)
+ del_gendisk(msb->disk);
+ dev_dbg(&card->dev, "mspro block remove\n");
+
+- blk_cleanup_queue(msb->queue);
+ blk_mq_free_tag_set(&msb->tag_set);
+ msb->queue = NULL;
+
+diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
+index 912a398a9a764..2f89ae55c1773 100644
+--- a/drivers/mmc/core/block.c
++++ b/drivers/mmc/core/block.c
+@@ -2509,7 +2509,6 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
+ return md;
+
+ err_cleanup_queue:
+- blk_cleanup_queue(md->disk->queue);
+ blk_mq_free_tag_set(&md->queue.tag_set);
+ err_kfree:
+ kfree(md);
+diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
+index fa5324ceeebe4..f824cfdab75ac 100644
+--- a/drivers/mmc/core/queue.c
++++ b/drivers/mmc/core/queue.c
+@@ -494,7 +494,6 @@ void mmc_cleanup_queue(struct mmc_queue *mq)
+ if (blk_queue_quiesced(q))
+ blk_mq_unquiesce_queue(q);
+
+- blk_cleanup_queue(q);
+ blk_mq_free_tag_set(&mq->tag_set);
+
+ /*
+diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
+index 1f0120cbe9e80..8ad095c19f271 100644
+--- a/drivers/net/bonding/bond_3ad.c
++++ b/drivers/net/bonding/bond_3ad.c
+@@ -87,8 +87,9 @@ static const u8 null_mac_addr[ETH_ALEN + 2] __long_aligned = {
+ static u16 ad_ticks_per_sec;
+ static const int ad_delta_in_ticks = (AD_TIMER_INTERVAL * HZ) / 1000;
+
+-static const u8 lacpdu_mcast_addr[ETH_ALEN + 2] __long_aligned =
+- MULTICAST_LACPDU_ADDR;
++const u8 lacpdu_mcast_addr[ETH_ALEN + 2] __long_aligned = {
++ 0x01, 0x80, 0xC2, 0x00, 0x00, 0x02
++};
+
+ /* ================= main 802.3ad protocol functions ================== */
+ static int ad_lacpdu_send(struct port *port);
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index bff0bfd10e235..ab7cb48f8dfdd 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -865,12 +865,8 @@ static void bond_hw_addr_flush(struct net_device *bond_dev,
+ dev_uc_unsync(slave_dev, bond_dev);
+ dev_mc_unsync(slave_dev, bond_dev);
+
+- if (BOND_MODE(bond) == BOND_MODE_8023AD) {
+- /* del lacpdu mc addr from mc list */
+- u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
+-
+- dev_mc_del(slave_dev, lacpdu_multicast);
+- }
++ if (BOND_MODE(bond) == BOND_MODE_8023AD)
++ dev_mc_del(slave_dev, lacpdu_mcast_addr);
+ }
+
+ /*--------------------------- Active slave change ---------------------------*/
+@@ -890,7 +886,8 @@ static void bond_hw_addr_swap(struct bonding *bond, struct slave *new_active,
+ if (bond->dev->flags & IFF_ALLMULTI)
+ dev_set_allmulti(old_active->dev, -1);
+
+- bond_hw_addr_flush(bond->dev, old_active->dev);
++ if (bond->dev->flags & IFF_UP)
++ bond_hw_addr_flush(bond->dev, old_active->dev);
+ }
+
+ if (new_active) {
+@@ -901,10 +898,12 @@ static void bond_hw_addr_swap(struct bonding *bond, struct slave *new_active,
+ if (bond->dev->flags & IFF_ALLMULTI)
+ dev_set_allmulti(new_active->dev, 1);
+
+- netif_addr_lock_bh(bond->dev);
+- dev_uc_sync(new_active->dev, bond->dev);
+- dev_mc_sync(new_active->dev, bond->dev);
+- netif_addr_unlock_bh(bond->dev);
++ if (bond->dev->flags & IFF_UP) {
++ netif_addr_lock_bh(bond->dev);
++ dev_uc_sync(new_active->dev, bond->dev);
++ dev_mc_sync(new_active->dev, bond->dev);
++ netif_addr_unlock_bh(bond->dev);
++ }
+ }
+ }
+
+@@ -2139,16 +2138,14 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
+ }
+ }
+
+- netif_addr_lock_bh(bond_dev);
+- dev_mc_sync_multiple(slave_dev, bond_dev);
+- dev_uc_sync_multiple(slave_dev, bond_dev);
+- netif_addr_unlock_bh(bond_dev);
+-
+- if (BOND_MODE(bond) == BOND_MODE_8023AD) {
+- /* add lacpdu mc addr to mc list */
+- u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
++ if (bond_dev->flags & IFF_UP) {
++ netif_addr_lock_bh(bond_dev);
++ dev_mc_sync_multiple(slave_dev, bond_dev);
++ dev_uc_sync_multiple(slave_dev, bond_dev);
++ netif_addr_unlock_bh(bond_dev);
+
+- dev_mc_add(slave_dev, lacpdu_multicast);
++ if (BOND_MODE(bond) == BOND_MODE_8023AD)
++ dev_mc_add(slave_dev, lacpdu_mcast_addr);
+ }
+ }
+
+@@ -2420,7 +2417,8 @@ static int __bond_release_one(struct net_device *bond_dev,
+ if (old_flags & IFF_ALLMULTI)
+ dev_set_allmulti(slave_dev, -1);
+
+- bond_hw_addr_flush(bond_dev, slave_dev);
++ if (old_flags & IFF_UP)
++ bond_hw_addr_flush(bond_dev, slave_dev);
+ }
+
+ slave_disable_netpoll(slave);
+@@ -4157,6 +4155,12 @@ static int bond_open(struct net_device *bond_dev)
+ struct list_head *iter;
+ struct slave *slave;
+
++ if (BOND_MODE(bond) == BOND_MODE_ROUNDROBIN && !bond->rr_tx_counter) {
++ bond->rr_tx_counter = alloc_percpu(u32);
++ if (!bond->rr_tx_counter)
++ return -ENOMEM;
++ }
++
+ /* reset slave->backup and slave->inactive */
+ if (bond_has_slaves(bond)) {
+ bond_for_each_slave(bond, slave, iter) {
+@@ -4194,6 +4198,9 @@ static int bond_open(struct net_device *bond_dev)
+ /* register to receive LACPDUs */
+ bond->recv_probe = bond_3ad_lacpdu_recv;
+ bond_3ad_initiate_agg_selection(bond, 1);
++
++ bond_for_each_slave(bond, slave, iter)
++ dev_mc_add(slave->dev, lacpdu_mcast_addr);
+ }
+
+ if (bond_mode_can_use_xmit_hash(bond))
+@@ -4205,6 +4212,7 @@ static int bond_open(struct net_device *bond_dev)
+ static int bond_close(struct net_device *bond_dev)
+ {
+ struct bonding *bond = netdev_priv(bond_dev);
++ struct slave *slave;
+
+ bond_work_cancel_all(bond);
+ bond->send_peer_notif = 0;
+@@ -4212,6 +4220,19 @@ static int bond_close(struct net_device *bond_dev)
+ bond_alb_deinitialize(bond);
+ bond->recv_probe = NULL;
+
++ if (bond_uses_primary(bond)) {
++ rcu_read_lock();
++ slave = rcu_dereference(bond->curr_active_slave);
++ if (slave)
++ bond_hw_addr_flush(bond_dev, slave->dev);
++ rcu_read_unlock();
++ } else {
++ struct list_head *iter;
++
++ bond_for_each_slave(bond, slave, iter)
++ bond_hw_addr_flush(bond_dev, slave->dev);
++ }
++
+ return 0;
+ }
+
+@@ -6195,15 +6216,6 @@ static int bond_init(struct net_device *bond_dev)
+ if (!bond->wq)
+ return -ENOMEM;
+
+- if (BOND_MODE(bond) == BOND_MODE_ROUNDROBIN) {
+- bond->rr_tx_counter = alloc_percpu(u32);
+- if (!bond->rr_tx_counter) {
+- destroy_workqueue(bond->wq);
+- bond->wq = NULL;
+- return -ENOMEM;
+- }
+- }
+-
+ spin_lock_init(&bond->stats_lock);
+ netdev_lockdep_set_classes(bond_dev);
+
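
Moving the rr_tx_counter allocation from bond_init() into bond_open() means the per-CPU counter is only paid for when a round-robin bond is actually brought up, and the !bond->rr_tx_counter guard keeps repeated opens idempotent. A user-space sketch of the lazy, open-time allocation pattern (plain calloc() standing in for alloc_percpu()):

#include <stdio.h>
#include <stdlib.h>

struct bond {
	int mode;		/* 0 = round-robin in this sketch */
	unsigned int *rr_tx_counter;
};

/* Allocate on first open only; later opens reuse the counter, and a
 * bond that is never opened never allocates. */
static int bond_open(struct bond *bond)
{
	if (bond->mode == 0 && !bond->rr_tx_counter) {
		bond->rr_tx_counter = calloc(1, sizeof(*bond->rr_tx_counter));
		if (!bond->rr_tx_counter)
			return -1;	/* -ENOMEM in the driver */
	}
	return 0;
}

int main(void)
{
	struct bond bond = { .mode = 0 };

	bond_open(&bond);
	unsigned int *first = bond.rr_tx_counter;
	bond_open(&bond);	/* second open: no reallocation */
	printf("same counter across opens: %s\n",
	       first == bond.rr_tx_counter ? "yes" : "no");
	free(bond.rr_tx_counter);
	return 0;
}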
+diff --git a/drivers/net/can/flexcan/flexcan-core.c b/drivers/net/can/flexcan/flexcan-core.c
+index d060088047f16..131467d37a45b 100644
+--- a/drivers/net/can/flexcan/flexcan-core.c
++++ b/drivers/net/can/flexcan/flexcan-core.c
+@@ -941,11 +941,6 @@ static struct sk_buff *flexcan_mailbox_read(struct can_rx_offload *offload,
+ u32 reg_ctrl, reg_id, reg_iflag1;
+ int i;
+
+- if (unlikely(drop)) {
+- skb = ERR_PTR(-ENOBUFS);
+- goto mark_as_read;
+- }
+-
+ mb = flexcan_get_mb(priv, n);
+
+ if (priv->devtype_data.quirks & FLEXCAN_QUIRK_USE_RX_MAILBOX) {
+@@ -974,6 +969,11 @@ static struct sk_buff *flexcan_mailbox_read(struct can_rx_offload *offload,
+ reg_ctrl = priv->read(&mb->can_ctrl);
+ }
+
++ if (unlikely(drop)) {
++ skb = ERR_PTR(-ENOBUFS);
++ goto mark_as_read;
++ }
++
+ if (reg_ctrl & FLEXCAN_MB_CNT_EDL)
+ skb = alloc_canfd_skb(offload->dev, &cfd);
+ else
+diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
+index d3a658b444b5f..092cd51b3926e 100644
+--- a/drivers/net/can/usb/gs_usb.c
++++ b/drivers/net/can/usb/gs_usb.c
+@@ -824,6 +824,7 @@ static int gs_can_open(struct net_device *netdev)
+ flags |= GS_CAN_MODE_TRIPLE_SAMPLE;
+
+ /* finally start device */
++ dev->can.state = CAN_STATE_ERROR_ACTIVE;
+ dm->mode = cpu_to_le32(GS_CAN_MODE_START);
+ dm->flags = cpu_to_le32(flags);
+ rc = usb_control_msg(interface_to_usbdev(dev->iface),
+@@ -835,13 +836,12 @@ static int gs_can_open(struct net_device *netdev)
+ if (rc < 0) {
+ netdev_err(netdev, "Couldn't start device (err=%d)\n", rc);
+ kfree(dm);
++ dev->can.state = CAN_STATE_STOPPED;
+ return rc;
+ }
+
+ kfree(dm);
+
+- dev->can.state = CAN_STATE_ERROR_ACTIVE;
+-
+ parent->active_channels++;
+ if (!(dev->can.ctrlmode & CAN_CTRLMODE_LISTENONLY))
+ netif_start_queue(netdev);
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index 964354536f9ce..111a952f880ee 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -662,7 +662,6 @@ static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
+
+ for (i = 0; i < nr_pkts; i++) {
+ struct bnxt_sw_tx_bd *tx_buf;
+- bool compl_deferred = false;
+ struct sk_buff *skb;
+ int j, last;
+
+@@ -671,6 +670,8 @@ static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
+ skb = tx_buf->skb;
+ tx_buf->skb = NULL;
+
++ tx_bytes += skb->len;
++
+ if (tx_buf->is_push) {
+ tx_buf->is_push = 0;
+ goto next_tx_int;
+@@ -691,8 +692,9 @@ static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
+ }
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
+ if (bp->flags & BNXT_FLAG_CHIP_P5) {
++ /* PTP worker takes ownership of the skb */
+ if (!bnxt_get_tx_ts_p5(bp, skb))
+- compl_deferred = true;
++ skb = NULL;
+ else
+ atomic_inc(&bp->ptp_cfg->tx_avail);
+ }
+@@ -701,9 +703,7 @@ static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
+ next_tx_int:
+ cons = NEXT_TX(cons);
+
+- tx_bytes += skb->len;
+- if (!compl_deferred)
+- dev_kfree_skb_any(skb);
++ dev_kfree_skb_any(skb);
+ }
+
+ netdev_tx_completed_queue(txq, nr_pkts, tx_bytes);
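
The bnxt fix accounts tx_bytes before the PTP branch can transfer the skb, then marks the transfer by setting skb to NULL; since dev_kfree_skb_any(NULL) is a no-op, the shared free at the end stays unconditional. A sketch of the transfer-ownership-by-NULLing idiom, using the fact that free(NULL) is likewise a no-op (hand_to_worker() is a hypothetical stand-in for bnxt_get_tx_ts_p5()):

#include <stdio.h>
#include <stdlib.h>

struct pkt { size_t len; char data[64]; };

static struct pkt *deferred;	/* stands in for the PTP worker's slot */

/* Returns 0 when the "worker" takes the packet. */
static int hand_to_worker(struct pkt *p)
{
	deferred = p;
	return 0;
}

int main(void)
{
	struct pkt *p = calloc(1, sizeof(*p));
	size_t tx_bytes = 0;

	p->len = 60;
	tx_bytes += p->len;	/* account *before* ownership may move */

	if (hand_to_worker(p) == 0)
		p = NULL;	/* worker owns it now */

	free(p);	/* no-op when ownership moved; frees otherwise */
	printf("tx_bytes=%zu, deferred=%p\n", tx_bytes, (void *)deferred);
	free(deferred);
	return 0;
}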
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
+index 7f3c0875b6f58..8e316367f6ced 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
+@@ -317,9 +317,9 @@ void bnxt_ptp_cfg_tstamp_filters(struct bnxt *bp)
+
+ if (!(bp->fw_cap & BNXT_FW_CAP_RX_ALL_PKT_TS) && (ptp->tstamp_filters &
+ (PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_ENABLE |
+- PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE))) {
++ PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_DISABLE))) {
+ ptp->tstamp_filters &= ~(PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_ENABLE |
+- PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE);
++ PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_DISABLE);
+ netdev_warn(bp->dev, "Unsupported FW for all RX pkts timestamp filter\n");
+ }
+
+diff --git a/drivers/net/ethernet/freescale/enetc/Makefile b/drivers/net/ethernet/freescale/enetc/Makefile
+index a139f2e9d59f0..e0e8dfd137930 100644
+--- a/drivers/net/ethernet/freescale/enetc/Makefile
++++ b/drivers/net/ethernet/freescale/enetc/Makefile
+@@ -9,7 +9,6 @@ fsl-enetc-$(CONFIG_FSL_ENETC_QOS) += enetc_qos.o
+
+ obj-$(CONFIG_FSL_ENETC_VF) += fsl-enetc-vf.o
+ fsl-enetc-vf-y := enetc_vf.o $(common-objs)
+-fsl-enetc-vf-$(CONFIG_FSL_ENETC_QOS) += enetc_qos.o
+
+ obj-$(CONFIG_FSL_ENETC_IERB) += fsl-enetc-ierb.o
+ fsl-enetc-ierb-y := enetc_ierb.o
+diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
+index 4470a4a3e4c3e..9f5b921039bd4 100644
+--- a/drivers/net/ethernet/freescale/enetc/enetc.c
++++ b/drivers/net/ethernet/freescale/enetc/enetc.c
+@@ -2432,7 +2432,7 @@ int enetc_close(struct net_device *ndev)
+ return 0;
+ }
+
+-static int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data)
++int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data)
+ {
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct tc_mqprio_qopt *mqprio = type_data;
+@@ -2486,25 +2486,6 @@ static int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data)
+ return 0;
+ }
+
+-int enetc_setup_tc(struct net_device *ndev, enum tc_setup_type type,
+- void *type_data)
+-{
+- switch (type) {
+- case TC_SETUP_QDISC_MQPRIO:
+- return enetc_setup_tc_mqprio(ndev, type_data);
+- case TC_SETUP_QDISC_TAPRIO:
+- return enetc_setup_tc_taprio(ndev, type_data);
+- case TC_SETUP_QDISC_CBS:
+- return enetc_setup_tc_cbs(ndev, type_data);
+- case TC_SETUP_QDISC_ETF:
+- return enetc_setup_tc_txtime(ndev, type_data);
+- case TC_SETUP_BLOCK:
+- return enetc_setup_tc_psfp(ndev, type_data);
+- default:
+- return -EOPNOTSUPP;
+- }
+-}
+-
+ static int enetc_setup_xdp_prog(struct net_device *dev, struct bpf_prog *prog,
+ struct netlink_ext_ack *extack)
+ {
+@@ -2600,29 +2581,6 @@ static int enetc_set_rss(struct net_device *ndev, int en)
+ return 0;
+ }
+
+-static int enetc_set_psfp(struct net_device *ndev, int en)
+-{
+- struct enetc_ndev_priv *priv = netdev_priv(ndev);
+- int err;
+-
+- if (en) {
+- err = enetc_psfp_enable(priv);
+- if (err)
+- return err;
+-
+- priv->active_offloads |= ENETC_F_QCI;
+- return 0;
+- }
+-
+- err = enetc_psfp_disable(priv);
+- if (err)
+- return err;
+-
+- priv->active_offloads &= ~ENETC_F_QCI;
+-
+- return 0;
+-}
+-
+ static void enetc_enable_rxvlan(struct net_device *ndev, bool en)
+ {
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+@@ -2641,11 +2599,9 @@ static void enetc_enable_txvlan(struct net_device *ndev, bool en)
+ enetc_bdr_enable_txvlan(&priv->si->hw, i, en);
+ }
+
+-int enetc_set_features(struct net_device *ndev,
+- netdev_features_t features)
++void enetc_set_features(struct net_device *ndev, netdev_features_t features)
+ {
+ netdev_features_t changed = ndev->features ^ features;
+- int err = 0;
+
+ if (changed & NETIF_F_RXHASH)
+ enetc_set_rss(ndev, !!(features & NETIF_F_RXHASH));
+@@ -2657,11 +2613,6 @@ int enetc_set_features(struct net_device *ndev,
+ if (changed & NETIF_F_HW_VLAN_CTAG_TX)
+ enetc_enable_txvlan(ndev,
+ !!(features & NETIF_F_HW_VLAN_CTAG_TX));
+-
+- if (changed & NETIF_F_HW_TC)
+- err = enetc_set_psfp(ndev, !!(features & NETIF_F_HW_TC));
+-
+- return err;
+ }
+
+ #ifdef CONFIG_FSL_ENETC_PTP_CLOCK
+diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h
+index 29922c20531f0..2cfe6944ebd32 100644
+--- a/drivers/net/ethernet/freescale/enetc/enetc.h
++++ b/drivers/net/ethernet/freescale/enetc/enetc.h
+@@ -393,11 +393,9 @@ void enetc_start(struct net_device *ndev);
+ void enetc_stop(struct net_device *ndev);
+ netdev_tx_t enetc_xmit(struct sk_buff *skb, struct net_device *ndev);
+ struct net_device_stats *enetc_get_stats(struct net_device *ndev);
+-int enetc_set_features(struct net_device *ndev,
+- netdev_features_t features);
++void enetc_set_features(struct net_device *ndev, netdev_features_t features);
+ int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd);
+-int enetc_setup_tc(struct net_device *ndev, enum tc_setup_type type,
+- void *type_data);
++int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data);
+ int enetc_setup_bpf(struct net_device *dev, struct netdev_bpf *xdp);
+ int enetc_xdp_xmit(struct net_device *ndev, int num_frames,
+ struct xdp_frame **frames, u32 flags);
+@@ -465,6 +463,7 @@ int enetc_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
+ int enetc_setup_tc_psfp(struct net_device *ndev, void *type_data);
+ int enetc_psfp_init(struct enetc_ndev_priv *priv);
+ int enetc_psfp_clean(struct enetc_ndev_priv *priv);
++int enetc_set_psfp(struct net_device *ndev, bool en);
+
+ static inline void enetc_get_max_cap(struct enetc_ndev_priv *priv)
+ {
+@@ -540,4 +539,9 @@ static inline int enetc_psfp_disable(struct enetc_ndev_priv *priv)
+ {
+ return 0;
+ }
++
++static inline int enetc_set_psfp(struct net_device *ndev, bool en)
++{
++ return 0;
++}
+ #endif
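
The new enetc_set_psfp() declaration follows the usual kernel convention visible in this header: a real implementation when CONFIG_FSL_ENETC_QOS is enabled, and a static inline stub returning 0 otherwise, so callers like enetc_pf_set_features() need no #ifdefs. A compileable sketch of the pattern with generic names:

#include <stdio.h>

/* Toggle this to simulate CONFIG_FSL_ENETC_QOS. */
#define CONFIG_QOS 1

#if CONFIG_QOS
/* "Real" implementation, normally living in its own .c file. */
static int set_psfp(int en)
{
	printf("PSFP %s\n", en ? "enabled" : "disabled");
	return 0;
}
#else
/* Header stub: succeeds trivially so callers stay #ifdef-free. */
static inline int set_psfp(int en) { (void)en; return 0; }
#endif

int main(void)
{
	return set_psfp(1);
}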
+diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
+index c4a0e836d4f09..bb7750222691d 100644
+--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c
++++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
+@@ -709,6 +709,13 @@ static int enetc_pf_set_features(struct net_device *ndev,
+ {
+ netdev_features_t changed = ndev->features ^ features;
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
++ int err;
++
++ if (changed & NETIF_F_HW_TC) {
++ err = enetc_set_psfp(ndev, !!(features & NETIF_F_HW_TC));
++ if (err)
++ return err;
++ }
+
+ if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) {
+ struct enetc_pf *pf = enetc_si_priv(priv->si);
+@@ -722,7 +729,28 @@ static int enetc_pf_set_features(struct net_device *ndev,
+ if (changed & NETIF_F_LOOPBACK)
+ enetc_set_loopback(ndev, !!(features & NETIF_F_LOOPBACK));
+
+- return enetc_set_features(ndev, features);
++ enetc_set_features(ndev, features);
++
++ return 0;
++}
++
++static int enetc_pf_setup_tc(struct net_device *ndev, enum tc_setup_type type,
++ void *type_data)
++{
++ switch (type) {
++ case TC_SETUP_QDISC_MQPRIO:
++ return enetc_setup_tc_mqprio(ndev, type_data);
++ case TC_SETUP_QDISC_TAPRIO:
++ return enetc_setup_tc_taprio(ndev, type_data);
++ case TC_SETUP_QDISC_CBS:
++ return enetc_setup_tc_cbs(ndev, type_data);
++ case TC_SETUP_QDISC_ETF:
++ return enetc_setup_tc_txtime(ndev, type_data);
++ case TC_SETUP_BLOCK:
++ return enetc_setup_tc_psfp(ndev, type_data);
++ default:
++ return -EOPNOTSUPP;
++ }
+ }
+
+ static const struct net_device_ops enetc_ndev_ops = {
+@@ -739,7 +767,7 @@ static const struct net_device_ops enetc_ndev_ops = {
+ .ndo_set_vf_spoofchk = enetc_pf_set_vf_spoofchk,
+ .ndo_set_features = enetc_pf_set_features,
+ .ndo_eth_ioctl = enetc_ioctl,
+- .ndo_setup_tc = enetc_setup_tc,
++ .ndo_setup_tc = enetc_pf_setup_tc,
+ .ndo_bpf = enetc_setup_bpf,
+ .ndo_xdp_xmit = enetc_xdp_xmit,
+ };
+diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
+index 582a663ed0ba4..f8a2f02ce22de 100644
+--- a/drivers/net/ethernet/freescale/enetc/enetc_qos.c
++++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
+@@ -1517,6 +1517,29 @@ int enetc_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
+ }
+ }
+
++int enetc_set_psfp(struct net_device *ndev, bool en)
++{
++ struct enetc_ndev_priv *priv = netdev_priv(ndev);
++ int err;
++
++ if (en) {
++ err = enetc_psfp_enable(priv);
++ if (err)
++ return err;
++
++ priv->active_offloads |= ENETC_F_QCI;
++ return 0;
++ }
++
++ err = enetc_psfp_disable(priv);
++ if (err)
++ return err;
++
++ priv->active_offloads &= ~ENETC_F_QCI;
++
++ return 0;
++}
++
+ int enetc_psfp_init(struct enetc_ndev_priv *priv)
+ {
+ if (epsfp.psfp_sfi_bitmap)
+diff --git a/drivers/net/ethernet/freescale/enetc/enetc_vf.c b/drivers/net/ethernet/freescale/enetc/enetc_vf.c
+index 17924305afa2f..dfcaac302e245 100644
+--- a/drivers/net/ethernet/freescale/enetc/enetc_vf.c
++++ b/drivers/net/ethernet/freescale/enetc/enetc_vf.c
+@@ -88,7 +88,20 @@ static int enetc_vf_set_mac_addr(struct net_device *ndev, void *addr)
+ static int enetc_vf_set_features(struct net_device *ndev,
+ netdev_features_t features)
+ {
+- return enetc_set_features(ndev, features);
++ enetc_set_features(ndev, features);
++
++ return 0;
++}
++
++static int enetc_vf_setup_tc(struct net_device *ndev, enum tc_setup_type type,
++ void *type_data)
++{
++ switch (type) {
++ case TC_SETUP_QDISC_MQPRIO:
++ return enetc_setup_tc_mqprio(ndev, type_data);
++ default:
++ return -EOPNOTSUPP;
++ }
+ }
+
+ /* Probing/ Init */
+@@ -100,7 +113,7 @@ static const struct net_device_ops enetc_ndev_ops = {
+ .ndo_set_mac_address = enetc_vf_set_mac_addr,
+ .ndo_set_features = enetc_vf_set_features,
+ .ndo_eth_ioctl = enetc_ioctl,
+- .ndo_setup_tc = enetc_setup_tc,
++ .ndo_setup_tc = enetc_vf_setup_tc,
+ };
+
+ static void enetc_vf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
+diff --git a/drivers/net/ethernet/google/gve/gve_rx_dqo.c b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
+index 8c939628e2d85..2e6461b0ea8bc 100644
+--- a/drivers/net/ethernet/google/gve/gve_rx_dqo.c
++++ b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
+@@ -157,7 +157,7 @@ static int gve_alloc_page_dqo(struct gve_priv *priv,
+ int err;
+
+ err = gve_alloc_page(priv, &priv->pdev->dev, &buf_state->page_info.page,
+- &buf_state->addr, DMA_FROM_DEVICE, GFP_KERNEL);
++ &buf_state->addr, DMA_FROM_DEVICE, GFP_ATOMIC);
+ if (err)
+ return err;
+
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
+index 1aaf0c5ddf6cf..57e27f2024d38 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
+@@ -5785,6 +5785,26 @@ static int i40e_get_link_speed(struct i40e_vsi *vsi)
+ }
+ }
+
++/**
++ * i40e_bw_bytes_to_mbits - Convert max_tx_rate from bytes to mbits
++ * @vsi: Pointer to vsi structure
++ * @max_tx_rate: max TX rate in bytes to be converted into Mbits
++ *
++ * Helper function to convert units before sending the set BW limit request
++ **/
++static u64 i40e_bw_bytes_to_mbits(struct i40e_vsi *vsi, u64 max_tx_rate)
++{
++ if (max_tx_rate < I40E_BW_MBPS_DIVISOR) {
++ dev_warn(&vsi->back->pdev->dev,
++ "Setting max tx rate to minimum usable value of 50Mbps.\n");
++ max_tx_rate = I40E_BW_CREDIT_DIVISOR;
++ } else {
++ do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR);
++ }
++
++ return max_tx_rate;
++}
++
+ /**
+ * i40e_set_bw_limit - setup BW limit for Tx traffic based on max_tx_rate
+ * @vsi: VSI to be configured
+@@ -5807,10 +5827,10 @@ int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate)
+ max_tx_rate, seid);
+ return -EINVAL;
+ }
+- if (max_tx_rate && max_tx_rate < 50) {
++ if (max_tx_rate && max_tx_rate < I40E_BW_CREDIT_DIVISOR) {
+ dev_warn(&pf->pdev->dev,
+ "Setting max tx rate to minimum usable value of 50Mbps.\n");
+- max_tx_rate = 50;
++ max_tx_rate = I40E_BW_CREDIT_DIVISOR;
+ }
+
+ /* Tx rate credits are in values of 50Mbps, 0 is disabled */
+@@ -8101,9 +8121,9 @@ config_tc:
+
+ if (i40e_is_tc_mqprio_enabled(pf)) {
+ if (vsi->mqprio_qopt.max_rate[0]) {
+- u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
++ u64 max_tx_rate = i40e_bw_bytes_to_mbits(vsi,
++ vsi->mqprio_qopt.max_rate[0]);
+
+- do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR);
+ ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
+ if (!ret) {
+ u64 credits = max_tx_rate;
+@@ -10848,10 +10868,10 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
+ }
+
+ if (vsi->mqprio_qopt.max_rate[0]) {
+- u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
++ u64 max_tx_rate = i40e_bw_bytes_to_mbits(vsi,
++ vsi->mqprio_qopt.max_rate[0]);
+ u64 credits = 0;
+
+- do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR);
+ ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
+ if (ret)
+ goto end_unlock;
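
The new i40e helper converts the mqprio max_rate (bytes per second) into the whole Mbit/s units the firmware expects, raising anything below one Mbit/s to the 50 Mbps minimum named in the warning. A standalone rendering; the 50 Mbps floor is confirmed by the patch, while the 125000 bytes-per-second-per-Mbit divisor is an assumption standing in for I40E_BW_MBPS_DIVISOR:

#include <stdint.h>
#include <stdio.h>

#define BW_MBPS_DIVISOR   125000ULL	/* bytes/s in one Mbit/s (assumed) */
#define BW_CREDIT_DIVISOR 50ULL		/* minimum usable rate, Mbit/s */

/* Mirrors i40e_bw_bytes_to_mbits(): rates too small to express in
 * whole Mbit/s are raised to the 50 Mbps minimum. */
static uint64_t bw_bytes_to_mbits(uint64_t bytes_per_sec)
{
	if (bytes_per_sec < BW_MBPS_DIVISOR)
		return BW_CREDIT_DIVISOR;
	return bytes_per_sec / BW_MBPS_DIVISOR;
}

int main(void)
{
	/* 12.5 MB/s == 100 Mbit/s; 1000 B/s falls below the floor. */
	printf("%llu Mbit/s\n", (unsigned long long)bw_bytes_to_mbits(12500000));
	printf("%llu Mbit/s\n", (unsigned long long)bw_bytes_to_mbits(1000));
	return 0;
}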
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+index 86b0f21287dc8..67fbaaad39859 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+@@ -2038,6 +2038,25 @@ static void i40e_del_qch(struct i40e_vf *vf)
+ }
+ }
+
++/**
++ * i40e_vc_get_max_frame_size
++ * @vf: pointer to the VF
++ *
++ * Max frame size is determined based on the current port's max frame size and
++ * whether a port VLAN is configured on this VF. The VF is not aware whether
++ * it's in a port VLAN, so the PF needs to account for this in max frame size
++ * checks and sending the max frame size to the VF.
++ **/
++static u16 i40e_vc_get_max_frame_size(struct i40e_vf *vf)
++{
++ u16 max_frame_size = vf->pf->hw.phy.link_info.max_frame_size;
++
++ if (vf->port_vlan_id)
++ max_frame_size -= VLAN_HLEN;
++
++ return max_frame_size;
++}
++
+ /**
+ * i40e_vc_get_vf_resources_msg
+ * @vf: pointer to the VF info
+@@ -2139,6 +2158,7 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
+ vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
+ vfres->rss_key_size = I40E_HKEY_ARRAY_SIZE;
+ vfres->rss_lut_size = I40E_VF_HLUT_ARRAY_SIZE;
++ vfres->max_mtu = i40e_vc_get_max_frame_size(vf);
+
+ if (vf->lan_vsi_idx) {
+ vfres->vsi_res[0].vsi_id = vf->lan_vsi_id;
+diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
+index 06d18797d25a2..18b6a702a1d6d 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c
++++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
+@@ -114,8 +114,11 @@ u32 iavf_get_tx_pending(struct iavf_ring *ring, bool in_sw)
+ {
+ u32 head, tail;
+
++ /* underlying hardware might not allow access and/or always return
++ * 0 for the head/tail registers, so just use the cached values
++ */
+ head = ring->next_to_clean;
+- tail = readl(ring->tail);
++ tail = ring->next_to_use;
+
+ if (head != tail)
+ return (head < tail) ?
+@@ -1390,7 +1393,7 @@ static struct sk_buff *iavf_build_skb(struct iavf_ring *rx_ring,
+ #endif
+ struct sk_buff *skb;
+
+- if (!rx_buffer)
++ if (!rx_buffer || !size)
+ return NULL;
+ /* prefetch first cache line of first page */
+ va = page_address(rx_buffer->page) + rx_buffer->page_offset;
+@@ -1548,7 +1551,7 @@ static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget)
+ /* exit if we failed to retrieve a buffer */
+ if (!skb) {
+ rx_ring->rx_stats.alloc_buff_failed++;
+- if (rx_buffer)
++ if (rx_buffer && size)
+ rx_buffer->pagecnt_bias++;
+ break;
+ }
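
With the register read gone, iavf_get_tx_pending() derives the pending count purely from the cached next_to_clean (head) and next_to_use (tail) indices; when the tail has wrapped past the end of the ring, the ring size is added back. A worked standalone version of that arithmetic:

#include <stdio.h>

#define RING_COUNT 512

/* Pending entries between the cached head and tail, handling the
 * wrap-around case exactly as the patched iavf_get_tx_pending() does. */
static unsigned int tx_pending(unsigned int head, unsigned int tail)
{
	if (head == tail)
		return 0;
	return head < tail ? tail - head : tail + RING_COUNT - head;
}

int main(void)
{
	printf("%u\n", tx_pending(10, 42));	/* 32: no wrap */
	printf("%u\n", tx_pending(500, 20));	/* 32: tail wrapped */
	printf("%u\n", tx_pending(7, 7));	/* 0: ring idle */
	return 0;
}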
+diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+index 1603e99bae4af..498797a0a0a95 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
++++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+@@ -273,11 +273,14 @@ int iavf_get_vf_vlan_v2_caps(struct iavf_adapter *adapter)
+ void iavf_configure_queues(struct iavf_adapter *adapter)
+ {
+ struct virtchnl_vsi_queue_config_info *vqci;
+- struct virtchnl_queue_pair_info *vqpi;
++ int i, max_frame = adapter->vf_res->max_mtu;
+ int pairs = adapter->num_active_queues;
+- int i, max_frame = IAVF_MAX_RXBUFFER;
++ struct virtchnl_queue_pair_info *vqpi;
+ size_t len;
+
++ if (max_frame > IAVF_MAX_RXBUFFER || !max_frame)
++ max_frame = IAVF_MAX_RXBUFFER;
++
+ if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+ dev_err(&adapter->pdev->dev, "Cannot configure queues, command %d pending\n",
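
Taken together, the i40e and iavf hunks negotiate the frame size in two steps: the PF advertises a max_mtu that already subtracts the 4-byte VLAN tag when a port VLAN is configured (the VF cannot see that VLAN), and the VF clamps whatever arrives to its own receive-buffer limit, falling back to the limit when an older PF reports zero. A sketch of both sides; the 9728-byte cap is an assumption standing in for IAVF_MAX_RXBUFFER:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define VLAN_HLEN    4
#define MAX_RXBUFFER 9728	/* assumed stand-in for IAVF_MAX_RXBUFFER */

/* PF side: account for a port VLAN the VF cannot see. */
static uint16_t pf_max_frame(uint16_t port_max, bool port_vlan)
{
	return port_vlan ? port_max - VLAN_HLEN : port_max;
}

/* VF side: trust the PF value only within the local buffer limit. */
static int vf_max_frame(int advertised)
{
	if (advertised > MAX_RXBUFFER || !advertised)
		return MAX_RXBUFFER;
	return advertised;
}

int main(void)
{
	int adv = pf_max_frame(9728, true);

	printf("PF advertises %d, VF uses %d\n", adv, vf_max_frame(adv));
	printf("zero from an old PF -> VF uses %d\n", vf_max_frame(0));
	return 0;
}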
+diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
+index 6c4e1d45235ef..1169fd7811b09 100644
+--- a/drivers/net/ethernet/intel/ice/ice_lib.c
++++ b/drivers/net/ethernet/intel/ice/ice_lib.c
+@@ -911,7 +911,7 @@ static void ice_set_dflt_vsi_ctx(struct ice_hw *hw, struct ice_vsi_ctx *ctxt)
+ */
+ static int ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
+ {
+- u16 offset = 0, qmap = 0, tx_count = 0, pow = 0;
++ u16 offset = 0, qmap = 0, tx_count = 0, rx_count = 0, pow = 0;
+ u16 num_txq_per_tc, num_rxq_per_tc;
+ u16 qcount_tx = vsi->alloc_txq;
+ u16 qcount_rx = vsi->alloc_rxq;
+@@ -978,23 +978,25 @@ static int ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
+ * at least 1)
+ */
+ if (offset)
+- vsi->num_rxq = offset;
++ rx_count = offset;
+ else
+- vsi->num_rxq = num_rxq_per_tc;
++ rx_count = num_rxq_per_tc;
+
+- if (vsi->num_rxq > vsi->alloc_rxq) {
++ if (rx_count > vsi->alloc_rxq) {
+ dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Rx queues (%u), than were allocated (%u)!\n",
+- vsi->num_rxq, vsi->alloc_rxq);
++ rx_count, vsi->alloc_rxq);
+ return -EINVAL;
+ }
+
+- vsi->num_txq = tx_count;
+- if (vsi->num_txq > vsi->alloc_txq) {
++ if (tx_count > vsi->alloc_txq) {
+ dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Tx queues (%u), than were allocated (%u)!\n",
+- vsi->num_txq, vsi->alloc_txq);
++ tx_count, vsi->alloc_txq);
+ return -EINVAL;
+ }
+
++ vsi->num_txq = tx_count;
++ vsi->num_rxq = rx_count;
++
+ if (vsi->type == ICE_VSI_VF && vsi->num_txq != vsi->num_rxq) {
+ dev_dbg(ice_pf_to_dev(vsi->back), "VF VSI should have same number of Tx and Rx queues. Hence making them equal\n");
+ /* since there is a chance that num_rxq could have been changed
+@@ -3487,6 +3489,7 @@ ice_vsi_setup_q_map_mqprio(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt,
+ u16 pow, offset = 0, qcount_tx = 0, qcount_rx = 0, qmap;
+ u16 tc0_offset = vsi->mqprio_qopt.qopt.offset[0];
+ int tc0_qcount = vsi->mqprio_qopt.qopt.count[0];
++ u16 new_txq, new_rxq;
+ u8 netdev_tc = 0;
+ int i;
+
+@@ -3527,21 +3530,24 @@ ice_vsi_setup_q_map_mqprio(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt,
+ }
+ }
+
+- /* Set actual Tx/Rx queue pairs */
+- vsi->num_txq = offset + qcount_tx;
+- if (vsi->num_txq > vsi->alloc_txq) {
++ new_txq = offset + qcount_tx;
++ if (new_txq > vsi->alloc_txq) {
+ dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Tx queues (%u), than were allocated (%u)!\n",
+- vsi->num_txq, vsi->alloc_txq);
++ new_txq, vsi->alloc_txq);
+ return -EINVAL;
+ }
+
+- vsi->num_rxq = offset + qcount_rx;
+- if (vsi->num_rxq > vsi->alloc_rxq) {
++ new_rxq = offset + qcount_rx;
++ if (new_rxq > vsi->alloc_rxq) {
+ dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Rx queues (%u), than were allocated (%u)!\n",
+- vsi->num_rxq, vsi->alloc_rxq);
++ new_rxq, vsi->alloc_rxq);
+ return -EINVAL;
+ }
+
++ /* Set actual Tx/Rx queue pairs */
++ vsi->num_txq = new_txq;
++ vsi->num_rxq = new_rxq;
++
+ /* Setup queue TC[0].qmap for given VSI context */
+ ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
+ ctxt->info.q_mapping[0] = cpu_to_le16(vsi->rxq_map[0]);
+@@ -3573,6 +3579,7 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
+ {
+ u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
+ struct ice_pf *pf = vsi->back;
++ struct ice_tc_cfg old_tc_cfg;
+ struct ice_vsi_ctx *ctx;
+ struct device *dev;
+ int i, ret = 0;
+@@ -3597,6 +3604,7 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
+ max_txqs[i] = vsi->num_txq;
+ }
+
++ memcpy(&old_tc_cfg, &vsi->tc_cfg, sizeof(old_tc_cfg));
+ vsi->tc_cfg.ena_tc = ena_tc;
+ vsi->tc_cfg.numtc = num_tc;
+
+@@ -3613,8 +3621,10 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
+ else
+ ret = ice_vsi_setup_q_map(vsi, ctx);
+
+- if (ret)
++ if (ret) {
++ memcpy(&vsi->tc_cfg, &old_tc_cfg, sizeof(vsi->tc_cfg));
+ goto out;
++ }
+
+ /* must indicate which sections of the VSI context are being modified */
+ ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
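
Both ice_vsi_setup_q_map() changes share one shape: compute the new queue counts into locals, validate them against what was allocated, and write vsi->num_txq/num_rxq only after every check passes, so a rejected reconfiguration leaves the old state intact (the saved old_tc_cfg serves the same purpose one level up). A sketch of the validate-then-commit pattern:

#include <stdio.h>

struct vsi { unsigned int num_txq, num_rxq, alloc_txq, alloc_rxq; };

/* Validate the candidate counts first; commit only when both fit, so
 * callers never observe a half-updated VSI on failure. */
static int setup_q_map(struct vsi *vsi, unsigned int tx, unsigned int rx)
{
	if (tx > vsi->alloc_txq || rx > vsi->alloc_rxq)
		return -1;	/* -EINVAL in the driver */

	vsi->num_txq = tx;
	vsi->num_rxq = rx;
	return 0;
}

int main(void)
{
	struct vsi vsi = { .num_txq = 4, .num_rxq = 4,
			   .alloc_txq = 8, .alloc_rxq = 8 };

	if (setup_q_map(&vsi, 16, 4) != 0)	/* rejected: 16 > 8 */
		printf("rejected, still %u/%u\n", vsi.num_txq, vsi.num_rxq);
	setup_q_map(&vsi, 8, 8);
	printf("committed %u/%u\n", vsi.num_txq, vsi.num_rxq);
	return 0;
}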
+diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
+index 4c6bb7482b362..48befe1e2872c 100644
+--- a/drivers/net/ethernet/intel/ice/ice_main.c
++++ b/drivers/net/ethernet/intel/ice/ice_main.c
+@@ -2399,8 +2399,6 @@ int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset)
+ return -EBUSY;
+ }
+
+- ice_unplug_aux_dev(pf);
+-
+ switch (reset) {
+ case ICE_RESET_PFR:
+ set_bit(ICE_PFR_REQ, pf->state);
+@@ -6629,7 +6627,7 @@ static void ice_napi_disable_all(struct ice_vsi *vsi)
+ */
+ int ice_down(struct ice_vsi *vsi)
+ {
+- int i, tx_err, rx_err, link_err = 0, vlan_err = 0;
++ int i, tx_err, rx_err, vlan_err = 0;
+
+ WARN_ON(!test_bit(ICE_VSI_DOWN, vsi->state));
+
+@@ -6663,20 +6661,13 @@ int ice_down(struct ice_vsi *vsi)
+
+ ice_napi_disable_all(vsi);
+
+- if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) {
+- link_err = ice_force_phys_link_state(vsi, false);
+- if (link_err)
+- netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n",
+- vsi->vsi_num, link_err);
+- }
+-
+ ice_for_each_txq(vsi, i)
+ ice_clean_tx_ring(vsi->tx_rings[i]);
+
+ ice_for_each_rxq(vsi, i)
+ ice_clean_rx_ring(vsi->rx_rings[i]);
+
+- if (tx_err || rx_err || link_err || vlan_err) {
++ if (tx_err || rx_err || vlan_err) {
+ netdev_err(vsi->netdev, "Failed to close VSI 0x%04X on switch 0x%04X\n",
+ vsi->vsi_num, vsi->vsw->sw_id);
+ return -EIO;
+@@ -6838,6 +6829,8 @@ int ice_vsi_open(struct ice_vsi *vsi)
+ if (err)
+ goto err_setup_rx;
+
++ ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc);
++
+ if (vsi->type == ICE_VSI_PF) {
+ /* Notify the stack of the actual queue counts. */
+ err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq);
+@@ -8876,6 +8869,16 @@ int ice_stop(struct net_device *netdev)
+ return -EBUSY;
+ }
+
++ if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) {
++ int link_err = ice_force_phys_link_state(vsi, false);
++
++ if (link_err) {
++ netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n",
++ vsi->vsi_num, link_err);
++ return -EIO;
++ }
++ }
++
+ ice_vsi_close(vsi);
+
+ return 0;
+diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
+index 836dce8407124..97453d1dfafed 100644
+--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
++++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
+@@ -610,7 +610,7 @@ ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
+ if (test_bit(ICE_VSI_DOWN, vsi->state))
+ return -ENETDOWN;
+
+- if (!ice_is_xdp_ena_vsi(vsi) || queue_index >= vsi->num_xdp_txq)
++ if (!ice_is_xdp_ena_vsi(vsi))
+ return -ENXIO;
+
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+@@ -621,6 +621,9 @@ ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
+ xdp_ring = vsi->xdp_rings[queue_index];
+ spin_lock(&xdp_ring->tx_lock);
+ } else {
++ /* Generally, should not happen */
++ if (unlikely(queue_index >= vsi->num_xdp_txq))
++ return -ENXIO;
+ xdp_ring = vsi->xdp_rings[queue_index];
+ }
+
+diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio.c
+index 85155cd9405c5..4aeb927c37153 100644
+--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio.c
++++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio.c
+@@ -179,6 +179,9 @@ static int mlxbf_gige_mdio_read(struct mii_bus *bus, int phy_add, int phy_reg)
+ /* Only return ad bits of the gw register */
+ ret &= MLXBF_GIGE_MDIO_GW_AD_MASK;
+
++ /* The MDIO lock is set on read. To release it, clear gw register */
++ writel(0, priv->mdio_io + MLXBF_GIGE_MDIO_GW_OFFSET);
++
+ return ret;
+ }
+
+@@ -203,6 +206,9 @@ static int mlxbf_gige_mdio_write(struct mii_bus *bus, int phy_add,
+ temp, !(temp & MLXBF_GIGE_MDIO_GW_BUSY_MASK),
+ 5, 1000000);
+
++ /* The MDIO lock is set on read. To release it, clear gw register */
++ writel(0, priv->mdio_io + MLXBF_GIGE_MDIO_GW_OFFSET);
++
+ return ret;
+ }
+
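
The two mlxbf_gige hunks document a hardware handshake: reading the GW register takes the MDIO lock, and writing zero back releases it, so both the read and the write paths must now end with the clearing write. A toy model of that acquire-on-read, release-by-clear behavior:

#include <stdbool.h>
#include <stdio.h>

/* Toy model of a "lock taken on read, released by writing 0" register. */
static unsigned int gw_reg;
static bool locked;

static unsigned int gw_read(void)
{
	locked = true;		/* hardware grabs the lock on read */
	return gw_reg;
}

static void gw_write(unsigned int v)
{
	gw_reg = v;
	if (v == 0)
		locked = false;	/* clearing the register releases it */
}

int main(void)
{
	gw_reg = 0xabcd;
	unsigned int val = gw_read();

	/* Without this write the next transaction would stall on the
	 * still-held lock. */
	gw_write(0);
	printf("read %#x, lock held afterwards: %s\n", val,
	       locked ? "yes" : "no");
	return 0;
}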
+diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c
+index 49b85ca578b01..9820efce72ffe 100644
+--- a/drivers/net/ethernet/microsoft/mana/gdma_main.c
++++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c
+@@ -370,6 +370,11 @@ static void mana_gd_process_eq_events(void *arg)
+ break;
+ }
+
++ /* Per GDMA spec, rmb is necessary after checking owner_bits, before
++ * reading eqe.
++ */
++ rmb();
++
+ mana_gd_process_eqe(eq);
+
+ eq->head++;
+@@ -1107,6 +1112,11 @@ static int mana_gd_read_cqe(struct gdma_queue *cq, struct gdma_comp *comp)
+ if (WARN_ON_ONCE(owner_bits != new_bits))
+ return -1;
+
++ /* Per GDMA spec, rmb is necessary after checking owner_bits, before
++ * reading completion info
++ */
++ rmb();
++
+ comp->wq_num = cqe->cqe_info.wq_num;
+ comp->is_sq = cqe->cqe_info.is_sq;
+ memcpy(comp->cqe_data, cqe->cqe_data, GDMA_COMP_DATA_SIZE);
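
Both GDMA hunks impose the same discipline: first confirm from owner_bits that the hardware has published the entry, then issue a read barrier, and only then read the payload, so the payload loads cannot be reordered before the ownership check. In portable C11 the equivalent is an acquire fence; the kernel's rmb() is the arch-specific analogue. A sketch:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct cqe {
	int data;
	_Atomic unsigned int owner_bits;	/* written last by "hardware" */
};

/* Consume an entry only after the ownership check, with an acquire
 * fence between the check and the payload read, the same ordering the
 * patch enforces with rmb(). */
static bool read_cqe(struct cqe *e, unsigned int expected, int *out)
{
	if (atomic_load_explicit(&e->owner_bits,
				 memory_order_relaxed) != expected)
		return false;	/* not published yet */

	atomic_thread_fence(memory_order_acquire);
	*out = e->data;		/* cannot be hoisted above the fence */
	return true;
}

int main(void)
{
	struct cqe e = { .data = 42 };
	int v = 0;

	atomic_store_explicit(&e.owner_bits, 1, memory_order_release);
	printf("%s %d\n", read_cqe(&e, 1, &v) ? "got" : "miss", v);
	return 0;
}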
+diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
+index b357ac4c56c59..7e32b04eb0c75 100644
+--- a/drivers/net/ethernet/renesas/ravb_main.c
++++ b/drivers/net/ethernet/renesas/ravb_main.c
+@@ -1449,6 +1449,8 @@ static int ravb_phy_init(struct net_device *ndev)
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
+ }
+
++ /* Indicate that the MAC is responsible for managing PHY PM */
++ phydev->mac_managed_pm = true;
+ phy_attached_info(phydev);
+
+ return 0;
+diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
+index 67ade78fb7671..7fd8828d3a846 100644
+--- a/drivers/net/ethernet/renesas/sh_eth.c
++++ b/drivers/net/ethernet/renesas/sh_eth.c
+@@ -2029,6 +2029,8 @@ static int sh_eth_phy_init(struct net_device *ndev)
+ if (mdp->cd->register_type != SH_ETH_REG_GIGABIT)
+ phy_set_max_speed(phydev, SPEED_100);
+
++ /* Indicate that the MAC is responsible for managing PHY PM */
++ phydev->mac_managed_pm = true;
+ phy_attached_info(phydev);
+
+ return 0;
+diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c
+index 032b8c0bd7889..5b4d661ab9867 100644
+--- a/drivers/net/ethernet/sfc/efx_channels.c
++++ b/drivers/net/ethernet/sfc/efx_channels.c
+@@ -319,7 +319,7 @@ int efx_probe_interrupts(struct efx_nic *efx)
+ efx->n_channels = 1 + (efx_separate_tx_channels ? 1 : 0);
+ efx->n_rx_channels = 1;
+ efx->n_tx_channels = 1;
+- efx->tx_channel_offset = 1;
++ efx->tx_channel_offset = efx_separate_tx_channels ? 1 : 0;
+ efx->n_xdp_channels = 0;
+ efx->xdp_channel_offset = efx->n_channels;
+ efx->legacy_irq = efx->pci_dev->irq;
+diff --git a/drivers/net/ethernet/sfc/siena/efx_channels.c b/drivers/net/ethernet/sfc/siena/efx_channels.c
+index 017212a40df38..f54ebd0072868 100644
+--- a/drivers/net/ethernet/sfc/siena/efx_channels.c
++++ b/drivers/net/ethernet/sfc/siena/efx_channels.c
+@@ -320,7 +320,7 @@ int efx_siena_probe_interrupts(struct efx_nic *efx)
+ efx->n_channels = 1 + (efx_siena_separate_tx_channels ? 1 : 0);
+ efx->n_rx_channels = 1;
+ efx->n_tx_channels = 1;
+- efx->tx_channel_offset = 1;
++ efx->tx_channel_offset = efx_siena_separate_tx_channels ? 1 : 0;
+ efx->n_xdp_channels = 0;
+ efx->xdp_channel_offset = efx->n_channels;
+ efx->legacy_irq = efx->pci_dev->irq;
+diff --git a/drivers/net/ethernet/sfc/siena/tx.c b/drivers/net/ethernet/sfc/siena/tx.c
+index e166dcb9b99ce..91e87594ed1ea 100644
+--- a/drivers/net/ethernet/sfc/siena/tx.c
++++ b/drivers/net/ethernet/sfc/siena/tx.c
+@@ -336,7 +336,7 @@ netdev_tx_t efx_siena_hard_start_xmit(struct sk_buff *skb,
+ * previous packets out.
+ */
+ if (!netdev_xmit_more())
+- efx_tx_send_pending(tx_queue->channel);
++ efx_tx_send_pending(efx_get_tx_channel(efx, index));
+ return NETDEV_TX_OK;
+ }
+
+diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
+index 138bca6113415..80ed7f760bd30 100644
+--- a/drivers/net/ethernet/sfc/tx.c
++++ b/drivers/net/ethernet/sfc/tx.c
+@@ -549,7 +549,7 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
+ * previous packets out.
+ */
+ if (!netdev_xmit_more())
+- efx_tx_send_pending(tx_queue->channel);
++ efx_tx_send_pending(efx_get_tx_channel(efx, index));
+ return NETDEV_TX_OK;
+ }
+
+diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
+index 8594ee839628b..88aa0d310aeef 100644
+--- a/drivers/net/ethernet/sun/sunhme.c
++++ b/drivers/net/ethernet/sun/sunhme.c
+@@ -2020,9 +2020,9 @@ static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev)
+
+ skb_reserve(copy_skb, 2);
+ skb_put(copy_skb, len);
+- dma_sync_single_for_cpu(hp->dma_dev, dma_addr, len, DMA_FROM_DEVICE);
++ dma_sync_single_for_cpu(hp->dma_dev, dma_addr, len + 2, DMA_FROM_DEVICE);
+ skb_copy_from_linear_data(skb, copy_skb->data, len);
+- dma_sync_single_for_device(hp->dma_dev, dma_addr, len, DMA_FROM_DEVICE);
++ dma_sync_single_for_device(hp->dma_dev, dma_addr, len + 2, DMA_FROM_DEVICE);
+ /* Reuse original ring buffer. */
+ hme_write_rxd(hp, this,
+ (RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)),
+diff --git a/drivers/net/ipa/ipa_qmi.c b/drivers/net/ipa/ipa_qmi.c
+index ec010cf2e816a..6f874f99b910c 100644
+--- a/drivers/net/ipa/ipa_qmi.c
++++ b/drivers/net/ipa/ipa_qmi.c
+@@ -308,12 +308,12 @@ init_modem_driver_req(struct ipa_qmi *ipa_qmi)
+ mem = ipa_mem_find(ipa, IPA_MEM_V4_ROUTE);
+ req.v4_route_tbl_info_valid = 1;
+ req.v4_route_tbl_info.start = ipa->mem_offset + mem->offset;
+- req.v4_route_tbl_info.count = mem->size / sizeof(__le64);
++ req.v4_route_tbl_info.end = IPA_ROUTE_MODEM_COUNT - 1;
+
+ mem = ipa_mem_find(ipa, IPA_MEM_V6_ROUTE);
+ req.v6_route_tbl_info_valid = 1;
+ req.v6_route_tbl_info.start = ipa->mem_offset + mem->offset;
+- req.v6_route_tbl_info.count = mem->size / sizeof(__le64);
++ req.v6_route_tbl_info.end = IPA_ROUTE_MODEM_COUNT - 1;
+
+ mem = ipa_mem_find(ipa, IPA_MEM_V4_FILTER);
+ req.v4_filter_tbl_start_valid = 1;
+@@ -352,7 +352,7 @@ init_modem_driver_req(struct ipa_qmi *ipa_qmi)
+ req.v4_hash_route_tbl_info_valid = 1;
+ req.v4_hash_route_tbl_info.start =
+ ipa->mem_offset + mem->offset;
+- req.v4_hash_route_tbl_info.count = mem->size / sizeof(__le64);
++ req.v4_hash_route_tbl_info.end = IPA_ROUTE_MODEM_COUNT - 1;
+ }
+
+ mem = ipa_mem_find(ipa, IPA_MEM_V6_ROUTE_HASHED);
+@@ -360,7 +360,7 @@ init_modem_driver_req(struct ipa_qmi *ipa_qmi)
+ req.v6_hash_route_tbl_info_valid = 1;
+ req.v6_hash_route_tbl_info.start =
+ ipa->mem_offset + mem->offset;
+- req.v6_hash_route_tbl_info.count = mem->size / sizeof(__le64);
++ req.v6_hash_route_tbl_info.end = IPA_ROUTE_MODEM_COUNT - 1;
+ }
+
+ mem = ipa_mem_find(ipa, IPA_MEM_V4_FILTER_HASHED);
+diff --git a/drivers/net/ipa/ipa_qmi_msg.c b/drivers/net/ipa/ipa_qmi_msg.c
+index 6838e8065072b..75d3fc0092e92 100644
+--- a/drivers/net/ipa/ipa_qmi_msg.c
++++ b/drivers/net/ipa/ipa_qmi_msg.c
+@@ -311,7 +311,7 @@ struct qmi_elem_info ipa_init_modem_driver_req_ei[] = {
+ .tlv_type = 0x12,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ v4_route_tbl_info),
+- .ei_array = ipa_mem_array_ei,
++ .ei_array = ipa_mem_bounds_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+@@ -332,7 +332,7 @@ struct qmi_elem_info ipa_init_modem_driver_req_ei[] = {
+ .tlv_type = 0x13,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ v6_route_tbl_info),
+- .ei_array = ipa_mem_array_ei,
++ .ei_array = ipa_mem_bounds_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+@@ -496,7 +496,7 @@ struct qmi_elem_info ipa_init_modem_driver_req_ei[] = {
+ .tlv_type = 0x1b,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ v4_hash_route_tbl_info),
+- .ei_array = ipa_mem_array_ei,
++ .ei_array = ipa_mem_bounds_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+@@ -517,7 +517,7 @@ struct qmi_elem_info ipa_init_modem_driver_req_ei[] = {
+ .tlv_type = 0x1c,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ v6_hash_route_tbl_info),
+- .ei_array = ipa_mem_array_ei,
++ .ei_array = ipa_mem_bounds_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+diff --git a/drivers/net/ipa/ipa_qmi_msg.h b/drivers/net/ipa/ipa_qmi_msg.h
+index 495e85abe50bd..9651aa59b5968 100644
+--- a/drivers/net/ipa/ipa_qmi_msg.h
++++ b/drivers/net/ipa/ipa_qmi_msg.h
+@@ -86,9 +86,11 @@ enum ipa_platform_type {
+ IPA_QMI_PLATFORM_TYPE_MSM_QNX_V01 = 0x5, /* QNX MSM */
+ };
+
+-/* This defines the start and end offset of a range of memory. Both
+- * fields are offsets relative to the start of IPA shared memory.
+- * The end value is the last addressable byte *within* the range.
++/* This defines the start and end offset of a range of memory. The start
++ * value is a byte offset relative to the start of IPA shared memory. The
++ * end value is the last addressable unit *within* the range. Typically
++ * the end value is in units of bytes; however, it can also be a maximum
++ * array index value.
+ */
+ struct ipa_mem_bounds {
+ u32 start;
+@@ -129,18 +131,19 @@ struct ipa_init_modem_driver_req {
+ u8 hdr_tbl_info_valid;
+ struct ipa_mem_bounds hdr_tbl_info;
+
+- /* Routing table information. These define the location and size of
+- * non-hashable IPv4 and IPv6 filter tables. The start values are
+- * offsets relative to the start of IPA shared memory.
++ /* Routing table information. These define the location and maximum
++ * *index* (not byte) for the modem portion of non-hashable IPv4 and
++ * IPv6 routing tables. The start values are byte offsets relative
++ * to the start of IPA shared memory.
+ */
+ u8 v4_route_tbl_info_valid;
+- struct ipa_mem_array v4_route_tbl_info;
++ struct ipa_mem_bounds v4_route_tbl_info;
+ u8 v6_route_tbl_info_valid;
+- struct ipa_mem_array v6_route_tbl_info;
++ struct ipa_mem_bounds v6_route_tbl_info;
+
+ /* Filter table information. These define the location of the
+ * non-hashable IPv4 and IPv6 filter tables. The start values are
+- * offsets relative to the start of IPA shared memory.
++ * byte offsets relative to the start of IPA shared memory.
+ */
+ u8 v4_filter_tbl_start_valid;
+ u32 v4_filter_tbl_start;
+@@ -181,18 +184,20 @@ struct ipa_init_modem_driver_req {
+ u8 zip_tbl_info_valid;
+ struct ipa_mem_bounds zip_tbl_info;
+
+- /* Routing table information. These define the location and size
+- * of hashable IPv4 and IPv6 filter tables. The start values are
+- * offsets relative to the start of IPA shared memory.
++ /* Routing table information. These define the location and maximum
++ * *index* (not byte) for the modem portion of hashable IPv4 and IPv6
++ * routing tables (if supported by hardware). The start values are
++ * byte offsets relative to the start of IPA shared memory.
+ */
+ u8 v4_hash_route_tbl_info_valid;
+- struct ipa_mem_array v4_hash_route_tbl_info;
++ struct ipa_mem_bounds v4_hash_route_tbl_info;
+ u8 v6_hash_route_tbl_info_valid;
+- struct ipa_mem_array v6_hash_route_tbl_info;
++ struct ipa_mem_bounds v6_hash_route_tbl_info;
+
+ /* Filter table information. These define the location and size
+- * of hashable IPv4 and IPv6 filter tables. The start values are
+- * offsets relative to the start of IPA shared memory.
++ * of hashable IPv4 and IPv6 filter tables (if supported by hardware).
++ * The start values are byte offsets relative to the start of IPA
++ * shared memory.
+ */
+ u8 v4_hash_filter_tbl_start_valid;
+ u32 v4_hash_filter_tbl_start;
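A note on the inclusive "end" convention documented above: when end is a maximum array index, a table of N entries is described by N - 1, which is why the QMI hunks set .end = IPA_ROUTE_MODEM_COUNT - 1. Below is a minimal userspace sketch of the two interpretations; the struct and constants are local re-declarations for illustration, not the driver's headers.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct mem_bounds {
	uint32_t start;	/* byte offset into shared memory */
	uint32_t end;	/* last addressable unit *within* the range */
};

#define ROUTE_MODEM_COUNT 8	/* entries allotted to the modem */

int main(void)
{
	/* "end" as a maximum index: N entries => end = N - 1 */
	struct mem_bounds route = {
		.start = 0x100,			/* hypothetical offset */
		.end = ROUTE_MODEM_COUNT - 1,	/* index of last entry */
	};

	/* "end" in bytes: size S at offset off => end = off + S - 1 */
	struct mem_bounds region = { .start = 0x200, .end = 0x200 + 64 - 1 };

	assert(route.end + 1 == ROUTE_MODEM_COUNT);
	printf("route entries: %u, region bytes: %u\n",
	       route.end + 1, region.end - region.start + 1);
	return 0;
}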
+diff --git a/drivers/net/ipa/ipa_table.c b/drivers/net/ipa/ipa_table.c
+index 2f5a58bfc529a..69efe672ca528 100644
+--- a/drivers/net/ipa/ipa_table.c
++++ b/drivers/net/ipa/ipa_table.c
+@@ -108,8 +108,6 @@
+
+ /* Assignment of route table entries to the modem and AP */
+ #define IPA_ROUTE_MODEM_MIN 0
+-#define IPA_ROUTE_MODEM_COUNT 8
+-
+ #define IPA_ROUTE_AP_MIN IPA_ROUTE_MODEM_COUNT
+ #define IPA_ROUTE_AP_COUNT \
+ (IPA_ROUTE_COUNT_MAX - IPA_ROUTE_MODEM_COUNT)
+diff --git a/drivers/net/ipa/ipa_table.h b/drivers/net/ipa/ipa_table.h
+index b6a9a0d79d68e..1538e2e1732fe 100644
+--- a/drivers/net/ipa/ipa_table.h
++++ b/drivers/net/ipa/ipa_table.h
+@@ -13,6 +13,9 @@ struct ipa;
+ /* The maximum number of filter table entries (IPv4, IPv6; hashed or not) */
+ #define IPA_FILTER_COUNT_MAX 14
+
++/* The number of route table entries allotted to the modem */
++#define IPA_ROUTE_MODEM_COUNT 8
++
+ /* The maximum number of route table entries (IPv4, IPv6; hashed or not) */
+ #define IPA_ROUTE_COUNT_MAX 15
+
+diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
+index 6ffb27419e64b..c58123e136896 100644
+--- a/drivers/net/ipvlan/ipvlan_core.c
++++ b/drivers/net/ipvlan/ipvlan_core.c
+@@ -495,7 +495,6 @@ static int ipvlan_process_v6_outbound(struct sk_buff *skb)
+
+ static int ipvlan_process_outbound(struct sk_buff *skb)
+ {
+- struct ethhdr *ethh = eth_hdr(skb);
+ int ret = NET_XMIT_DROP;
+
+ /* The ipvlan is a pseudo-L2 device, so the packets that we receive
+@@ -505,6 +504,8 @@ static int ipvlan_process_outbound(struct sk_buff *skb)
+ if (skb_mac_header_was_set(skb)) {
+ /* In this mode we dont care about
+ * multicast and broadcast traffic */
++ struct ethhdr *ethh = eth_hdr(skb);
++
+ if (is_multicast_ether_addr(ethh->h_dest)) {
+ pr_debug_ratelimited(
+ "Dropped {multi|broad}cast of type=[%x]\n",
+@@ -589,7 +590,7 @@ out:
+ static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev)
+ {
+ const struct ipvl_dev *ipvlan = netdev_priv(dev);
+- struct ethhdr *eth = eth_hdr(skb);
++ struct ethhdr *eth = skb_eth_hdr(skb);
+ struct ipvl_addr *addr;
+ void *lyr3h;
+ int addr_type;
+@@ -619,6 +620,7 @@ static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev)
+ return dev_forward_skb(ipvlan->phy_dev, skb);
+
+ } else if (is_multicast_ether_addr(eth->h_dest)) {
++ skb_reset_mac_header(skb);
+ ipvlan_skb_crossing_ns(skb, NULL);
+ ipvlan_multicast_enqueue(ipvlan->port, skb, true);
+ return NET_XMIT_SUCCESS;
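The ipvlan hunks above are an ordering fix: eth_hdr(skb) must not be evaluated until skb_mac_header_was_set() confirms the header exists, and the multicast path needs skb_reset_mac_header() before enqueueing. A hedged userspace sketch of the guard-before-dereference shape (names are stand-ins, not the ipvlan API):

#include <stdbool.h>
#include <stdio.h>

struct pkt {
	bool has_l2;		/* analogous to skb_mac_header_was_set() */
	unsigned char dst[6];	/* analogous to ethh->h_dest */
};

static bool is_multicast(const unsigned char *addr)
{
	return addr[0] & 0x01;	/* same test is_multicast_ether_addr() does */
}

static int process_outbound(struct pkt *p)
{
	/* The old code computed the header pointer before this guard,
	 * reading a field that may not be valid yet. */
	if (p->has_l2) {
		const unsigned char *d = p->dst;	/* deref after guard */

		if (is_multicast(d)) {
			printf("dropping multicast frame\n");
			return -1;
		}
	}
	return 0;
}

int main(void)
{
	struct pkt p = { .has_l2 = true, .dst = { 0x01 } };

	process_outbound(&p);
	return 0;
}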
+diff --git a/drivers/net/mdio/of_mdio.c b/drivers/net/mdio/of_mdio.c
+index 9e3c815a070f1..796e9c7857d09 100644
+--- a/drivers/net/mdio/of_mdio.c
++++ b/drivers/net/mdio/of_mdio.c
+@@ -231,6 +231,7 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
+ return 0;
+
+ unregister:
++ of_node_put(child);
+ mdiobus_unregister(mdio);
+ return rc;
+ }
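The of_mdio hunk is a classic iterator-reference leak: for_each_available_child_of_node() holds a reference on the current child, so bailing out of the loop on error must of_node_put() it. A toy sketch of that rule under a fake refcount (none of this is the OF API):

#include <stdio.h>

struct node { int refs; const char *name; };

static void node_get(struct node *n) { n->refs++; }
static void node_put(struct node *n) { n->refs--; }

static int register_child(struct node *n)
{
	return n->name[0] == 'b' ? -1 : 0;	/* fail on "bad" */
}

int main(void)
{
	struct node kids[] = { { 0, "ok" }, { 0, "bad" } };
	int i, rc = 0;

	for (i = 0; i < 2; i++) {
		node_get(&kids[i]);	/* iterator holds a ref on current */
		rc = register_child(&kids[i]);
		if (rc)
			goto unregister; /* still holding kids[i]'s ref */
		node_put(&kids[i]);	/* iterator drops it on advance */
	}
	return 0;

unregister:
	node_put(&kids[i]);	/* the put the fix adds */
	printf("unwound after failing on %s (rc=%d)\n", kids[i].name, rc);
	return 1;
}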
+diff --git a/drivers/net/netdevsim/hwstats.c b/drivers/net/netdevsim/hwstats.c
+index 605a38e16db05..0e58aa7f0374e 100644
+--- a/drivers/net/netdevsim/hwstats.c
++++ b/drivers/net/netdevsim/hwstats.c
+@@ -433,11 +433,11 @@ int nsim_dev_hwstats_init(struct nsim_dev *nsim_dev)
+ goto err_remove_hwstats_recursive;
+ }
+
+- debugfs_create_file("enable_ifindex", 0600, hwstats->l3_ddir, hwstats,
++ debugfs_create_file("enable_ifindex", 0200, hwstats->l3_ddir, hwstats,
+ &nsim_dev_hwstats_l3_enable_fops.fops);
+- debugfs_create_file("disable_ifindex", 0600, hwstats->l3_ddir, hwstats,
++ debugfs_create_file("disable_ifindex", 0200, hwstats->l3_ddir, hwstats,
+ &nsim_dev_hwstats_l3_disable_fops.fops);
+- debugfs_create_file("fail_next_enable", 0600, hwstats->l3_ddir, hwstats,
++ debugfs_create_file("fail_next_enable", 0200, hwstats->l3_ddir, hwstats,
+ &nsim_dev_hwstats_l3_fail_fops.fops);
+
+ INIT_DELAYED_WORK(&hwstats->traffic_dw,
+diff --git a/drivers/net/phy/aquantia_main.c b/drivers/net/phy/aquantia_main.c
+index c7047f5d7a9b0..8bc0957a0f6d3 100644
+--- a/drivers/net/phy/aquantia_main.c
++++ b/drivers/net/phy/aquantia_main.c
+@@ -90,6 +90,9 @@
+ #define VEND1_GLOBAL_FW_ID_MAJOR GENMASK(15, 8)
+ #define VEND1_GLOBAL_FW_ID_MINOR GENMASK(7, 0)
+
++#define VEND1_GLOBAL_GEN_STAT2 0xc831
++#define VEND1_GLOBAL_GEN_STAT2_OP_IN_PROG BIT(15)
++
+ #define VEND1_GLOBAL_RSVD_STAT1 0xc885
+ #define VEND1_GLOBAL_RSVD_STAT1_FW_BUILD_ID GENMASK(7, 4)
+ #define VEND1_GLOBAL_RSVD_STAT1_PROV_ID GENMASK(3, 0)
+@@ -124,6 +127,12 @@
+ #define VEND1_GLOBAL_INT_VEND_MASK_GLOBAL2 BIT(1)
+ #define VEND1_GLOBAL_INT_VEND_MASK_GLOBAL3 BIT(0)
+
++/* Sleep and timeout for checking if the Processor-Intensive
++ * MDIO operation is finished
++ */
++#define AQR107_OP_IN_PROG_SLEEP 1000
++#define AQR107_OP_IN_PROG_TIMEOUT 100000
++
+ struct aqr107_hw_stat {
+ const char *name;
+ int reg;
+@@ -596,16 +605,52 @@ static void aqr107_link_change_notify(struct phy_device *phydev)
+ phydev_info(phydev, "Aquantia 1000Base-T2 mode active\n");
+ }
+
++static int aqr107_wait_processor_intensive_op(struct phy_device *phydev)
++{
++ int val, err;
++
++ /* The datasheet says to wait at least 1 ms after issuing a
++ * processor-intensive operation before checking.
++ * We cannot use the 'sleep_before_read' parameter of read_poll_timeout
++ * because that just determines the maximum time slept, not the minimum.
++ */
++ usleep_range(1000, 5000);
++
++ err = phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1,
++ VEND1_GLOBAL_GEN_STAT2, val,
++ !(val & VEND1_GLOBAL_GEN_STAT2_OP_IN_PROG),
++ AQR107_OP_IN_PROG_SLEEP,
++ AQR107_OP_IN_PROG_TIMEOUT, false);
++ if (err) {
++ phydev_err(phydev, "timeout: processor-intensive MDIO operation\n");
++ return err;
++ }
++
++ return 0;
++}
++
+ static int aqr107_suspend(struct phy_device *phydev)
+ {
+- return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, MDIO_CTRL1,
+- MDIO_CTRL1_LPOWER);
++ int err;
++
++ err = phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, MDIO_CTRL1,
++ MDIO_CTRL1_LPOWER);
++ if (err)
++ return err;
++
++ return aqr107_wait_processor_intensive_op(phydev);
+ }
+
+ static int aqr107_resume(struct phy_device *phydev)
+ {
+- return phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, MDIO_CTRL1,
+- MDIO_CTRL1_LPOWER);
++ int err;
++
++ err = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, MDIO_CTRL1,
++ MDIO_CTRL1_LPOWER);
++ if (err)
++ return err;
++
++ return aqr107_wait_processor_intensive_op(phydev);
+ }
+
+ static int aqr107_probe(struct phy_device *phydev)
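The helper added above combines a guaranteed minimum settle time with a bounded poll, because poll helpers such as phy_read_mmd_poll_timeout() only cap the maximum initial sleep. A runnable userspace sketch of the same shape, with the hardware simulated and the constants mirroring the patch:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define OP_IN_PROG_SLEEP_US	1000	/* per-iteration sleep */
#define OP_IN_PROG_TIMEOUT_US	100000	/* total budget */

static int busy_polls_left = 20;

static bool op_in_progress(void)	/* stand-in for the MDIO read */
{
	return --busy_polls_left > 0;
}

static void sleep_us(long us)
{
	struct timespec ts = { us / 1000000, (us % 1000000) * 1000 };

	nanosleep(&ts, NULL);
}

static int wait_processor_intensive_op(void)
{
	long waited = 0;

	sleep_us(1000);		/* minimum settle time, unconditionally */

	while (op_in_progress()) {
		if (waited >= OP_IN_PROG_TIMEOUT_US)
			return -1;	/* -ETIMEDOUT in the kernel version */
		sleep_us(OP_IN_PROG_SLEEP_US);
		waited += OP_IN_PROG_SLEEP_US;
	}
	return 0;
}

int main(void)
{
	printf("wait: %s\n", wait_processor_intensive_op() ? "timeout" : "done");
	return 0;
}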
+diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
+index 34483a4bd688a..e8e1101911b2f 100644
+--- a/drivers/net/phy/micrel.c
++++ b/drivers/net/phy/micrel.c
+@@ -2662,16 +2662,19 @@ static int lan8804_config_init(struct phy_device *phydev)
+ static irqreturn_t lan8814_handle_interrupt(struct phy_device *phydev)
+ {
+ int irq_status, tsu_irq_status;
++ int ret = IRQ_NONE;
+
+ irq_status = phy_read(phydev, LAN8814_INTS);
+- if (irq_status > 0 && (irq_status & LAN8814_INT_LINK))
+- phy_trigger_machine(phydev);
+-
+ if (irq_status < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
++ if (irq_status & LAN8814_INT_LINK) {
++ phy_trigger_machine(phydev);
++ ret = IRQ_HANDLED;
++ }
++
+ while (1) {
+ tsu_irq_status = lanphy_read_page_reg(phydev, 4,
+ LAN8814_INTR_STS_REG);
+@@ -2680,12 +2683,15 @@ static irqreturn_t lan8814_handle_interrupt(struct phy_device *phydev)
+ (tsu_irq_status & (LAN8814_INTR_STS_REG_1588_TSU0_ |
+ LAN8814_INTR_STS_REG_1588_TSU1_ |
+ LAN8814_INTR_STS_REG_1588_TSU2_ |
+- LAN8814_INTR_STS_REG_1588_TSU3_)))
++ LAN8814_INTR_STS_REG_1588_TSU3_))) {
+ lan8814_handle_ptp_interrupt(phydev);
+- else
++ ret = IRQ_HANDLED;
++ } else {
+ break;
++ }
+ }
+- return IRQ_HANDLED;
++
++ return ret;
+ }
+
+ static int lan8814_ack_interrupt(struct phy_device *phydev)
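The lan8814 change is about the irqreturn_t contract: a handler on a (potentially shared) line should report IRQ_HANDLED only when it actually serviced its device, so the core can detect stuck lines. A compact sketch of the corrected control flow, with toy status bits standing in for the PHY registers:

#include <stdio.h>

enum irqreturn { IRQ_NONE, IRQ_HANDLED };

#define INT_LINK	0x01
#define INT_PTP		0x02

static enum irqreturn handle_interrupt(int status)
{
	enum irqreturn ret = IRQ_NONE;	/* pessimistic default */

	if (status < 0)
		return IRQ_NONE;	/* register read failed: not ours */

	if (status & INT_LINK)
		ret = IRQ_HANDLED;	/* serviced the link event */

	while (status & INT_PTP) {
		status &= ~INT_PTP;	/* pretend we drained one event */
		ret = IRQ_HANDLED;
	}

	return ret;	/* stays IRQ_NONE if nothing matched */
}

int main(void)
{
	printf("%d %d\n", handle_interrupt(0), handle_interrupt(INT_LINK));
	return 0;
}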
+diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
+index b07dde6f0abf2..b9899913d2467 100644
+--- a/drivers/net/team/team.c
++++ b/drivers/net/team/team.c
+@@ -1275,10 +1275,12 @@ static int team_port_add(struct team *team, struct net_device *port_dev,
+ }
+ }
+
+- netif_addr_lock_bh(dev);
+- dev_uc_sync_multiple(port_dev, dev);
+- dev_mc_sync_multiple(port_dev, dev);
+- netif_addr_unlock_bh(dev);
++ if (dev->flags & IFF_UP) {
++ netif_addr_lock_bh(dev);
++ dev_uc_sync_multiple(port_dev, dev);
++ dev_mc_sync_multiple(port_dev, dev);
++ netif_addr_unlock_bh(dev);
++ }
+
+ port->index = -1;
+ list_add_tail_rcu(&port->list, &team->port_list);
+@@ -1349,8 +1351,10 @@ static int team_port_del(struct team *team, struct net_device *port_dev)
+ netdev_rx_handler_unregister(port_dev);
+ team_port_disable_netpoll(port);
+ vlan_vids_del_by_dev(port_dev, dev);
+- dev_uc_unsync(port_dev, dev);
+- dev_mc_unsync(port_dev, dev);
++ if (dev->flags & IFF_UP) {
++ dev_uc_unsync(port_dev, dev);
++ dev_mc_unsync(port_dev, dev);
++ }
+ dev_close(port_dev);
+ team_port_leave(team, port);
+
+@@ -1700,6 +1704,14 @@ static int team_open(struct net_device *dev)
+
+ static int team_close(struct net_device *dev)
+ {
++ struct team *team = netdev_priv(dev);
++ struct team_port *port;
++
++ list_for_each_entry(port, &team->port_list, list) {
++ dev_uc_unsync(port->dev, dev);
++ dev_mc_unsync(port->dev, dev);
++ }
++
+ return 0;
+ }
+
+diff --git a/drivers/net/wireguard/netlink.c b/drivers/net/wireguard/netlink.c
+index d0f3b6d7f4089..5c804bcabfe6b 100644
+--- a/drivers/net/wireguard/netlink.c
++++ b/drivers/net/wireguard/netlink.c
+@@ -436,14 +436,13 @@ static int set_peer(struct wg_device *wg, struct nlattr **attrs)
+ if (attrs[WGPEER_A_ENDPOINT]) {
+ struct sockaddr *addr = nla_data(attrs[WGPEER_A_ENDPOINT]);
+ size_t len = nla_len(attrs[WGPEER_A_ENDPOINT]);
++ struct endpoint endpoint = { { { 0 } } };
+
+- if ((len == sizeof(struct sockaddr_in) &&
+- addr->sa_family == AF_INET) ||
+- (len == sizeof(struct sockaddr_in6) &&
+- addr->sa_family == AF_INET6)) {
+- struct endpoint endpoint = { { { 0 } } };
+-
+- memcpy(&endpoint.addr, addr, len);
++ if (len == sizeof(struct sockaddr_in) && addr->sa_family == AF_INET) {
++ endpoint.addr4 = *(struct sockaddr_in *)addr;
++ wg_socket_set_peer_endpoint(peer, &endpoint);
++ } else if (len == sizeof(struct sockaddr_in6) && addr->sa_family == AF_INET6) {
++ endpoint.addr6 = *(struct sockaddr_in6 *)addr;
+ wg_socket_set_peer_endpoint(peer, &endpoint);
+ }
+ }
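The wireguard hunk validates both the attribute length and sa_family, then copies through the correctly typed union member rather than memcpy()ing a caller-supplied length. A userspace sketch of that tagged-union copy (the endpoint type is re-declared locally for the example):

#include <netinet/in.h>
#include <stdio.h>
#include <sys/socket.h>

union endpoint_addr {
	struct sockaddr sa;
	struct sockaddr_in addr4;
	struct sockaddr_in6 addr6;
};

static int set_endpoint(union endpoint_addr *ep,
			const struct sockaddr *addr, size_t len)
{
	if (len == sizeof(struct sockaddr_in) && addr->sa_family == AF_INET) {
		ep->addr4 = *(const struct sockaddr_in *)addr;
		return 0;
	}
	if (len == sizeof(struct sockaddr_in6) && addr->sa_family == AF_INET6) {
		ep->addr6 = *(const struct sockaddr_in6 *)addr;
		return 0;
	}
	return -1;	/* mismatched length/family: silently ignored */
}

int main(void)
{
	struct sockaddr_in in4 = { .sin_family = AF_INET };
	union endpoint_addr ep = { 0 };

	printf("%d\n", set_endpoint(&ep, (struct sockaddr *)&in4,
				    sizeof(in4)));
	return 0;
}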
+diff --git a/drivers/net/wireguard/selftest/ratelimiter.c b/drivers/net/wireguard/selftest/ratelimiter.c
+index ba87d294604fe..d4bb40a695ab6 100644
+--- a/drivers/net/wireguard/selftest/ratelimiter.c
++++ b/drivers/net/wireguard/selftest/ratelimiter.c
+@@ -6,29 +6,28 @@
+ #ifdef DEBUG
+
+ #include <linux/jiffies.h>
+-#include <linux/hrtimer.h>
+
+ static const struct {
+ bool result;
+- u64 nsec_to_sleep_before;
++ unsigned int msec_to_sleep_before;
+ } expected_results[] __initconst = {
+ [0 ... PACKETS_BURSTABLE - 1] = { true, 0 },
+ [PACKETS_BURSTABLE] = { false, 0 },
+- [PACKETS_BURSTABLE + 1] = { true, NSEC_PER_SEC / PACKETS_PER_SECOND },
++ [PACKETS_BURSTABLE + 1] = { true, MSEC_PER_SEC / PACKETS_PER_SECOND },
+ [PACKETS_BURSTABLE + 2] = { false, 0 },
+- [PACKETS_BURSTABLE + 3] = { true, (NSEC_PER_SEC / PACKETS_PER_SECOND) * 2 },
++ [PACKETS_BURSTABLE + 3] = { true, (MSEC_PER_SEC / PACKETS_PER_SECOND) * 2 },
+ [PACKETS_BURSTABLE + 4] = { true, 0 },
+ [PACKETS_BURSTABLE + 5] = { false, 0 }
+ };
+
+ static __init unsigned int maximum_jiffies_at_index(int index)
+ {
+- u64 total_nsecs = 2 * NSEC_PER_SEC / PACKETS_PER_SECOND / 3;
++ unsigned int total_msecs = 2 * MSEC_PER_SEC / PACKETS_PER_SECOND / 3;
+ int i;
+
+ for (i = 0; i <= index; ++i)
+- total_nsecs += expected_results[i].nsec_to_sleep_before;
+- return nsecs_to_jiffies(total_nsecs);
++ total_msecs += expected_results[i].msec_to_sleep_before;
++ return msecs_to_jiffies(total_msecs);
+ }
+
+ static __init int timings_test(struct sk_buff *skb4, struct iphdr *hdr4,
+@@ -43,12 +42,8 @@ static __init int timings_test(struct sk_buff *skb4, struct iphdr *hdr4,
+ loop_start_time = jiffies;
+
+ for (i = 0; i < ARRAY_SIZE(expected_results); ++i) {
+- if (expected_results[i].nsec_to_sleep_before) {
+- ktime_t timeout = ktime_add(ktime_add_ns(ktime_get_coarse_boottime(), TICK_NSEC * 4 / 3),
+- ns_to_ktime(expected_results[i].nsec_to_sleep_before));
+- set_current_state(TASK_UNINTERRUPTIBLE);
+- schedule_hrtimeout_range_clock(&timeout, 0, HRTIMER_MODE_ABS, CLOCK_BOOTTIME);
+- }
++ if (expected_results[i].msec_to_sleep_before)
++ msleep(expected_results[i].msec_to_sleep_before);
+
+ if (time_is_before_jiffies(loop_start_time +
+ maximum_jiffies_at_index(i)))
+@@ -132,7 +127,7 @@ bool __init wg_ratelimiter_selftest(void)
+ if (IS_ENABLED(CONFIG_KASAN) || IS_ENABLED(CONFIG_UBSAN))
+ return true;
+
+- BUILD_BUG_ON(NSEC_PER_SEC % PACKETS_PER_SECOND != 0);
++ BUILD_BUG_ON(MSEC_PER_SEC % PACKETS_PER_SECOND != 0);
+
+ if (wg_ratelimiter_init())
+ goto out;
+@@ -172,7 +167,7 @@ bool __init wg_ratelimiter_selftest(void)
+ ++test;
+ #endif
+
+- for (trials = TRIALS_BEFORE_GIVING_UP;;) {
++ for (trials = TRIALS_BEFORE_GIVING_UP; IS_ENABLED(DEBUG_RATELIMITER_TIMINGS);) {
+ int test_count = 0, ret;
+
+ ret = timings_test(skb4, hdr4, skb6, hdr6, &test_count);
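For the ratelimiter selftest, the msec arithmetic can be checked by hand. The sketch below recomputes one refill interval and the jiffy budget used by maximum_jiffies_at_index(); the PACKETS_PER_SECOND and HZ values are assumptions for illustration, since the real constants live in ratelimiter.c:

#include <stdio.h>

#define MSEC_PER_SEC		1000
#define PACKETS_PER_SECOND	20	/* assumed */
#define PACKETS_BURSTABLE	5	/* assumed */
#define HZ			100	/* assumed */

static unsigned int msecs_to_jiffies(unsigned int ms)
{
	return (ms * HZ + MSEC_PER_SEC - 1) / MSEC_PER_SEC;	/* round up */
}

int main(void)
{
	/* One refill interval: 1000 / 20 = 50 ms per packet. */
	unsigned int interval = MSEC_PER_SEC / PACKETS_PER_SECOND;

	/* Slack of 2/3 of an interval, as in maximum_jiffies_at_index(). */
	unsigned int budget = 2 * MSEC_PER_SEC / PACKETS_PER_SECOND / 3;

	/* One sleep of a full interval before index PACKETS_BURSTABLE + 1. */
	budget += interval;

	printf("interval=%ums budget=%ums (%u jiffies)\n",
	       interval, budget, msecs_to_jiffies(budget));
	return 0;
}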
+diff --git a/drivers/net/wireless/intel/iwlwifi/Kconfig b/drivers/net/wireless/intel/iwlwifi/Kconfig
+index a647a406b87be..b20409f8c13ab 100644
+--- a/drivers/net/wireless/intel/iwlwifi/Kconfig
++++ b/drivers/net/wireless/intel/iwlwifi/Kconfig
+@@ -140,6 +140,7 @@ config IWLMEI
+ depends on INTEL_MEI
+ depends on PM
+ depends on CFG80211
++ depends on BROKEN
+ help
+ Enables the iwlmei kernel module.
+
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
+index 9e832b27170fe..a4eb025f504f3 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
+@@ -1138,7 +1138,7 @@ u32 mt7615_mac_get_sta_tid_sn(struct mt7615_dev *dev, int wcid, u8 tid)
+ offset %= 32;
+
+ val = mt76_rr(dev, addr);
+- val >>= (tid % 32);
++ val >>= offset;
+
+ if (offset > 20) {
+ addr += 4;
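The mt7615 fix shifts by the bit offset within the 32-bit register word rather than by the TID, since a sequence-number field can straddle a word boundary. A worked userspace model with fabricated register contents (the 12-bit width is an assumption based on the "offset > 20" spill test):

#include <stdint.h>
#include <stdio.h>

#define FIELD_BITS 12

static uint32_t regs[2] = { 0xABC00000, 0x00000005 };	/* fabricated */

static uint32_t read_field(unsigned int bit_offset)
{
	unsigned int word = bit_offset / 32;
	unsigned int offset = bit_offset % 32;
	uint32_t val = regs[word] >> offset;	/* the corrected shift */

	if (offset > 32 - FIELD_BITS)	/* field spills into next word */
		val |= regs[word + 1] << (32 - offset);

	return val & ((1u << FIELD_BITS) - 1);
}

int main(void)
{
	/* Field starting at bit 20 of word 0: the low 12 bits come from
	 * regs[0] >> 20, and nothing spills since 20 <= 32 - 12. */
	printf("0x%03x\n", read_field(20));	/* prints 0xabc */
	return 0;
}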
+diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
+index 629d10fcf53b2..b9f1a8e9f88cb 100644
+--- a/drivers/nvdimm/pmem.c
++++ b/drivers/nvdimm/pmem.c
+@@ -45,7 +45,7 @@ static struct nd_region *to_region(struct pmem_device *pmem)
+ return to_nd_region(to_dev(pmem)->parent);
+ }
+
+-static phys_addr_t to_phys(struct pmem_device *pmem, phys_addr_t offset)
++static phys_addr_t pmem_to_phys(struct pmem_device *pmem, phys_addr_t offset)
+ {
+ return pmem->phys_addr + offset;
+ }
+@@ -63,7 +63,7 @@ static phys_addr_t to_offset(struct pmem_device *pmem, sector_t sector)
+ static void pmem_mkpage_present(struct pmem_device *pmem, phys_addr_t offset,
+ unsigned int len)
+ {
+- phys_addr_t phys = to_phys(pmem, offset);
++ phys_addr_t phys = pmem_to_phys(pmem, offset);
+ unsigned long pfn_start, pfn_end, pfn;
+
+ /* only pmem in the linear map supports HWPoison */
+@@ -97,7 +97,7 @@ static void pmem_clear_bb(struct pmem_device *pmem, sector_t sector, long blks)
+ static long __pmem_clear_poison(struct pmem_device *pmem,
+ phys_addr_t offset, unsigned int len)
+ {
+- phys_addr_t phys = to_phys(pmem, offset);
++ phys_addr_t phys = pmem_to_phys(pmem, offset);
+ long cleared = nvdimm_clear_poison(to_dev(pmem), phys, len);
+
+ if (cleared > 0) {
+diff --git a/drivers/nvme/host/apple.c b/drivers/nvme/host/apple.c
+index d702d7d60235d..2d23b7d41f7e6 100644
+--- a/drivers/nvme/host/apple.c
++++ b/drivers/nvme/host/apple.c
+@@ -1502,7 +1502,7 @@ static int apple_nvme_probe(struct platform_device *pdev)
+
+ if (!blk_get_queue(anv->ctrl.admin_q)) {
+ nvme_start_admin_queue(&anv->ctrl);
+- blk_cleanup_queue(anv->ctrl.admin_q);
++ blk_mq_destroy_queue(anv->ctrl.admin_q);
+ anv->ctrl.admin_q = NULL;
+ ret = -ENODEV;
+ goto put_dev;
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index 2f965356f3453..6d76fc608b741 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -4105,7 +4105,6 @@ static void nvme_ns_remove(struct nvme_ns *ns)
+ if (!nvme_ns_head_multipath(ns->head))
+ nvme_cdev_del(&ns->cdev, &ns->cdev_device);
+ del_gendisk(ns->disk);
+- blk_cleanup_queue(ns->queue);
+
+ down_write(&ns->ctrl->namespaces_rwsem);
+ list_del_init(&ns->list);
+diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
+index 4aff83b1b0c05..9a5ce70d7f215 100644
+--- a/drivers/nvme/host/fc.c
++++ b/drivers/nvme/host/fc.c
+@@ -2392,7 +2392,7 @@ nvme_fc_ctrl_free(struct kref *ref)
+ unsigned long flags;
+
+ if (ctrl->ctrl.tagset) {
+- blk_cleanup_queue(ctrl->ctrl.connect_q);
++ blk_mq_destroy_queue(ctrl->ctrl.connect_q);
+ blk_mq_free_tag_set(&ctrl->tag_set);
+ }
+
+@@ -2402,8 +2402,8 @@ nvme_fc_ctrl_free(struct kref *ref)
+ spin_unlock_irqrestore(&ctrl->rport->lock, flags);
+
+ nvme_start_admin_queue(&ctrl->ctrl);
+- blk_cleanup_queue(ctrl->ctrl.admin_q);
+- blk_cleanup_queue(ctrl->ctrl.fabrics_q);
++ blk_mq_destroy_queue(ctrl->ctrl.admin_q);
++ blk_mq_destroy_queue(ctrl->ctrl.fabrics_q);
+ blk_mq_free_tag_set(&ctrl->admin_tag_set);
+
+ kfree(ctrl->queues);
+@@ -2953,7 +2953,7 @@ nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
+ out_delete_hw_queues:
+ nvme_fc_delete_hw_io_queues(ctrl);
+ out_cleanup_blk_queue:
+- blk_cleanup_queue(ctrl->ctrl.connect_q);
++ blk_mq_destroy_queue(ctrl->ctrl.connect_q);
+ out_free_tag_set:
+ blk_mq_free_tag_set(&ctrl->tag_set);
+ nvme_fc_free_io_queues(ctrl);
+@@ -3642,9 +3642,9 @@ fail_ctrl:
+ return ERR_PTR(-EIO);
+
+ out_cleanup_admin_q:
+- blk_cleanup_queue(ctrl->ctrl.admin_q);
++ blk_mq_destroy_queue(ctrl->ctrl.admin_q);
+ out_cleanup_fabrics_q:
+- blk_cleanup_queue(ctrl->ctrl.fabrics_q);
++ blk_mq_destroy_queue(ctrl->ctrl.fabrics_q);
+ out_free_admin_tag_set:
+ blk_mq_free_tag_set(&ctrl->admin_tag_set);
+ out_free_queues:
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
+index 9f6614f7dbeb1..3516678d37541 100644
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -1760,7 +1760,7 @@ static void nvme_dev_remove_admin(struct nvme_dev *dev)
+ * queue to flush these to completion.
+ */
+ nvme_start_admin_queue(&dev->ctrl);
+- blk_cleanup_queue(dev->ctrl.admin_q);
++ blk_mq_destroy_queue(dev->ctrl.admin_q);
+ blk_mq_free_tag_set(&dev->admin_tagset);
+ }
+ }
+diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
+index 46c2dcf72f7ea..240024dd5d857 100644
+--- a/drivers/nvme/host/rdma.c
++++ b/drivers/nvme/host/rdma.c
+@@ -840,8 +840,8 @@ static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl,
+ bool remove)
+ {
+ if (remove) {
+- blk_cleanup_queue(ctrl->ctrl.admin_q);
+- blk_cleanup_queue(ctrl->ctrl.fabrics_q);
++ blk_mq_destroy_queue(ctrl->ctrl.admin_q);
++ blk_mq_destroy_queue(ctrl->ctrl.fabrics_q);
+ blk_mq_free_tag_set(ctrl->ctrl.admin_tagset);
+ }
+ if (ctrl->async_event_sqe.data) {
+@@ -935,10 +935,10 @@ out_stop_queue:
+ nvme_cancel_admin_tagset(&ctrl->ctrl);
+ out_cleanup_queue:
+ if (new)
+- blk_cleanup_queue(ctrl->ctrl.admin_q);
++ blk_mq_destroy_queue(ctrl->ctrl.admin_q);
+ out_cleanup_fabrics_q:
+ if (new)
+- blk_cleanup_queue(ctrl->ctrl.fabrics_q);
++ blk_mq_destroy_queue(ctrl->ctrl.fabrics_q);
+ out_free_tagset:
+ if (new)
+ blk_mq_free_tag_set(ctrl->ctrl.admin_tagset);
+@@ -957,7 +957,7 @@ static void nvme_rdma_destroy_io_queues(struct nvme_rdma_ctrl *ctrl,
+ bool remove)
+ {
+ if (remove) {
+- blk_cleanup_queue(ctrl->ctrl.connect_q);
++ blk_mq_destroy_queue(ctrl->ctrl.connect_q);
+ blk_mq_free_tag_set(ctrl->ctrl.tagset);
+ }
+ nvme_rdma_free_io_queues(ctrl);
+@@ -1012,7 +1012,7 @@ out_wait_freeze_timed_out:
+ out_cleanup_connect_q:
+ nvme_cancel_tagset(&ctrl->ctrl);
+ if (new)
+- blk_cleanup_queue(ctrl->ctrl.connect_q);
++ blk_mq_destroy_queue(ctrl->ctrl.connect_q);
+ out_free_tag_set:
+ if (new)
+ blk_mq_free_tag_set(ctrl->ctrl.tagset);
+diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
+index daa0e160e1212..d7e5bbdb9b75a 100644
+--- a/drivers/nvme/host/tcp.c
++++ b/drivers/nvme/host/tcp.c
+@@ -1881,7 +1881,7 @@ static void nvme_tcp_destroy_io_queues(struct nvme_ctrl *ctrl, bool remove)
+ {
+ nvme_tcp_stop_io_queues(ctrl);
+ if (remove) {
+- blk_cleanup_queue(ctrl->connect_q);
++ blk_mq_destroy_queue(ctrl->connect_q);
+ blk_mq_free_tag_set(ctrl->tagset);
+ }
+ nvme_tcp_free_io_queues(ctrl);
+@@ -1936,7 +1936,7 @@ out_wait_freeze_timed_out:
+ out_cleanup_connect_q:
+ nvme_cancel_tagset(ctrl);
+ if (new)
+- blk_cleanup_queue(ctrl->connect_q);
++ blk_mq_destroy_queue(ctrl->connect_q);
+ out_free_tag_set:
+ if (new)
+ blk_mq_free_tag_set(ctrl->tagset);
+@@ -1949,8 +1949,8 @@ static void nvme_tcp_destroy_admin_queue(struct nvme_ctrl *ctrl, bool remove)
+ {
+ nvme_tcp_stop_queue(ctrl, 0);
+ if (remove) {
+- blk_cleanup_queue(ctrl->admin_q);
+- blk_cleanup_queue(ctrl->fabrics_q);
++ blk_mq_destroy_queue(ctrl->admin_q);
++ blk_mq_destroy_queue(ctrl->fabrics_q);
+ blk_mq_free_tag_set(ctrl->admin_tagset);
+ }
+ nvme_tcp_free_admin_queue(ctrl);
+@@ -2008,10 +2008,10 @@ out_stop_queue:
+ nvme_cancel_admin_tagset(ctrl);
+ out_cleanup_queue:
+ if (new)
+- blk_cleanup_queue(ctrl->admin_q);
++ blk_mq_destroy_queue(ctrl->admin_q);
+ out_cleanup_fabrics_q:
+ if (new)
+- blk_cleanup_queue(ctrl->fabrics_q);
++ blk_mq_destroy_queue(ctrl->fabrics_q);
+ out_free_tagset:
+ if (new)
+ blk_mq_free_tag_set(ctrl->admin_tagset);
+diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
+index 59024af2da2e3..0f5c77e22a0a9 100644
+--- a/drivers/nvme/target/loop.c
++++ b/drivers/nvme/target/loop.c
+@@ -266,8 +266,8 @@ static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
+ if (!test_and_clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags))
+ return;
+ nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
+- blk_cleanup_queue(ctrl->ctrl.admin_q);
+- blk_cleanup_queue(ctrl->ctrl.fabrics_q);
++ blk_mq_destroy_queue(ctrl->ctrl.admin_q);
++ blk_mq_destroy_queue(ctrl->ctrl.fabrics_q);
+ blk_mq_free_tag_set(&ctrl->admin_tag_set);
+ }
+
+@@ -283,7 +283,7 @@ static void nvme_loop_free_ctrl(struct nvme_ctrl *nctrl)
+ mutex_unlock(&nvme_loop_ctrl_mutex);
+
+ if (nctrl->tagset) {
+- blk_cleanup_queue(ctrl->ctrl.connect_q);
++ blk_mq_destroy_queue(ctrl->ctrl.connect_q);
+ blk_mq_free_tag_set(&ctrl->tag_set);
+ }
+ kfree(ctrl->queues);
+@@ -410,9 +410,9 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
+
+ out_cleanup_queue:
+ clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);
+- blk_cleanup_queue(ctrl->ctrl.admin_q);
++ blk_mq_destroy_queue(ctrl->ctrl.admin_q);
+ out_cleanup_fabrics_q:
+- blk_cleanup_queue(ctrl->ctrl.fabrics_q);
++ blk_mq_destroy_queue(ctrl->ctrl.fabrics_q);
+ out_free_tagset:
+ blk_mq_free_tag_set(&ctrl->admin_tag_set);
+ out_free_sq:
+@@ -554,7 +554,7 @@ static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
+ return 0;
+
+ out_cleanup_connect_q:
+- blk_cleanup_queue(ctrl->ctrl.connect_q);
++ blk_mq_destroy_queue(ctrl->ctrl.connect_q);
+ out_free_tagset:
+ blk_mq_free_tag_set(&ctrl->tag_set);
+ out_destroy_queues:
+diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c
+index 80d8309652a4d..b80a9b74662b1 100644
+--- a/drivers/perf/arm-cmn.c
++++ b/drivers/perf/arm-cmn.c
+@@ -36,7 +36,7 @@
+ #define CMN_CI_CHILD_COUNT GENMASK_ULL(15, 0)
+ #define CMN_CI_CHILD_PTR_OFFSET GENMASK_ULL(31, 16)
+
+-#define CMN_CHILD_NODE_ADDR GENMASK(27, 0)
++#define CMN_CHILD_NODE_ADDR GENMASK(29, 0)
+ #define CMN_CHILD_NODE_EXTERNAL BIT(31)
+
+ #define CMN_MAX_DIMENSION 12
+diff --git a/drivers/phy/marvell/phy-mvebu-a3700-comphy.c b/drivers/phy/marvell/phy-mvebu-a3700-comphy.c
+index a4d7d9bd100d3..67712c77d806f 100644
+--- a/drivers/phy/marvell/phy-mvebu-a3700-comphy.c
++++ b/drivers/phy/marvell/phy-mvebu-a3700-comphy.c
+@@ -274,7 +274,6 @@ struct mvebu_a3700_comphy_lane {
+ int submode;
+ bool invert_tx;
+ bool invert_rx;
+- bool needs_reset;
+ };
+
+ struct gbe_phy_init_data_fix {
+@@ -1097,40 +1096,12 @@ mvebu_a3700_comphy_pcie_power_off(struct mvebu_a3700_comphy_lane *lane)
+ 0x0, PU_PLL_BIT | PU_RX_BIT | PU_TX_BIT);
+ }
+
+-static int mvebu_a3700_comphy_reset(struct phy *phy)
++static void mvebu_a3700_comphy_usb3_power_off(struct mvebu_a3700_comphy_lane *lane)
+ {
+- struct mvebu_a3700_comphy_lane *lane = phy_get_drvdata(phy);
+- u16 mask, data;
+-
+- dev_dbg(lane->dev, "resetting lane %d\n", lane->id);
+-
+- /* COMPHY reset for internal logic */
+- comphy_lane_reg_set(lane, COMPHY_SFT_RESET,
+- SFT_RST_NO_REG, SFT_RST_NO_REG);
+-
+- /* COMPHY register reset (cleared automatically) */
+- comphy_lane_reg_set(lane, COMPHY_SFT_RESET, SFT_RST, SFT_RST);
+-
+- /* PIPE soft and register reset */
+- data = PIPE_SOFT_RESET | PIPE_REG_RESET;
+- mask = data;
+- comphy_lane_reg_set(lane, COMPHY_PIPE_RST_CLK_CTRL, data, mask);
+-
+- /* Release PIPE register reset */
+- comphy_lane_reg_set(lane, COMPHY_PIPE_RST_CLK_CTRL,
+- 0x0, PIPE_REG_RESET);
+-
+- /* Reset SB configuration register (only for lanes 0 and 1) */
+- if (lane->id == 0 || lane->id == 1) {
+- u32 mask, data;
+-
+- data = PIN_RESET_CORE_BIT | PIN_RESET_COMPHY_BIT |
+- PIN_PU_PLL_BIT | PIN_PU_RX_BIT | PIN_PU_TX_BIT;
+- mask = data | PIN_PU_IVREF_BIT | PIN_TX_IDLE_BIT;
+- comphy_periph_reg_set(lane, COMPHY_PHY_CFG1, data, mask);
+- }
+-
+- return 0;
++ /*
++ * The USB3 MAC sets the USB3 PHY to low state, so we do not
++ * need to power off USB3 PHY again.
++ */
+ }
+
+ static bool mvebu_a3700_comphy_check_mode(int lane,
+@@ -1171,10 +1142,6 @@ static int mvebu_a3700_comphy_set_mode(struct phy *phy, enum phy_mode mode,
+ (lane->mode != mode || lane->submode != submode))
+ return -EBUSY;
+
+- /* If changing mode, ensure reset is called */
+- if (lane->mode != PHY_MODE_INVALID && lane->mode != mode)
+- lane->needs_reset = true;
+-
+ /* Just remember the mode, ->power_on() will do the real setup */
+ lane->mode = mode;
+ lane->submode = submode;
+@@ -1185,7 +1152,6 @@ static int mvebu_a3700_comphy_set_mode(struct phy *phy, enum phy_mode mode,
+ static int mvebu_a3700_comphy_power_on(struct phy *phy)
+ {
+ struct mvebu_a3700_comphy_lane *lane = phy_get_drvdata(phy);
+- int ret;
+
+ if (!mvebu_a3700_comphy_check_mode(lane->id, lane->mode,
+ lane->submode)) {
+@@ -1193,14 +1159,6 @@ static int mvebu_a3700_comphy_power_on(struct phy *phy)
+ return -EINVAL;
+ }
+
+- if (lane->needs_reset) {
+- ret = mvebu_a3700_comphy_reset(phy);
+- if (ret)
+- return ret;
+-
+- lane->needs_reset = false;
+- }
+-
+ switch (lane->mode) {
+ case PHY_MODE_USB_HOST_SS:
+ dev_dbg(lane->dev, "set lane %d to USB3 host mode\n", lane->id);
+@@ -1224,38 +1182,28 @@ static int mvebu_a3700_comphy_power_off(struct phy *phy)
+ {
+ struct mvebu_a3700_comphy_lane *lane = phy_get_drvdata(phy);
+
+- switch (lane->mode) {
+- case PHY_MODE_USB_HOST_SS:
+- /*
+- * The USB3 MAC sets the USB3 PHY to low state, so we do not
+- * need to power off USB3 PHY again.
+- */
+- break;
+-
+- case PHY_MODE_SATA:
+- mvebu_a3700_comphy_sata_power_off(lane);
+- break;
+-
+- case PHY_MODE_ETHERNET:
++ switch (lane->id) {
++ case 0:
++ mvebu_a3700_comphy_usb3_power_off(lane);
+ mvebu_a3700_comphy_ethernet_power_off(lane);
+- break;
+-
+- case PHY_MODE_PCIE:
++ return 0;
++ case 1:
+ mvebu_a3700_comphy_pcie_power_off(lane);
+- break;
+-
++ mvebu_a3700_comphy_ethernet_power_off(lane);
++ return 0;
++ case 2:
++ mvebu_a3700_comphy_usb3_power_off(lane);
++ mvebu_a3700_comphy_sata_power_off(lane);
++ return 0;
+ default:
+ dev_err(lane->dev, "invalid COMPHY mode\n");
+ return -EINVAL;
+ }
+-
+- return 0;
+ }
+
+ static const struct phy_ops mvebu_a3700_comphy_ops = {
+ .power_on = mvebu_a3700_comphy_power_on,
+ .power_off = mvebu_a3700_comphy_power_off,
+- .reset = mvebu_a3700_comphy_reset,
+ .set_mode = mvebu_a3700_comphy_set_mode,
+ .owner = THIS_MODULE,
+ };
+@@ -1393,8 +1341,7 @@ static int mvebu_a3700_comphy_probe(struct platform_device *pdev)
+ * To avoid relying on the bootloader/firmware configuration,
+ * power off all comphys.
+ */
+- mvebu_a3700_comphy_reset(phy);
+- lane->needs_reset = false;
++ mvebu_a3700_comphy_power_off(phy);
+ }
+
+ provider = devm_of_phy_provider_register(&pdev->dev,
+diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
+index ba6d787896606..e8489331f12b8 100644
+--- a/drivers/s390/block/dasd.c
++++ b/drivers/s390/block/dasd.c
+@@ -3280,7 +3280,7 @@ static int dasd_alloc_queue(struct dasd_block *block)
+ static void dasd_free_queue(struct dasd_block *block)
+ {
+ if (block->request_queue) {
+- blk_cleanup_queue(block->request_queue);
++ blk_mq_destroy_queue(block->request_queue);
+ blk_mq_free_tag_set(&block->tag_set);
+ block->request_queue = NULL;
+ }
+diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
+index dc78a523a69f2..b6b938aa66158 100644
+--- a/drivers/s390/block/dasd_alias.c
++++ b/drivers/s390/block/dasd_alias.c
+@@ -675,12 +675,12 @@ int dasd_alias_remove_device(struct dasd_device *device)
+ struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *base_device)
+ {
+ struct dasd_eckd_private *alias_priv, *private = base_device->private;
+- struct alias_pav_group *group = private->pavgroup;
+ struct alias_lcu *lcu = private->lcu;
+ struct dasd_device *alias_device;
++ struct alias_pav_group *group;
+ unsigned long flags;
+
+- if (!group || !lcu)
++ if (!lcu)
+ return NULL;
+ if (lcu->pav == NO_PAV ||
+ lcu->flags & (NEED_UAC_UPDATE | UPDATE_PENDING))
+@@ -697,6 +697,11 @@ struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *base_device)
+ }
+
+ spin_lock_irqsave(&lcu->lock, flags);
++ group = private->pavgroup;
++ if (!group) {
++ spin_unlock_irqrestore(&lcu->lock, flags);
++ return NULL;
++ }
+ alias_device = group->next;
+ if (!alias_device) {
+ if (list_empty(&group->aliaslist)) {
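The dasd_alias change moves the private->pavgroup load inside the LCU lock, since the pointer can be cleared concurrently. A minimal pthread sketch of the load-under-lock rule (a mutex stands in for the kernel spinlock):

#include <pthread.h>
#include <stdio.h>

struct group { int next; };

static pthread_mutex_t lcu_lock = PTHREAD_MUTEX_INITIALIZER;
static struct group *pavgroup;	/* may be set/cleared concurrently */

static int get_start_dev(void)
{
	struct group *grp;

	pthread_mutex_lock(&lcu_lock);
	grp = pavgroup;		/* load under the lock, not before */
	if (!grp) {
		pthread_mutex_unlock(&lcu_lock);
		return -1;	/* raced with teardown: bail out */
	}
	printf("next=%d\n", grp->next);
	pthread_mutex_unlock(&lcu_lock);
	return 0;
}

int main(void)
{
	struct group g = { .next = 42 };

	get_start_dev();	/* pavgroup == NULL: returns -1 safely */
	pavgroup = &g;
	return get_start_dev();
}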
+diff --git a/drivers/s390/block/dasd_genhd.c b/drivers/s390/block/dasd_genhd.c
+index a7a33ebf4bbe9..5a83f0a39901b 100644
+--- a/drivers/s390/block/dasd_genhd.c
++++ b/drivers/s390/block/dasd_genhd.c
+@@ -41,8 +41,8 @@ int dasd_gendisk_alloc(struct dasd_block *block)
+ if (base->devindex >= DASD_PER_MAJOR)
+ return -EBUSY;
+
+- gdp = __alloc_disk_node(block->request_queue, NUMA_NO_NODE,
+- &dasd_bio_compl_lkclass);
++ gdp = blk_mq_alloc_disk_for_queue(block->request_queue,
++ &dasd_bio_compl_lkclass);
+ if (!gdp)
+ return -ENOMEM;
+
+diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
+index 8352f90d997df..ae9a107c520d0 100644
+--- a/drivers/scsi/hosts.c
++++ b/drivers/scsi/hosts.c
+@@ -182,6 +182,15 @@ void scsi_remove_host(struct Scsi_Host *shost)
+ mutex_unlock(&shost->scan_mutex);
+ scsi_proc_host_rm(shost);
+
++ /*
++ * New SCSI devices cannot be attached anymore because of the SCSI host
++ * state so drop the tag set refcnt. Wait until the tag set refcnt drops
++ * to zero because .exit_cmd_priv implementations may need the host
++ * pointer.
++ */
++ kref_put(&shost->tagset_refcnt, scsi_mq_free_tags);
++ wait_for_completion(&shost->tagset_freed);
++
+ spin_lock_irqsave(shost->host_lock, flags);
+ if (scsi_host_set_state(shost, SHOST_DEL))
+ BUG_ON(scsi_host_set_state(shost, SHOST_DEL_RECOVERY));
+@@ -240,6 +249,9 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
+ if (error)
+ goto fail;
+
++ kref_init(&shost->tagset_refcnt);
++ init_completion(&shost->tagset_freed);
++
+ /*
+ * Increase usage count temporarily here so that calling
+ * scsi_autopm_put_host() will trigger runtime idle if there is
+@@ -312,6 +324,7 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
+ pm_runtime_disable(&shost->shost_gendev);
+ pm_runtime_set_suspended(&shost->shost_gendev);
+ pm_runtime_put_noidle(&shost->shost_gendev);
++ kref_put(&shost->tagset_refcnt, scsi_mq_free_tags);
+ fail:
+ return error;
+ }
+@@ -345,9 +358,6 @@ static void scsi_host_dev_release(struct device *dev)
+ kfree(dev_name(&shost->shost_dev));
+ }
+
+- if (shost->tag_set.tags)
+- scsi_mq_destroy_tags(shost);
+-
+ kfree(shost->shost_data);
+
+ ida_simple_remove(&host_index_ida, shost->host_no);
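The SCSI host hunks replace an unconditional teardown with reference counting: each device queue pins the tag set, scsi_remove_host() drops the host's own reference, and a completion lets it wait for the last user. A single-threaded sketch of that kref-plus-completion scheme, with a flag standing in for the completion:

#include <stdbool.h>
#include <stdio.h>

struct host {
	int tagset_refcnt;
	bool tagset_freed;	/* stands in for the completion */
};

static void free_tags(struct host *h)	/* the kref release callback */
{
	printf("tag set freed\n");
	h->tagset_freed = true;		/* complete(&tagset_freed) */
}

static void tagset_put(struct host *h)	/* kref_put(..., free_tags) */
{
	if (--h->tagset_refcnt == 0)
		free_tags(h);
}

int main(void)
{
	struct host h = { .tagset_refcnt = 1 };	/* kref_init() */

	h.tagset_refcnt++;	/* scsi_alloc_sdev(): kref_get() */

	tagset_put(&h);		/* scsi_remove_host() drops its ref */
	/* wait_for_completion() would block here until... */
	tagset_put(&h);		/* __scsi_remove_device() drops the last ref */

	printf("freed=%d\n", h.tagset_freed);
	return 0;
}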
+diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
+index 9a1ae52bb621d..a6d3471a61057 100644
+--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
++++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
+@@ -2993,7 +2993,7 @@ _base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
+
+ if (ioc->is_mcpu_endpoint ||
+ sizeof(dma_addr_t) == 4 || ioc->use_32bit_dma ||
+- dma_get_required_mask(&pdev->dev) <= 32)
++ dma_get_required_mask(&pdev->dev) <= DMA_BIT_MASK(32))
+ ioc->dma_mask = 32;
+ /* Set 63 bit DMA mask for all SAS3 and SAS35 controllers */
+ else if (ioc->hba_mpi_version_belonged > MPI2_VERSION)
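The mpt3sas one-liner is a units bug: dma_get_required_mask() returns a bit mask, not a bit count, so comparing it against the integer 32 was effectively never true. A tiny check, with DMA_BIT_MASK() re-declared as in linux/dma-mapping.h:

#include <stdint.h>
#include <stdio.h>

#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

int main(void)
{
	uint64_t required = DMA_BIT_MASK(32);	/* pretend 4 GiB suffices */

	/* Old test: mask <= 32 -> false even for small masks. */
	printf("old: %d\n", required <= 32);

	/* Fixed test: mask <= mask-of-32-bits -> true as intended. */
	printf("new: %d\n", required <= DMA_BIT_MASK(32));
	return 0;
}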
+diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
+index 62666df1a59eb..4acff4e84b909 100644
+--- a/drivers/scsi/qla2xxx/qla_target.c
++++ b/drivers/scsi/qla2xxx/qla_target.c
+@@ -2151,8 +2151,10 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
+
+ abort_cmd = ha->tgt.tgt_ops->find_cmd_by_tag(sess,
+ le32_to_cpu(abts->exchange_addr_to_abort));
+- if (!abort_cmd)
++ if (!abort_cmd) {
++ mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
+ return -EIO;
++ }
+ mcmd->unpacked_lun = abort_cmd->se_cmd.orig_fe_lun;
+
+ if (abort_cmd->qpair) {
+diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
+index f5c876d03c1ad..7e990f7a9f164 100644
+--- a/drivers/scsi/scsi_lib.c
++++ b/drivers/scsi/scsi_lib.c
+@@ -168,7 +168,7 @@ static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, bool unbusy)
+ * Requeue this command. It will go before all other commands
+ * that are already in the queue. Schedule requeue work under
+ * lock such that the kblockd_schedule_work() call happens
+- * before blk_cleanup_queue() finishes.
++ * before blk_mq_destroy_queue() finishes.
+ */
+ cmd->result = 0;
+
+@@ -429,9 +429,9 @@ static void scsi_starved_list_run(struct Scsi_Host *shost)
+ * it and the queue. Mitigate by taking a reference to the
+ * queue and never touching the sdev again after we drop the
+ * host lock. Note: if __scsi_remove_device() invokes
+- * blk_cleanup_queue() before the queue is run from this
++ * blk_mq_destroy_queue() before the queue is run from this
+ * function then blk_run_queue() will return immediately since
+- * blk_cleanup_queue() marks the queue with QUEUE_FLAG_DYING.
++ * blk_mq_destroy_queue() marks the queue with QUEUE_FLAG_DYING.
+ */
+ slq = sdev->request_queue;
+ if (!blk_get_queue(slq))
+@@ -1995,9 +1995,13 @@ int scsi_mq_setup_tags(struct Scsi_Host *shost)
+ return blk_mq_alloc_tag_set(tag_set);
+ }
+
+-void scsi_mq_destroy_tags(struct Scsi_Host *shost)
++void scsi_mq_free_tags(struct kref *kref)
+ {
++ struct Scsi_Host *shost = container_of(kref, typeof(*shost),
++ tagset_refcnt);
++
+ blk_mq_free_tag_set(&shost->tag_set);
++ complete(&shost->tagset_freed);
+ }
+
+ /**
+diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
+index 5c4786310a31d..a0ee31d55f5f1 100644
+--- a/drivers/scsi/scsi_priv.h
++++ b/drivers/scsi/scsi_priv.h
+@@ -94,7 +94,7 @@ extern void scsi_run_host_queues(struct Scsi_Host *shost);
+ extern void scsi_requeue_run_queue(struct work_struct *work);
+ extern void scsi_start_queue(struct scsi_device *sdev);
+ extern int scsi_mq_setup_tags(struct Scsi_Host *shost);
+-extern void scsi_mq_destroy_tags(struct Scsi_Host *shost);
++extern void scsi_mq_free_tags(struct kref *kref);
+ extern void scsi_exit_queue(void);
+ extern void scsi_evt_thread(struct work_struct *work);
+
+diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
+index 91ac901a66826..5d27f5196de6f 100644
+--- a/drivers/scsi/scsi_scan.c
++++ b/drivers/scsi/scsi_scan.c
+@@ -340,6 +340,7 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
+ kfree(sdev);
+ goto out;
+ }
++ kref_get(&sdev->host->tagset_refcnt);
+ sdev->request_queue = q;
+ q->queuedata = sdev;
+ __scsi_init_queue(sdev->host, q);
+diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
+index 43949798a2e47..5d61f58399dca 100644
+--- a/drivers/scsi/scsi_sysfs.c
++++ b/drivers/scsi/scsi_sysfs.c
+@@ -1475,7 +1475,8 @@ void __scsi_remove_device(struct scsi_device *sdev)
+ scsi_device_set_state(sdev, SDEV_DEL);
+ mutex_unlock(&sdev->state_mutex);
+
+- blk_cleanup_queue(sdev->request_queue);
++ blk_mq_destroy_queue(sdev->request_queue);
++ kref_put(&sdev->host->tagset_refcnt, scsi_mq_free_tags);
+ cancel_work_sync(&sdev->requeue_work);
+
+ if (sdev->host->hostt->slave_destroy)
+diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
+index a1a2ac09066fd..cb587e488601c 100644
+--- a/drivers/scsi/sd.c
++++ b/drivers/scsi/sd.c
+@@ -3440,8 +3440,8 @@ static int sd_probe(struct device *dev)
+ if (!sdkp)
+ goto out;
+
+- gd = __alloc_disk_node(sdp->request_queue, NUMA_NO_NODE,
+- &sd_bio_compl_lkclass);
++ gd = blk_mq_alloc_disk_for_queue(sdp->request_queue,
++ &sd_bio_compl_lkclass);
+ if (!gd)
+ goto out_free;
+
+diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
+index 32d3b8274f148..a278b739d0c5f 100644
+--- a/drivers/scsi/sr.c
++++ b/drivers/scsi/sr.c
+@@ -624,8 +624,8 @@ static int sr_probe(struct device *dev)
+ if (!cd)
+ goto fail;
+
+- disk = __alloc_disk_node(sdev->request_queue, NUMA_NO_NODE,
+- &sr_bio_compl_lkclass);
++ disk = blk_mq_alloc_disk_for_queue(sdev->request_queue,
++ &sr_bio_compl_lkclass);
+ if (!disk)
+ goto fail_free;
+ mutex_init(&cd->lock);
+diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c
+index fff0c740c8f33..6f088dd0ba4f3 100644
+--- a/drivers/thunderbolt/icm.c
++++ b/drivers/thunderbolt/icm.c
+@@ -2527,6 +2527,7 @@ struct tb *icm_probe(struct tb_nhi *nhi)
+ tb->cm_ops = &icm_icl_ops;
+ break;
+
++ case PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_2C_NHI:
+ case PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_4C_NHI:
+ icm->is_supported = icm_tgl_is_supported;
+ icm->get_mode = icm_ar_get_mode;
+diff --git a/drivers/thunderbolt/nhi.h b/drivers/thunderbolt/nhi.h
+index 69083aab2736c..5091677b3f4ba 100644
+--- a/drivers/thunderbolt/nhi.h
++++ b/drivers/thunderbolt/nhi.h
+@@ -55,6 +55,7 @@ extern const struct tb_nhi_ops icl_nhi_ops;
+ * need for the PCI quirk anymore as we will use ICM also on Apple
+ * hardware.
+ */
++#define PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_2C_NHI 0x1134
+ #define PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_4C_NHI 0x1137
+ #define PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_NHI 0x157d
+ #define PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_BRIDGE 0x157e
+diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
+index 2945c1b890880..cb83c66bd8a82 100644
+--- a/drivers/tty/serial/fsl_lpuart.c
++++ b/drivers/tty/serial/fsl_lpuart.c
+@@ -2706,14 +2706,15 @@ static int lpuart_probe(struct platform_device *pdev)
+ lpuart_reg.cons = LPUART_CONSOLE;
+ handler = lpuart_int;
+ }
+- ret = uart_add_one_port(&lpuart_reg, &sport->port);
+- if (ret)
+- goto failed_attach_port;
+
+ ret = lpuart_global_reset(sport);
+ if (ret)
+ goto failed_reset;
+
++ ret = uart_add_one_port(&lpuart_reg, &sport->port);
++ if (ret)
++ goto failed_attach_port;
++
+ ret = uart_get_rs485_mode(&sport->port);
+ if (ret)
+ goto failed_get_rs485;
+@@ -2736,9 +2737,9 @@ static int lpuart_probe(struct platform_device *pdev)
+
+ failed_irq_request:
+ failed_get_rs485:
+-failed_reset:
+ uart_remove_one_port(&lpuart_reg, &sport->port);
+ failed_attach_port:
++failed_reset:
+ lpuart_disable_clks(sport);
+ return ret;
+ }
+diff --git a/drivers/tty/serial/serial-tegra.c b/drivers/tty/serial/serial-tegra.c
+index d942ab152f5a4..24aa1dcc5ef7a 100644
+--- a/drivers/tty/serial/serial-tegra.c
++++ b/drivers/tty/serial/serial-tegra.c
+@@ -525,7 +525,7 @@ static void tegra_uart_tx_dma_complete(void *args)
+ count = tup->tx_bytes_requested - state.residue;
+ async_tx_ack(tup->tx_dma_desc);
+ spin_lock_irqsave(&tup->uport.lock, flags);
+- xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
++ uart_xmit_advance(&tup->uport, count);
+ tup->tx_in_progress = 0;
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(&tup->uport);
+@@ -613,7 +613,6 @@ static unsigned int tegra_uart_tx_empty(struct uart_port *u)
+ static void tegra_uart_stop_tx(struct uart_port *u)
+ {
+ struct tegra_uart_port *tup = to_tegra_uport(u);
+- struct circ_buf *xmit = &tup->uport.state->xmit;
+ struct dma_tx_state state;
+ unsigned int count;
+
+@@ -624,7 +623,7 @@ static void tegra_uart_stop_tx(struct uart_port *u)
+ dmaengine_tx_status(tup->tx_dma_chan, tup->tx_cookie, &state);
+ count = tup->tx_bytes_requested - state.residue;
+ async_tx_ack(tup->tx_dma_desc);
+- xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
++ uart_xmit_advance(&tup->uport, count);
+ tup->tx_in_progress = 0;
+ }
+
+diff --git a/drivers/tty/serial/tegra-tcu.c b/drivers/tty/serial/tegra-tcu.c
+index 4877c54c613d1..889b701ba7c62 100644
+--- a/drivers/tty/serial/tegra-tcu.c
++++ b/drivers/tty/serial/tegra-tcu.c
+@@ -101,7 +101,7 @@ static void tegra_tcu_uart_start_tx(struct uart_port *port)
+ break;
+
+ tegra_tcu_write(tcu, &xmit->buf[xmit->tail], count);
+- xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
++ uart_xmit_advance(port, count);
+ }
+
+ uart_write_wakeup(port);
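The serial hunks replace open-coded circular-buffer tail math with uart_xmit_advance(), which also bumps the TX byte accounting. A sketch of what that helper encapsulates, using a local power-of-two buffer size:

#include <stdio.h>

#define XMIT_SIZE 4096	/* must be a power of two */

struct port {
	unsigned int tail;
	unsigned long tx_count;	/* analogous to port->icount.tx */
};

static void xmit_advance(struct port *p, unsigned int chars)
{
	p->tail = (p->tail + chars) & (XMIT_SIZE - 1);	/* wrap */
	p->tx_count += chars;	/* the accounting the helper adds */
}

int main(void)
{
	struct port p = { .tail = 4090 };

	xmit_advance(&p, 10);	/* wraps: (4090 + 10) & 4095 == 4 */
	printf("tail=%u tx=%lu\n", p.tail, p.tx_count);
	return 0;
}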
+diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
+index 829da9cb14a86..55bb0d0422d52 100644
+--- a/drivers/ufs/core/ufshcd.c
++++ b/drivers/ufs/core/ufshcd.c
+@@ -9519,7 +9519,7 @@ void ufshcd_remove(struct ufs_hba *hba)
+ ufs_bsg_remove(hba);
+ ufshpb_remove(hba);
+ ufs_sysfs_remove_nodes(hba->dev);
+- blk_cleanup_queue(hba->tmf_queue);
++ blk_mq_destroy_queue(hba->tmf_queue);
+ blk_mq_free_tag_set(&hba->tmf_tag_set);
+ scsi_remove_host(hba->host);
+ /* disable interrupts */
+@@ -9815,7 +9815,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
+ return 0;
+
+ free_tmf_queue:
+- blk_cleanup_queue(hba->tmf_queue);
++ blk_mq_destroy_queue(hba->tmf_queue);
+ free_tmf_tag_set:
+ blk_mq_free_tag_set(&hba->tmf_tag_set);
+ out_remove_scsi_host:
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index dfef85a18eb55..80b29f937c605 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -6049,7 +6049,7 @@ re_enumerate:
+ *
+ * Return: The same as for usb_reset_and_verify_device().
+ * However, if a reset is already in progress (for instance, if a
+- * driver doesn't have pre_ or post_reset() callbacks, and while
++ * driver doesn't have pre_reset() or post_reset() callbacks, and while
+ * being unbound or re-bound during the ongoing reset its disconnect()
+ * or probe() routine tries to perform a second, nested reset), the
+ * routine returns -EINPROGRESS.
+diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
+index 1db9f51f98aef..08ca65ffe57b7 100644
+--- a/drivers/usb/dwc3/core.c
++++ b/drivers/usb/dwc3/core.c
+@@ -1718,12 +1718,6 @@ static int dwc3_probe(struct platform_device *pdev)
+
+ dwc3_get_properties(dwc);
+
+- if (!dwc->sysdev_is_parent) {
+- ret = dma_set_mask_and_coherent(dwc->sysdev, DMA_BIT_MASK(64));
+- if (ret)
+- return ret;
+- }
+-
+ dwc->reset = devm_reset_control_array_get_optional_shared(dev);
+ if (IS_ERR(dwc->reset))
+ return PTR_ERR(dwc->reset);
+@@ -1789,6 +1783,13 @@ static int dwc3_probe(struct platform_device *pdev)
+ platform_set_drvdata(pdev, dwc);
+ dwc3_cache_hwparams(dwc);
+
++ if (!dwc->sysdev_is_parent &&
++ DWC3_GHWPARAMS0_AWIDTH(dwc->hwparams.hwparams0) == 64) {
++ ret = dma_set_mask_and_coherent(dwc->sysdev, DMA_BIT_MASK(64));
++ if (ret)
++ goto disable_clks;
++ }
++
+ spin_lock_init(&dwc->lock);
+ mutex_init(&dwc->mutex);
+
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index a5e8374a8d710..697683e3fbffa 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -256,6 +256,7 @@ static void option_instat_callback(struct urb *urb);
+ #define QUECTEL_PRODUCT_EM060K 0x030b
+ #define QUECTEL_PRODUCT_EM12 0x0512
+ #define QUECTEL_PRODUCT_RM500Q 0x0800
++#define QUECTEL_PRODUCT_RM520N 0x0801
+ #define QUECTEL_PRODUCT_EC200S_CN 0x6002
+ #define QUECTEL_PRODUCT_EC200T 0x6026
+ #define QUECTEL_PRODUCT_RM500K 0x7001
+@@ -1138,6 +1139,8 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG95, 0xff, 0xff, 0xff),
+ .driver_info = NUMEP2 },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG95, 0xff, 0, 0) },
++ { USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, 0x0203, 0xff), /* BG95-M3 */
++ .driver_info = ZLP },
+ { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96),
+ .driver_info = RSVD(4) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0xff, 0xff),
+@@ -1159,6 +1162,9 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0, 0) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0xff, 0x10),
+ .driver_info = ZLP },
++ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM520N, 0xff, 0xff, 0x30) },
++ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM520N, 0xff, 0, 0x40) },
++ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM520N, 0xff, 0, 0) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200S_CN, 0xff, 0, 0) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200T, 0xff, 0, 0) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500K, 0xff, 0x00, 0x00) },
+diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c
+index d5f3f763717ea..d4b2519257962 100644
+--- a/drivers/xen/xenbus/xenbus_client.c
++++ b/drivers/xen/xenbus/xenbus_client.c
+@@ -382,9 +382,10 @@ int xenbus_setup_ring(struct xenbus_device *dev, gfp_t gfp, void **vaddr,
+ unsigned long ring_size = nr_pages * XEN_PAGE_SIZE;
+ grant_ref_t gref_head;
+ unsigned int i;
++ void *addr;
+ int ret;
+
+- *vaddr = alloc_pages_exact(ring_size, gfp | __GFP_ZERO);
++ addr = *vaddr = alloc_pages_exact(ring_size, gfp | __GFP_ZERO);
+ if (!*vaddr) {
+ ret = -ENOMEM;
+ goto err;
+@@ -401,13 +402,15 @@ int xenbus_setup_ring(struct xenbus_device *dev, gfp_t gfp, void **vaddr,
+ unsigned long gfn;
+
+ if (is_vmalloc_addr(*vaddr))
+- gfn = pfn_to_gfn(vmalloc_to_pfn(vaddr[i]));
++ gfn = pfn_to_gfn(vmalloc_to_pfn(addr));
+ else
+- gfn = virt_to_gfn(vaddr[i]);
++ gfn = virt_to_gfn(addr);
+
+ grefs[i] = gnttab_claim_grant_reference(&gref_head);
+ gnttab_grant_foreign_access_ref(grefs[i], dev->otherend_id,
+ gfn, 0);
++
++ addr += XEN_PAGE_SIZE;
+ }
+
+ return 0;
+diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
+index 781952c5a5c23..20ad619a8a973 100644
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -4586,6 +4586,17 @@ void __cold close_ctree(struct btrfs_fs_info *fs_info)
+
+ set_bit(BTRFS_FS_CLOSING_START, &fs_info->flags);
+
++ /*
++ * If we had UNFINISHED_DROPS we could still be processing them, so
++ * clear that bit and wake up relocation so it can stop.
++ * We must do this before stopping the block group reclaim task, because
++ * at btrfs_relocate_block_group() we wait for this bit, and after the
++ * wait we stop with -EINTR if btrfs_fs_closing() returns non-zero - we
++ * have just set BTRFS_FS_CLOSING_START, so btrfs_fs_closing() will
++ * return 1.
++ */
++ btrfs_wake_unfinished_drop(fs_info);
++
+ /*
+ * We may have the reclaim task running and relocating a data block group,
+ * in which case it may create delayed iputs. So stop it before we park
+@@ -4604,12 +4615,6 @@ void __cold close_ctree(struct btrfs_fs_info *fs_info)
+ */
+ kthread_park(fs_info->cleaner_kthread);
+
+- /*
+- * If we had UNFINISHED_DROPS we could still be processing them, so
+- * clear that bit and wake up relocation so it can stop.
+- */
+- btrfs_wake_unfinished_drop(fs_info);
+-
+ /* wait for the qgroup rescan worker to stop */
+ btrfs_qgroup_wait_for_completion(fs_info, false);
+
+@@ -4632,6 +4637,31 @@ void __cold close_ctree(struct btrfs_fs_info *fs_info)
+ /* clear out the rbtree of defraggable inodes */
+ btrfs_cleanup_defrag_inodes(fs_info);
+
++ /*
++ * After we parked the cleaner kthread, ordered extents may have
++ * completed and created new delayed iputs. If one of the async reclaim
++ * tasks is running and in the RUN_DELAYED_IPUTS flush state, then we
++ * can hang forever trying to stop it, because if a delayed iput is
++ * added after it ran btrfs_run_delayed_iputs() and before it called
++ * btrfs_wait_on_delayed_iputs(), it will hang forever since there is
++ * no one else to run iputs.
++ *
++ * So wait for all ongoing ordered extents to complete and then run
++ * delayed iputs. This works because once we reach this point no one
++ * can either create new ordered extents nor create delayed iputs
++ * through some other means.
++ *
++ * Also note that btrfs_wait_ordered_roots() is not safe here, because
++ * it waits for BTRFS_ORDERED_COMPLETE to be set on an ordered extent,
++ * but the delayed iput for the respective inode is made only when doing
++ * the final btrfs_put_ordered_extent() (which must happen at
++ * btrfs_finish_ordered_io() when we are unmounting).
++ */
++ btrfs_flush_workqueue(fs_info->endio_write_workers);
++ /* Ordered extents for free space inodes. */
++ btrfs_flush_workqueue(fs_info->endio_freespace_worker);
++ btrfs_run_delayed_iputs(fs_info);
++
+ cancel_work_sync(&fs_info->async_reclaim_work);
+ cancel_work_sync(&fs_info->async_data_reclaim_work);
+ cancel_work_sync(&fs_info->preempt_reclaim_work);
+diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
+index 1386362fad3b8..4448b7b6ea221 100644
+--- a/fs/btrfs/zoned.c
++++ b/fs/btrfs/zoned.c
+@@ -1918,10 +1918,44 @@ out_unlock:
+ return ret;
+ }
+
++static void wait_eb_writebacks(struct btrfs_block_group *block_group)
++{
++ struct btrfs_fs_info *fs_info = block_group->fs_info;
++ const u64 end = block_group->start + block_group->length;
++ struct radix_tree_iter iter;
++ struct extent_buffer *eb;
++ void __rcu **slot;
++
++ rcu_read_lock();
++ radix_tree_for_each_slot(slot, &fs_info->buffer_radix, &iter,
++ block_group->start >> fs_info->sectorsize_bits) {
++ eb = radix_tree_deref_slot(slot);
++ if (!eb)
++ continue;
++ if (radix_tree_deref_retry(eb)) {
++ slot = radix_tree_iter_retry(&iter);
++ continue;
++ }
++
++ if (eb->start < block_group->start)
++ continue;
++ if (eb->start >= end)
++ break;
++
++ slot = radix_tree_iter_resume(slot, &iter);
++ rcu_read_unlock();
++ wait_on_extent_buffer_writeback(eb);
++ rcu_read_lock();
++ }
++ rcu_read_unlock();
++}
++
+ static int do_zone_finish(struct btrfs_block_group *block_group, bool fully_written)
+ {
+ struct btrfs_fs_info *fs_info = block_group->fs_info;
+ struct map_lookup *map;
++ const bool is_metadata = (block_group->flags &
++ (BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_SYSTEM));
+ int ret = 0;
+ int i;
+
+@@ -1932,8 +1966,7 @@ static int do_zone_finish(struct btrfs_block_group *block_group, bool fully_writ
+ }
+
+ /* Check if we have unwritten allocated space */
+- if ((block_group->flags &
+- (BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_SYSTEM)) &&
++ if (is_metadata &&
+ block_group->start + block_group->alloc_offset > block_group->meta_write_pointer) {
+ spin_unlock(&block_group->lock);
+ return -EAGAIN;
+@@ -1958,6 +1991,9 @@ static int do_zone_finish(struct btrfs_block_group *block_group, bool fully_writ
+ /* No need to wait for NOCOW writers. Zoned mode does not allow that */
+ btrfs_wait_ordered_roots(fs_info, U64_MAX, block_group->start,
+ block_group->length);
++ /* Wait for extent buffers to be written. */
++ if (is_metadata)
++ wait_eb_writebacks(block_group);
+
+ spin_lock(&block_group->lock);
+
+diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
+index 8f2e003e05907..97278c43f8dc0 100644
+--- a/fs/cifs/cifsfs.c
++++ b/fs/cifs/cifsfs.c
+@@ -1232,6 +1232,12 @@ ssize_t cifs_file_copychunk_range(unsigned int xid,
+ lock_two_nondirectories(target_inode, src_inode);
+
+ cifs_dbg(FYI, "about to flush pages\n");
++
++ rc = filemap_write_and_wait_range(src_inode->i_mapping, off,
++ off + len - 1);
++ if (rc)
++ goto out;
++
+ /* should we flush first and last page first */
+ truncate_inode_pages(&target_inode->i_data, 0);
+
+diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
+index e8a8daa82ed76..cc180d37b8ce1 100644
+--- a/fs/cifs/smb2ops.c
++++ b/fs/cifs/smb2ops.c
+@@ -1886,17 +1886,8 @@ smb2_copychunk_range(const unsigned int xid,
+ int chunks_copied = 0;
+ bool chunk_sizes_updated = false;
+ ssize_t bytes_written, total_bytes_written = 0;
+- struct inode *inode;
+
+ pcchunk = kmalloc(sizeof(struct copychunk_ioctl), GFP_KERNEL);
+-
+- /*
+- * We need to flush all unwritten data before we can send the
+- * copychunk ioctl to the server.
+- */
+- inode = d_inode(trgtfile->dentry);
+- filemap_write_and_wait(inode->i_mapping);
+-
+ if (pcchunk == NULL)
+ return -ENOMEM;
+
+@@ -3961,39 +3952,50 @@ static long smb3_collapse_range(struct file *file, struct cifs_tcon *tcon,
+ {
+ int rc;
+ unsigned int xid;
+- struct inode *inode;
++ struct inode *inode = file_inode(file);
+ struct cifsFileInfo *cfile = file->private_data;
+- struct cifsInodeInfo *cifsi;
++ struct cifsInodeInfo *cifsi = CIFS_I(inode);
+ __le64 eof;
++ loff_t old_eof;
+
+ xid = get_xid();
+
+- inode = d_inode(cfile->dentry);
+- cifsi = CIFS_I(inode);
++ inode_lock(inode);
+
+- if (off >= i_size_read(inode) ||
+- off + len >= i_size_read(inode)) {
++ old_eof = i_size_read(inode);
++ if ((off >= old_eof) ||
++ off + len >= old_eof) {
+ rc = -EINVAL;
+ goto out;
+ }
+
++ filemap_invalidate_lock(inode->i_mapping);
++ rc = filemap_write_and_wait_range(inode->i_mapping, off, old_eof - 1);
++ if (rc < 0)
++ goto out_2;
++
++ truncate_pagecache_range(inode, off, old_eof);
++
+ rc = smb2_copychunk_range(xid, cfile, cfile, off + len,
+- i_size_read(inode) - off - len, off);
++ old_eof - off - len, off);
+ if (rc < 0)
+- goto out;
++ goto out_2;
+
+- eof = cpu_to_le64(i_size_read(inode) - len);
++ eof = cpu_to_le64(old_eof - len);
+ rc = SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
+ cfile->fid.volatile_fid, cfile->pid, &eof);
+ if (rc < 0)
+- goto out;
++ goto out_2;
+
+ rc = 0;
+
+ cifsi->server_eof = i_size_read(inode) - len;
+ truncate_setsize(inode, cifsi->server_eof);
+ fscache_resize_cookie(cifs_inode_cookie(inode), cifsi->server_eof);
++out_2:
++ filemap_invalidate_unlock(inode->i_mapping);
+ out:
++ inode_unlock(inode);
+ free_xid(xid);
+ return rc;
+ }
+@@ -4004,34 +4006,47 @@ static long smb3_insert_range(struct file *file, struct cifs_tcon *tcon,
+ int rc;
+ unsigned int xid;
+ struct cifsFileInfo *cfile = file->private_data;
++ struct inode *inode = file_inode(file);
+ __le64 eof;
+- __u64 count;
++ __u64 count, old_eof;
+
+ xid = get_xid();
+
+- if (off >= i_size_read(file->f_inode)) {
++ inode_lock(inode);
++
++ old_eof = i_size_read(inode);
++ if (off >= old_eof) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+- count = i_size_read(file->f_inode) - off;
+- eof = cpu_to_le64(i_size_read(file->f_inode) + len);
++ count = old_eof - off;
++ eof = cpu_to_le64(old_eof + len);
++
++ filemap_invalidate_lock(inode->i_mapping);
++ rc = filemap_write_and_wait_range(inode->i_mapping, off, old_eof + len - 1);
++ if (rc < 0)
++ goto out_2;
++ truncate_pagecache_range(inode, off, old_eof);
+
+ rc = SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
+ cfile->fid.volatile_fid, cfile->pid, &eof);
+ if (rc < 0)
+- goto out;
++ goto out_2;
+
+ rc = smb2_copychunk_range(xid, cfile, cfile, off, count, off + len);
+ if (rc < 0)
+- goto out;
++ goto out_2;
+
+- rc = smb3_zero_range(file, tcon, off, len, 1);
++ rc = smb3_zero_data(file, tcon, off, len, xid);
+ if (rc < 0)
+- goto out;
++ goto out_2;
+
+ rc = 0;
++out_2:
++ filemap_invalidate_unlock(inode->i_mapping);
+ out:
++ inode_unlock(inode);
+ free_xid(xid);
+ return rc;
+ }
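
The two fallocate paths above converge on the same locking discipline: take the inode lock, then the mapping's invalidate lock, flush dirty pages so the server operates on current data, and drop the now-stale page cache before issuing the remote operation. A minimal sketch of that ordering follows; the helper and its do_remote() callback are hypothetical stand-ins for the copychunk/set-EOF sequence, not part of the patch.

/*
 * Sketch of the lock/flush/invalidate ordering used by
 * smb3_collapse_range() and smb3_insert_range() above.
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

static int smb3_locked_remote_op(struct inode *inode, loff_t off, loff_t end,
				 int (*do_remote)(struct inode *))
{
	int rc;

	inode_lock(inode);				/* serialize i_size changes */
	filemap_invalidate_lock(inode->i_mapping);	/* hold off page faults */

	/* push dirty pages so the server sees current data */
	rc = filemap_write_and_wait_range(inode->i_mapping, off, end);
	if (rc < 0)
		goto out;

	/* drop cached pages that the remote op will make stale */
	truncate_pagecache_range(inode, off, end);

	rc = do_remote(inode);		/* e.g. copychunk + SMB2_set_eof */
out:
	filemap_invalidate_unlock(inode->i_mapping);
	inode_unlock(inode);
	return rc;
}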
+diff --git a/fs/dax.c b/fs/dax.c
+index 4155a6107fa10..7ab248ed21aa3 100644
+--- a/fs/dax.c
++++ b/fs/dax.c
+@@ -1241,6 +1241,9 @@ dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
+ loff_t done = 0;
+ int ret;
+
++ if (!iomi.len)
++ return 0;
++
+ if (iov_iter_rw(iter) == WRITE) {
+ lockdep_assert_held_write(&iomi.inode->i_rwsem);
+ iomi.flags |= IOMAP_WRITE;
+diff --git a/fs/exfat/fatent.c b/fs/exfat/fatent.c
+index 9de6a6b844c9e..e541a004f8efa 100644
+--- a/fs/exfat/fatent.c
++++ b/fs/exfat/fatent.c
+@@ -270,8 +270,7 @@ int exfat_zeroed_cluster(struct inode *dir, unsigned int clu)
+ struct super_block *sb = dir->i_sb;
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+ struct buffer_head *bh;
+- sector_t blknr, last_blknr;
+- int i;
++ sector_t blknr, last_blknr, i;
+
+ blknr = exfat_cluster_to_sector(sbi, clu);
+ last_blknr = blknr + sbi->sect_per_clus;
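
The exfat change above widens the loop counter because blknr and last_blknr are sector_t values that can exceed INT_MAX on large volumes, so an int counter would truncate. A tiny user-space illustration of the truncation; the block number is made up, and the truncated result is the usual two's-complement wraparound:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* a block number past 2^31, as seen on a large volume */
	uint64_t blknr = 5000000000ULL;

	int narrow = (int)blknr;	/* truncates to 705032704: wrong block */
	uint64_t wide = blknr;		/* sector_t-sized counter: correct */

	printf("narrow=%d wide=%llu\n", narrow, (unsigned long long)wide);
	return 0;
}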
+diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
+index adfc30ee4b7be..0d86931269bfc 100644
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -167,8 +167,6 @@ enum SHIFT_DIRECTION {
+ #define EXT4_MB_CR0_OPTIMIZED 0x8000
+ /* Avg fragment size rb tree lookup succeeded at least once for cr = 1 */
+ #define EXT4_MB_CR1_OPTIMIZED 0x00010000
+-/* Perform linear traversal for one group */
+-#define EXT4_MB_SEARCH_NEXT_LINEAR 0x00020000
+ struct ext4_allocation_request {
+ /* target inode for block we're allocating */
+ struct inode *inode;
+@@ -1589,8 +1587,8 @@ struct ext4_sb_info {
+ struct list_head s_discard_list;
+ struct work_struct s_discard_work;
+ atomic_t s_retry_alloc_pending;
+- struct rb_root s_mb_avg_fragment_size_root;
+- rwlock_t s_mb_rb_lock;
++ struct list_head *s_mb_avg_fragment_size;
++ rwlock_t *s_mb_avg_fragment_size_locks;
+ struct list_head *s_mb_largest_free_orders;
+ rwlock_t *s_mb_largest_free_orders_locks;
+
+@@ -3402,6 +3400,8 @@ struct ext4_group_info {
+ ext4_grpblk_t bb_first_free; /* first free block */
+ ext4_grpblk_t bb_free; /* total free blocks */
+ ext4_grpblk_t bb_fragments; /* nr of freespace fragments */
++ int bb_avg_fragment_size_order; /* order of average
++ fragment in BG */
+ ext4_grpblk_t bb_largest_free_order;/* order of largest frag in BG */
+ ext4_group_t bb_group; /* Group number */
+ struct list_head bb_prealloc_list;
+@@ -3409,7 +3409,7 @@ struct ext4_group_info {
+ void *bb_bitmap;
+ #endif
+ struct rw_semaphore alloc_sem;
+- struct rb_node bb_avg_fragment_size_rb;
++ struct list_head bb_avg_fragment_size_node;
+ struct list_head bb_largest_free_order_node;
+ ext4_grpblk_t bb_counters[]; /* Nr of free power-of-two-block
+ * regions, index is order.
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index c148bb97b5273..5235974126bd3 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -460,6 +460,10 @@ static int __ext4_ext_check(const char *function, unsigned int line,
+ error_msg = "invalid eh_entries";
+ goto corrupted;
+ }
++ if (unlikely((eh->eh_entries == 0) && (depth > 0))) {
++ error_msg = "eh_entries is 0 but eh_depth is > 0";
++ goto corrupted;
++ }
+ if (!ext4_valid_extent_entries(inode, eh, lblk, &pblk, depth)) {
+ error_msg = "invalid extent entries";
+ goto corrupted;
+diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
+index f73e5eb43eae1..208b87ce88588 100644
+--- a/fs/ext4/ialloc.c
++++ b/fs/ext4/ialloc.c
+@@ -510,7 +510,7 @@ static int find_group_orlov(struct super_block *sb, struct inode *parent,
+ goto fallback;
+ }
+
+- max_dirs = ndirs / ngroups + inodes_per_group / 16;
++ max_dirs = ndirs / ngroups + inodes_per_group*flex_size / 16;
+ min_inodes = avefreei - inodes_per_group*flex_size / 4;
+ if (min_inodes < 1)
+ min_inodes = 1;
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index 38e7dc2531b17..fd29e15d1c3b5 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -140,13 +140,15 @@
+ * number of buddy bitmap orders possible) number of lists. Group-infos are
+ * placed in appropriate lists.
+ *
+- * 2) Average fragment size rb tree (sbi->s_mb_avg_fragment_size_root)
++ * 2) Average fragment size lists (sbi->s_mb_avg_fragment_size)
+ *
+- * Locking: sbi->s_mb_rb_lock (rwlock)
++ * Locking: sbi->s_mb_avg_fragment_size_locks (array of rw locks)
+ *
+- * This is a red black tree consisting of group infos and the tree is sorted
+- * by average fragment sizes (which is calculated as ext4_group_info->bb_free
+- * / ext4_group_info->bb_fragments).
++ * This is an array of lists where in the i-th list there are groups with
++ * average fragment size >= 2^i and < 2^(i+1). The average fragment size
++ * is computed as ext4_group_info->bb_free / ext4_group_info->bb_fragments.
++ * Note that we don't bother with a special list for completely empty groups
++ * so we only have MB_NUM_ORDERS(sb) lists.
+ *
+ * When "mb_optimize_scan" mount option is set, mballoc consults the above data
+ * structures to decide the order in which groups are to be traversed for
+@@ -160,7 +162,8 @@
+ *
+ * At CR = 1, we only consider groups where average fragment size > request
+ * size. So, we lookup a group which has average fragment size just above or
+- * equal to request size using our rb tree (data structure 2) in O(log N) time.
++ * equal to request size using our average fragment size group lists (data
++ * structure 2) in O(1) time.
+ *
+ * If "mb_optimize_scan" mount option is not set, mballoc traverses groups in
+ * linear order which requires O(N) search time for each CR 0 and CR 1 phase.
+@@ -802,65 +805,51 @@ static void ext4_mb_mark_free_simple(struct super_block *sb,
+ }
+ }
+
+-static void ext4_mb_rb_insert(struct rb_root *root, struct rb_node *new,
+- int (*cmp)(struct rb_node *, struct rb_node *))
++static int mb_avg_fragment_size_order(struct super_block *sb, ext4_grpblk_t len)
+ {
+- struct rb_node **iter = &root->rb_node, *parent = NULL;
++ int order;
+
+- while (*iter) {
+- parent = *iter;
+- if (cmp(new, *iter) > 0)
+- iter = &((*iter)->rb_left);
+- else
+- iter = &((*iter)->rb_right);
+- }
+-
+- rb_link_node(new, parent, iter);
+- rb_insert_color(new, root);
+-}
+-
+-static int
+-ext4_mb_avg_fragment_size_cmp(struct rb_node *rb1, struct rb_node *rb2)
+-{
+- struct ext4_group_info *grp1 = rb_entry(rb1,
+- struct ext4_group_info,
+- bb_avg_fragment_size_rb);
+- struct ext4_group_info *grp2 = rb_entry(rb2,
+- struct ext4_group_info,
+- bb_avg_fragment_size_rb);
+- int num_frags_1, num_frags_2;
+-
+- num_frags_1 = grp1->bb_fragments ?
+- grp1->bb_free / grp1->bb_fragments : 0;
+- num_frags_2 = grp2->bb_fragments ?
+- grp2->bb_free / grp2->bb_fragments : 0;
+-
+- return (num_frags_2 - num_frags_1);
++ /*
++	 * We don't bother with special lists for groups with only 1-block
++	 * free extents or for completely empty groups.
++ */
++ order = fls(len) - 2;
++ if (order < 0)
++ return 0;
++ if (order == MB_NUM_ORDERS(sb))
++ order--;
++ return order;
+ }
+
+-/*
+- * Reinsert grpinfo into the avg_fragment_size tree with new average
+- * fragment size.
+- */
++/* Move group to appropriate avg_fragment_size list */
+ static void
+ mb_update_avg_fragment_size(struct super_block *sb, struct ext4_group_info *grp)
+ {
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
++ int new_order;
+
+ if (!test_opt2(sb, MB_OPTIMIZE_SCAN) || grp->bb_free == 0)
+ return;
+
+- write_lock(&sbi->s_mb_rb_lock);
+- if (!RB_EMPTY_NODE(&grp->bb_avg_fragment_size_rb)) {
+- rb_erase(&grp->bb_avg_fragment_size_rb,
+- &sbi->s_mb_avg_fragment_size_root);
+- RB_CLEAR_NODE(&grp->bb_avg_fragment_size_rb);
+- }
++ new_order = mb_avg_fragment_size_order(sb,
++ grp->bb_free / grp->bb_fragments);
++ if (new_order == grp->bb_avg_fragment_size_order)
++ return;
+
+- ext4_mb_rb_insert(&sbi->s_mb_avg_fragment_size_root,
+- &grp->bb_avg_fragment_size_rb,
+- ext4_mb_avg_fragment_size_cmp);
+- write_unlock(&sbi->s_mb_rb_lock);
++ if (grp->bb_avg_fragment_size_order != -1) {
++ write_lock(&sbi->s_mb_avg_fragment_size_locks[
++ grp->bb_avg_fragment_size_order]);
++ list_del(&grp->bb_avg_fragment_size_node);
++ write_unlock(&sbi->s_mb_avg_fragment_size_locks[
++ grp->bb_avg_fragment_size_order]);
++ }
++ grp->bb_avg_fragment_size_order = new_order;
++ write_lock(&sbi->s_mb_avg_fragment_size_locks[
++ grp->bb_avg_fragment_size_order]);
++ list_add_tail(&grp->bb_avg_fragment_size_node,
++ &sbi->s_mb_avg_fragment_size[grp->bb_avg_fragment_size_order]);
++ write_unlock(&sbi->s_mb_avg_fragment_size_locks[
++ grp->bb_avg_fragment_size_order]);
+ }
+
+ /*
+@@ -909,86 +898,55 @@ static void ext4_mb_choose_next_group_cr0(struct ext4_allocation_context *ac,
+ *new_cr = 1;
+ } else {
+ *group = grp->bb_group;
+- ac->ac_last_optimal_group = *group;
+ ac->ac_flags |= EXT4_MB_CR0_OPTIMIZED;
+ }
+ }
+
+ /*
+- * Choose next group by traversing average fragment size tree. Updates *new_cr
+- * if cr lvel needs an update. Sets EXT4_MB_SEARCH_NEXT_LINEAR to indicate that
+- * the linear search should continue for one iteration since there's lock
+- * contention on the rb tree lock.
++ * Choose next group by traversing average fragment size list of suitable
++ * order. Updates *new_cr if cr level needs an update.
+ */
+ static void ext4_mb_choose_next_group_cr1(struct ext4_allocation_context *ac,
+ int *new_cr, ext4_group_t *group, ext4_group_t ngroups)
+ {
+ struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
+- int avg_fragment_size, best_so_far;
+- struct rb_node *node, *found;
+- struct ext4_group_info *grp;
+-
+- /*
+- * If there is contention on the lock, instead of waiting for the lock
+- * to become available, just continue searching lineraly. We'll resume
+- * our rb tree search later starting at ac->ac_last_optimal_group.
+- */
+- if (!read_trylock(&sbi->s_mb_rb_lock)) {
+- ac->ac_flags |= EXT4_MB_SEARCH_NEXT_LINEAR;
+- return;
+- }
++ struct ext4_group_info *grp = NULL, *iter;
++ int i;
+
+ if (unlikely(ac->ac_flags & EXT4_MB_CR1_OPTIMIZED)) {
+ if (sbi->s_mb_stats)
+ atomic_inc(&sbi->s_bal_cr1_bad_suggestions);
+- /* We have found something at CR 1 in the past */
+- grp = ext4_get_group_info(ac->ac_sb, ac->ac_last_optimal_group);
+- for (found = rb_next(&grp->bb_avg_fragment_size_rb); found != NULL;
+- found = rb_next(found)) {
+- grp = rb_entry(found, struct ext4_group_info,
+- bb_avg_fragment_size_rb);
++ }
++
++ for (i = mb_avg_fragment_size_order(ac->ac_sb, ac->ac_g_ex.fe_len);
++ i < MB_NUM_ORDERS(ac->ac_sb); i++) {
++ if (list_empty(&sbi->s_mb_avg_fragment_size[i]))
++ continue;
++ read_lock(&sbi->s_mb_avg_fragment_size_locks[i]);
++ if (list_empty(&sbi->s_mb_avg_fragment_size[i])) {
++ read_unlock(&sbi->s_mb_avg_fragment_size_locks[i]);
++ continue;
++ }
++ list_for_each_entry(iter, &sbi->s_mb_avg_fragment_size[i],
++ bb_avg_fragment_size_node) {
+ if (sbi->s_mb_stats)
+ atomic64_inc(&sbi->s_bal_cX_groups_considered[1]);
+- if (likely(ext4_mb_good_group(ac, grp->bb_group, 1)))
++ if (likely(ext4_mb_good_group(ac, iter->bb_group, 1))) {
++ grp = iter;
+ break;
+- }
+- goto done;
+- }
+-
+- node = sbi->s_mb_avg_fragment_size_root.rb_node;
+- best_so_far = 0;
+- found = NULL;
+-
+- while (node) {
+- grp = rb_entry(node, struct ext4_group_info,
+- bb_avg_fragment_size_rb);
+- avg_fragment_size = 0;
+- if (ext4_mb_good_group(ac, grp->bb_group, 1)) {
+- avg_fragment_size = grp->bb_fragments ?
+- grp->bb_free / grp->bb_fragments : 0;
+- if (!best_so_far || avg_fragment_size < best_so_far) {
+- best_so_far = avg_fragment_size;
+- found = node;
+ }
+ }
+- if (avg_fragment_size > ac->ac_g_ex.fe_len)
+- node = node->rb_right;
+- else
+- node = node->rb_left;
++ read_unlock(&sbi->s_mb_avg_fragment_size_locks[i]);
++ if (grp)
++ break;
+ }
+
+-done:
+- if (found) {
+- grp = rb_entry(found, struct ext4_group_info,
+- bb_avg_fragment_size_rb);
++ if (grp) {
+ *group = grp->bb_group;
+ ac->ac_flags |= EXT4_MB_CR1_OPTIMIZED;
+ } else {
+ *new_cr = 2;
+ }
+-
+- read_unlock(&sbi->s_mb_rb_lock);
+- ac->ac_last_optimal_group = *group;
+ }
+
+ static inline int should_optimize_scan(struct ext4_allocation_context *ac)
+@@ -1017,11 +975,6 @@ next_linear_group(struct ext4_allocation_context *ac, int group, int ngroups)
+ goto inc_and_return;
+ }
+
+- if (ac->ac_flags & EXT4_MB_SEARCH_NEXT_LINEAR) {
+- ac->ac_flags &= ~EXT4_MB_SEARCH_NEXT_LINEAR;
+- goto inc_and_return;
+- }
+-
+ return group;
+ inc_and_return:
+ /*
+@@ -1049,8 +1002,10 @@ static void ext4_mb_choose_next_group(struct ext4_allocation_context *ac,
+ {
+ *new_cr = ac->ac_criteria;
+
+- if (!should_optimize_scan(ac) || ac->ac_groups_linear_remaining)
++ if (!should_optimize_scan(ac) || ac->ac_groups_linear_remaining) {
++ *group = next_linear_group(ac, *group, ngroups);
+ return;
++ }
+
+ if (*new_cr == 0) {
+ ext4_mb_choose_next_group_cr0(ac, new_cr, group, ngroups);
+@@ -1075,23 +1030,25 @@ mb_set_largest_free_order(struct super_block *sb, struct ext4_group_info *grp)
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+ int i;
+
+- if (test_opt2(sb, MB_OPTIMIZE_SCAN) && grp->bb_largest_free_order >= 0) {
++ for (i = MB_NUM_ORDERS(sb) - 1; i >= 0; i--)
++ if (grp->bb_counters[i] > 0)
++ break;
++ /* No need to move between order lists? */
++ if (!test_opt2(sb, MB_OPTIMIZE_SCAN) ||
++ i == grp->bb_largest_free_order) {
++ grp->bb_largest_free_order = i;
++ return;
++ }
++
++ if (grp->bb_largest_free_order >= 0) {
+ write_lock(&sbi->s_mb_largest_free_orders_locks[
+ grp->bb_largest_free_order]);
+ list_del_init(&grp->bb_largest_free_order_node);
+ write_unlock(&sbi->s_mb_largest_free_orders_locks[
+ grp->bb_largest_free_order]);
+ }
+- grp->bb_largest_free_order = -1; /* uninit */
+-
+- for (i = MB_NUM_ORDERS(sb) - 1; i >= 0; i--) {
+- if (grp->bb_counters[i] > 0) {
+- grp->bb_largest_free_order = i;
+- break;
+- }
+- }
+- if (test_opt2(sb, MB_OPTIMIZE_SCAN) &&
+- grp->bb_largest_free_order >= 0 && grp->bb_free) {
++ grp->bb_largest_free_order = i;
++ if (grp->bb_largest_free_order >= 0 && grp->bb_free) {
+ write_lock(&sbi->s_mb_largest_free_orders_locks[
+ grp->bb_largest_free_order]);
+ list_add_tail(&grp->bb_largest_free_order_node,
+@@ -1148,13 +1105,13 @@ void ext4_mb_generate_buddy(struct super_block *sb,
+ EXT4_GROUP_INFO_BBITMAP_CORRUPT);
+ }
+ mb_set_largest_free_order(sb, grp);
++ mb_update_avg_fragment_size(sb, grp);
+
+ clear_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &(grp->bb_state));
+
+ period = get_cycles() - period;
+ atomic_inc(&sbi->s_mb_buddies_generated);
+ atomic64_add(period, &sbi->s_mb_generation_time);
+- mb_update_avg_fragment_size(sb, grp);
+ }
+
+ /* The buddy information is attached the buddy cache inode
+@@ -2630,7 +2587,7 @@ static noinline_for_stack int
+ ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
+ {
+ ext4_group_t prefetch_grp = 0, ngroups, group, i;
+- int cr = -1;
++ int cr = -1, new_cr;
+ int err = 0, first_err = 0;
+ unsigned int nr = 0, prefetch_ios = 0;
+ struct ext4_sb_info *sbi;
+@@ -2701,17 +2658,14 @@ repeat:
+ * from the goal value specified
+ */
+ group = ac->ac_g_ex.fe_group;
+- ac->ac_last_optimal_group = group;
+ ac->ac_groups_linear_remaining = sbi->s_mb_max_linear_groups;
+ prefetch_grp = group;
+
+- for (i = 0; i < ngroups; group = next_linear_group(ac, group, ngroups),
+- i++) {
+- int ret = 0, new_cr;
++ for (i = 0, new_cr = cr; i < ngroups; i++,
++ ext4_mb_choose_next_group(ac, &new_cr, &group, ngroups)) {
++ int ret = 0;
+
+ cond_resched();
+-
+- ext4_mb_choose_next_group(ac, &new_cr, &group, ngroups);
+ if (new_cr != cr) {
+ cr = new_cr;
+ goto repeat;
+@@ -2985,9 +2939,7 @@ __acquires(&EXT4_SB(sb)->s_mb_rb_lock)
+ struct super_block *sb = pde_data(file_inode(seq->file));
+ unsigned long position;
+
+- read_lock(&EXT4_SB(sb)->s_mb_rb_lock);
+-
+- if (*pos < 0 || *pos >= MB_NUM_ORDERS(sb) + 1)
++ if (*pos < 0 || *pos >= 2*MB_NUM_ORDERS(sb))
+ return NULL;
+ position = *pos + 1;
+ return (void *) ((unsigned long) position);
+@@ -2999,7 +2951,7 @@ static void *ext4_mb_seq_structs_summary_next(struct seq_file *seq, void *v, lof
+ unsigned long position;
+
+ ++*pos;
+- if (*pos < 0 || *pos >= MB_NUM_ORDERS(sb) + 1)
++ if (*pos < 0 || *pos >= 2*MB_NUM_ORDERS(sb))
+ return NULL;
+ position = *pos + 1;
+ return (void *) ((unsigned long) position);
+@@ -3011,29 +2963,22 @@ static int ext4_mb_seq_structs_summary_show(struct seq_file *seq, void *v)
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+ unsigned long position = ((unsigned long) v);
+ struct ext4_group_info *grp;
+- struct rb_node *n;
+- unsigned int count, min, max;
++ unsigned int count;
+
+ position--;
+ if (position >= MB_NUM_ORDERS(sb)) {
+- seq_puts(seq, "fragment_size_tree:\n");
+- n = rb_first(&sbi->s_mb_avg_fragment_size_root);
+- if (!n) {
+- seq_puts(seq, "\ttree_min: 0\n\ttree_max: 0\n\ttree_nodes: 0\n");
+- return 0;
+- }
+- grp = rb_entry(n, struct ext4_group_info, bb_avg_fragment_size_rb);
+- min = grp->bb_fragments ? grp->bb_free / grp->bb_fragments : 0;
+- count = 1;
+- while (rb_next(n)) {
+- count++;
+- n = rb_next(n);
+- }
+- grp = rb_entry(n, struct ext4_group_info, bb_avg_fragment_size_rb);
+- max = grp->bb_fragments ? grp->bb_free / grp->bb_fragments : 0;
++ position -= MB_NUM_ORDERS(sb);
++ if (position == 0)
++ seq_puts(seq, "avg_fragment_size_lists:\n");
+
+- seq_printf(seq, "\ttree_min: %u\n\ttree_max: %u\n\ttree_nodes: %u\n",
+- min, max, count);
++ count = 0;
++ read_lock(&sbi->s_mb_avg_fragment_size_locks[position]);
++ list_for_each_entry(grp, &sbi->s_mb_avg_fragment_size[position],
++ bb_avg_fragment_size_node)
++ count++;
++ read_unlock(&sbi->s_mb_avg_fragment_size_locks[position]);
++ seq_printf(seq, "\tlist_order_%u_groups: %u\n",
++ (unsigned int)position, count);
+ return 0;
+ }
+
+@@ -3043,9 +2988,11 @@ static int ext4_mb_seq_structs_summary_show(struct seq_file *seq, void *v)
+ seq_puts(seq, "max_free_order_lists:\n");
+ }
+ count = 0;
++ read_lock(&sbi->s_mb_largest_free_orders_locks[position]);
+ list_for_each_entry(grp, &sbi->s_mb_largest_free_orders[position],
+ bb_largest_free_order_node)
+ count++;
++ read_unlock(&sbi->s_mb_largest_free_orders_locks[position]);
+ seq_printf(seq, "\tlist_order_%u_groups: %u\n",
+ (unsigned int)position, count);
+
+@@ -3053,11 +3000,7 @@ static int ext4_mb_seq_structs_summary_show(struct seq_file *seq, void *v)
+ }
+
+ static void ext4_mb_seq_structs_summary_stop(struct seq_file *seq, void *v)
+-__releases(&EXT4_SB(sb)->s_mb_rb_lock)
+ {
+- struct super_block *sb = pde_data(file_inode(seq->file));
+-
+- read_unlock(&EXT4_SB(sb)->s_mb_rb_lock);
+ }
+
+ const struct seq_operations ext4_mb_seq_structs_summary_ops = {
+@@ -3170,8 +3113,9 @@ int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
+ init_rwsem(&meta_group_info[i]->alloc_sem);
+ meta_group_info[i]->bb_free_root = RB_ROOT;
+ INIT_LIST_HEAD(&meta_group_info[i]->bb_largest_free_order_node);
+- RB_CLEAR_NODE(&meta_group_info[i]->bb_avg_fragment_size_rb);
++ INIT_LIST_HEAD(&meta_group_info[i]->bb_avg_fragment_size_node);
+ meta_group_info[i]->bb_largest_free_order = -1; /* uninit */
++ meta_group_info[i]->bb_avg_fragment_size_order = -1; /* uninit */
+ meta_group_info[i]->bb_group = group;
+
+ mb_group_bb_bitmap_alloc(sb, meta_group_info[i], group);
+@@ -3420,7 +3364,24 @@ int ext4_mb_init(struct super_block *sb)
+ i++;
+ } while (i < MB_NUM_ORDERS(sb));
+
+- sbi->s_mb_avg_fragment_size_root = RB_ROOT;
++ sbi->s_mb_avg_fragment_size =
++ kmalloc_array(MB_NUM_ORDERS(sb), sizeof(struct list_head),
++ GFP_KERNEL);
++ if (!sbi->s_mb_avg_fragment_size) {
++ ret = -ENOMEM;
++ goto out;
++ }
++ sbi->s_mb_avg_fragment_size_locks =
++ kmalloc_array(MB_NUM_ORDERS(sb), sizeof(rwlock_t),
++ GFP_KERNEL);
++ if (!sbi->s_mb_avg_fragment_size_locks) {
++ ret = -ENOMEM;
++ goto out;
++ }
++ for (i = 0; i < MB_NUM_ORDERS(sb); i++) {
++ INIT_LIST_HEAD(&sbi->s_mb_avg_fragment_size[i]);
++ rwlock_init(&sbi->s_mb_avg_fragment_size_locks[i]);
++ }
+ sbi->s_mb_largest_free_orders =
+ kmalloc_array(MB_NUM_ORDERS(sb), sizeof(struct list_head),
+ GFP_KERNEL);
+@@ -3439,7 +3400,6 @@ int ext4_mb_init(struct super_block *sb)
+ INIT_LIST_HEAD(&sbi->s_mb_largest_free_orders[i]);
+ rwlock_init(&sbi->s_mb_largest_free_orders_locks[i]);
+ }
+- rwlock_init(&sbi->s_mb_rb_lock);
+
+ spin_lock_init(&sbi->s_md_lock);
+ sbi->s_mb_free_pending = 0;
+@@ -3510,6 +3470,8 @@ out_free_locality_groups:
+ free_percpu(sbi->s_locality_groups);
+ sbi->s_locality_groups = NULL;
+ out:
++ kfree(sbi->s_mb_avg_fragment_size);
++ kfree(sbi->s_mb_avg_fragment_size_locks);
+ kfree(sbi->s_mb_largest_free_orders);
+ kfree(sbi->s_mb_largest_free_orders_locks);
+ kfree(sbi->s_mb_offsets);
+@@ -3576,6 +3538,8 @@ int ext4_mb_release(struct super_block *sb)
+ kvfree(group_info);
+ rcu_read_unlock();
+ }
++ kfree(sbi->s_mb_avg_fragment_size);
++ kfree(sbi->s_mb_avg_fragment_size_locks);
+ kfree(sbi->s_mb_largest_free_orders);
+ kfree(sbi->s_mb_largest_free_orders_locks);
+ kfree(sbi->s_mb_offsets);
+@@ -5187,6 +5151,7 @@ static void ext4_mb_group_or_file(struct ext4_allocation_context *ac)
+ struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
+ int bsbits = ac->ac_sb->s_blocksize_bits;
+ loff_t size, isize;
++ bool inode_pa_eligible, group_pa_eligible;
+
+ if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
+ return;
+@@ -5194,25 +5159,27 @@ static void ext4_mb_group_or_file(struct ext4_allocation_context *ac)
+ if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
+ return;
+
++ group_pa_eligible = sbi->s_mb_group_prealloc > 0;
++ inode_pa_eligible = true;
+ size = ac->ac_o_ex.fe_logical + EXT4_C2B(sbi, ac->ac_o_ex.fe_len);
+ isize = (i_size_read(ac->ac_inode) + ac->ac_sb->s_blocksize - 1)
+ >> bsbits;
+
++ /* No point in using inode preallocation for closed files */
+ if ((size == isize) && !ext4_fs_is_busy(sbi) &&
+- !inode_is_open_for_write(ac->ac_inode)) {
+- ac->ac_flags |= EXT4_MB_HINT_NOPREALLOC;
+- return;
+- }
++ !inode_is_open_for_write(ac->ac_inode))
++ inode_pa_eligible = false;
+
+- if (sbi->s_mb_group_prealloc <= 0) {
+- ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
+- return;
+- }
+-
+- /* don't use group allocation for large files */
+ size = max(size, isize);
+- if (size > sbi->s_mb_stream_request) {
+- ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
++ /* Don't use group allocation for large files */
++ if (size > sbi->s_mb_stream_request)
++ group_pa_eligible = false;
++
++ if (!group_pa_eligible) {
++ if (inode_pa_eligible)
++ ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
++ else
++ ac->ac_flags |= EXT4_MB_HINT_NOPREALLOC;
+ return;
+ }
+
+@@ -5559,6 +5526,7 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
+ ext4_fsblk_t block = 0;
+ unsigned int inquota = 0;
+ unsigned int reserv_clstrs = 0;
++ int retries = 0;
+ u64 seq;
+
+ might_sleep();
+@@ -5661,7 +5629,8 @@ repeat:
+ ar->len = ac->ac_b_ex.fe_len;
+ }
+ } else {
+- if (ext4_mb_discard_preallocations_should_retry(sb, ac, &seq))
++ if (++retries < 3 &&
++ ext4_mb_discard_preallocations_should_retry(sb, ac, &seq))
+ goto repeat;
+ /*
+ * If block allocation fails then the pa allocated above
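
The bucket mapping described in the comment block above (fls()-based order, with single-block averages and the top order folded into the nearest bucket) is easy to exercise in isolation. A user-space model, assuming MB_NUM_ORDERS is 14 as it would be for 4K blocks; the real value is superblock-dependent:

#include <stdio.h>

#define MB_NUM_ORDERS 14		/* assumed; sb-dependent in ext4 */

static int fls32(int x)			/* kernel fls() modeled with a builtin */
{
	return x ? 32 - __builtin_clz((unsigned int)x) : 0;
}

static int mb_avg_fragment_size_order(int len)
{
	int order = fls32(len) - 2;	/* len 2..3 -> 0, 4..7 -> 1, 8..15 -> 2 */

	if (order < 0)
		return 0;		/* 1-block averages share bucket 0 */
	if (order == MB_NUM_ORDERS)
		order--;		/* clamp into the last bucket */
	return order;
}

int main(void)
{
	/* 32768 == max free blocks in a 4K-block group, exercising the clamp */
	int samples[] = { 1, 2, 3, 8, 1000, 32768 };

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(*samples); i++)
		printf("avg fragment %6d -> list %d\n", samples[i],
		       mb_avg_fragment_size_order(samples[i]));
	return 0;
}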
+diff --git a/fs/ext4/mballoc.h b/fs/ext4/mballoc.h
+index 39da92ceabf88..dcda2a943cee0 100644
+--- a/fs/ext4/mballoc.h
++++ b/fs/ext4/mballoc.h
+@@ -178,7 +178,6 @@ struct ext4_allocation_context {
+ /* copy of the best found extent taken before preallocation efforts */
+ struct ext4_free_extent ac_f_ex;
+
+- ext4_group_t ac_last_optimal_group;
+ __u32 ac_groups_considered;
+ __u32 ac_flags; /* allocation hints */
+ __u16 ac_groups_scanned;
+diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
+index 7515a465ec03a..7c90b1ab3e00d 100644
+--- a/include/asm-generic/vmlinux.lds.h
++++ b/include/asm-generic/vmlinux.lds.h
+@@ -543,10 +543,9 @@
+ */
+ #ifdef CONFIG_CFI_CLANG
+ #define TEXT_CFI_JT \
+- . = ALIGN(PMD_SIZE); \
++ ALIGN_FUNCTION(); \
+ __cfi_jt_start = .; \
+ *(.text..L.cfi.jumptable .text..L.cfi.jumptable.*) \
+- . = ALIGN(PMD_SIZE); \
+ __cfi_jt_end = .;
+ #else
+ #define TEXT_CFI_JT
+diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
+index e2d9daf7e8dd0..0fd96e92c6c65 100644
+--- a/include/linux/blk-mq.h
++++ b/include/linux/blk-mq.h
+@@ -686,10 +686,13 @@ struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set, void *queuedata,
+ \
+ __blk_mq_alloc_disk(set, queuedata, &__key); \
+ })
++struct gendisk *blk_mq_alloc_disk_for_queue(struct request_queue *q,
++ struct lock_class_key *lkclass);
+ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
+ int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
+ struct request_queue *q);
+ void blk_mq_unregister_dev(struct device *, struct request_queue *);
++void blk_mq_destroy_queue(struct request_queue *);
+
+ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set);
+ int blk_mq_alloc_sq_tag_set(struct blk_mq_tag_set *set,
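
The two declarations added above split disk allocation from queue allocation: a driver that owns a request_queue (as SCSI does) can attach a gendisk to it and later tear the queue down explicitly. A plausible pairing of the calls, inferred only from the declarations; the error conventions (ERR_PTR from blk_mq_init_queue(), NULL from the disk allocator) are assumptions:

/* Hypothetical pairing of the new blk-mq entry points declared above. */
#include <linux/blk-mq.h>
#include <linux/err.h>

static struct lock_class_key example_lkclass;

static struct gendisk *example_alloc_disk(struct blk_mq_tag_set *set)
{
	struct request_queue *q;
	struct gendisk *disk;

	q = blk_mq_init_queue(set);
	if (IS_ERR(q))
		return ERR_CAST(q);

	disk = blk_mq_alloc_disk_for_queue(q, &example_lkclass);
	if (!disk)
		blk_mq_destroy_queue(q);	/* replaces blk_cleanup_queue() */
	return disk;	/* NULL on failure, per the assumed convention */
}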
+diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
+index 62e3ff52ab033..83eb8869a8c94 100644
+--- a/include/linux/blkdev.h
++++ b/include/linux/blkdev.h
+@@ -148,6 +148,7 @@ struct gendisk {
+ #define GD_NATIVE_CAPACITY 3
+ #define GD_ADDED 4
+ #define GD_SUPPRESS_PART_SCAN 5
++#define GD_OWNS_QUEUE 6
+
+ struct mutex open_mutex; /* open/close mutex */
+ unsigned open_partitions; /* number of open partitions */
+@@ -559,7 +560,6 @@ struct request_queue {
+ #define QUEUE_FLAG_NOXMERGES 9 /* No extended merges */
+ #define QUEUE_FLAG_ADD_RANDOM 10 /* Contributes to random pool */
+ #define QUEUE_FLAG_SAME_FORCE 12 /* force complete on same CPU */
+-#define QUEUE_FLAG_DEAD 13 /* queue tear-down finished */
+ #define QUEUE_FLAG_INIT_DONE 14 /* queue is initialized */
+ #define QUEUE_FLAG_STABLE_WRITES 15 /* don't modify blks until WB is done */
+ #define QUEUE_FLAG_POLL 16 /* IO polling enabled if set */
+@@ -587,7 +587,6 @@ bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q);
+ #define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
+ #define blk_queue_dying(q) test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
+ #define blk_queue_has_srcu(q) test_bit(QUEUE_FLAG_HAS_SRCU, &(q)->queue_flags)
+-#define blk_queue_dead(q) test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)
+ #define blk_queue_init_done(q) test_bit(QUEUE_FLAG_INIT_DONE, &(q)->queue_flags)
+ #define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
+ #define blk_queue_noxmerges(q) \
+@@ -812,8 +811,6 @@ static inline u64 sb_bdev_nr_blocks(struct super_block *sb)
+
+ int bdev_disk_changed(struct gendisk *disk, bool invalidate);
+
+-struct gendisk *__alloc_disk_node(struct request_queue *q, int node_id,
+- struct lock_class_key *lkclass);
+ void put_disk(struct gendisk *disk);
+ struct gendisk *__blk_alloc_disk(int node, struct lock_class_key *lkclass);
+
+@@ -955,7 +952,6 @@ static inline unsigned int blk_max_size_offset(struct request_queue *q,
+ /*
+ * Access functions for manipulating queue properties
+ */
+-extern void blk_cleanup_queue(struct request_queue *);
+ void blk_queue_bounce_limit(struct request_queue *q, enum blk_bounce limit);
+ extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
+ extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int);
+diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
+index 4592d08459417..57aa459c6618a 100644
+--- a/include/linux/cpumask.h
++++ b/include/linux/cpumask.h
+@@ -1083,9 +1083,10 @@ cpumap_print_list_to_buf(char *buf, const struct cpumask *mask,
+ * cover a worst-case of every other cpu being on one of two nodes for a
+ * very large NR_CPUS.
+ *
+- * Use PAGE_SIZE as a minimum for smaller configurations.
++ * Use PAGE_SIZE as a minimum for smaller configurations while avoiding
++ * unsigned comparison to -1.
+ */
+-#define CPUMAP_FILE_MAX_BYTES ((((NR_CPUS * 9)/32 - 1) > PAGE_SIZE) \
++#define CPUMAP_FILE_MAX_BYTES (((NR_CPUS * 9)/32 > PAGE_SIZE) \
+ ? (NR_CPUS * 9)/32 - 1 : PAGE_SIZE)
+ #define CPULIST_FILE_MAX_BYTES (((NR_CPUS * 7)/2 > PAGE_SIZE) ? (NR_CPUS * 7)/2 : PAGE_SIZE)
+
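
The rewritten comment above points at the actual bug: with a tiny NR_CPUS the old expression computed 0 - 1 in unsigned arithmetic, which wraps and defeats the PAGE_SIZE floor. A compact user-space demonstration of the wraparound the new form avoids:

#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
	unsigned int nr_cpus = 2;	/* tiny configuration */

	/* old form: (0 - 1) wraps to UINT_MAX, so the wrong arm is chosen */
	unsigned long old_val = (((nr_cpus * 9) / 32 - 1) > PAGE_SIZE)
					? (nr_cpus * 9) / 32 - 1 : PAGE_SIZE;

	/* new form: compare first, subtract only when it cannot wrap */
	unsigned long new_val = ((nr_cpus * 9) / 32 > PAGE_SIZE)
					? (nr_cpus * 9) / 32 - 1 : PAGE_SIZE;

	printf("old=%lu new=%lu\n", old_val, new_val);	/* old=4294967295 new=4096 */
	return 0;
}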
+diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
+index fde258b3decd5..037a8d81a66cf 100644
+--- a/include/linux/serial_core.h
++++ b/include/linux/serial_core.h
+@@ -302,6 +302,23 @@ struct uart_state {
+ /* number of characters left in xmit buffer before we ask for more */
+ #define WAKEUP_CHARS 256
+
++/**
++ * uart_xmit_advance - Advance xmit buffer and account Tx'ed chars
++ * @up: uart_port structure describing the port
++ * @chars: number of characters sent
++ *
++ * This function advances the tail of the circular xmit buffer by the
++ * number of @chars transmitted and handles the accounting of transmitted
++ * bytes (into @up's icount.tx).
++ */
++static inline void uart_xmit_advance(struct uart_port *up, unsigned int chars)
++{
++ struct circ_buf *xmit = &up->state->xmit;
++
++ xmit->tail = (xmit->tail + chars) & (UART_XMIT_SIZE - 1);
++ up->icount.tx += chars;
++}
++
+ struct module;
+ struct tty_driver;
+
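
A typical user of the new helper is a driver's TX path: write a burst into the device FIFO, then advance the circular buffer once. A hypothetical sketch; fifo_write() stands in for the device-specific register access and is not a real API:

#include <linux/minmax.h>
#include <linux/serial_core.h>

static void fifo_write(struct uart_port *port, unsigned char ch);	/* made up */

static void example_tx_burst(struct uart_port *port, unsigned int fifo_room)
{
	struct circ_buf *xmit = &port->state->xmit;
	unsigned int pending = uart_circ_chars_pending(xmit);
	unsigned int count = min(fifo_room, pending);
	unsigned int i;

	for (i = 0; i < count; i++)
		fifo_write(port, xmit->buf[(xmit->tail + i) &
					   (UART_XMIT_SIZE - 1)]);

	/* one call replaces the open-coded tail update and icount.tx += count */
	uart_xmit_advance(port, count);
}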
+diff --git a/include/net/bond_3ad.h b/include/net/bond_3ad.h
+index 184105d682942..f2273bd5a4c58 100644
+--- a/include/net/bond_3ad.h
++++ b/include/net/bond_3ad.h
+@@ -15,8 +15,6 @@
+ #define PKT_TYPE_LACPDU cpu_to_be16(ETH_P_SLOW)
+ #define AD_TIMER_INTERVAL 100 /*msec*/
+
+-#define MULTICAST_LACPDU_ADDR {0x01, 0x80, 0xC2, 0x00, 0x00, 0x02}
+-
+ #define AD_LACP_SLOW 0
+ #define AD_LACP_FAST 1
+
+diff --git a/include/net/bonding.h b/include/net/bonding.h
+index 3b816ae8b1f3b..7ac1773b99224 100644
+--- a/include/net/bonding.h
++++ b/include/net/bonding.h
+@@ -785,6 +785,9 @@ extern struct rtnl_link_ops bond_link_ops;
+ /* exported from bond_sysfs_slave.c */
+ extern const struct sysfs_ops slave_sysfs_ops;
+
++/* exported from bond_3ad.c */
++extern const u8 lacpdu_mcast_addr[];
++
+ static inline netdev_tx_t bond_tx_drop(struct net_device *dev, struct sk_buff *skb)
+ {
+ dev_core_stats_tx_dropped_inc(dev);
+diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
+index 667d889b92b52..3e1cea155049b 100644
+--- a/include/scsi/scsi_host.h
++++ b/include/scsi/scsi_host.h
+@@ -557,6 +557,8 @@ struct Scsi_Host {
+ struct scsi_host_template *hostt;
+ struct scsi_transport_template *transportt;
+
++ struct kref tagset_refcnt;
++ struct completion tagset_freed;
+ /* Area to keep a shared tag map */
+ struct blk_mq_tag_set tag_set;
+
+diff --git a/include/uapi/linux/xfrm.h b/include/uapi/linux/xfrm.h
+index 65e13a099b1a0..a9f5d884560ac 100644
+--- a/include/uapi/linux/xfrm.h
++++ b/include/uapi/linux/xfrm.h
+@@ -296,7 +296,7 @@ enum xfrm_attr_type_t {
+ XFRMA_ETIMER_THRESH,
+ XFRMA_SRCADDR, /* xfrm_address_t */
+ XFRMA_COADDR, /* xfrm_address_t */
+- XFRMA_LASTUSED, /* unsigned long */
++ XFRMA_LASTUSED, /* __u64 */
+ XFRMA_POLICY_TYPE, /* struct xfrm_userpolicy_type */
+ XFRMA_MIGRATE,
+ XFRMA_ALG_AEAD, /* struct xfrm_algo_aead */
+diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
+index 602da2cfd57c8..15a6f1e93e5af 100644
+--- a/io_uring/io_uring.c
++++ b/io_uring/io_uring.c
+@@ -10951,6 +10951,9 @@ static __cold void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
+ io_poll_remove_all(ctx, NULL, true);
+ /* if we failed setting up the ctx, we might not have any rings */
+ io_iopoll_try_reap_events(ctx);
++ /* drop cached put refs after potentially doing completions */
++ if (current->io_uring)
++ io_uring_drop_tctx_refs(current);
+ }
+
+ INIT_WORK(&ctx->exit_work, io_ring_exit_work);
+diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
+index e702ca368539a..80c23f48f3b4b 100644
+--- a/kernel/cgroup/cgroup.c
++++ b/kernel/cgroup/cgroup.c
+@@ -6026,6 +6026,9 @@ struct cgroup *cgroup_get_from_id(u64 id)
+ if (!kn)
+ goto out;
+
++ if (kernfs_type(kn) != KERNFS_DIR)
++ goto put;
++
+ rcu_read_lock();
+
+ cgrp = rcu_dereference(*(void __rcu __force **)&kn->priv);
+@@ -6033,7 +6036,7 @@ struct cgroup *cgroup_get_from_id(u64 id)
+ cgrp = NULL;
+
+ rcu_read_unlock();
+-
++put:
+ kernfs_put(kn);
+ out:
+ return cgrp;
+diff --git a/kernel/workqueue.c b/kernel/workqueue.c
+index aa8a82bc67384..fc6e4f2523452 100644
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -3066,10 +3066,8 @@ static bool __flush_work(struct work_struct *work, bool from_cancel)
+ if (WARN_ON(!work->func))
+ return false;
+
+- if (!from_cancel) {
+- lock_map_acquire(&work->lockdep_map);
+- lock_map_release(&work->lockdep_map);
+- }
++ lock_map_acquire(&work->lockdep_map);
++ lock_map_release(&work->lockdep_map);
+
+ if (start_flush_work(work, &barr, from_cancel)) {
+ wait_for_completion(&barr.done);
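
Taking and releasing the work's lockdep map unconditionally is a pure annotation, not a real lock: it records "a wait on this work happens here" so lockdep can cross-check it against locks the work item itself takes, now also on the cancel path. The idiom in isolation (a sketch, not the workqueue code itself):

/* The lockdep self-annotation idiom used above; no lock is actually taken. */
#include <linux/lockdep.h>

static void annotate_wait_on(struct lockdep_map *map)
{
	/*
	 * Record a zero-length acquisition so lockdep can warn if a lock
	 * held by the caller is also taken inside the awaited work item.
	 */
	lock_map_acquire(map);
	lock_map_release(map);
}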
+diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
+index 2e24db4bff192..c399ab486557f 100644
+--- a/lib/Kconfig.debug
++++ b/lib/Kconfig.debug
+@@ -264,8 +264,10 @@ config DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT
+ config DEBUG_INFO_DWARF4
+ bool "Generate DWARF Version 4 debuginfo"
+ select DEBUG_INFO
++ depends on !CC_IS_CLANG || (CC_IS_CLANG && (AS_IS_LLVM || (AS_IS_GNU && AS_VERSION >= 23502)))
+ help
+- Generate DWARF v4 debug info. This requires gcc 4.5+ and gdb 7.0+.
++ Generate DWARF v4 debug info. This requires gcc 4.5+, binutils 2.35.2
++ if using clang without clang's integrated assembler, and gdb 7.0+.
+
+ If you have consumers of DWARF debug info that are not ready for
+ newer revisions of DWARF, you may wish to choose this or have your
+diff --git a/mm/slab_common.c b/mm/slab_common.c
+index dbd4b6f9b0e79..29ae1358d5f07 100644
+--- a/mm/slab_common.c
++++ b/mm/slab_common.c
+@@ -503,6 +503,7 @@ void slab_kmem_cache_release(struct kmem_cache *s)
+ void kmem_cache_destroy(struct kmem_cache *s)
+ {
+ int refcnt;
++ bool rcu_set;
+
+ if (unlikely(!s) || !kasan_check_byte(s))
+ return;
+@@ -510,6 +511,8 @@ void kmem_cache_destroy(struct kmem_cache *s)
+ cpus_read_lock();
+ mutex_lock(&slab_mutex);
+
++ rcu_set = s->flags & SLAB_TYPESAFE_BY_RCU;
++
+ refcnt = --s->refcount;
+ if (refcnt)
+ goto out_unlock;
+@@ -520,7 +523,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
+ out_unlock:
+ mutex_unlock(&slab_mutex);
+ cpus_read_unlock();
+- if (!refcnt && !(s->flags & SLAB_TYPESAFE_BY_RCU))
++ if (!refcnt && !rcu_set)
+ kmem_cache_release(s);
+ }
+ EXPORT_SYMBOL(kmem_cache_destroy);
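
The SLAB_TYPESAFE_BY_RCU fix above is an instance of a general rule: any field needed after the final reference drop must be sampled while the object is still guaranteed alive. The pattern in miniature, illustrative only and not kernel code:

#include <stdbool.h>

struct obj { int refcount; unsigned int flags; };
#define OBJ_DEFERRED_FREE 0x1

static void release(struct obj *o) { (void)o; /* actual teardown elided */ }

static void put_obj(struct obj *o)
{
	/* sample the flag while 'o' is certainly valid... */
	bool deferred = o->flags & OBJ_DEFERRED_FREE;

	if (--o->refcount)
		return;

	/* ...because after the last put, 'o' may already be torn down */
	if (!deferred)
		release(o);
}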
+diff --git a/mm/slub.c b/mm/slub.c
+index b1281b8654bd3..1eec942b8336c 100644
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -310,6 +310,11 @@ static inline void stat(const struct kmem_cache *s, enum stat_item si)
+ */
+ static nodemask_t slab_nodes;
+
++/*
++ * Workqueue used for flush_cpu_slab().
++ */
++static struct workqueue_struct *flushwq;
++
+ /********************************************************************
+ * Core slab cache functions
+ *******************************************************************/
+@@ -2730,7 +2735,7 @@ static void flush_all_cpus_locked(struct kmem_cache *s)
+ INIT_WORK(&sfw->work, flush_cpu_slab);
+ sfw->skip = false;
+ sfw->s = s;
+- schedule_work_on(cpu, &sfw->work);
++ queue_work_on(cpu, flushwq, &sfw->work);
+ }
+
+ for_each_online_cpu(cpu) {
+@@ -4880,6 +4885,8 @@ void __init kmem_cache_init(void)
+
+ void __init kmem_cache_init_late(void)
+ {
++ flushwq = alloc_workqueue("slub_flushwq", WQ_MEM_RECLAIM, 0);
++ WARN_ON(!flushwq);
+ }
+
+ struct kmem_cache *
+@@ -4950,6 +4957,8 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
+ /* Honor the call site pointer we received. */
+ trace_kmalloc(caller, ret, size, s->size, gfpflags);
+
++ ret = kasan_kmalloc(s, ret, size, gfpflags);
++
+ return ret;
+ }
+ EXPORT_SYMBOL(__kmalloc_track_caller);
+@@ -4981,6 +4990,8 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
+ /* Honor the call site pointer we received. */
+ trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node);
+
++ ret = kasan_kmalloc(s, ret, size, gfpflags);
++
+ return ret;
+ }
+ EXPORT_SYMBOL(__kmalloc_node_track_caller);
+@@ -5914,7 +5925,8 @@ static char *create_unique_id(struct kmem_cache *s)
+ char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL);
+ char *p = name;
+
+- BUG_ON(!name);
++ if (!name)
++ return ERR_PTR(-ENOMEM);
+
+ *p++ = ':';
+ /*
+@@ -5972,6 +5984,8 @@ static int sysfs_slab_add(struct kmem_cache *s)
+ * for the symlinks.
+ */
+ name = create_unique_id(s);
++ if (IS_ERR(name))
++ return PTR_ERR(name);
+ }
+
+ s->kobj.kset = kset;
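
With create_unique_id() now reporting allocation failure instead of BUG()ing, its caller follows the kernel's encoded-error-pointer convention. The shape of that convention, reduced to a sketch; make_name() and use_name() are stand-ins, not real functions:

#include <linux/err.h>
#include <linux/slab.h>

static char *make_name(void)
{
	char *name = kmalloc(64, GFP_KERNEL);

	if (!name)
		return ERR_PTR(-ENOMEM);	/* error encoded in the pointer */
	return name;
}

static int use_name(void)
{
	char *name = make_name();

	if (IS_ERR(name))
		return PTR_ERR(name);		/* decodes back to -ENOMEM */
	/* ... use name ... */
	kfree(name);
	return 0;
}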
+diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
+index b8f8da7ee3dea..41c1ad33d009f 100644
+--- a/net/batman-adv/hard-interface.c
++++ b/net/batman-adv/hard-interface.c
+@@ -10,6 +10,7 @@
+ #include <linux/atomic.h>
+ #include <linux/byteorder/generic.h>
+ #include <linux/container_of.h>
++#include <linux/errno.h>
+ #include <linux/gfp.h>
+ #include <linux/if.h>
+ #include <linux/if_arp.h>
+@@ -700,6 +701,9 @@ int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface,
+ int max_header_len = batadv_max_header_len();
+ int ret;
+
++ if (hard_iface->net_dev->mtu < ETH_MIN_MTU + max_header_len)
++ return -EINVAL;
++
+ if (hard_iface->if_status != BATADV_IF_NOT_IN_USE)
+ goto out;
+
+diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
+index 9a0ae59cdc500..4f385d52a1c49 100644
+--- a/net/bridge/netfilter/ebtables.c
++++ b/net/bridge/netfilter/ebtables.c
+@@ -1040,8 +1040,10 @@ static int do_replace_finish(struct net *net, struct ebt_replace *repl,
+ goto free_iterate;
+ }
+
+- if (repl->valid_hooks != t->valid_hooks)
++ if (repl->valid_hooks != t->valid_hooks) {
++ ret = -EINVAL;
+ goto free_unlock;
++ }
+
+ if (repl->num_counters && repl->num_counters != t->private->nentries) {
+ ret = -EINVAL;
+diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
+index 6aee04f75e3e4..bcba61ef5b378 100644
+--- a/net/core/flow_dissector.c
++++ b/net/core/flow_dissector.c
+@@ -1572,9 +1572,8 @@ static inline void __flow_hash_consistentify(struct flow_keys *keys)
+
+ switch (keys->control.addr_type) {
+ case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
+- addr_diff = (__force u32)keys->addrs.v4addrs.dst -
+- (__force u32)keys->addrs.v4addrs.src;
+- if (addr_diff < 0)
++ if ((__force u32)keys->addrs.v4addrs.dst <
++ (__force u32)keys->addrs.v4addrs.src)
+ swap(keys->addrs.v4addrs.src, keys->addrs.v4addrs.dst);
+
+ if ((__force u16)keys->ports.dst <
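
The flow-dissector fix above removes a subtraction whose result landed in a signed int: two u32 addresses can differ by more than INT_MAX, so the sign test could trigger a spurious swap. A short user-space demonstration, assuming the usual two's-complement conversion on the cast:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t src = 0x00000001, dst = 0xc0000000;	/* dst > src */

	/* old logic: the difference exceeds INT_MAX and turns negative */
	int addr_diff = (int)(dst - src);
	printf("buggy swap decision: %d\n", addr_diff < 0);	/* 1 (wrong) */

	/* fixed logic: compare the values directly, no subtraction */
	printf("fixed swap decision: %d\n", dst < src);		/* 0 (right) */
	return 0;
}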
+diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
+index 9f6f4a41245d4..1012012a061fe 100644
+--- a/net/ipv6/af_inet6.c
++++ b/net/ipv6/af_inet6.c
+@@ -1069,13 +1069,13 @@ static int __init inet6_init(void)
+ for (r = &inetsw6[0]; r < &inetsw6[SOCK_MAX]; ++r)
+ INIT_LIST_HEAD(r);
+
++ raw_hashinfo_init(&raw_v6_hashinfo);
++
+ if (disable_ipv6_mod) {
+ pr_info("Loaded, but administratively disabled, reboot required to enable\n");
+ goto out;
+ }
+
+- raw_hashinfo_init(&raw_v6_hashinfo);
+-
+ err = proto_register(&tcpv6_prot, 1);
+ if (err)
+ goto out;
+diff --git a/net/netfilter/nf_conntrack_ftp.c b/net/netfilter/nf_conntrack_ftp.c
+index 0d9332e9cf71a..617f744a2e3a3 100644
+--- a/net/netfilter/nf_conntrack_ftp.c
++++ b/net/netfilter/nf_conntrack_ftp.c
+@@ -33,6 +33,7 @@ MODULE_AUTHOR("Rusty Russell <rusty@rustcorp.com.au>");
+ MODULE_DESCRIPTION("ftp connection tracking helper");
+ MODULE_ALIAS("ip_conntrack_ftp");
+ MODULE_ALIAS_NFCT_HELPER(HELPER_NAME);
++static DEFINE_SPINLOCK(nf_ftp_lock);
+
+ #define MAX_PORTS 8
+ static u_int16_t ports[MAX_PORTS];
+@@ -409,7 +410,8 @@ static int help(struct sk_buff *skb,
+ }
+ datalen = skb->len - dataoff;
+
+- spin_lock_bh(&ct->lock);
++	/* seqadj (nat) uses ct->lock internally; nf_nat_ftp would cause a deadlock */
++ spin_lock_bh(&nf_ftp_lock);
+ fb_ptr = skb->data + dataoff;
+
+ ends_in_nl = (fb_ptr[datalen - 1] == '\n');
+@@ -538,7 +540,7 @@ out_update_nl:
+ if (ends_in_nl)
+ update_nl_seq(ct, seq, ct_ftp_info, dir, skb);
+ out:
+- spin_unlock_bh(&ct->lock);
++ spin_unlock_bh(&nf_ftp_lock);
+ return ret;
+ }
+
+diff --git a/net/netfilter/nf_conntrack_irc.c b/net/netfilter/nf_conntrack_irc.c
+index 992decbcaa5c1..5703846bea3b6 100644
+--- a/net/netfilter/nf_conntrack_irc.c
++++ b/net/netfilter/nf_conntrack_irc.c
+@@ -157,15 +157,37 @@ static int help(struct sk_buff *skb, unsigned int protoff,
+ data = ib_ptr;
+ data_limit = ib_ptr + datalen;
+
+- /* strlen("\1DCC SENT t AAAAAAAA P\1\n")=24
+- * 5+MINMATCHLEN+strlen("t AAAAAAAA P\1\n")=14 */
+- while (data < data_limit - (19 + MINMATCHLEN)) {
+- if (memcmp(data, "\1DCC ", 5)) {
++ /* Skip any whitespace */
++ while (data < data_limit - 10) {
++ if (*data == ' ' || *data == '\r' || *data == '\n')
++ data++;
++ else
++ break;
++ }
++
++ /* strlen("PRIVMSG x ")=10 */
++ if (data < data_limit - 10) {
++ if (strncasecmp("PRIVMSG ", data, 8))
++ goto out;
++ data += 8;
++ }
++
++ /* strlen(" :\1DCC SENT t AAAAAAAA P\1\n")=26
++ * 7+MINMATCHLEN+strlen("t AAAAAAAA P\1\n")=26
++ */
++ while (data < data_limit - (21 + MINMATCHLEN)) {
++ /* Find first " :", the start of message */
++ if (memcmp(data, " :", 2)) {
+ data++;
+ continue;
+ }
++ data += 2;
++
++ /* then check that place only for the DCC command */
++ if (memcmp(data, "\1DCC ", 5))
++ goto out;
+ data += 5;
+- /* we have at least (19+MINMATCHLEN)-5 bytes valid data left */
++ /* we have at least (21+MINMATCHLEN)-(2+5) bytes valid data left */
+
+ iph = ip_hdr(skb);
+ pr_debug("DCC found in master %pI4:%u %pI4:%u\n",
+@@ -181,7 +203,7 @@ static int help(struct sk_buff *skb, unsigned int protoff,
+ pr_debug("DCC %s detected\n", dccprotos[i]);
+
+ /* we have at least
+- * (19+MINMATCHLEN)-5-dccprotos[i].matchlen bytes valid
++ * (21+MINMATCHLEN)-7-dccprotos[i].matchlen bytes valid
+ * data left (== 14/13 bytes) */
+ if (parse_dcc(data, data_limit, &dcc_ip,
+ &dcc_port, &addr_beg_p, &addr_end_p)) {
+diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c
+index b83dc9bf0a5dd..78fd9122b70c7 100644
+--- a/net/netfilter/nf_conntrack_sip.c
++++ b/net/netfilter/nf_conntrack_sip.c
+@@ -477,7 +477,7 @@ static int ct_sip_walk_headers(const struct nf_conn *ct, const char *dptr,
+ return ret;
+ if (ret == 0)
+ break;
+- dataoff += *matchoff;
++ dataoff = *matchoff;
+ }
+ *in_header = 0;
+ }
+@@ -489,7 +489,7 @@ static int ct_sip_walk_headers(const struct nf_conn *ct, const char *dptr,
+ break;
+ if (ret == 0)
+ return ret;
+- dataoff += *matchoff;
++ dataoff = *matchoff;
+ }
+
+ if (in_header)
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index 848cc81d69926..2fde193c3d26a 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -2197,7 +2197,6 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
+ struct netlink_ext_ack *extack)
+ {
+ const struct nlattr * const *nla = ctx->nla;
+- struct nft_stats __percpu *stats = NULL;
+ struct nft_table *table = ctx->table;
+ struct nft_base_chain *basechain;
+ struct net *net = ctx->net;
+@@ -2212,6 +2211,7 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
+ return -EOVERFLOW;
+
+ if (nla[NFTA_CHAIN_HOOK]) {
++ struct nft_stats __percpu *stats = NULL;
+ struct nft_chain_hook hook;
+
+ if (flags & NFT_CHAIN_BINDING)
+@@ -2243,8 +2243,11 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
+ if (err < 0) {
+ nft_chain_release_hook(&hook);
+ kfree(basechain);
++ free_percpu(stats);
+ return err;
+ }
++ if (stats)
++ static_branch_inc(&nft_counters_enabled);
+ } else {
+ if (flags & NFT_CHAIN_BASE)
+ return -EINVAL;
+@@ -2319,9 +2322,6 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
+ goto err_unregister_hook;
+ }
+
+- if (stats)
+- static_branch_inc(&nft_counters_enabled);
+-
+ table->use++;
+
+ return 0;
+diff --git a/net/netfilter/nfnetlink_osf.c b/net/netfilter/nfnetlink_osf.c
+index 0fa2e20304272..ee6840bd59337 100644
+--- a/net/netfilter/nfnetlink_osf.c
++++ b/net/netfilter/nfnetlink_osf.c
+@@ -269,6 +269,7 @@ bool nf_osf_find(const struct sk_buff *skb,
+ struct nf_osf_hdr_ctx ctx;
+ const struct tcphdr *tcp;
+ struct tcphdr _tcph;
++ bool found = false;
+
+ memset(&ctx, 0, sizeof(ctx));
+
+@@ -283,10 +284,11 @@ bool nf_osf_find(const struct sk_buff *skb,
+
+ data->genre = f->genre;
+ data->version = f->version;
++ found = true;
+ break;
+ }
+
+- return true;
++ return found;
+ }
+ EXPORT_SYMBOL_GPL(nf_osf_find);
+
+diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
+index ac366c99086fd..7d7f7bac0216a 100644
+--- a/net/sched/cls_api.c
++++ b/net/sched/cls_api.c
+@@ -2136,6 +2136,7 @@ replay:
+ }
+
+ if (chain->tmplt_ops && chain->tmplt_ops != tp->ops) {
++ tfilter_put(tp, fh);
+ NL_SET_ERR_MSG(extack, "Chain template is set to a different filter kind");
+ err = -EINVAL;
+ goto errout;
+diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
+index 0b941dd63d268..86675a79da1e4 100644
+--- a/net/sched/sch_taprio.c
++++ b/net/sched/sch_taprio.c
+@@ -67,6 +67,7 @@ struct taprio_sched {
+ u32 flags;
+ enum tk_offsets tk_offset;
+ int clockid;
++ bool offloaded;
+ atomic64_t picos_per_byte; /* Using picoseconds because for 10Gbps+
+ * speeds it's sub-nanoseconds per byte
+ */
+@@ -1279,6 +1280,8 @@ static int taprio_enable_offload(struct net_device *dev,
+ goto done;
+ }
+
++ q->offloaded = true;
++
+ done:
+ taprio_offload_free(offload);
+
+@@ -1293,12 +1296,9 @@ static int taprio_disable_offload(struct net_device *dev,
+ struct tc_taprio_qopt_offload *offload;
+ int err;
+
+- if (!FULL_OFFLOAD_IS_ENABLED(q->flags))
++ if (!q->offloaded)
+ return 0;
+
+- if (!ops->ndo_setup_tc)
+- return -EOPNOTSUPP;
+-
+ offload = taprio_offload_alloc(0);
+ if (!offload) {
+ NL_SET_ERR_MSG(extack,
+@@ -1314,6 +1314,8 @@ static int taprio_disable_offload(struct net_device *dev,
+ goto out;
+ }
+
++ q->offloaded = false;
++
+ out:
+ taprio_offload_free(offload);
+
+@@ -1949,12 +1951,14 @@ start_error:
+
+ static struct Qdisc *taprio_leaf(struct Qdisc *sch, unsigned long cl)
+ {
+- struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);
++ struct taprio_sched *q = qdisc_priv(sch);
++ struct net_device *dev = qdisc_dev(sch);
++ unsigned int ntx = cl - 1;
+
+- if (!dev_queue)
++ if (ntx >= dev->num_tx_queues)
+ return NULL;
+
+- return dev_queue->qdisc_sleeping;
++ return q->qdiscs[ntx];
+ }
+
+ static unsigned long taprio_find(struct Qdisc *sch, u32 classid)
+diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
+index 1f3bb1f6b1f7b..8095876b66eb6 100644
+--- a/net/smc/smc_core.c
++++ b/net/smc/smc_core.c
+@@ -2148,7 +2148,7 @@ static struct smc_buf_desc *smcr_new_buf_create(struct smc_link_group *lgr,
+ static int smcr_buf_map_usable_links(struct smc_link_group *lgr,
+ struct smc_buf_desc *buf_desc, bool is_rmb)
+ {
+- int i, rc = 0;
++ int i, rc = 0, cnt = 0;
+
+ /* protect against parallel link reconfiguration */
+ mutex_lock(&lgr->llc_conf_mutex);
+@@ -2161,9 +2161,12 @@ static int smcr_buf_map_usable_links(struct smc_link_group *lgr,
+ rc = -ENOMEM;
+ goto out;
+ }
++ cnt++;
+ }
+ out:
+ mutex_unlock(&lgr->llc_conf_mutex);
++ if (!rc && !cnt)
++ rc = -EINVAL;
+ return rc;
+ }
+
+diff --git a/scripts/Makefile.debug b/scripts/Makefile.debug
+index 9f39b0130551f..8cf1cb22dd934 100644
+--- a/scripts/Makefile.debug
++++ b/scripts/Makefile.debug
+@@ -1,20 +1,19 @@
+ DEBUG_CFLAGS :=
++debug-flags-y := -g
+
+ ifdef CONFIG_DEBUG_INFO_SPLIT
+ DEBUG_CFLAGS += -gsplit-dwarf
+-else
+-DEBUG_CFLAGS += -g
+ endif
+
+-ifndef CONFIG_AS_IS_LLVM
+-KBUILD_AFLAGS += -Wa,-gdwarf-2
+-endif
+-
+-ifndef CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT
+-dwarf-version-$(CONFIG_DEBUG_INFO_DWARF4) := 4
+-dwarf-version-$(CONFIG_DEBUG_INFO_DWARF5) := 5
+-DEBUG_CFLAGS += -gdwarf-$(dwarf-version-y)
++debug-flags-$(CONFIG_DEBUG_INFO_DWARF4) += -gdwarf-4
++debug-flags-$(CONFIG_DEBUG_INFO_DWARF5) += -gdwarf-5
++ifeq ($(CONFIG_CC_IS_CLANG)$(CONFIG_AS_IS_GNU),yy)
++# Clang does not pass -g or -gdwarf-* option down to GAS.
++# Add -Wa, prefix to explicitly specify the flags.
++KBUILD_AFLAGS += $(addprefix -Wa$(comma), $(debug-flags-y))
+ endif
++DEBUG_CFLAGS += $(debug-flags-y)
++KBUILD_AFLAGS += $(debug-flags-y)
+
+ ifdef CONFIG_DEBUG_INFO_REDUCED
+ DEBUG_CFLAGS += -fno-var-tracking
+@@ -29,5 +28,5 @@ KBUILD_AFLAGS += -gz=zlib
+ KBUILD_LDFLAGS += --compress-debug-sections=zlib
+ endif
+
+-KBUILD_CFLAGS += $(DEBUG_CFLAGS)
++KBUILD_CFLAGS += $(DEBUG_CFLAGS)
+ export DEBUG_CFLAGS
+diff --git a/sound/core/init.c b/sound/core/init.c
+index 726a8353201f8..4eacfafa41730 100644
+--- a/sound/core/init.c
++++ b/sound/core/init.c
+@@ -178,10 +178,8 @@ int snd_card_new(struct device *parent, int idx, const char *xid,
+ return -ENOMEM;
+
+ err = snd_card_init(card, parent, idx, xid, module, extra_size);
+- if (err < 0) {
+- kfree(card);
+- return err;
+- }
++ if (err < 0)
++ return err; /* card is freed by error handler */
+
+ *card_ret = card;
+ return 0;
+@@ -231,7 +229,7 @@ int snd_devm_card_new(struct device *parent, int idx, const char *xid,
+ card->managed = true;
+ err = snd_card_init(card, parent, idx, xid, module, extra_size);
+ if (err < 0) {
+- devres_free(card);
++ devres_free(card); /* in managed mode, we need to free manually */
+ return err;
+ }
+
+@@ -293,6 +291,8 @@ static int snd_card_init(struct snd_card *card, struct device *parent,
+ mutex_unlock(&snd_card_mutex);
+ dev_err(parent, "cannot find the slot for index %d (range 0-%i), error: %d\n",
+ idx, snd_ecards_limit - 1, err);
++ if (!card->managed)
++ kfree(card); /* manually free here, as no destructor called */
+ return err;
+ }
+ set_bit(idx, snd_cards_lock); /* lock it */
+diff --git a/sound/pci/hda/hda_bind.c b/sound/pci/hda/hda_bind.c
+index c572fb5886d5d..7af2515735957 100644
+--- a/sound/pci/hda/hda_bind.c
++++ b/sound/pci/hda/hda_bind.c
+@@ -157,10 +157,10 @@ static int hda_codec_driver_remove(struct device *dev)
+ return codec->bus->core.ext_ops->hdev_detach(&codec->core);
+ }
+
+- refcount_dec(&codec->pcm_ref);
+ snd_hda_codec_disconnect_pcms(codec);
+ snd_hda_jack_tbl_disconnect(codec);
+- wait_event(codec->remove_sleep, !refcount_read(&codec->pcm_ref));
++ if (!refcount_dec_and_test(&codec->pcm_ref))
++ wait_event(codec->remove_sleep, !refcount_read(&codec->pcm_ref));
+ snd_power_sync_ref(codec->bus->card);
+
+ if (codec->patch_ops.free)
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index b20694fd69dea..6f30c374f896e 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -2550,6 +2550,8 @@ static const struct pci_device_id azx_ids[] = {
+ /* 5 Series/3400 */
+ { PCI_DEVICE(0x8086, 0x3b56),
+ .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH_NOPM },
++ { PCI_DEVICE(0x8086, 0x3b57),
++ .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH_NOPM },
+ /* Poulsbo */
+ { PCI_DEVICE(0x8086, 0x811b),
+ .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH_BASE },
+diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
+index 6c209cd26c0ca..c9d9aa6351ecf 100644
+--- a/sound/pci/hda/patch_hdmi.c
++++ b/sound/pci/hda/patch_hdmi.c
+@@ -170,6 +170,8 @@ struct hdmi_spec {
+ bool dyn_pcm_no_legacy;
+ /* hdmi interrupt trigger control flag for Nvidia codec */
+ bool hdmi_intr_trig_ctrl;
++ bool nv_dp_workaround; /* workaround DP audio infoframe for Nvidia */
++
+ bool intel_hsw_fixup; /* apply Intel platform-specific fixups */
+ /*
+ * Non-generic VIA/NVIDIA specific
+@@ -679,15 +681,24 @@ static void hdmi_pin_setup_infoframe(struct hda_codec *codec,
+ int ca, int active_channels,
+ int conn_type)
+ {
++ struct hdmi_spec *spec = codec->spec;
+ union audio_infoframe ai;
+
+ memset(&ai, 0, sizeof(ai));
+- if (conn_type == 0) { /* HDMI */
++ if ((conn_type == 0) || /* HDMI */
++ /* Nvidia DisplayPort: Nvidia HW expects same layout as HDMI */
++ (conn_type == 1 && spec->nv_dp_workaround)) {
+ struct hdmi_audio_infoframe *hdmi_ai = &ai.hdmi;
+
+- hdmi_ai->type = 0x84;
+- hdmi_ai->ver = 0x01;
+- hdmi_ai->len = 0x0a;
++ if (conn_type == 0) { /* HDMI */
++ hdmi_ai->type = 0x84;
++ hdmi_ai->ver = 0x01;
++ hdmi_ai->len = 0x0a;
++		} else { /* Nvidia DP */
++ hdmi_ai->type = 0x84;
++ hdmi_ai->ver = 0x1b;
++ hdmi_ai->len = 0x11 << 2;
++ }
+ hdmi_ai->CC02_CT47 = active_channels - 1;
+ hdmi_ai->CA = ca;
+ hdmi_checksum_audio_infoframe(hdmi_ai);
+@@ -3617,6 +3628,7 @@ static int patch_nvhdmi_2ch(struct hda_codec *codec)
+ spec->pcm_playback.rates = SUPPORTED_RATES;
+ spec->pcm_playback.maxbps = SUPPORTED_MAXBPS;
+ spec->pcm_playback.formats = SUPPORTED_FORMATS;
++ spec->nv_dp_workaround = true;
+ return 0;
+ }
+
+@@ -3756,6 +3768,7 @@ static int patch_nvhdmi(struct hda_codec *codec)
+ spec->chmap.ops.chmap_cea_alloc_validate_get_type =
+ nvhdmi_chmap_cea_alloc_validate_get_type;
+ spec->chmap.ops.chmap_validate = nvhdmi_chmap_validate;
++ spec->nv_dp_workaround = true;
+
+ codec->link_down_at_suspend = 1;
+
+@@ -3779,6 +3792,7 @@ static int patch_nvhdmi_legacy(struct hda_codec *codec)
+ spec->chmap.ops.chmap_cea_alloc_validate_get_type =
+ nvhdmi_chmap_cea_alloc_validate_get_type;
+ spec->chmap.ops.chmap_validate = nvhdmi_chmap_validate;
++ spec->nv_dp_workaround = true;
+
+ codec->link_down_at_suspend = 1;
+
+@@ -3984,6 +3998,7 @@ static int tegra_hdmi_init(struct hda_codec *codec)
+
+ generic_hdmi_init_per_pins(codec);
+
++ codec->depop_delay = 10;
+ codec->patch_ops.build_pcms = tegra_hdmi_build_pcms;
+ spec->chmap.ops.chmap_cea_alloc_validate_get_type =
+ nvhdmi_chmap_cea_alloc_validate_get_type;
+@@ -3992,6 +4007,7 @@ static int tegra_hdmi_init(struct hda_codec *codec)
+ spec->chmap.ops.chmap_cea_alloc_validate_get_type =
+ nvhdmi_chmap_cea_alloc_validate_get_type;
+ spec->chmap.ops.chmap_validate = nvhdmi_chmap_validate;
++ spec->nv_dp_workaround = true;
+
+ return 0;
+ }
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 799f6bf266dd0..9614b63415a8e 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -7037,6 +7037,8 @@ enum {
+ ALC294_FIXUP_ASUS_GU502_HP,
+ ALC294_FIXUP_ASUS_GU502_PINS,
+ ALC294_FIXUP_ASUS_GU502_VERBS,
++ ALC294_FIXUP_ASUS_G513_PINS,
++ ALC285_FIXUP_ASUS_G533Z_PINS,
+ ALC285_FIXUP_HP_GPIO_LED,
+ ALC285_FIXUP_HP_MUTE_LED,
+ ALC236_FIXUP_HP_GPIO_LED,
+@@ -8374,6 +8376,24 @@ static const struct hda_fixup alc269_fixups[] = {
+ [ALC294_FIXUP_ASUS_GU502_HP] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc294_fixup_gu502_hp,
++ },
++ [ALC294_FIXUP_ASUS_G513_PINS] = {
++ .type = HDA_FIXUP_PINS,
++ .v.pins = (const struct hda_pintbl[]) {
++ { 0x19, 0x03a11050 }, /* front HP mic */
++ { 0x1a, 0x03a11c30 }, /* rear external mic */
++ { 0x21, 0x03211420 }, /* front HP out */
++ { }
++ },
++ },
++ [ALC285_FIXUP_ASUS_G533Z_PINS] = {
++ .type = HDA_FIXUP_PINS,
++ .v.pins = (const struct hda_pintbl[]) {
++ { 0x14, 0x90170120 },
++ { }
++ },
++ .chained = true,
++ .chain_id = ALC294_FIXUP_ASUS_G513_PINS,
+ },
+ [ALC294_FIXUP_ASUS_COEF_1B] = {
+ .type = HDA_FIXUP_VERBS,
+@@ -9114,6 +9134,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x1028, 0x0871, "Dell Precision 3630", ALC255_FIXUP_DELL_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1028, 0x0872, "Dell Precision 3630", ALC255_FIXUP_DELL_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1028, 0x0873, "Dell Precision 3930", ALC255_FIXUP_DUMMY_LINEOUT_VERB),
++ SND_PCI_QUIRK(0x1028, 0x087d, "Dell Precision 5530", ALC289_FIXUP_DUAL_SPK),
+ SND_PCI_QUIRK(0x1028, 0x08ad, "Dell WYSE AIO", ALC225_FIXUP_DELL_WYSE_AIO_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1028, 0x08ae, "Dell WYSE NB", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1028, 0x0935, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB),
+@@ -9130,6 +9151,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x1028, 0x0a9d, "Dell Latitude 5430", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1028, 0x0a9e, "Dell Latitude 5430", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1028, 0x0b19, "Dell XPS 15 9520", ALC289_FIXUP_DUAL_SPK),
++ SND_PCI_QUIRK(0x1028, 0x0b1a, "Dell Precision 5570", ALC289_FIXUP_DUAL_SPK),
+ SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
+@@ -9257,6 +9279,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x103c, 0x8896, "HP EliteBook 855 G8 Notebook PC", ALC285_FIXUP_HP_MUTE_LED),
+ SND_PCI_QUIRK(0x103c, 0x8898, "HP EliteBook 845 G8 Notebook PC", ALC285_FIXUP_HP_LIMIT_INT_MIC_BOOST),
+ SND_PCI_QUIRK(0x103c, 0x88d0, "HP Pavilion 15-eh1xxx (mainboard 88D0)", ALC287_FIXUP_HP_GPIO_LED),
++ SND_PCI_QUIRK(0x103c, 0x8902, "HP OMEN 16", ALC285_FIXUP_HP_MUTE_LED),
+ SND_PCI_QUIRK(0x103c, 0x896e, "HP EliteBook x360 830 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x8971, "HP EliteBook 830 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x8972, "HP EliteBook 840 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+@@ -9304,10 +9327,11 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x1043, 0x13b0, "ASUS Z550SA", ALC256_FIXUP_ASUS_MIC),
+ SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_ASUS_ZENBOOK),
+ SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A),
++ SND_PCI_QUIRK(0x1043, 0x1662, "ASUS GV301QH", ALC294_FIXUP_ASUS_DUAL_SPK),
++ SND_PCI_QUIRK(0x1043, 0x16b2, "ASUS GU603", ALC289_FIXUP_ASUS_GA401),
+ SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC),
+ SND_PCI_QUIRK(0x1043, 0x1740, "ASUS UX430UA", ALC295_FIXUP_ASUS_DACS),
+ SND_PCI_QUIRK(0x1043, 0x17d1, "ASUS UX431FL", ALC294_FIXUP_ASUS_DUAL_SPK),
+- SND_PCI_QUIRK(0x1043, 0x1662, "ASUS GV301QH", ALC294_FIXUP_ASUS_DUAL_SPK),
+ SND_PCI_QUIRK(0x1043, 0x1881, "ASUS Zephyrus S/M", ALC294_FIXUP_ASUS_GX502_PINS),
+ SND_PCI_QUIRK(0x1043, 0x18b1, "Asus MJ401TA", ALC256_FIXUP_ASUS_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1043, 0x18f1, "Asus FX505DT", ALC256_FIXUP_ASUS_HEADSET_MIC),
+@@ -9323,14 +9347,16 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x1043, 0x1b13, "Asus U41SV", ALC269_FIXUP_INV_DMIC),
+ SND_PCI_QUIRK(0x1043, 0x1bbd, "ASUS Z550MA", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1043, 0x1c23, "Asus X55U", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
++ SND_PCI_QUIRK(0x1043, 0x1c92, "ASUS ROG Strix G15", ALC285_FIXUP_ASUS_G533Z_PINS),
+ SND_PCI_QUIRK(0x1043, 0x1ccd, "ASUS X555UB", ALC256_FIXUP_ASUS_MIC),
++ SND_PCI_QUIRK(0x1043, 0x1d42, "ASUS Zephyrus G14 2022", ALC289_FIXUP_ASUS_GA401),
+ SND_PCI_QUIRK(0x1043, 0x1d4e, "ASUS TM420", ALC256_FIXUP_ASUS_HPE),
+ SND_PCI_QUIRK(0x1043, 0x1e11, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA502),
+ SND_PCI_QUIRK(0x1043, 0x1e51, "ASUS Zephyrus M15", ALC294_FIXUP_ASUS_GU502_PINS),
++ SND_PCI_QUIRK(0x1043, 0x1e5e, "ASUS ROG Strix G513", ALC294_FIXUP_ASUS_G513_PINS),
+ SND_PCI_QUIRK(0x1043, 0x1e8e, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA401),
++ SND_PCI_QUIRK(0x1043, 0x1c52, "ASUS Zephyrus G15 2022", ALC289_FIXUP_ASUS_GA401),
+ SND_PCI_QUIRK(0x1043, 0x1f11, "ASUS Zephyrus G14", ALC289_FIXUP_ASUS_GA401),
+- SND_PCI_QUIRK(0x1043, 0x1d42, "ASUS Zephyrus G14 2022", ALC289_FIXUP_ASUS_GA401),
+- SND_PCI_QUIRK(0x1043, 0x16b2, "ASUS GU603", ALC289_FIXUP_ASUS_GA401),
+ SND_PCI_QUIRK(0x1043, 0x3030, "ASUS ZN270IE", ALC256_FIXUP_ASUS_AIO_GPIO2),
+ SND_PCI_QUIRK(0x1043, 0x831a, "ASUS P901", ALC269_FIXUP_STEREO_DMIC),
+ SND_PCI_QUIRK(0x1043, 0x834a, "ASUS S101", ALC269_FIXUP_STEREO_DMIC),
+@@ -9532,6 +9558,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
+ SND_PCI_QUIRK(0x1849, 0x1233, "ASRock NUC Box 1100", ALC233_FIXUP_NO_AUDIO_JACK),
+ SND_PCI_QUIRK(0x19e5, 0x3204, "Huawei MACH-WX9", ALC256_FIXUP_HUAWEI_MACH_WX9_PINS),
++ SND_PCI_QUIRK(0x19e5, 0x320f, "Huawei WRT-WX9 ", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1b35, 0x1235, "CZC B20", ALC269_FIXUP_CZC_B20),
+ SND_PCI_QUIRK(0x1b35, 0x1236, "CZC TMI", ALC269_FIXUP_CZC_TMI),
+ SND_PCI_QUIRK(0x1b35, 0x1237, "CZC L101", ALC269_FIXUP_CZC_L101),
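The G533Z entry added above shows how quirk chaining composes pin tables: its own table overrides NID 0x14, and the .chained/.chain_id pair then applies the full ALC294_FIXUP_ASUS_G513_PINS table as well. A standalone model of that resolution walk (illustrative C, not the snd_hda fixup API; the "internal speaker" label for 0x14 is an assumption):

    /* Toy model of HDA chained fixups; mirrors the chain logic, not the API. */
    #include <stdbool.h>
    #include <stdio.h>

    struct pintbl { unsigned char nid; unsigned int val; };

    struct fixup {
        const struct pintbl *pins;   /* terminated by nid == 0 */
        bool chained;
        int chain_id;                /* next fixup to apply when chained */
    };

    enum { FIX_G513, FIX_G533Z };

    static const struct pintbl g513_pins[] = {
        { 0x19, 0x03a11050 },  /* front HP mic */
        { 0x1a, 0x03a11c30 },  /* rear external mic */
        { 0x21, 0x03211420 },  /* front HP out */
        { 0 }
    };
    static const struct pintbl g533z_pins[] = {
        { 0x14, 0x90170120 },  /* internal speaker (assumption) */
        { 0 }
    };

    static const struct fixup fixups[] = {
        [FIX_G513]  = { .pins = g513_pins },
        [FIX_G533Z] = { .pins = g533z_pins, .chained = true,
                        .chain_id = FIX_G513 },
    };

    static void apply_fixup(int id)
    {
        for (;;) {
            const struct fixup *f = &fixups[id];
            for (const struct pintbl *p = f->pins; p->nid; p++)
                printf("pin 0x%02x -> 0x%08x\n", p->nid, p->val);
            if (!f->chained)
                break;
            id = f->chain_id;   /* follow the chain, as the HDA core does */
        }
    }

    int main(void)
    {
        apply_fixup(FIX_G533Z);  /* pin 0x14 first, then the three G513 pins */
        return 0;
    }

Chaining lets a board-specific fixup reuse a shared pin table while overriding only what differs, which is why the G533Z entry stays one line long.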
+diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
+index ff2aa13b7b26f..5d105c44b46df 100644
+--- a/sound/usb/endpoint.c
++++ b/sound/usb/endpoint.c
+@@ -758,8 +758,7 @@ bool snd_usb_endpoint_compatible(struct snd_usb_audio *chip,
+ * The endpoint needs to be closed via snd_usb_endpoint_close() later.
+ *
+ * Note that this function doesn't configure the endpoint. The substream
+- * needs to set it up later via snd_usb_endpoint_set_params() and
+- * snd_usb_endpoint_prepare().
++ * needs to set it up later via snd_usb_endpoint_configure().
+ */
+ struct snd_usb_endpoint *
+ snd_usb_endpoint_open(struct snd_usb_audio *chip,
+@@ -1293,13 +1292,12 @@ out_of_memory:
+ /*
+ * snd_usb_endpoint_set_params: configure an snd_usb_endpoint
+ *
+- * It's called either from hw_params callback.
+ * Determine the number of URBs to be used on this endpoint.
+ * An endpoint must be configured before it can be started.
+ * An endpoint that is already running can not be reconfigured.
+ */
+-int snd_usb_endpoint_set_params(struct snd_usb_audio *chip,
+- struct snd_usb_endpoint *ep)
++static int snd_usb_endpoint_set_params(struct snd_usb_audio *chip,
++ struct snd_usb_endpoint *ep)
+ {
+ const struct audioformat *fmt = ep->cur_audiofmt;
+ int err;
+@@ -1382,18 +1380,18 @@ static int init_sample_rate(struct snd_usb_audio *chip,
+ }
+
+ /*
+- * snd_usb_endpoint_prepare: Prepare the endpoint
++ * snd_usb_endpoint_configure: Configure the endpoint
+ *
+ * This function sets up the EP to a fully usable state.
+- * It's called either from prepare callback.
++ * It's called from either the hw_params or the prepare callback.
+ * The function checks need_setup flag, and performs nothing unless needed,
+ * so it's safe to call this multiple times.
+ *
+ * This returns zero if unchanged, 1 if the configuration has changed,
+ * or a negative error code.
+ */
+-int snd_usb_endpoint_prepare(struct snd_usb_audio *chip,
+- struct snd_usb_endpoint *ep)
++int snd_usb_endpoint_configure(struct snd_usb_audio *chip,
++ struct snd_usb_endpoint *ep)
+ {
+ bool iface_first;
+ int err = 0;
+@@ -1414,6 +1412,9 @@ int snd_usb_endpoint_prepare(struct snd_usb_audio *chip,
+ if (err < 0)
+ goto unlock;
+ }
++ err = snd_usb_endpoint_set_params(chip, ep);
++ if (err < 0)
++ goto unlock;
+ goto done;
+ }
+
+@@ -1441,6 +1442,10 @@ int snd_usb_endpoint_prepare(struct snd_usb_audio *chip,
+ if (err < 0)
+ goto unlock;
+
++ err = snd_usb_endpoint_set_params(chip, ep);
++ if (err < 0)
++ goto unlock;
++
+ err = snd_usb_select_mode_quirk(chip, ep->cur_audiofmt);
+ if (err < 0)
+ goto unlock;
+diff --git a/sound/usb/endpoint.h b/sound/usb/endpoint.h
+index e67ea28faa54f..6a9af04cf175a 100644
+--- a/sound/usb/endpoint.h
++++ b/sound/usb/endpoint.h
+@@ -17,10 +17,8 @@ snd_usb_endpoint_open(struct snd_usb_audio *chip,
+ bool is_sync_ep);
+ void snd_usb_endpoint_close(struct snd_usb_audio *chip,
+ struct snd_usb_endpoint *ep);
+-int snd_usb_endpoint_set_params(struct snd_usb_audio *chip,
+- struct snd_usb_endpoint *ep);
+-int snd_usb_endpoint_prepare(struct snd_usb_audio *chip,
+- struct snd_usb_endpoint *ep);
++int snd_usb_endpoint_configure(struct snd_usb_audio *chip,
++ struct snd_usb_endpoint *ep);
+ int snd_usb_endpoint_get_clock_rate(struct snd_usb_audio *chip, int clock);
+
+ bool snd_usb_endpoint_compatible(struct snd_usb_audio *chip,
+diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
+index 02035b545f9dd..e692ae04436a5 100644
+--- a/sound/usb/pcm.c
++++ b/sound/usb/pcm.c
+@@ -443,17 +443,17 @@ static int configure_endpoints(struct snd_usb_audio *chip,
+ if (stop_endpoints(subs, false))
+ sync_pending_stops(subs);
+ if (subs->sync_endpoint) {
+- err = snd_usb_endpoint_prepare(chip, subs->sync_endpoint);
++ err = snd_usb_endpoint_configure(chip, subs->sync_endpoint);
+ if (err < 0)
+ return err;
+ }
+- err = snd_usb_endpoint_prepare(chip, subs->data_endpoint);
++ err = snd_usb_endpoint_configure(chip, subs->data_endpoint);
+ if (err < 0)
+ return err;
+ snd_usb_set_format_quirk(subs, subs->cur_audiofmt);
+ } else {
+ if (subs->sync_endpoint) {
+- err = snd_usb_endpoint_prepare(chip, subs->sync_endpoint);
++ err = snd_usb_endpoint_configure(chip, subs->sync_endpoint);
+ if (err < 0)
+ return err;
+ }
+@@ -551,13 +551,7 @@ static int snd_usb_hw_params(struct snd_pcm_substream *substream,
+ subs->cur_audiofmt = fmt;
+ mutex_unlock(&chip->mutex);
+
+- if (subs->sync_endpoint) {
+- ret = snd_usb_endpoint_set_params(chip, subs->sync_endpoint);
+- if (ret < 0)
+- goto unlock;
+- }
+-
+- ret = snd_usb_endpoint_set_params(chip, subs->data_endpoint);
++ ret = configure_endpoints(chip, subs);
+
+ unlock:
+ if (ret < 0)
+diff --git a/tools/lib/perf/evlist.c b/tools/lib/perf/evlist.c
+index 6b1bafe267a42..8ec5b9f344e02 100644
+--- a/tools/lib/perf/evlist.c
++++ b/tools/lib/perf/evlist.c
+@@ -441,6 +441,7 @@ mmap_per_evsel(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
+
+ perf_evlist__for_each_entry(evlist, evsel) {
+ bool overwrite = evsel->attr.write_backward;
++ enum fdarray_flags flgs;
+ struct perf_mmap *map;
+ int *output, fd, cpu;
+
+@@ -504,8 +505,8 @@ mmap_per_evsel(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
+
+ revent = !overwrite ? POLLIN : 0;
+
+- if (!evsel->system_wide &&
+- perf_evlist__add_pollfd(evlist, fd, map, revent, fdarray_flag__default) < 0) {
++ flgs = evsel->system_wide ? fdarray_flag__nonfilterable : fdarray_flag__default;
++ if (perf_evlist__add_pollfd(evlist, fd, map, revent, flgs) < 0) {
+ perf_mmap__put(map);
+ return -1;
+ }
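The evlist change replaces "skip system-wide events entirely" with "add them flagged nonfilterable", so their fds are still polled but survive the filtering pass that discards hung-up per-thread fds. A toy model of that filtering rule (illustrative, not the libperf fdarray implementation):

    #include <poll.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct entry { int fd; bool nonfilterable; };

    /* Keep an entry if it is nonfilterable or has not hung up / errored. */
    static int filter(struct entry *e, const short *revents, int n)
    {
        int kept = 0;

        for (int i = 0; i < n; i++)
            if (e[i].nonfilterable || !(revents[i] & (POLLHUP | POLLERR)))
                e[kept++] = e[i];
        return kept;
    }

    int main(void)
    {
        struct entry e[] = {
            { .fd = 10, .nonfilterable = false },  /* per-thread event */
            { .fd = 11, .nonfilterable = true },   /* system-wide event */
        };
        short revents[] = { POLLHUP, POLLHUP };    /* both report hang-up */

        printf("kept %d of 2\n", filter(e, revents, 2));  /* prints: kept 1 of 2 */
        return 0;
    }
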
+diff --git a/tools/perf/util/bpf_counter_cgroup.c b/tools/perf/util/bpf_counter_cgroup.c
+index 63b9db6574425..97c69a249c6e4 100644
+--- a/tools/perf/util/bpf_counter_cgroup.c
++++ b/tools/perf/util/bpf_counter_cgroup.c
+@@ -95,7 +95,7 @@ static int bperf_load_program(struct evlist *evlist)
+
+ perf_cpu_map__for_each_cpu(cpu, i, evlist->core.all_cpus) {
+ link = bpf_program__attach_perf_event(skel->progs.on_cgrp_switch,
+- FD(cgrp_switch, cpu.cpu));
++ FD(cgrp_switch, i));
+ if (IS_ERR(link)) {
+ pr_err("Failed to attach cgroup program\n");
+ err = PTR_ERR(link);
+@@ -123,7 +123,7 @@ static int bperf_load_program(struct evlist *evlist)
+
+ map_fd = bpf_map__fd(skel->maps.events);
+ perf_cpu_map__for_each_cpu(cpu, j, evlist->core.all_cpus) {
+- int fd = FD(evsel, cpu.cpu);
++ int fd = FD(evsel, j);
+ __u32 idx = evsel->core.idx * total_cpus + cpu.cpu;
+
+ err = bpf_map_update_elem(map_fd, &idx, &fd,
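Both hunks above swap FD(..., cpu.cpu) for the iteration index: perf_cpu_map__for_each_cpu() yields (cpu number, logical index) pairs, and the per-event fd table has one slot per cpu-map entry, so indexing it with the raw cpu number goes wrong whenever the map is sparse. Note that the BPF map key on the line below still uses cpu.cpu, since that map is sized by total_cpus. A small illustration with hypothetical values:

    #include <stdio.h>

    int main(void)
    {
        int map_cpus[] = { 2, 5 };     /* sparse cpu map: only cpus 2 and 5 */
        int fds[]      = { 100, 101 }; /* fd table, one slot per map entry */

        for (int i = 0; i < 2; i++) {
            int cpu = map_cpus[i];

            /* right:  fds[i]   -- slot n belongs to the n-th map entry
             * wrong:  fds[cpu] -- out of bounds for cpu 5 on this map */
            printf("cpu %d uses fd %d\n", cpu, fds[i]);
        }
        return 0;
    }
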
+diff --git a/tools/perf/util/bpf_skel/bperf_cgroup.bpf.c b/tools/perf/util/bpf_skel/bperf_cgroup.bpf.c
+index 292c430768b52..c72f8ad96f751 100644
+--- a/tools/perf/util/bpf_skel/bperf_cgroup.bpf.c
++++ b/tools/perf/util/bpf_skel/bperf_cgroup.bpf.c
+@@ -176,7 +176,7 @@ static int bperf_cgroup_count(void)
+ }
+
+ // This will be attached to cgroup-switches event for each cpu
+-SEC("perf_events")
++SEC("perf_event")
+ int BPF_PROG(on_cgrp_switch)
+ {
+ return bperf_cgroup_count();
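The one-character section rename matters because libbpf derives the program type from the SEC() string: "perf_event" selects BPF_PROG_TYPE_PERF_EVENT, while the misspelled "perf_events" is not a section prefix libbpf recognizes, so the skeleton cannot be loaded and attached as a perf event program. A minimal example of the convention (hypothetical program, assuming the usual libbpf headers):

    /* Hypothetical minimal program; not part of the perf tool. */
    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>
    #include <bpf/bpf_tracing.h>

    SEC("perf_event")       /* exact string libbpf maps to PERF_EVENT progs */
    int BPF_PROG(on_sample)
    {
        return 0;
    }

    char LICENSE[] SEC("license") = "GPL";
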
+diff --git a/tools/perf/util/genelf.c b/tools/perf/util/genelf.c
+index 953338b9e887e..02cd9f75e3d2f 100644
+--- a/tools/perf/util/genelf.c
++++ b/tools/perf/util/genelf.c
+@@ -251,6 +251,7 @@ jit_write_elf(int fd, uint64_t load_addr, const char *sym,
+ Elf_Data *d;
+ Elf_Scn *scn;
+ Elf_Ehdr *ehdr;
++ Elf_Phdr *phdr;
+ Elf_Shdr *shdr;
+ uint64_t eh_frame_base_offset;
+ char *strsym = NULL;
+@@ -285,6 +286,19 @@ jit_write_elf(int fd, uint64_t load_addr, const char *sym,
+ ehdr->e_version = EV_CURRENT;
+ ehdr->e_shstrndx= unwinding ? 4 : 2; /* shdr index for section name */
+
++ /*
++ * setup program header
++ */
++ phdr = elf_newphdr(e, 1);
++ phdr[0].p_type = PT_LOAD;
++ phdr[0].p_offset = 0;
++ phdr[0].p_vaddr = 0;
++ phdr[0].p_paddr = 0;
++ phdr[0].p_filesz = csize;
++ phdr[0].p_memsz = csize;
++ phdr[0].p_flags = PF_X | PF_R;
++ phdr[0].p_align = 8;
++
+ /*
+ * setup text section
+ */
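The jitted ELF images previously carried only section headers; consumers that walk segments rather than sections need a program header table, which is what the single PT_LOAD entry above provides, covering the whole image as readable and executable. A quick standalone checker for the result, assuming libelf (build with -lelf; the jitted .so path argument is hypothetical):

    #include <fcntl.h>
    #include <gelf.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(int argc, char **argv)
    {
        if (argc < 2 || elf_version(EV_CURRENT) == EV_NONE)
            return 1;

        int fd = open(argv[1], O_RDONLY);
        if (fd < 0)
            return 1;

        Elf *e = elf_begin(fd, ELF_C_READ, NULL);
        size_t n;
        if (!e || elf_getphdrnum(e, &n) != 0)
            return 1;

        for (size_t i = 0; i < n; i++) {
            GElf_Phdr ph;
            if (gelf_getphdr(e, (int)i, &ph) && ph.p_type == PT_LOAD)
                printf("PT_LOAD filesz=%ju flags=%#x align=%ju\n",
                       (uintmax_t)ph.p_filesz, (unsigned)ph.p_flags,
                       (uintmax_t)ph.p_align);
        }
        elf_end(e);
        close(fd);
        return 0;
    }
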
+diff --git a/tools/perf/util/genelf.h b/tools/perf/util/genelf.h
+index ae138afe6c563..b5c909546e3f2 100644
+--- a/tools/perf/util/genelf.h
++++ b/tools/perf/util/genelf.h
+@@ -53,8 +53,10 @@ int jit_add_debug_info(Elf *e, uint64_t code_addr, void *debug, int nr_debug_ent
+
+ #if GEN_ELF_CLASS == ELFCLASS64
+ #define elf_newehdr elf64_newehdr
++#define elf_newphdr elf64_newphdr
+ #define elf_getshdr elf64_getshdr
+ #define Elf_Ehdr Elf64_Ehdr
++#define Elf_Phdr Elf64_Phdr
+ #define Elf_Shdr Elf64_Shdr
+ #define Elf_Sym Elf64_Sym
+ #define ELF_ST_TYPE(a) ELF64_ST_TYPE(a)
+@@ -62,8 +64,10 @@ int jit_add_debug_info(Elf *e, uint64_t code_addr, void *debug, int nr_debug_ent
+ #define ELF_ST_VIS(a) ELF64_ST_VISIBILITY(a)
+ #else
+ #define elf_newehdr elf32_newehdr
++#define elf_newphdr elf32_newphdr
+ #define elf_getshdr elf32_getshdr
+ #define Elf_Ehdr Elf32_Ehdr
++#define Elf_Phdr Elf32_Phdr
+ #define Elf_Shdr Elf32_Shdr
+ #define Elf_Sym Elf32_Sym
+ #define ELF_ST_TYPE(a) ELF32_ST_TYPE(a)
+diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
+index 75bec32d4f571..647b7dff8ef36 100644
+--- a/tools/perf/util/symbol-elf.c
++++ b/tools/perf/util/symbol-elf.c
+@@ -2102,8 +2102,8 @@ static int kcore_copy__compare_file(const char *from_dir, const char *to_dir,
+ * unusual. One significant peculiarity is that the mapping (start -> pgoff)
+ * is not the same for the kernel map and the modules map. That happens because
+ * the data is copied adjacently whereas the original kcore has gaps. Finally,
+- * kallsyms and modules files are compared with their copies to check that
+- * modules have not been loaded or unloaded while the copies were taking place.
++ * the kallsyms file is compared with its copy to check that modules have
++ * not been loaded or unloaded while the copies were taking place.
+ *
+ * Return: %0 on success, %-1 on failure.
+ */
+@@ -2166,9 +2166,6 @@ int kcore_copy(const char *from_dir, const char *to_dir)
+ goto out_extract_close;
+ }
+
+- if (kcore_copy__compare_file(from_dir, to_dir, "modules"))
+- goto out_extract_close;
+-
+ if (kcore_copy__compare_file(from_dir, to_dir, "kallsyms"))
+ goto out_extract_close;
+
+diff --git a/tools/perf/util/synthetic-events.c b/tools/perf/util/synthetic-events.c
+index 84d17bd4efaed..64e273b2b1b21 100644
+--- a/tools/perf/util/synthetic-events.c
++++ b/tools/perf/util/synthetic-events.c
+@@ -367,13 +367,24 @@ static void perf_record_mmap2__read_build_id(struct perf_record_mmap2 *event,
+ bool is_kernel)
+ {
+ struct build_id bid;
++ struct nsinfo *nsi;
++ struct nscookie nc;
+ int rc;
+
+- if (is_kernel)
++ if (is_kernel) {
+ rc = sysfs__read_build_id("/sys/kernel/notes", &bid);
+- else
+- rc = filename__read_build_id(event->filename, &bid) > 0 ? 0 : -1;
++ goto out;
++ }
++
++ nsi = nsinfo__new(event->pid);
++ nsinfo__mountns_enter(nsi, &nc);
+
++ rc = filename__read_build_id(event->filename, &bid) > 0 ? 0 : -1;
++
++ nsinfo__mountns_exit(&nc);
++ nsinfo__put(nsi);
++
++out:
+ if (rc == 0) {
+ memcpy(event->build_id, bid.data, sizeof(bid.data));
+ event->build_id_size = (u8) bid.size;
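The build-id is now read after entering the target's mount namespace, so event->filename resolves the way the traced process sees it (relevant for containers and chroots). What nsinfo__mountns_enter() amounts to, stripped of perf's bookkeeping (sketch only; saving and restoring the caller's original namespace is elided, and setns() needs CAP_SYS_ADMIN):

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <sched.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>

    static int enter_mountns(pid_t pid)
    {
        char path[64];
        int fd, err;

        snprintf(path, sizeof(path), "/proc/%d/ns/mnt", (int)pid);
        fd = open(path, O_RDONLY);
        if (fd < 0)
            return -1;
        err = setns(fd, CLONE_NEWNS);  /* join the target's mount namespace */
        close(fd);
        return err;
    }

    int main(int argc, char **argv)
    {
        if (argc < 2 || enter_mountns((pid_t)atoi(argv[1])))
            return 1;
        /* path lookups from here on resolve as the target process sees them */
        return 0;
    }
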
+diff --git a/tools/testing/selftests/net/forwarding/sch_red.sh b/tools/testing/selftests/net/forwarding/sch_red.sh
+index e714bae473fb4..81f31179ac887 100755
+--- a/tools/testing/selftests/net/forwarding/sch_red.sh
++++ b/tools/testing/selftests/net/forwarding/sch_red.sh
+@@ -1,3 +1,4 @@
++#!/bin/bash
+ # SPDX-License-Identifier: GPL-2.0
+
+ # This test sends one stream of traffic from H1 through a TBF shaper, to a RED