Fixes for 5.17
Signed-off-by: Sasha Levin <sashal@kernel.org>
diff --git a/queue-5.17/alsa-hda-fix-unused-realtek-function-when-pm-is-not-.patch b/queue-5.17/alsa-hda-fix-unused-realtek-function-when-pm-is-not-.patch
new file mode 100644
index 0000000..ca4aa32
--- /dev/null
+++ b/queue-5.17/alsa-hda-fix-unused-realtek-function-when-pm-is-not-.patch
@@ -0,0 +1,63 @@
+From 09b6cafa627c6f3a7351fc8e9878aac0ee18c39d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 30 Apr 2022 12:33:18 -0700
+Subject: ALSA: hda - fix unused Realtek function when PM is not enabled
+
+From: Randy Dunlap <rdunlap@infradead.org>
+
+[ Upstream commit c3d9ca93f1e3bd3d1adfc4479a12c82fed424c87 ]
+
+When CONFIG_PM is not enabled, alc_shutup() is not needed,
+so move it inside the #ifdef CONFIG_PM guard.
+Also drop some contiguous #endif / #ifdef CONFIG_PM for simplicity.
+
+Fixes this build warning:
+sound/pci/hda/patch_realtek.c:886:20: warning: unused function 'alc_shutup'
+
+Fixes: 08c189f2c552 ("ALSA: hda - Use generic parser codes for Realtek driver")
+Signed-off-by: Randy Dunlap <rdunlap@infradead.org>
+Reported-by: kernel test robot <lkp@intel.com>
+Link: https://lore.kernel.org/r/20220430193318.29024-1-rdunlap@infradead.org
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/pci/hda/patch_realtek.c | 8 +++-----
+ 1 file changed, 3 insertions(+), 5 deletions(-)
+
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 779205bf5862..e38acdbe1a3b 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -937,6 +937,9 @@ static int alc_init(struct hda_codec *codec)
+ return 0;
+ }
+
++#define alc_free snd_hda_gen_free
++
++#ifdef CONFIG_PM
+ static inline void alc_shutup(struct hda_codec *codec)
+ {
+ struct alc_spec *spec = codec->spec;
+@@ -950,9 +953,6 @@ static inline void alc_shutup(struct hda_codec *codec)
+ alc_shutup_pins(codec);
+ }
+
+-#define alc_free snd_hda_gen_free
+-
+-#ifdef CONFIG_PM
+ static void alc_power_eapd(struct hda_codec *codec)
+ {
+ alc_auto_setup_eapd(codec, false);
+@@ -966,9 +966,7 @@ static int alc_suspend(struct hda_codec *codec)
+ spec->power_hook(codec);
+ return 0;
+ }
+-#endif
+
+-#ifdef CONFIG_PM
+ static int alc_resume(struct hda_codec *codec)
+ {
+ struct alc_spec *spec = codec->spec;
+--
+2.35.1
+
diff --git a/queue-5.17/arm-9196-1-spectre-bhb-enable-for-cortex-a15.patch b/queue-5.17/arm-9196-1-spectre-bhb-enable-for-cortex-a15.patch
new file mode 100644
index 0000000..cc5e027
--- /dev/null
+++ b/queue-5.17/arm-9196-1-spectre-bhb-enable-for-cortex-a15.patch
@@ -0,0 +1,36 @@
+From 0c4b9326d8fede6f2a89fec9da11a17a4bc2f64b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 20 Apr 2022 09:44:51 +0100
+Subject: ARM: 9196/1: spectre-bhb: enable for Cortex-A15
+
+From: Ard Biesheuvel <ardb@kernel.org>
+
+[ Upstream commit 0dc14aa94ccd8ba35eb17a0f9b123d1566efd39e ]
+
+The Spectre-BHB mitigations were inadvertently left disabled for
+Cortex-A15, due to the fact that cpu_v7_bugs_init() is not called in
+that case. So fix that.
+
+Fixes: b9baf5c8c5c3 ("ARM: Spectre-BHB workaround")
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm/mm/proc-v7-bugs.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/arch/arm/mm/proc-v7-bugs.c b/arch/arm/mm/proc-v7-bugs.c
+index 06dbfb968182..fb9f3eb6bf48 100644
+--- a/arch/arm/mm/proc-v7-bugs.c
++++ b/arch/arm/mm/proc-v7-bugs.c
+@@ -288,6 +288,7 @@ void cpu_v7_ca15_ibe(void)
+ {
+ if (check_spectre_auxcr(this_cpu_ptr(&spectre_warned), BIT(0)))
+ cpu_v7_spectre_v2_init();
++ cpu_v7_spectre_bhb_init();
+ }
+
+ void cpu_v7_bugs_init(void)
+--
+2.35.1
+
diff --git a/queue-5.17/arm-9197-1-spectre-bhb-fix-loop8-sequence-for-thumb2.patch b/queue-5.17/arm-9197-1-spectre-bhb-fix-loop8-sequence-for-thumb2.patch
new file mode 100644
index 0000000..0ab9bf6
--- /dev/null
+++ b/queue-5.17/arm-9197-1-spectre-bhb-fix-loop8-sequence-for-thumb2.patch
@@ -0,0 +1,37 @@
+From 89fab676e9afd9efcf241c0e26026dba8f927eff Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 20 Apr 2022 09:46:17 +0100
+Subject: ARM: 9197/1: spectre-bhb: fix loop8 sequence for Thumb2
+
+From: Ard Biesheuvel <ardb@kernel.org>
+
+[ Upstream commit 3cfb3019979666bdf33a1010147363cf05e0f17b ]
+
+In Thumb2, 'b . + 4' produces a branch instruction that uses a narrow
+encoding, and so it does not jump to the following instruction as
+expected. So use W(b) instead.
+
+Fixes: 6c7cb60bff7a ("ARM: fix Thumb2 regression with Spectre BHB")
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm/kernel/entry-armv.S | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
+index ee3f7a599181..4bbd92d41031 100644
+--- a/arch/arm/kernel/entry-armv.S
++++ b/arch/arm/kernel/entry-armv.S
+@@ -1040,7 +1040,7 @@ vector_bhb_loop8_\name:
+
+ @ bhb workaround
+ mov r0, #8
+-3: b . + 4
++3: W(b) . + 4
+ subs r0, r0, #1
+ bne 3b
+ dsb
+--
+2.35.1
+
diff --git a/queue-5.17/arm-dts-aspeed-add-video-engine-to-g6.patch b/queue-5.17/arm-dts-aspeed-add-video-engine-to-g6.patch
new file mode 100644
index 0000000..f477641
--- /dev/null
+++ b/queue-5.17/arm-dts-aspeed-add-video-engine-to-g6.patch
@@ -0,0 +1,45 @@
+From 5f897874defd302ca0f4df4ba4a4336e12c791a3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 29 Mar 2022 03:23:51 +0000
+Subject: ARM: dts: aspeed: Add video engine to g6
+
+From: Howard Chiu <howard_chiu@aspeedtech.com>
+
+[ Upstream commit 32e62d1beab70d485980013312e747a25c4e13f7 ]
+
+This node was accidentally removed by commit 645afe73f951 ("ARM: dts:
+aspeed: ast2600: Update XDMA engine node").
+
+Fixes: 645afe73f951 ("ARM: dts: aspeed: ast2600: Update XDMA engine node")
+Signed-off-by: Howard Chiu <howard_chiu@aspeedtech.com>
+Link: https://lore.kernel.org/r/SG2PR06MB2315C57600A0132FEF40F21EE61E9@SG2PR06MB2315.apcprd06.prod.outlook.com
+Signed-off-by: Joel Stanley <joel@jms.id.au>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm/boot/dts/aspeed-g6.dtsi | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+diff --git a/arch/arm/boot/dts/aspeed-g6.dtsi b/arch/arm/boot/dts/aspeed-g6.dtsi
+index c32e87fad4dc..aac55b3aeded 100644
+--- a/arch/arm/boot/dts/aspeed-g6.dtsi
++++ b/arch/arm/boot/dts/aspeed-g6.dtsi
+@@ -389,6 +389,16 @@ sbc: secure-boot-controller@1e6f2000 {
+ reg = <0x1e6f2000 0x1000>;
+ };
+
++ video: video@1e700000 {
++ compatible = "aspeed,ast2600-video-engine";
++ reg = <0x1e700000 0x1000>;
++ clocks = <&syscon ASPEED_CLK_GATE_VCLK>,
++ <&syscon ASPEED_CLK_GATE_ECLK>;
++ clock-names = "vclk", "eclk";
++ interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>;
++ status = "disabled";
++ };
++
+ gpio0: gpio@1e780000 {
+ #gpio-cells = <2>;
+ gpio-controller;
+--
+2.35.1
+
diff --git a/queue-5.17/arm-dts-aspeed-g6-fix-spi1-spi2-quad-pin-group.patch b/queue-5.17/arm-dts-aspeed-g6-fix-spi1-spi2-quad-pin-group.patch
new file mode 100644
index 0000000..6caeef8
--- /dev/null
+++ b/queue-5.17/arm-dts-aspeed-g6-fix-spi1-spi2-quad-pin-group.patch
@@ -0,0 +1,45 @@
+From 0a924260a7731f361d86da6d33b237c9f9cdd849 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 29 Mar 2022 10:39:32 -0700
+Subject: ARM: dts: aspeed-g6: fix SPI1/SPI2 quad pin group
+
+From: Jae Hyun Yoo <quic_jaehyoo@quicinc.com>
+
+[ Upstream commit 890362d41b244536ab63591f813393f5fdf59ed7 ]
+
+Fix incorrect function mappings in pinctrl_qspi1_default and
+pinctrl_qspi2_default since their function should be SPI1 and
+SPI2 respectively.
+
+Fixes: f510f04c8c83 ("ARM: dts: aspeed: Add AST2600 pinmux nodes")
+Signed-off-by: Jae Hyun Yoo <quic_jaehyoo@quicinc.com>
+Reviewed-by: Andrew Jeffery <andrew@aj.id.au>
+Link: https://lore.kernel.org/r/20220329173932.2588289-8-quic_jaehyoo@quicinc.com
+Signed-off-by: Joel Stanley <joel@jms.id.au>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm/boot/dts/aspeed-g6-pinctrl.dtsi | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/arch/arm/boot/dts/aspeed-g6-pinctrl.dtsi b/arch/arm/boot/dts/aspeed-g6-pinctrl.dtsi
+index 06d60a8540e9..ac07c240419a 100644
+--- a/arch/arm/boot/dts/aspeed-g6-pinctrl.dtsi
++++ b/arch/arm/boot/dts/aspeed-g6-pinctrl.dtsi
+@@ -648,12 +648,12 @@ pinctrl_pwm9g1_default: pwm9g1_default {
+ };
+
+ pinctrl_qspi1_default: qspi1_default {
+- function = "QSPI1";
++ function = "SPI1";
+ groups = "QSPI1";
+ };
+
+ pinctrl_qspi2_default: qspi2_default {
+- function = "QSPI2";
++ function = "SPI2";
+ groups = "QSPI2";
+ };
+
+--
+2.35.1
+
diff --git a/queue-5.17/arm-dts-aspeed-g6-remove-fwqspid-group-in-pinctrl-dt.patch b/queue-5.17/arm-dts-aspeed-g6-remove-fwqspid-group-in-pinctrl-dt.patch
new file mode 100644
index 0000000..1a5309f
--- /dev/null
+++ b/queue-5.17/arm-dts-aspeed-g6-remove-fwqspid-group-in-pinctrl-dt.patch
@@ -0,0 +1,43 @@
+From 98d1e0ad2ed5bcdaa1d2f9d9665bb7e0edbec959 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 29 Mar 2022 10:39:26 -0700
+Subject: ARM: dts: aspeed-g6: remove FWQSPID group in pinctrl dtsi
+
+From: Jae Hyun Yoo <quic_jaehyoo@quicinc.com>
+
+[ Upstream commit efddaa397cceefb61476e383c26fafd1f8ab6356 ]
+
+FWSPIDQ2 and FWSPIDQ3 are not part of the FWSPI18 interface, so remove
+the FWQSPID group from the pinctrl dtsi. These pins must be used with
+the FWSPI pins that are dedicated to the boot SPI interface, which
+provides the same 3.3V logic level.
+
+Fixes: 2f6edb6bcb2f ("ARM: dts: aspeed: Fix AST2600 quad spi group")
+Signed-off-by: Jae Hyun Yoo <quic_jaehyoo@quicinc.com>
+Reviewed-by: Andrew Jeffery <andrew@aj.id.au>
+Link: https://lore.kernel.org/r/20220329173932.2588289-2-quic_jaehyoo@quicinc.com
+Signed-off-by: Joel Stanley <joel@jms.id.au>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm/boot/dts/aspeed-g6-pinctrl.dtsi | 5 -----
+ 1 file changed, 5 deletions(-)
+
+diff --git a/arch/arm/boot/dts/aspeed-g6-pinctrl.dtsi b/arch/arm/boot/dts/aspeed-g6-pinctrl.dtsi
+index e4775bbceecc..06d60a8540e9 100644
+--- a/arch/arm/boot/dts/aspeed-g6-pinctrl.dtsi
++++ b/arch/arm/boot/dts/aspeed-g6-pinctrl.dtsi
+@@ -117,11 +117,6 @@ pinctrl_fwspid_default: fwspid_default {
+ groups = "FWSPID";
+ };
+
+- pinctrl_fwqspid_default: fwqspid_default {
+- function = "FWSPID";
+- groups = "FWQSPID";
+- };
+-
+ pinctrl_fwspiwp_default: fwspiwp_default {
+ function = "FWSPIWP";
+ groups = "FWSPIWP";
+--
+2.35.1
+
diff --git a/queue-5.17/arm64-dts-qcom-sm8250-don-t-enable-rx-tx-macro-by-de.patch b/queue-5.17/arm64-dts-qcom-sm8250-don-t-enable-rx-tx-macro-by-de.patch
new file mode 100644
index 0000000..e6fe68b
--- /dev/null
+++ b/queue-5.17/arm64-dts-qcom-sm8250-don-t-enable-rx-tx-macro-by-de.patch
@@ -0,0 +1,108 @@
+From 0f2e00e1e9d5f925467f1580fdd103e4c0bddf18 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 1 Apr 2022 21:58:14 +0300
+Subject: arm64: dts: qcom: sm8250: don't enable rx/tx macro by default
+
+From: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
+
+[ Upstream commit 18019eb62efb68c9b365acca9c4fcb2e0d459487 ]
+
+Enabling the rxmacro and txmacro nodes by default makes the Qualcomm
+RB5 crash and reboot while probing audio devices. Disable these device
+tree nodes by default and enable them only when necessary (for the
+SM8250-MTP board).
+
+Fixes: 24f52ef0c4bf ("arm64: dts: qcom: sm8250: Add nodes for tx and rx macros with soundwire masters")
+Cc: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
+Signed-off-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
+Signed-off-by: Bjorn Andersson <bjorn.andersson@linaro.org>
+Link: https://lore.kernel.org/r/20220401185814.519653-1-dmitry.baryshkov@linaro.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm64/boot/dts/qcom/sm8250-mtp.dts | 12 ++++++++++++
+ arch/arm64/boot/dts/qcom/sm8250.dtsi | 4 ++++
+ 2 files changed, 16 insertions(+)
+
+diff --git a/arch/arm64/boot/dts/qcom/sm8250-mtp.dts b/arch/arm64/boot/dts/qcom/sm8250-mtp.dts
+index fb99cc2827c7..7ab3627cc347 100644
+--- a/arch/arm64/boot/dts/qcom/sm8250-mtp.dts
++++ b/arch/arm64/boot/dts/qcom/sm8250-mtp.dts
+@@ -622,6 +622,10 @@ &qupv3_id_2 {
+ status = "okay";
+ };
+
++&rxmacro {
++ status = "okay";
++};
++
+ &slpi {
+ status = "okay";
+ firmware-name = "qcom/sm8250/slpi.mbn";
+@@ -773,6 +777,8 @@ right_spkr: wsa8810-left@0,4{
+ };
+
+ &swr1 {
++ status = "okay";
++
+ wcd_rx: wcd9380-rx@0,4 {
+ compatible = "sdw20217010d00";
+ reg = <0 4>;
+@@ -781,6 +787,8 @@ wcd_rx: wcd9380-rx@0,4 {
+ };
+
+ &swr2 {
++ status = "okay";
++
+ wcd_tx: wcd9380-tx@0,3 {
+ compatible = "sdw20217010d00";
+ reg = <0 3>;
+@@ -819,6 +827,10 @@ config {
+ };
+ };
+
++&txmacro {
++ status = "okay";
++};
++
+ &uart12 {
+ status = "okay";
+ };
+diff --git a/arch/arm64/boot/dts/qcom/sm8250.dtsi b/arch/arm64/boot/dts/qcom/sm8250.dtsi
+index a92230bec1dd..bd212f6c351f 100644
+--- a/arch/arm64/boot/dts/qcom/sm8250.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8250.dtsi
+@@ -2150,6 +2150,7 @@ rxmacro: rxmacro@3200000 {
+ pinctrl-0 = <&rx_swr_active>;
+ compatible = "qcom,sm8250-lpass-rx-macro";
+ reg = <0 0x3200000 0 0x1000>;
++ status = "disabled";
+
+ clocks = <&q6afecc LPASS_CLK_ID_TX_CORE_MCLK LPASS_CLK_ATTRIBUTE_COUPLE_NO>,
+ <&q6afecc LPASS_CLK_ID_TX_CORE_NPL_MCLK LPASS_CLK_ATTRIBUTE_COUPLE_NO>,
+@@ -2168,6 +2169,7 @@ rxmacro: rxmacro@3200000 {
+ swr1: soundwire-controller@3210000 {
+ reg = <0 0x3210000 0 0x2000>;
+ compatible = "qcom,soundwire-v1.5.1";
++ status = "disabled";
+ interrupts = <GIC_SPI 298 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rxmacro>;
+ clock-names = "iface";
+@@ -2195,6 +2197,7 @@ txmacro: txmacro@3220000 {
+ pinctrl-0 = <&tx_swr_active>;
+ compatible = "qcom,sm8250-lpass-tx-macro";
+ reg = <0 0x3220000 0 0x1000>;
++ status = "disabled";
+
+ clocks = <&q6afecc LPASS_CLK_ID_TX_CORE_MCLK LPASS_CLK_ATTRIBUTE_COUPLE_NO>,
+ <&q6afecc LPASS_CLK_ID_TX_CORE_NPL_MCLK LPASS_CLK_ATTRIBUTE_COUPLE_NO>,
+@@ -2218,6 +2221,7 @@ swr2: soundwire-controller@3230000 {
+ compatible = "qcom,soundwire-v1.5.1";
+ interrupts-extended = <&intc GIC_SPI 297 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "core";
++ status = "disabled";
+
+ clocks = <&txmacro>;
+ clock-names = "iface";
+--
+2.35.1
+
diff --git a/queue-5.17/arm64-enable-repeat-tlbi-workaround-on-kryo4xx-gold-.patch b/queue-5.17/arm64-enable-repeat-tlbi-workaround-on-kryo4xx-gold-.patch
new file mode 100644
index 0000000..2eb7098
--- /dev/null
+++ b/queue-5.17/arm64-enable-repeat-tlbi-workaround-on-kryo4xx-gold-.patch
@@ -0,0 +1,57 @@
+From faddba1d5fd4ef8af3740fa1cac74fdd3296af36 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 12 May 2022 16:31:34 +0530
+Subject: arm64: Enable repeat tlbi workaround on KRYO4XX gold CPUs
+
+From: Shreyas K K <quic_shrekk@quicinc.com>
+
+[ Upstream commit 51f559d66527e238f9a5f82027bff499784d4eac ]
+
+Add KRYO4XX gold/big cores to the list of CPUs that need the
+repeat TLBI workaround. Apply this to the affected
+KRYO4XX cores (rcpe to rfpe).
+
+The variant and revision bits are implementation defined and differ
+from those of the Cortex CPU counterparts on which these cores are
+based, i.e., (r0p0 to r3p0) is equivalent to (rcpe to rfpe).
+
+Signed-off-by: Shreyas K K <quic_shrekk@quicinc.com>
+Reviewed-by: Sai Prakash Ranjan <quic_saipraka@quicinc.com>
+Link: https://lore.kernel.org/r/20220512110134.12179-1-quic_shrekk@quicinc.com
+Signed-off-by: Will Deacon <will@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ Documentation/arm64/silicon-errata.rst | 3 +++
+ arch/arm64/kernel/cpu_errata.c | 2 ++
+ 2 files changed, 5 insertions(+)
+
+diff --git a/Documentation/arm64/silicon-errata.rst b/Documentation/arm64/silicon-errata.rst
+index ea281dd75517..29b136849d30 100644
+--- a/Documentation/arm64/silicon-errata.rst
++++ b/Documentation/arm64/silicon-errata.rst
+@@ -189,6 +189,9 @@ stable kernels.
+ +----------------+-----------------+-----------------+-----------------------------+
+ | Qualcomm Tech. | Kryo4xx Silver | N/A | ARM64_ERRATUM_1024718 |
+ +----------------+-----------------+-----------------+-----------------------------+
++| Qualcomm Tech. | Kryo4xx Gold | N/A | ARM64_ERRATUM_1286807 |
+++----------------+-----------------+-----------------+-----------------------------+
++
+ +----------------+-----------------+-----------------+-----------------------------+
+ | Fujitsu | A64FX | E#010001 | FUJITSU_ERRATUM_010001 |
+ +----------------+-----------------+-----------------+-----------------------------+
+diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
+index 146fa2e76834..10c865e311a0 100644
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -208,6 +208,8 @@ static const struct arm64_cpu_capabilities arm64_repeat_tlbi_list[] = {
+ #ifdef CONFIG_ARM64_ERRATUM_1286807
+ {
+ ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 0),
++ /* Kryo4xx Gold (rcpe to rfpe) => (r0p0 to r3p0) */
++ ERRATA_MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xe),
+ },
+ #endif
+ {},
+--
+2.35.1
+
diff --git a/queue-5.17/arm64-kexec-load-from-kimage-prior-to-clobbering.patch b/queue-5.17/arm64-kexec-load-from-kimage-prior-to-clobbering.patch
new file mode 100644
index 0000000..142360f
--- /dev/null
+++ b/queue-5.17/arm64-kexec-load-from-kimage-prior-to-clobbering.patch
@@ -0,0 +1,90 @@
+From 94f293dc2cfe4939d3588589de8bf15441c778d0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 16 May 2022 17:07:35 +0100
+Subject: arm64: kexec: load from kimage prior to clobbering
+
+From: Mark Rutland <mark.rutland@arm.com>
+
+[ Upstream commit eb3d8ea3e1f03f4b0b72d8f5ed9eb7c3165862e8 ]
+
+In arm64_relocate_new_kernel() we load some fields out of the kimage
+structure after relocation has occurred. As the kimage structure isn't
+allocated to be relocation-safe, it may be clobbered during relocation,
+and we may load junk values out of the structure.
+
+Due to this, kexec may fail when the kimage allocation happens to fall
+within a PA range that an object will be relocated to. This has been
+observed to occur for regular kexec on a QEMU TCG 'virt' machine with
+2GiB of RAM, where the PA range of the new kernel image overlaps the
+kimage structure.
+
+Avoid this by ensuring we load all values from the kimage structure
+prior to relocation.
+
+I've tested this atop v5.16 and v5.18-rc6.
+
+Fixes: 878fdbd70486 ("arm64: kexec: pass kimage as the only argument to relocation function")
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Cc: Catalin Marinas <catalin.marinas@arm.com>
+Cc: James Morse <james.morse@arm.com>
+Cc: Pasha Tatashin <pasha.tatashin@soleen.com>
+Cc: Will Deacon <will@kernel.org>
+Reviewed-by: Pasha Tatashin <pasha.tatashin@soleen.com>
+Link: https://lore.kernel.org/r/20220516160735.731404-1-mark.rutland@arm.com
+Signed-off-by: Will Deacon <will@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm64/kernel/relocate_kernel.S | 22 +++++++++++++++-------
+ 1 file changed, 15 insertions(+), 7 deletions(-)
+
+diff --git a/arch/arm64/kernel/relocate_kernel.S b/arch/arm64/kernel/relocate_kernel.S
+index f0a3df9e18a3..413f899e4ac6 100644
+--- a/arch/arm64/kernel/relocate_kernel.S
++++ b/arch/arm64/kernel/relocate_kernel.S
+@@ -37,6 +37,15 @@
+ * safe memory that has been set up to be preserved during the copy operation.
+ */
+ SYM_CODE_START(arm64_relocate_new_kernel)
++ /*
++ * The kimage structure isn't allocated specially and may be clobbered
++ * during relocation. We must load any values we need from it prior to
++ * any relocation occurring.
++ */
++ ldr x28, [x0, #KIMAGE_START]
++ ldr x27, [x0, #KIMAGE_ARCH_EL2_VECTORS]
++ ldr x26, [x0, #KIMAGE_ARCH_DTB_MEM]
++
+ /* Setup the list loop variables. */
+ ldr x18, [x0, #KIMAGE_ARCH_ZERO_PAGE] /* x18 = zero page for BBM */
+ ldr x17, [x0, #KIMAGE_ARCH_TTBR1] /* x17 = linear map copy */
+@@ -72,21 +81,20 @@ SYM_CODE_START(arm64_relocate_new_kernel)
+ ic iallu
+ dsb nsh
+ isb
+- ldr x4, [x0, #KIMAGE_START] /* relocation start */
+- ldr x1, [x0, #KIMAGE_ARCH_EL2_VECTORS] /* relocation start */
+- ldr x0, [x0, #KIMAGE_ARCH_DTB_MEM] /* dtb address */
+ turn_off_mmu x12, x13
+
+ /* Start new image. */
+- cbz x1, .Lel1
+- mov x1, x4 /* relocation start */
+- mov x2, x0 /* dtb address */
++ cbz x27, .Lel1
++ mov x1, x28 /* kernel entry point */
++ mov x2, x26 /* dtb address */
+ mov x3, xzr
+ mov x4, xzr
+ mov x0, #HVC_SOFT_RESTART
+ hvc #0 /* Jumps from el2 */
+ .Lel1:
++ mov x0, x26 /* dtb address */
++ mov x1, xzr
+ mov x2, xzr
+ mov x3, xzr
+- br x4 /* Jumps from el1 */
++ br x28 /* Jumps from el1 */
+ SYM_CODE_END(arm64_relocate_new_kernel)
+--
+2.35.1
+
diff --git a/queue-5.17/block-mq-deadline-set-the-fifo_time-member-also-if-i.patch b/queue-5.17/block-mq-deadline-set-the-fifo_time-member-also-if-i.patch
new file mode 100644
index 0000000..2c55944
--- /dev/null
+++ b/queue-5.17/block-mq-deadline-set-the-fifo_time-member-also-if-i.patch
@@ -0,0 +1,41 @@
+From fad78d1779995d356f0d4e575e3964e75d7aac68 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 13 May 2022 10:13:07 -0700
+Subject: block/mq-deadline: Set the fifo_time member also if inserting at head
+
+From: Bart Van Assche <bvanassche@acm.org>
+
+[ Upstream commit 725f22a1477c9c15aa67ad3af96fe28ec4fe72d2 ]
+
+Before commit 322cff70d46c the fifo_time member of requests on a dispatch
+list was not used. Commit 322cff70d46c introduces code that reads the
+fifo_time member of requests on dispatch lists. Hence this patch that sets
+the fifo_time member when adding a request to a dispatch list.
+
+Cc: Christoph Hellwig <hch@lst.de>
+Cc: Ming Lei <ming.lei@redhat.com>
+Cc: Damien Le Moal <damien.lemoal@opensource.wdc.com>
+Fixes: 322cff70d46c ("block/mq-deadline: Prioritize high-priority requests")
+Signed-off-by: Bart Van Assche <bvanassche@acm.org>
+Link: https://lore.kernel.org/r/20220513171307.32564-1-bvanassche@acm.org
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ block/mq-deadline.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/block/mq-deadline.c b/block/mq-deadline.c
+index 3ed5eaf3446a..6ed602b2f80a 100644
+--- a/block/mq-deadline.c
++++ b/block/mq-deadline.c
+@@ -742,6 +742,7 @@ static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
+
+ if (at_head) {
+ list_add(&rq->queuelist, &per_prio->dispatch);
++ rq->fifo_time = jiffies;
+ } else {
+ deadline_add_rq_rb(per_prio, rq);
+
+--
+2.35.1
+
diff --git a/queue-5.17/cfg80211-retrieve-s1g-operating-channel-number.patch b/queue-5.17/cfg80211-retrieve-s1g-operating-channel-number.patch
new file mode 100644
index 0000000..7c4591a
--- /dev/null
+++ b/queue-5.17/cfg80211-retrieve-s1g-operating-channel-number.patch
@@ -0,0 +1,40 @@
+From 32c1dcaab7e48463f712a3059df18df419ea4b7e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 20 Apr 2022 04:13:20 +0000
+Subject: cfg80211: retrieve S1G operating channel number
+
+From: Kieran Frewen <kieran.frewen@morsemicro.com>
+
+[ Upstream commit e847ffe2d146cfd52980ca688d84358e024a6e70 ]
+
+When retrieving the S1G channel number from IEs, we should retrieve
+the operating channel instead of the primary channel. The S1G operation
+element specifies the main channel of operation as the oper channel,
+unlike for HT and HE which specify their main channel of operation as
+the primary channel.
+
+Signed-off-by: Kieran Frewen <kieran.frewen@morsemicro.com>
+Signed-off-by: Bassem Dawood <bassem@morsemicro.com>
+Link: https://lore.kernel.org/r/20220420041321.3788789-1-kieran.frewen@morsemicro.com
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/wireless/scan.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/net/wireless/scan.c b/net/wireless/scan.c
+index 4a6d86432910..6d82bd9eaf8c 100644
+--- a/net/wireless/scan.c
++++ b/net/wireless/scan.c
+@@ -1829,7 +1829,7 @@ int cfg80211_get_ies_channel_number(const u8 *ie, size_t ielen,
+ if (tmp && tmp->datalen >= sizeof(struct ieee80211_s1g_oper_ie)) {
+ struct ieee80211_s1g_oper_ie *s1gop = (void *)tmp->data;
+
+- return s1gop->primary_ch;
++ return s1gop->oper_ch;
+ }
+ } else {
+ tmp = cfg80211_find_elem(WLAN_EID_DS_PARAMS, ie, ielen);
+--
+2.35.1
+
diff --git a/queue-5.17/clk-at91-generated-consider-range-when-calculating-b.patch b/queue-5.17/clk-at91-generated-consider-range-when-calculating-b.patch
new file mode 100644
index 0000000..208c301
--- /dev/null
+++ b/queue-5.17/clk-at91-generated-consider-range-when-calculating-b.patch
@@ -0,0 +1,42 @@
+From 4e636dfbae904babd95d26eae073c6a6d26f15d2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 13 Apr 2022 10:13:18 +0300
+Subject: clk: at91: generated: consider range when calculating best rate
+
+From: Codrin Ciubotariu <codrin.ciubotariu@microchip.com>
+
+[ Upstream commit d0031e6fbed955ff8d5f5bbc8fe7382482559cec ]
+
+clk_generated_best_diff() helps in finding the parent and the divisor to
+compute a rate closest to the required one. However, it doesn't take into
+account the request's range for the new rate. Make sure the new rate
+is within the required range.
+
+Fixes: 8a8f4bf0c480 ("clk: at91: clk-generated: create function to find best_diff")
+Signed-off-by: Codrin Ciubotariu <codrin.ciubotariu@microchip.com>
+Link: https://lore.kernel.org/r/20220413071318.244912-1-codrin.ciubotariu@microchip.com
+Reviewed-by: Claudiu Beznea <claudiu.beznea@microchip.com>
+Signed-off-by: Stephen Boyd <sboyd@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/clk/at91/clk-generated.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/drivers/clk/at91/clk-generated.c b/drivers/clk/at91/clk-generated.c
+index 23cc8297ec4c..d429ba52a719 100644
+--- a/drivers/clk/at91/clk-generated.c
++++ b/drivers/clk/at91/clk-generated.c
+@@ -117,6 +117,10 @@ static void clk_generated_best_diff(struct clk_rate_request *req,
+ tmp_rate = parent_rate;
+ else
+ tmp_rate = parent_rate / div;
++
++ if (tmp_rate < req->min_rate || tmp_rate > req->max_rate)
++ return;
++
+ tmp_diff = abs(req->rate - tmp_rate);
+
+ if (*best_diff < 0 || *best_diff >= tmp_diff) {
+--
+2.35.1
+
diff --git a/queue-5.17/drm-amd-display-undo-clearing-of-z10-related-functio.patch b/queue-5.17/drm-amd-display-undo-clearing-of-z10-related-functio.patch
new file mode 100644
index 0000000..8df9cfa
--- /dev/null
+++ b/queue-5.17/drm-amd-display-undo-clearing-of-z10-related-functio.patch
@@ -0,0 +1,42 @@
+From 030fc735932ad571a12a691fae8e821fd3e0300b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 19 Mar 2022 16:34:24 -0400
+Subject: drm/amd/display: undo clearing of z10 related function pointers
+
+From: Eric Yang <Eric.Yang2@amd.com>
+
+[ Upstream commit 9b9bd3f640640f94272a461b2dfe558f91b322c5 ]
+
+[Why]
+Z10 and S0i3 share some code paths. A previous code cleanup
+incorrectly removed these pointers, which breaks S0i3 restore.
+
+[How]
+Do not clear the function pointers based on Z10 disable.
+
+Reviewed-by: Nicholas Kazlauskas <Nicholas.Kazlauskas@amd.com>
+Acked-by: Pavle Kotarac <Pavle.Kotarac@amd.com>
+Signed-off-by: Eric Yang <Eric.Yang2@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c | 5 -----
+ 1 file changed, 5 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
+index d7559e5a99ce..e708f07fe75a 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
+@@ -153,9 +153,4 @@ void dcn31_hw_sequencer_construct(struct dc *dc)
+ dc->hwss.init_hw = dcn20_fpga_init_hw;
+ dc->hwseq->funcs.init_pipes = NULL;
+ }
+- if (dc->debug.disable_z10) {
+- /*hw not support z10 or sw disable it*/
+- dc->hwss.z10_restore = NULL;
+- dc->hwss.z10_save_init = NULL;
+- }
+ }
+--
+2.35.1
+
diff --git a/queue-5.17/ethernet-tulip-fix-missing-pci_disable_device-on-err.patch b/queue-5.17/ethernet-tulip-fix-missing-pci_disable_device-on-err.patch
new file mode 100644
index 0000000..a7d1225
--- /dev/null
+++ b/queue-5.17/ethernet-tulip-fix-missing-pci_disable_device-on-err.patch
@@ -0,0 +1,49 @@
+From 9bb00bb90329f88284d7041e413605ca532b2801 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 6 May 2022 17:42:50 +0800
+Subject: ethernet: tulip: fix missing pci_disable_device() on error in
+ tulip_init_one()
+
+From: Yang Yingliang <yangyingliang@huawei.com>
+
+[ Upstream commit 51ca86b4c9c7c75f5630fa0dbe5f8f0bd98e3c3e ]
+
+Add the missing pci_disable_device() call before returning
+from tulip_init_one() in the error handling paths.
+
+Reported-by: Hulk Robot <hulkci@huawei.com>
+Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
+Link: https://lore.kernel.org/r/20220506094250.3630615-1-yangyingliang@huawei.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/dec/tulip/tulip_core.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
+index 79df5a72877b..0040dcaab945 100644
+--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
++++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
+@@ -1399,8 +1399,10 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+
+ /* alloc_etherdev ensures aligned and zeroed private structures */
+ dev = alloc_etherdev (sizeof (*tp));
+- if (!dev)
++ if (!dev) {
++ pci_disable_device(pdev);
+ return -ENOMEM;
++ }
+
+ SET_NETDEV_DEV(dev, &pdev->dev);
+ if (pci_resource_len (pdev, 0) < tulip_tbl[chip_idx].io_size) {
+@@ -1785,6 +1787,7 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+
+ err_out_free_netdev:
+ free_netdev (dev);
++ pci_disable_device(pdev);
+ return -ENODEV;
+ }
+
+--
+2.35.1
+
diff --git a/queue-5.17/fbdev-prevent-possible-use-after-free-in-fb_release.patch b/queue-5.17/fbdev-prevent-possible-use-after-free-in-fb_release.patch
new file mode 100644
index 0000000..52dfd73
--- /dev/null
+++ b/queue-5.17/fbdev-prevent-possible-use-after-free-in-fb_release.patch
@@ -0,0 +1,47 @@
+From 389b61343bbcd31b27e73ba7ef7e189c996166e0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 6 May 2022 00:04:13 +0200
+Subject: fbdev: Prevent possible use-after-free in fb_release()
+
+From: Daniel Vetter <daniel.vetter@ffwll.ch>
+
+[ Upstream commit 89bfd4017e58faaf70411555e7f508495114e90b ]
+
+Most fbdev drivers have issues with the fb_info lifetime because they
+call framebuffer_release() from their driver's .remove callback rather
+than from the fbops.fb_destroy callback.
+
+Doing that will destroy the fb_info too early, while references to it may
+still exist, leading to a use-after-free error.
+
+To prevent this, check the fb_info reference counter when attempting to
+kfree the data structure in framebuffer_release(). That will leak it but
+at least will prevent the mentioned error.
+
+Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
+Signed-off-by: Javier Martinez Canillas <javierm@redhat.com>
+Reviewed-by: Thomas Zimmermann <tzimmermann@suse.de>
+Link: https://patchwork.freedesktop.org/patch/msgid/20220505220413.365977-1-javierm@redhat.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/video/fbdev/core/fbsysfs.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/drivers/video/fbdev/core/fbsysfs.c b/drivers/video/fbdev/core/fbsysfs.c
+index 26892940c213..82e31a2d845e 100644
+--- a/drivers/video/fbdev/core/fbsysfs.c
++++ b/drivers/video/fbdev/core/fbsysfs.c
+@@ -80,6 +80,10 @@ void framebuffer_release(struct fb_info *info)
+ {
+ if (!info)
+ return;
++
++ if (WARN_ON(refcount_read(&info->count)))
++ return;
++
+ kfree(info->apertures);
+ kfree(info);
+ }
+--
+2.35.1
+
diff --git a/queue-5.17/gpio-gpio-vf610-do-not-touch-other-bits-when-set-the.patch b/queue-5.17/gpio-gpio-vf610-do-not-touch-other-bits-when-set-the.patch
new file mode 100644
index 0000000..734e9c6
--- /dev/null
+++ b/queue-5.17/gpio-gpio-vf610-do-not-touch-other-bits-when-set-the.patch
@@ -0,0 +1,45 @@
+From 331f68136039f44c2b9a8eef5f7497c563cc80b5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 11 May 2022 10:15:04 +0800
+Subject: gpio: gpio-vf610: do not touch other bits when set the target bit
+
+From: Haibo Chen <haibo.chen@nxp.com>
+
+[ Upstream commit 9bf3ac466faa83d51a8fe9212131701e58fdef74 ]
+
+For GPIO controllers that have the PDDR register, setting one target
+bit with the current logic clears all other bits, which is wrong. Use a
+read-modify-write ('|=') to fix it.
+
+Fixes: 659d8a62311f ("gpio: vf610: add imx7ulp support")
+Reviewed-by: Peng Fan <peng.fan@nxp.com>
+Signed-off-by: Haibo Chen <haibo.chen@nxp.com>
+Signed-off-by: Bartosz Golaszewski <brgl@bgdev.pl>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpio/gpio-vf610.c | 8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpio/gpio-vf610.c b/drivers/gpio/gpio-vf610.c
+index 20780c35da1b..23cddb265a0d 100644
+--- a/drivers/gpio/gpio-vf610.c
++++ b/drivers/gpio/gpio-vf610.c
+@@ -125,9 +125,13 @@ static int vf610_gpio_direction_output(struct gpio_chip *chip, unsigned gpio,
+ {
+ struct vf610_gpio_port *port = gpiochip_get_data(chip);
+ unsigned long mask = BIT(gpio);
++ u32 val;
+
+- if (port->sdata && port->sdata->have_paddr)
+- vf610_gpio_writel(mask, port->gpio_base + GPIO_PDDR);
++ if (port->sdata && port->sdata->have_paddr) {
++ val = vf610_gpio_readl(port->gpio_base + GPIO_PDDR);
++ val |= mask;
++ vf610_gpio_writel(val, port->gpio_base + GPIO_PDDR);
++ }
+
+ vf610_gpio_set(chip, gpio, value);
+
+--
+2.35.1
+
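Illustrative note, outside the queued patch above: a minimal user-space sketch of why blindly writing BIT(gpio) to a direction register such as PDDR wipes the other pins, while a read-modify-write ('|=') preserves them. All names and values here are made up for illustration; this is not driver code.

#include <stdio.h>
#include <stdint.h>

#define BIT(n)	(1u << (n))

int main(void)
{
	uint32_t reg = BIT(0) | BIT(3);		/* pins 0 and 3 already configured as outputs */
	unsigned int gpio = 5;			/* pin we now want to switch to output */

	uint32_t blind_write = BIT(gpio);	/* old logic: pins 0 and 3 are lost */
	uint32_t rmw_write = reg | BIT(gpio);	/* fixed logic: pins 0 and 3 are kept */

	printf("blind write: 0x%02x, read-modify-write: 0x%02x\n",
	       blind_write, rmw_write);
	return 0;
}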
diff --git a/queue-5.17/gpio-mvebu-pwm-refuse-requests-with-inverted-polarit.patch b/queue-5.17/gpio-mvebu-pwm-refuse-requests-with-inverted-polarit.patch
new file mode 100644
index 0000000..0d63fce
--- /dev/null
+++ b/queue-5.17/gpio-mvebu-pwm-refuse-requests-with-inverted-polarit.patch
@@ -0,0 +1,40 @@
+From 606c7db2662668a209ce8501276e9d6379dcebea Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 11 May 2022 09:58:56 +0200
+Subject: gpio: mvebu/pwm: Refuse requests with inverted polarity
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
+
+[ Upstream commit 3ecb10175b1f776f076553c24e2689e42953fef5 ]
+
+The driver doesn't take struct pwm_state::polarity into account when
+configuring the hardware, so refuse requests for inverted polarity.
+
+Fixes: 757642f9a584 ("gpio: mvebu: Add limited PWM support")
+Signed-off-by: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
+Signed-off-by: Bartosz Golaszewski <brgl@bgdev.pl>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpio/gpio-mvebu.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
+index a2c8dd329b31..2db19cd640a4 100644
+--- a/drivers/gpio/gpio-mvebu.c
++++ b/drivers/gpio/gpio-mvebu.c
+@@ -707,6 +707,9 @@ static int mvebu_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ unsigned long flags;
+ unsigned int on, off;
+
++ if (state->polarity != PWM_POLARITY_NORMAL)
++ return -EINVAL;
++
+ val = (unsigned long long) mvpwm->clk_rate * state->duty_cycle;
+ do_div(val, NSEC_PER_SEC);
+ if (val > UINT_MAX + 1ULL)
+--
+2.35.1
+
diff --git a/queue-5.17/i915-guc-reset-make-__guc_reset_context-aware-of-gui.patch b/queue-5.17/i915-guc-reset-make-__guc_reset_context-aware-of-gui.patch
new file mode 100644
index 0000000..b1b0e3e
--- /dev/null
+++ b/queue-5.17/i915-guc-reset-make-__guc_reset_context-aware-of-gui.patch
@@ -0,0 +1,155 @@
+From 2c7e9b10dba4d2a708a5210a820432e4a8b66bd3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 25 Apr 2022 17:30:45 -0700
+Subject: i915/guc/reset: Make __guc_reset_context aware of guilty engines
+
+From: Umesh Nerlige Ramappa <umesh.nerlige.ramappa@intel.com>
+
+[ Upstream commit 89e96d822bd51f7afe2d3e95a34099480b5c3d55 ]
+
+There are 2 ways an engine can get reset in i915 and the method of reset
+affects how KMD labels a context as guilty/innocent.
+
+(1) GuC initiated engine-reset: GuC resets a hung engine and notifies
+KMD. The context that hung on the engine is marked guilty and all other
+contexts are innocent. The innocent contexts are resubmitted.
+
+(2) GT based reset: When an engine heartbeat fails to tick, KMD
+initiates a gt/chip reset. All active contexts are marked as guilty and
+discarded.
+
+In order to correctly mark the contexts as guilty/innocent, pass a mask
+of engines that were reset to __guc_reset_context.
+
+Fixes: eb5e7da736f3 ("drm/i915/guc: Reset implementation for new GuC interface")
+Signed-off-by: Umesh Nerlige Ramappa <umesh.nerlige.ramappa@intel.com>
+Reviewed-by: Alan Previn <alan.previn.teres.alexis@intel.com>
+Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20220426003045.3929439-1-umesh.nerlige.ramappa@intel.com
+(cherry picked from commit 303760aa914b7f5ac9602dbb4b471a2ad52eeb3e)
+Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/i915/gt/intel_reset.c | 2 +-
+ drivers/gpu/drm/i915/gt/uc/intel_guc.h | 2 +-
+ .../gpu/drm/i915/gt/uc/intel_guc_submission.c | 16 ++++++++--------
+ drivers/gpu/drm/i915/gt/uc/intel_uc.c | 2 +-
+ drivers/gpu/drm/i915/gt/uc/intel_uc.h | 2 +-
+ 5 files changed, 12 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
+index 7be0002d9d70..f577582ddd9f 100644
+--- a/drivers/gpu/drm/i915/gt/intel_reset.c
++++ b/drivers/gpu/drm/i915/gt/intel_reset.c
+@@ -791,7 +791,7 @@ static int gt_reset(struct intel_gt *gt, intel_engine_mask_t stalled_mask)
+ __intel_engine_reset(engine, stalled_mask & engine->mask);
+ local_bh_enable();
+
+- intel_uc_reset(>->uc, true);
++ intel_uc_reset(>->uc, ALL_ENGINES);
+
+ intel_ggtt_restore_fences(gt->ggtt);
+
+diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
+index 3aabe164c329..e1fb8e1da128 100644
+--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h
++++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
+@@ -417,7 +417,7 @@ int intel_guc_global_policies_update(struct intel_guc *guc);
+ void intel_guc_context_ban(struct intel_context *ce, struct i915_request *rq);
+
+ void intel_guc_submission_reset_prepare(struct intel_guc *guc);
+-void intel_guc_submission_reset(struct intel_guc *guc, bool stalled);
++void intel_guc_submission_reset(struct intel_guc *guc, intel_engine_mask_t stalled);
+ void intel_guc_submission_reset_finish(struct intel_guc *guc);
+ void intel_guc_submission_cancel_requests(struct intel_guc *guc);
+
+diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+index 154ad726e266..1e51a365833b 100644
+--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
++++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+@@ -1603,9 +1603,9 @@ __unwind_incomplete_requests(struct intel_context *ce)
+ spin_unlock_irqrestore(&sched_engine->lock, flags);
+ }
+
+-static void __guc_reset_context(struct intel_context *ce, bool stalled)
++static void __guc_reset_context(struct intel_context *ce, intel_engine_mask_t stalled)
+ {
+- bool local_stalled;
++ bool guilty;
+ struct i915_request *rq;
+ unsigned long flags;
+ u32 head;
+@@ -1647,7 +1647,7 @@ static void __guc_reset_context(struct intel_context *ce, bool stalled)
+ if (!intel_context_is_pinned(ce))
+ goto next_context;
+
+- local_stalled = false;
++ guilty = false;
+ rq = intel_context_find_active_request(ce);
+ if (!rq) {
+ head = ce->ring->tail;
+@@ -1655,14 +1655,14 @@ static void __guc_reset_context(struct intel_context *ce, bool stalled)
+ }
+
+ if (i915_request_started(rq))
+- local_stalled = true;
++ guilty = stalled & ce->engine->mask;
+
+ GEM_BUG_ON(i915_active_is_idle(&ce->active));
+ head = intel_ring_wrap(ce->ring, rq->head);
+
+- __i915_request_reset(rq, local_stalled && stalled);
++ __i915_request_reset(rq, guilty);
+ out_replay:
+- guc_reset_state(ce, head, local_stalled && stalled);
++ guc_reset_state(ce, head, guilty);
+ next_context:
+ if (i != number_children)
+ ce = list_next_entry(ce, parallel.child_link);
+@@ -1673,7 +1673,7 @@ static void __guc_reset_context(struct intel_context *ce, bool stalled)
+ intel_context_put(parent);
+ }
+
+-void intel_guc_submission_reset(struct intel_guc *guc, bool stalled)
++void intel_guc_submission_reset(struct intel_guc *guc, intel_engine_mask_t stalled)
+ {
+ struct intel_context *ce;
+ unsigned long index;
+@@ -4042,7 +4042,7 @@ static void guc_context_replay(struct intel_context *ce)
+ {
+ struct i915_sched_engine *sched_engine = ce->engine->sched_engine;
+
+- __guc_reset_context(ce, true);
++ __guc_reset_context(ce, ce->engine->mask);
+ tasklet_hi_schedule(&sched_engine->tasklet);
+ }
+
+diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
+index 09ed29df67bc..cbfb5a01cc1d 100644
+--- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c
++++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
+@@ -592,7 +592,7 @@ void intel_uc_reset_prepare(struct intel_uc *uc)
+ __uc_sanitize(uc);
+ }
+
+-void intel_uc_reset(struct intel_uc *uc, bool stalled)
++void intel_uc_reset(struct intel_uc *uc, intel_engine_mask_t stalled)
+ {
+ struct intel_guc *guc = &uc->guc;
+
+diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.h b/drivers/gpu/drm/i915/gt/uc/intel_uc.h
+index 866b462821c0..a8f38c2c60e2 100644
+--- a/drivers/gpu/drm/i915/gt/uc/intel_uc.h
++++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.h
+@@ -42,7 +42,7 @@ void intel_uc_driver_late_release(struct intel_uc *uc);
+ void intel_uc_driver_remove(struct intel_uc *uc);
+ void intel_uc_init_mmio(struct intel_uc *uc);
+ void intel_uc_reset_prepare(struct intel_uc *uc);
+-void intel_uc_reset(struct intel_uc *uc, bool stalled);
++void intel_uc_reset(struct intel_uc *uc, intel_engine_mask_t stalled);
+ void intel_uc_reset_finish(struct intel_uc *uc);
+ void intel_uc_cancel_requests(struct intel_uc *uc);
+ void intel_uc_suspend(struct intel_uc *uc);
+--
+2.35.1
+
diff --git a/queue-5.17/ice-fix-crash-when-writing-timestamp-on-rx-rings.patch b/queue-5.17/ice-fix-crash-when-writing-timestamp-on-rx-rings.patch
new file mode 100644
index 0000000..425ddb2
--- /dev/null
+++ b/queue-5.17/ice-fix-crash-when-writing-timestamp-on-rx-rings.patch
@@ -0,0 +1,107 @@
+From b29e9579672c22ae69594d64cd944f47978b4358 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 28 Apr 2022 10:33:50 +0200
+Subject: ice: fix crash when writing timestamp on RX rings
+
+From: Arkadiusz Kubalewski <arkadiusz.kubalewski@intel.com>
+
+[ Upstream commit 4503cc7fdf9a84cd631b0cb8ecb3c9b1bdbf3594 ]
+
+Do not allow writing timestamps on RX rings while the PF is being
+configured. While the PF is being configured, RX rings can be freed or
+rebuilt. If timestamps are updated at the same time, the kernel will
+crash by dereferencing a NULL RX ring pointer.
+
+PID: 1449 TASK: ff187d28ed658040 CPU: 34 COMMAND: "ice-ptp-0000:51"
+ #0 [ff1966a94a713bb0] machine_kexec at ffffffff9d05a0be
+ #1 [ff1966a94a713c08] __crash_kexec at ffffffff9d192e9d
+ #2 [ff1966a94a713cd0] crash_kexec at ffffffff9d1941bd
+ #3 [ff1966a94a713ce8] oops_end at ffffffff9d01bd54
+ #4 [ff1966a94a713d08] no_context at ffffffff9d06bda4
+ #5 [ff1966a94a713d60] __bad_area_nosemaphore at ffffffff9d06c10c
+ #6 [ff1966a94a713da8] do_page_fault at ffffffff9d06cae4
+ #7 [ff1966a94a713de0] page_fault at ffffffff9da0107e
+ [exception RIP: ice_ptp_update_cached_phctime+91]
+ RIP: ffffffffc076db8b RSP: ff1966a94a713e98 RFLAGS: 00010246
+ RAX: 16e3db9c6b7ccae4 RBX: ff187d269dd3c180 RCX: ff187d269cd4d018
+ RDX: 0000000000000000 RSI: 0000000000000000 RDI: 0000000000000000
+ RBP: ff187d269cfcc644 R8: ff187d339b9641b0 R9: 0000000000000000
+ R10: 0000000000000002 R11: 0000000000000000 R12: ff187d269cfcc648
+ R13: ffffffff9f128784 R14: ffffffff9d101b70 R15: ff187d269cfcc640
+ ORIG_RAX: ffffffffffffffff CS: 0010 SS: 0018
+ #8 [ff1966a94a713ea0] ice_ptp_periodic_work at ffffffffc076dbef [ice]
+ #9 [ff1966a94a713ee0] kthread_worker_fn at ffffffff9d101c1b
+ #10 [ff1966a94a713f10] kthread at ffffffff9d101b4d
+ #11 [ff1966a94a713f50] ret_from_fork at ffffffff9da0023f
+
+Fixes: 77a781155a65 ("ice: enable receive hardware timestamping")
+Signed-off-by: Arkadiusz Kubalewski <arkadiusz.kubalewski@intel.com>
+Reviewed-by: Michal Schmidt <mschmidt@redhat.com>
+Tested-by: Dave Cain <dcain@redhat.com>
+Tested-by: Gurucharan <gurucharanx.g@intel.com> (A Contingent worker at Intel)
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/ice/ice_ptp.c | 19 +++++++++++++++----
+ 1 file changed, 15 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
+index 45ae97b8b97d..836c67f1aa46 100644
+--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
++++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
+@@ -499,12 +499,19 @@ ice_ptp_read_src_clk_reg(struct ice_pf *pf, struct ptp_system_timestamp *sts)
+ * This function must be called periodically to ensure that the cached value
+ * is never more than 2 seconds old. It must also be called whenever the PHC
+ * time has been changed.
++ *
++ * Return:
++ * * 0 - OK, successfully updated
++ * * -EAGAIN - PF was busy, need to reschedule the update
+ */
+-static void ice_ptp_update_cached_phctime(struct ice_pf *pf)
++static int ice_ptp_update_cached_phctime(struct ice_pf *pf)
+ {
+ u64 systime;
+ int i;
+
++ if (test_and_set_bit(ICE_CFG_BUSY, pf->state))
++ return -EAGAIN;
++
+ /* Read the current PHC time */
+ systime = ice_ptp_read_src_clk_reg(pf, NULL);
+
+@@ -527,6 +534,9 @@ static void ice_ptp_update_cached_phctime(struct ice_pf *pf)
+ WRITE_ONCE(vsi->rx_rings[j]->cached_phctime, systime);
+ }
+ }
++ clear_bit(ICE_CFG_BUSY, pf->state);
++
++ return 0;
+ }
+
+ /**
+@@ -2322,17 +2332,18 @@ static void ice_ptp_periodic_work(struct kthread_work *work)
+ {
+ struct ice_ptp *ptp = container_of(work, struct ice_ptp, work.work);
+ struct ice_pf *pf = container_of(ptp, struct ice_pf, ptp);
++ int err;
+
+ if (!test_bit(ICE_FLAG_PTP, pf->flags))
+ return;
+
+- ice_ptp_update_cached_phctime(pf);
++ err = ice_ptp_update_cached_phctime(pf);
+
+ ice_ptp_tx_tstamp_cleanup(&pf->hw, &pf->ptp.port.tx);
+
+- /* Run twice a second */
++ /* Run twice a second or reschedule if phc update failed */
+ kthread_queue_delayed_work(ptp->kworker, &ptp->work,
+- msecs_to_jiffies(500));
++ msecs_to_jiffies(err ? 10 : 500));
+ }
+
+ /**
+--
+2.35.1
+
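Illustrative note, outside the queued patch above: a minimal sketch of the non-blocking guard the fix relies on. test_and_set_bit() either claims the busy flag or tells the caller to back off and reschedule instead of touching rings that another path may be freeing. Here a C11 atomic_flag stands in for the ICE_CFG_BUSY bit; the function and flag names are invented for illustration.

#include <stdio.h>
#include <stdatomic.h>

static atomic_flag cfg_busy = ATOMIC_FLAG_INIT;

static int update_cached_time(void)
{
	/* test-and-set returns the previous value: nonzero means someone else holds it */
	if (atomic_flag_test_and_set(&cfg_busy))
		return -1;			/* busy: caller should retry later */

	/* ... safe to walk the RX rings and write timestamps here ... */

	atomic_flag_clear(&cfg_busy);
	return 0;
}

int main(void)
{
	printf("first attempt: %d\n", update_cached_time());	/* 0 */

	atomic_flag_test_and_set(&cfg_busy);	/* simulate a concurrent reconfiguration */
	printf("while busy:    %d\n", update_cached_time());	/* -1 */
	return 0;
}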
diff --git a/queue-5.17/ice-fix-interrupt-moderation-settings-getting-cleare.patch b/queue-5.17/ice-fix-interrupt-moderation-settings-getting-cleare.patch
new file mode 100644
index 0000000..097f0c9
--- /dev/null
+++ b/queue-5.17/ice-fix-interrupt-moderation-settings-getting-cleare.patch
@@ -0,0 +1,116 @@
+From 95f90d95c3fe55e3b9c8d6627aae482166398563 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 8 May 2022 19:33:48 -0400
+Subject: ice: Fix interrupt moderation settings getting cleared
+
+From: Michal Wilczynski <michal.wilczynski@intel.com>
+
+[ Upstream commit bf13502ed5f941b0777b3fd1e24dac5d93f3886c ]
+
+Adaptive-rx and Adaptive-tx are interrupt moderation settings
+that can be enabled/disabled using ethtool:
+ethtool -C ethX adaptive-rx on/off adaptive-tx on/off
+
+Unfortunately those settings get cleared after changing the number of
+queues, or in ethtool terms 'channels':
+ethtool -L ethX rx 1 tx 1
+
+The clearing happened because of the bit fields introduced in the
+ice_ring_container struct: only the itr_setting bits were rebuilt
+during ice_vsi_rebuild_set_coalesce().
+
+Introduce an anonymous struct of bit fields and create a union to
+refer to them as a single variable. This way the whole set can be
+saved and restored easily.
+
+Fixes: 61dc79ced7aa ("ice: Restore interrupt throttle settings after VSI rebuild")
+Signed-off-by: Michal Wilczynski <michal.wilczynski@intel.com>
+Tested-by: Gurucharan <gurucharanx.g@intel.com> (A Contingent worker at Intel)
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/ice/ice_lib.c | 16 ++++++++--------
+ drivers/net/ethernet/intel/ice/ice_txrx.h | 11 ++++++++---
+ 2 files changed, 16 insertions(+), 11 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
+index 15bb6f001a04..5f86cc1cfd09 100644
+--- a/drivers/net/ethernet/intel/ice/ice_lib.c
++++ b/drivers/net/ethernet/intel/ice/ice_lib.c
+@@ -3207,8 +3207,8 @@ ice_vsi_rebuild_get_coalesce(struct ice_vsi *vsi,
+ ice_for_each_q_vector(vsi, i) {
+ struct ice_q_vector *q_vector = vsi->q_vectors[i];
+
+- coalesce[i].itr_tx = q_vector->tx.itr_setting;
+- coalesce[i].itr_rx = q_vector->rx.itr_setting;
++ coalesce[i].itr_tx = q_vector->tx.itr_settings;
++ coalesce[i].itr_rx = q_vector->rx.itr_settings;
+ coalesce[i].intrl = q_vector->intrl;
+
+ if (i < vsi->num_txq)
+@@ -3264,21 +3264,21 @@ ice_vsi_rebuild_set_coalesce(struct ice_vsi *vsi,
+ */
+ if (i < vsi->alloc_rxq && coalesce[i].rx_valid) {
+ rc = &vsi->q_vectors[i]->rx;
+- rc->itr_setting = coalesce[i].itr_rx;
++ rc->itr_settings = coalesce[i].itr_rx;
+ ice_write_itr(rc, rc->itr_setting);
+ } else if (i < vsi->alloc_rxq) {
+ rc = &vsi->q_vectors[i]->rx;
+- rc->itr_setting = coalesce[0].itr_rx;
++ rc->itr_settings = coalesce[0].itr_rx;
+ ice_write_itr(rc, rc->itr_setting);
+ }
+
+ if (i < vsi->alloc_txq && coalesce[i].tx_valid) {
+ rc = &vsi->q_vectors[i]->tx;
+- rc->itr_setting = coalesce[i].itr_tx;
++ rc->itr_settings = coalesce[i].itr_tx;
+ ice_write_itr(rc, rc->itr_setting);
+ } else if (i < vsi->alloc_txq) {
+ rc = &vsi->q_vectors[i]->tx;
+- rc->itr_setting = coalesce[0].itr_tx;
++ rc->itr_settings = coalesce[0].itr_tx;
+ ice_write_itr(rc, rc->itr_setting);
+ }
+
+@@ -3292,12 +3292,12 @@ ice_vsi_rebuild_set_coalesce(struct ice_vsi *vsi,
+ for (; i < vsi->num_q_vectors; i++) {
+ /* transmit */
+ rc = &vsi->q_vectors[i]->tx;
+- rc->itr_setting = coalesce[0].itr_tx;
++ rc->itr_settings = coalesce[0].itr_tx;
+ ice_write_itr(rc, rc->itr_setting);
+
+ /* receive */
+ rc = &vsi->q_vectors[i]->rx;
+- rc->itr_setting = coalesce[0].itr_rx;
++ rc->itr_settings = coalesce[0].itr_rx;
+ ice_write_itr(rc, rc->itr_setting);
+
+ vsi->q_vectors[i]->intrl = coalesce[0].intrl;
+diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
+index b7b3bd4816f0..ec4733272034 100644
+--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
++++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
+@@ -379,9 +379,14 @@ struct ice_ring_container {
+ /* this matches the maximum number of ITR bits, but in usec
+ * values, so it is shifted left one bit (bit zero is ignored)
+ */
+- u16 itr_setting:13;
+- u16 itr_reserved:2;
+- u16 itr_mode:1;
++ union {
++ struct {
++ u16 itr_setting:13;
++ u16 itr_reserved:2;
++ u16 itr_mode:1;
++ };
++ u16 itr_settings;
++ };
+ enum ice_container_type type;
+ };
+
+--
+2.35.1
+
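Illustrative note, outside the queued patch above: a minimal sketch of the save/restore pattern the fix introduces. Wrapping the bit fields in an anonymous struct inside a union lets all of them be copied through one u16 alias instead of field by field. The struct here is invented for illustration and only mirrors the ice_ring_container field names.

#include <stdio.h>
#include <stdint.h>

struct ring_container_sketch {
	union {
		struct {
			uint16_t itr_setting:13;
			uint16_t itr_reserved:2;
			uint16_t itr_mode:1;
		};
		uint16_t itr_settings;	/* all of the bit fields as one value */
	};
};

int main(void)
{
	struct ring_container_sketch rc = { 0 };
	uint16_t saved;

	rc.itr_setting = 50;			/* e.g. 50 usec */
	rc.itr_mode = 1;

	saved = rc.itr_settings;		/* save every field at once */
	rc.itr_settings = 0;			/* a rebuild would clear them... */
	rc.itr_settings = saved;		/* ...and this restores them all */

	printf("itr_setting=%u itr_mode=%u\n",
	       (unsigned)rc.itr_setting, (unsigned)rc.itr_mode);
	return 0;
}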
diff --git a/queue-5.17/ice-fix-possible-under-reporting-of-ethtool-tx-and-r.patch b/queue-5.17/ice-fix-possible-under-reporting-of-ethtool-tx-and-r.patch
new file mode 100644
index 0000000..6fa43d4
--- /dev/null
+++ b/queue-5.17/ice-fix-possible-under-reporting-of-ethtool-tx-and-r.patch
@@ -0,0 +1,51 @@
+From c6696a222e424f910946ad65a927c21eee514137 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 28 Apr 2022 14:11:42 -0700
+Subject: ice: fix possible under reporting of ethtool Tx and Rx statistics
+
+From: Paul Greenwalt <paul.greenwalt@intel.com>
+
+[ Upstream commit 31b6298fd8e29effe9ed6b77351ac5969be56ce0 ]
+
+The hardware statistics counters are not cleared during resets, so the
+driver's first access initializes the baseline and subsequent reads
+report the counters. The statistics counters are read
+during the watchdog subtask when the interface is up. If the baseline
+is not initialized before the interface is up, then there can be a brief
+window in which some traffic can be transmitted/received before the
+initial baseline reading takes place.
+
+Directly initialize ethtool statistics in driver open so the baseline will
+be initialized when the interface is up, and any dropped packets
+incremented before the interface is up won't be reported.
+
+Fixes: 28dc1b86f8ea9 ("ice: ignore dropped packets during init")
+Signed-off-by: Paul Greenwalt <paul.greenwalt@intel.com>
+Tested-by: Gurucharan <gurucharanx.g@intel.com> (A Contingent worker at Intel)
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/ice/ice_main.c | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
+index 7f6715eb862f..30f055e1a92a 100644
+--- a/drivers/net/ethernet/intel/ice/ice_main.c
++++ b/drivers/net/ethernet/intel/ice/ice_main.c
+@@ -5907,9 +5907,10 @@ static int ice_up_complete(struct ice_vsi *vsi)
+ ice_ptp_link_change(pf, pf->hw.pf_id, true);
+ }
+
+- /* clear this now, and the first stats read will be used as baseline */
+- vsi->stat_offsets_loaded = false;
+-
++ /* Perform an initial read of the statistics registers now to
++ * set the baseline so counters are ready when interface is up
++ */
++ ice_update_eth_stats(vsi);
+ ice_service_task_schedule(pf);
+
+ return 0;
+--
+2.35.1
+
diff --git a/queue-5.17/igb-skip-phy-status-check-where-unavailable.patch b/queue-5.17/igb-skip-phy-status-check-where-unavailable.patch
new file mode 100644
index 0000000..93fcead
--- /dev/null
+++ b/queue-5.17/igb-skip-phy-status-check-where-unavailable.patch
@@ -0,0 +1,46 @@
+From b361eb5f8ebd93bb3cbff1791411a89be8baee05 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 17 May 2022 11:01:05 -0700
+Subject: igb: skip phy status check where unavailable
+
+From: Kevin Mitchell <kevmitch@arista.com>
+
+[ Upstream commit 942d2ad5d2e0df758a645ddfadffde2795322728 ]
+
+igb_read_phy_reg() will silently return, leaving phy_data untouched, if
+hw->ops.read_reg isn't set. Depending on the uninitialized value of
+phy_data, this led to the phy status check either succeeding immediately
+or looping continuously for 2 seconds before emitting a noisy err-level
+timeout. This message went out to the console even though there was no
+actual problem.
+
+Instead, first check whether the read_reg function pointer is set. If
+not, proceed without trying to check the phy status register.
+
+Fixes: b72f3f72005d ("igb: When GbE link up, wait for Remote receiver status condition")
+Signed-off-by: Kevin Mitchell <kevmitch@arista.com>
+Tested-by: Gurucharan <gurucharanx.g@intel.com> (A Contingent worker at Intel)
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/igb/igb_main.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
+index c1e4ad65b02d..4e0abfe68cfd 100644
+--- a/drivers/net/ethernet/intel/igb/igb_main.c
++++ b/drivers/net/ethernet/intel/igb/igb_main.c
+@@ -5512,7 +5512,8 @@ static void igb_watchdog_task(struct work_struct *work)
+ break;
+ }
+
+- if (adapter->link_speed != SPEED_1000)
++ if (adapter->link_speed != SPEED_1000 ||
++ !hw->phy.ops.read_reg)
+ goto no_wait;
+
+ /* wait for Remote receiver status OK */
+--
+2.35.1
+
diff --git a/queue-5.17/kvm-x86-pmu-fix-the-compare-function-used-by-the-pmu.patch b/queue-5.17/kvm-x86-pmu-fix-the-compare-function-used-by-the-pmu.patch
new file mode 100644
index 0000000..e706e05
--- /dev/null
+++ b/queue-5.17/kvm-x86-pmu-fix-the-compare-function-used-by-the-pmu.patch
@@ -0,0 +1,49 @@
+From 60a912e144bc006f5ab66c298f5457a25c909eff Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 17 May 2022 05:12:36 +0000
+Subject: kvm: x86/pmu: Fix the compare function used by the pmu event filter
+
+From: Aaron Lewis <aaronlewis@google.com>
+
+[ Upstream commit 4ac19ead0dfbabd8e0bfc731f507cfb0b95d6c99 ]
+
+When returning from the compare function the u64 is truncated to an
+int. This results in a loss of the high nybble[1] in the event select
+and its sign if that nybble is in use. Switch from using a result that
+can end up being truncated to a result that can only be: 1, 0, -1.
+
+[1] bits 35:32 in the event select register and bits 11:8 in the event
+ select.
+
+Fixes: 7ff775aca48ad ("KVM: x86/pmu: Use binary search to check filtered events")
+Signed-off-by: Aaron Lewis <aaronlewis@google.com>
+Reviewed-by: Sean Christopherson <seanjc@google.com>
+Message-Id: <20220517051238.2566934-1-aaronlewis@google.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/kvm/pmu.c | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
+index eca39f56c231..0604bc29f0b8 100644
+--- a/arch/x86/kvm/pmu.c
++++ b/arch/x86/kvm/pmu.c
+@@ -171,9 +171,12 @@ static bool pmc_resume_counter(struct kvm_pmc *pmc)
+ return true;
+ }
+
+-static int cmp_u64(const void *a, const void *b)
++static int cmp_u64(const void *pa, const void *pb)
+ {
+- return *(__u64 *)a - *(__u64 *)b;
++ u64 a = *(u64 *)pa;
++ u64 b = *(u64 *)pb;
++
++ return (a > b) - (a < b);
+ }
+
+ void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
+--
+2.35.1
+
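
The truncation described above is easy to reproduce outside the kernel. The sketch below uses userspace qsort-style comparators (illustrative only, not the kvm code); on common ABIs the u64 difference is truncated to a 32-bit int, so values that differ only above bit 31 compare as equal, while the (a > b) - (a < b) form stays correct:

#include <stdint.h>
#include <stdio.h>

/* Broken comparator: the u64 difference is truncated to int, so the
 * result can lose its magnitude and sign when the high bits differ.
 */
static int cmp_u64_broken(const void *pa, const void *pb)
{
	return *(const uint64_t *)pa - *(const uint64_t *)pb;
}

/* Fixed comparator: only ever returns 1, 0 or -1. */
static int cmp_u64_fixed(const void *pa, const void *pb)
{
	uint64_t a = *(const uint64_t *)pa;
	uint64_t b = *(const uint64_t *)pb;

	return (a > b) - (a < b);
}

int main(void)
{
	/* a > b, but (a - b) == 0x100000000 truncates to 0: "equal". */
	uint64_t a = 0x100000001ULL, b = 1;

	printf("broken: %d, fixed: %d\n",
	       cmp_u64_broken(&a, &b), cmp_u64_fixed(&a, &b));
	return 0;
}
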
diff --git a/queue-5.17/mac80211-fix-rx-reordering-with-non-explicit-psmp-ac.patch b/queue-5.17/mac80211-fix-rx-reordering-with-non-explicit-psmp-ac.patch
new file mode 100644
index 0000000..b201b3e
--- /dev/null
+++ b/queue-5.17/mac80211-fix-rx-reordering-with-non-explicit-psmp-ac.patch
@@ -0,0 +1,38 @@
+From 779fc8a271f3339a89823a1b06c10ec25b35bfe4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 20 Apr 2022 12:50:38 +0200
+Subject: mac80211: fix rx reordering with non explicit / psmp ack policy
+
+From: Felix Fietkau <nbd@nbd.name>
+
+[ Upstream commit 5e469ed9764d4722c59562da13120bd2dc6834c5 ]
+
+When the QoS ack policy is set to non-explicit / PSMP ack, frames are treated
+as not being part of a BA session, which causes extra latency on reordering.
+Fix this by only bypassing reordering for packets with no-ack policy.
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+Link: https://lore.kernel.org/r/20220420105038.36443-1-nbd@nbd.name
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/mac80211/rx.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
+index 48d9553dafe3..7e2404fd85b6 100644
+--- a/net/mac80211/rx.c
++++ b/net/mac80211/rx.c
+@@ -1405,8 +1405,7 @@ static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx,
+ goto dont_reorder;
+
+ /* not part of a BA session */
+- if (ack_policy != IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK &&
+- ack_policy != IEEE80211_QOS_CTL_ACK_POLICY_NORMAL)
++ if (ack_policy == IEEE80211_QOS_CTL_ACK_POLICY_NOACK)
+ goto dont_reorder;
+
+ /* new, potentially un-ordered, ampdu frame - process it */
+--
+2.35.1
+
diff --git a/queue-5.17/mptcp-do-tcp-fallback-on-early-dss-checksum-failure.patch b/queue-5.17/mptcp-do-tcp-fallback-on-early-dss-checksum-failure.patch
new file mode 100644
index 0000000..ed2e7a2
--- /dev/null
+++ b/queue-5.17/mptcp-do-tcp-fallback-on-early-dss-checksum-failure.patch
@@ -0,0 +1,105 @@
+From eefba3dc384cddc4489efcd6b32d9e05f38d00d4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 17 May 2022 11:02:12 -0700
+Subject: mptcp: Do TCP fallback on early DSS checksum failure
+
+From: Mat Martineau <mathew.j.martineau@linux.intel.com>
+
+[ Upstream commit ae66fb2ba6c3dcaf8b9612b65aa949a1a4bed150 ]
+
+RFC 8684 section 3.7 describes several opportunities for a MPTCP
+connection to "fall back" to regular TCP early in the connection
+process, before it has been confirmed that MPTCP options can be
+successfully propagated on all SYN, SYN/ACK, and data packets. If a peer
+acknowledges the first received data packet with a regular TCP header
+(no MPTCP options), fallback is allowed.
+
+If the recipient of that first data packet finds a MPTCP DSS checksum
+error, this provides an opportunity to fail gracefully with a TCP
+fallback rather than resetting the connection (as might happen if a
+checksum failure were detected later).
+
+This commit modifies the checksum failure code to attempt fallback on
+the initial subflow of a MPTCP connection, only if it's a failure in the
+first data mapping. In cases where the peer initiates the connection,
+requests checksums, is the first to send data, and the peer is sending
+incorrect checksums (see
+https://github.com/multipath-tcp/mptcp_net-next/issues/275), this allows
+the connection to proceed as TCP rather than reset.
+
+Fixes: dd8bcd1768ff ("mptcp: validate the data checksum")
+Acked-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Mat Martineau <mathew.j.martineau@linux.intel.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/mptcp/protocol.h | 3 ++-
+ net/mptcp/subflow.c | 21 ++++++++++++++++++---
+ 2 files changed, 20 insertions(+), 4 deletions(-)
+
+diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
+index e4413b3e50c2..8015389859d9 100644
+--- a/net/mptcp/protocol.h
++++ b/net/mptcp/protocol.h
+@@ -443,7 +443,8 @@ struct mptcp_subflow_context {
+ can_ack : 1, /* only after processing the remote a key */
+ disposable : 1, /* ctx can be free at ulp release time */
+ stale : 1, /* unable to snd/rcv data, do not use for xmit */
+- local_id_valid : 1; /* local_id is correctly initialized */
++ local_id_valid : 1, /* local_id is correctly initialized */
++ valid_csum_seen : 1; /* at least one csum validated */
+ enum mptcp_data_avail data_avail;
+ u32 remote_nonce;
+ u64 thmac;
+diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
+index e27574e9f969..7a3a70067c80 100644
+--- a/net/mptcp/subflow.c
++++ b/net/mptcp/subflow.c
+@@ -958,11 +958,14 @@ static enum mapping_status validate_data_csum(struct sock *ssk, struct sk_buff *
+ subflow->map_data_csum);
+ if (unlikely(csum)) {
+ MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DATACSUMERR);
+- subflow->send_mp_fail = 1;
+- MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_MPFAILTX);
++ if (subflow->mp_join || subflow->valid_csum_seen) {
++ subflow->send_mp_fail = 1;
++ MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_MPFAILTX);
++ }
+ return subflow->mp_join ? MAPPING_INVALID : MAPPING_DUMMY;
+ }
+
++ subflow->valid_csum_seen = 1;
+ return MAPPING_OK;
+ }
+
+@@ -1144,6 +1147,18 @@ static void subflow_sched_work_if_closed(struct mptcp_sock *msk, struct sock *ss
+ }
+ }
+
++static bool subflow_can_fallback(struct mptcp_subflow_context *subflow)
++{
++ struct mptcp_sock *msk = mptcp_sk(subflow->conn);
++
++ if (subflow->mp_join)
++ return false;
++ else if (READ_ONCE(msk->csum_enabled))
++ return !subflow->valid_csum_seen;
++ else
++ return !subflow->fully_established;
++}
++
+ static bool subflow_check_data_avail(struct sock *ssk)
+ {
+ struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
+@@ -1221,7 +1236,7 @@ static bool subflow_check_data_avail(struct sock *ssk)
+ return true;
+ }
+
+- if (subflow->mp_join || subflow->fully_established) {
++ if (!subflow_can_fallback(subflow)) {
+ /* fatal protocol error, close the socket.
+ * subflow_error_report() will introduce the appropriate barriers
+ */
+--
+2.35.1
+
diff --git a/queue-5.17/mptcp-fix-checksum-byte-order.patch b/queue-5.17/mptcp-fix-checksum-byte-order.patch
new file mode 100644
index 0000000..3ff6abf
--- /dev/null
+++ b/queue-5.17/mptcp-fix-checksum-byte-order.patch
@@ -0,0 +1,153 @@
+From 89427fa0094e7fdd9425733a3f6f38853874085b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 17 May 2022 11:02:11 -0700
+Subject: mptcp: fix checksum byte order
+
+From: Paolo Abeni <pabeni@redhat.com>
+
+[ Upstream commit ba2c89e0ea74a904d5231643245753d77422e7f5 ]
+
+The MPTCP code typecasts the checksum value to u16 and
+then converts it to big endian while storing the value into
+the MPTCP option.
+
+As a result, the wire encoding on little-endian hosts is
+wrong, and that causes interoperability
+issues with other implementations or hosts with different endianness.
+
+Address the issue by writing the unmodified __sum16 value into the packet.
+
+MPTCP checksum is disabled by default; interoperating with systems
+with bad MPTCP-level csum encoding should cause a fallback to TCP.
+
+Closes: https://github.com/multipath-tcp/mptcp_net-next/issues/275
+Fixes: c5b39e26d003 ("mptcp: send out checksum for DSS")
+Fixes: 390b95a5fb84 ("mptcp: receive checksum for DSS")
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Mat Martineau <mathew.j.martineau@linux.intel.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/mptcp/options.c | 36 ++++++++++++++++++++++++------------
+ net/mptcp/protocol.h | 2 +-
+ net/mptcp/subflow.c | 2 +-
+ 3 files changed, 26 insertions(+), 14 deletions(-)
+
+diff --git a/net/mptcp/options.c b/net/mptcp/options.c
+index 645dd984fef0..9ac75689a99d 100644
+--- a/net/mptcp/options.c
++++ b/net/mptcp/options.c
+@@ -107,7 +107,7 @@ static void mptcp_parse_option(const struct sk_buff *skb,
+ ptr += 2;
+ }
+ if (opsize == TCPOLEN_MPTCP_MPC_ACK_DATA_CSUM) {
+- mp_opt->csum = (__force __sum16)get_unaligned_be16(ptr);
++ mp_opt->csum = get_unaligned((__force __sum16 *)ptr);
+ mp_opt->suboptions |= OPTION_MPTCP_CSUMREQD;
+ ptr += 2;
+ }
+@@ -221,7 +221,7 @@ static void mptcp_parse_option(const struct sk_buff *skb,
+
+ if (opsize == expected_opsize + TCPOLEN_MPTCP_DSS_CHECKSUM) {
+ mp_opt->suboptions |= OPTION_MPTCP_CSUMREQD;
+- mp_opt->csum = (__force __sum16)get_unaligned_be16(ptr);
++ mp_opt->csum = get_unaligned((__force __sum16 *)ptr);
+ ptr += 2;
+ }
+
+@@ -1236,7 +1236,7 @@ static void mptcp_set_rwin(const struct tcp_sock *tp)
+ WRITE_ONCE(msk->rcv_wnd_sent, ack_seq);
+ }
+
+-u16 __mptcp_make_csum(u64 data_seq, u32 subflow_seq, u16 data_len, __wsum sum)
++__sum16 __mptcp_make_csum(u64 data_seq, u32 subflow_seq, u16 data_len, __wsum sum)
+ {
+ struct csum_pseudo_header header;
+ __wsum csum;
+@@ -1252,15 +1252,25 @@ u16 __mptcp_make_csum(u64 data_seq, u32 subflow_seq, u16 data_len, __wsum sum)
+ header.csum = 0;
+
+ csum = csum_partial(&header, sizeof(header), sum);
+- return (__force u16)csum_fold(csum);
++ return csum_fold(csum);
+ }
+
+-static u16 mptcp_make_csum(const struct mptcp_ext *mpext)
++static __sum16 mptcp_make_csum(const struct mptcp_ext *mpext)
+ {
+ return __mptcp_make_csum(mpext->data_seq, mpext->subflow_seq, mpext->data_len,
+ ~csum_unfold(mpext->csum));
+ }
+
++static void put_len_csum(u16 len, __sum16 csum, void *data)
++{
++ __sum16 *sumptr = data + 2;
++ __be16 *ptr = data;
++
++ put_unaligned_be16(len, ptr);
++
++ put_unaligned(csum, sumptr);
++}
++
+ void mptcp_write_options(__be32 *ptr, const struct tcp_sock *tp,
+ struct mptcp_out_options *opts)
+ {
+@@ -1328,8 +1338,9 @@ void mptcp_write_options(__be32 *ptr, const struct tcp_sock *tp,
+ put_unaligned_be32(mpext->subflow_seq, ptr);
+ ptr += 1;
+ if (opts->csum_reqd) {
+- put_unaligned_be32(mpext->data_len << 16 |
+- mptcp_make_csum(mpext), ptr);
++ put_len_csum(mpext->data_len,
++ mptcp_make_csum(mpext),
++ ptr);
+ } else {
+ put_unaligned_be32(mpext->data_len << 16 |
+ TCPOPT_NOP << 8 | TCPOPT_NOP, ptr);
+@@ -1376,11 +1387,12 @@ void mptcp_write_options(__be32 *ptr, const struct tcp_sock *tp,
+ goto mp_capable_done;
+
+ if (opts->csum_reqd) {
+- put_unaligned_be32(opts->data_len << 16 |
+- __mptcp_make_csum(opts->data_seq,
+- opts->subflow_seq,
+- opts->data_len,
+- ~csum_unfold(opts->csum)), ptr);
++ put_len_csum(opts->data_len,
++ __mptcp_make_csum(opts->data_seq,
++ opts->subflow_seq,
++ opts->data_len,
++ ~csum_unfold(opts->csum)),
++ ptr);
+ } else {
+ put_unaligned_be32(opts->data_len << 16 |
+ TCPOPT_NOP << 8 | TCPOPT_NOP, ptr);
+diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
+index a1c845eb47bd..aec767ee047a 100644
+--- a/net/mptcp/protocol.h
++++ b/net/mptcp/protocol.h
+@@ -725,7 +725,7 @@ void mptcp_token_destroy(struct mptcp_sock *msk);
+ void mptcp_crypto_key_sha(u64 key, u32 *token, u64 *idsn);
+
+ void mptcp_crypto_hmac_sha(u64 key1, u64 key2, u8 *msg, int len, void *hmac);
+-u16 __mptcp_make_csum(u64 data_seq, u32 subflow_seq, u16 data_len, __wsum sum);
++__sum16 __mptcp_make_csum(u64 data_seq, u32 subflow_seq, u16 data_len, __wsum sum);
+
+ void __init mptcp_pm_init(void);
+ void mptcp_pm_data_init(struct mptcp_sock *msk);
+diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
+index 1d4d84efe8f5..651f01d13191 100644
+--- a/net/mptcp/subflow.c
++++ b/net/mptcp/subflow.c
+@@ -846,7 +846,7 @@ static enum mapping_status validate_data_csum(struct sock *ssk, struct sk_buff *
+ {
+ struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
+ u32 offset, seq, delta;
+- u16 csum;
++ __sum16 csum;
+ int len;
+
+ if (!csum_reqd)
+--
+2.35.1
+
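
The double byte-swap described above can be demonstrated in isolation. This standalone sketch (userspace C, assuming a little-endian host; the helpers are hand-rolled stand-ins for get_unaligned_be16()/get_unaligned(), not kernel code) shows how a be16 load followed by a plain store flips the bytes of a network-order field, while the verbatim 16-bit copy the fix uses keeps the wire layout:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

int main(void)
{
	const uint8_t wire[2] = { 0x12, 0x34 };	/* checksum as sent on the wire */
	uint16_t be_load, raw;
	uint8_t out[2];

	/* equivalent of get_unaligned_be16(): byte-swap into host order */
	be_load = (uint16_t)(wire[0] << 8 | wire[1]);
	memcpy(out, &be_load, 2);
	printf("be16 load stored back: %02x %02x\n", out[0], out[1]);	/* 34 12 on LE */

	/* equivalent of get_unaligned() on a __sum16: copy bits verbatim */
	memcpy(&raw, wire, 2);
	memcpy(out, &raw, 2);
	printf("verbatim copy stored:  %02x %02x\n", out[0], out[1]);	/* 12 34 */
	return 0;
}
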
diff --git a/queue-5.17/mptcp-fix-subflow-accounting-on-close.patch b/queue-5.17/mptcp-fix-subflow-accounting-on-close.patch
new file mode 100644
index 0000000..3077658
--- /dev/null
+++ b/queue-5.17/mptcp-fix-subflow-accounting-on-close.patch
@@ -0,0 +1,117 @@
+From 45cb208a5cd6f0e32e06789c20f0349c3aa5fc8a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 12 May 2022 16:26:41 -0700
+Subject: mptcp: fix subflow accounting on close
+
+From: Paolo Abeni <pabeni@redhat.com>
+
+[ Upstream commit 95d686517884a403412b000361cee2b08b2ed1e6 ]
+
+If the PM closes a fully established MPJ subflow, or the subflow
+creation errors out in its early stage, the subflows counter is
+not bumped accordingly.
+
+This change adds the missing accounting, additionally taking care
+of updating the 'accept_subflow' flag accordingly.
+
+Fixes: a88c9e496937 ("mptcp: do not block subflows creation on errors")
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Mat Martineau <mathew.j.martineau@linux.intel.com>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/mptcp/pm.c | 5 ++---
+ net/mptcp/protocol.h | 14 ++++++++++++++
+ net/mptcp/subflow.c | 12 +++++++++---
+ 3 files changed, 25 insertions(+), 6 deletions(-)
+
+diff --git a/net/mptcp/pm.c b/net/mptcp/pm.c
+index 7bea318ac5f2..1eb83cbe8aae 100644
+--- a/net/mptcp/pm.c
++++ b/net/mptcp/pm.c
+@@ -178,14 +178,13 @@ void mptcp_pm_subflow_check_next(struct mptcp_sock *msk, const struct sock *ssk,
+ struct mptcp_pm_data *pm = &msk->pm;
+ bool update_subflows;
+
+- update_subflows = (ssk->sk_state == TCP_CLOSE) &&
+- (subflow->request_join || subflow->mp_join);
++ update_subflows = subflow->request_join || subflow->mp_join;
+ if (!READ_ONCE(pm->work_pending) && !update_subflows)
+ return;
+
+ spin_lock_bh(&pm->lock);
+ if (update_subflows)
+- pm->subflows--;
++ __mptcp_pm_close_subflow(msk);
+
+ /* Even if this subflow is not really established, tell the PM to try
+ * to pick the next ones, if possible.
+diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
+index 85317ce38e3f..a1c845eb47bd 100644
+--- a/net/mptcp/protocol.h
++++ b/net/mptcp/protocol.h
+@@ -835,6 +835,20 @@ unsigned int mptcp_pm_get_add_addr_accept_max(struct mptcp_sock *msk);
+ unsigned int mptcp_pm_get_subflows_max(struct mptcp_sock *msk);
+ unsigned int mptcp_pm_get_local_addr_max(struct mptcp_sock *msk);
+
++/* called under PM lock */
++static inline void __mptcp_pm_close_subflow(struct mptcp_sock *msk)
++{
++ if (--msk->pm.subflows < mptcp_pm_get_subflows_max(msk))
++ WRITE_ONCE(msk->pm.accept_subflow, true);
++}
++
++static inline void mptcp_pm_close_subflow(struct mptcp_sock *msk)
++{
++ spin_lock_bh(&msk->pm.lock);
++ __mptcp_pm_close_subflow(msk);
++ spin_unlock_bh(&msk->pm.lock);
++}
++
+ void mptcp_sockopt_sync(struct mptcp_sock *msk, struct sock *ssk);
+ void mptcp_sockopt_sync_locked(struct mptcp_sock *msk, struct sock *ssk);
+
+diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
+index bea47a1180dc..1d4d84efe8f5 100644
+--- a/net/mptcp/subflow.c
++++ b/net/mptcp/subflow.c
+@@ -1380,20 +1380,20 @@ int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_addr_info *loc,
+ struct sockaddr_storage addr;
+ int remote_id = remote->id;
+ int local_id = loc->id;
++ int err = -ENOTCONN;
+ struct socket *sf;
+ struct sock *ssk;
+ u32 remote_token;
+ int addrlen;
+ int ifindex;
+ u8 flags;
+- int err;
+
+ if (!mptcp_is_fully_established(sk))
+- return -ENOTCONN;
++ goto err_out;
+
+ err = mptcp_subflow_create_socket(sk, &sf);
+ if (err)
+- return err;
++ goto err_out;
+
+ ssk = sf->sk;
+ subflow = mptcp_subflow_ctx(ssk);
+@@ -1456,6 +1456,12 @@ int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_addr_info *loc,
+ failed:
+ subflow->disposable = 1;
+ sock_release(sf);
++
++err_out:
++	/* we account subflows before the creation, and these failures will not
++ * be caught by sk_state_change()
++ */
++ mptcp_pm_close_subflow(msk);
+ return err;
+ }
+
+--
+2.35.1
+
diff --git a/queue-5.17/mptcp-strict-local-address-id-selection.patch b/queue-5.17/mptcp-strict-local-address-id-selection.patch
new file mode 100644
index 0000000..12d3ada
--- /dev/null
+++ b/queue-5.17/mptcp-strict-local-address-id-selection.patch
@@ -0,0 +1,224 @@
+From dd874ab95a2df6fce00831bf16a06ed827639f6c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 7 Mar 2022 12:44:37 -0800
+Subject: mptcp: strict local address ID selection
+
+From: Paolo Abeni <pabeni@redhat.com>
+
+[ Upstream commit 4cf86ae84c718333928fd2d43168a1e359a28329 ]
+
+The address ID selection for MPJ subflows created in response
+to an incoming ADD_ADDR option is currently unreliable: it happens
+at MPJ socket creation time, when the local address could be
+unknown.
+
+Additionally, if no local endpoint is available for the local
+address, a new dummy endpoint is created, confusing user-land.
+
+This change refactors the code to move the address ID selection inside
+the rebuild_header() helper, when the local address eventually
+selected by the route lookup is finally known. If the address used
+is not mapped by any endpoint - and thus can't be advertised/removed -
+pick id 0 instead of allocating a new endpoint.
+
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Mat Martineau <mathew.j.martineau@linux.intel.com>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/mptcp/pm_netlink.c | 13 --------
+ net/mptcp/protocol.c | 3 ++
+ net/mptcp/protocol.h | 3 +-
+ net/mptcp/subflow.c | 67 ++++++++++++++++++++++++++++++++++++------
+ 4 files changed, 63 insertions(+), 23 deletions(-)
+
+diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
+index 4b5d795383cd..ec73bd4be0a8 100644
+--- a/net/mptcp/pm_netlink.c
++++ b/net/mptcp/pm_netlink.c
+@@ -83,16 +83,6 @@ static bool addresses_equal(const struct mptcp_addr_info *a,
+ return a->port == b->port;
+ }
+
+-static bool address_zero(const struct mptcp_addr_info *addr)
+-{
+- struct mptcp_addr_info zero;
+-
+- memset(&zero, 0, sizeof(zero));
+- zero.family = addr->family;
+-
+- return addresses_equal(addr, &zero, true);
+-}
+-
+ static void local_address(const struct sock_common *skc,
+ struct mptcp_addr_info *addr)
+ {
+@@ -1011,9 +1001,6 @@ int mptcp_pm_nl_get_local_id(struct mptcp_sock *msk, struct sock_common *skc)
+ if (addresses_equal(&msk_local, &skc_local, false))
+ return 0;
+
+- if (address_zero(&skc_local))
+- return 0;
+-
+ pernet = net_generic(sock_net((struct sock *)msk), pm_nl_pernet_id);
+
+ rcu_read_lock();
+diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
+index 014c9d88f947..cb90941840b1 100644
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -117,6 +117,9 @@ static int __mptcp_socket_create(struct mptcp_sock *msk)
+ list_add(&subflow->node, &msk->conn_list);
+ sock_hold(ssock->sk);
+ subflow->request_mptcp = 1;
++
++ /* This is the first subflow, always with id 0 */
++ subflow->local_id_valid = 1;
+ mptcp_sock_graft(msk->first, sk->sk_socket);
+
+ return 0;
+diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
+index aec767ee047a..e4413b3e50c2 100644
+--- a/net/mptcp/protocol.h
++++ b/net/mptcp/protocol.h
+@@ -442,7 +442,8 @@ struct mptcp_subflow_context {
+ rx_eof : 1,
+ can_ack : 1, /* only after processing the remote a key */
+ disposable : 1, /* ctx can be free at ulp release time */
+- stale : 1; /* unable to snd/rcv data, do not use for xmit */
++ stale : 1, /* unable to snd/rcv data, do not use for xmit */
++ local_id_valid : 1; /* local_id is correctly initialized */
+ enum mptcp_data_avail data_avail;
+ u32 remote_nonce;
+ u64 thmac;
+diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
+index 651f01d13191..e27574e9f969 100644
+--- a/net/mptcp/subflow.c
++++ b/net/mptcp/subflow.c
+@@ -483,6 +483,51 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
+ mptcp_subflow_reset(sk);
+ }
+
++static void subflow_set_local_id(struct mptcp_subflow_context *subflow, int local_id)
++{
++ subflow->local_id = local_id;
++ subflow->local_id_valid = 1;
++}
++
++static int subflow_chk_local_id(struct sock *sk)
++{
++ struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
++ struct mptcp_sock *msk = mptcp_sk(subflow->conn);
++ int err;
++
++ if (likely(subflow->local_id_valid))
++ return 0;
++
++ err = mptcp_pm_get_local_id(msk, (struct sock_common *)sk);
++ if (err < 0)
++ return err;
++
++ subflow_set_local_id(subflow, err);
++ return 0;
++}
++
++static int subflow_rebuild_header(struct sock *sk)
++{
++ int err = subflow_chk_local_id(sk);
++
++ if (unlikely(err < 0))
++ return err;
++
++ return inet_sk_rebuild_header(sk);
++}
++
++#if IS_ENABLED(CONFIG_MPTCP_IPV6)
++static int subflow_v6_rebuild_header(struct sock *sk)
++{
++ int err = subflow_chk_local_id(sk);
++
++ if (unlikely(err < 0))
++ return err;
++
++ return inet6_sk_rebuild_header(sk);
++}
++#endif
++
+ struct request_sock_ops mptcp_subflow_request_sock_ops;
+ EXPORT_SYMBOL_GPL(mptcp_subflow_request_sock_ops);
+ static struct tcp_request_sock_ops subflow_request_sock_ipv4_ops;
+@@ -1401,13 +1446,8 @@ int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_addr_info *loc,
+ get_random_bytes(&subflow->local_nonce, sizeof(u32));
+ } while (!subflow->local_nonce);
+
+- if (!local_id) {
+- err = mptcp_pm_get_local_id(msk, (struct sock_common *)ssk);
+- if (err < 0)
+- goto failed;
+-
+- local_id = err;
+- }
++ if (local_id)
++ subflow_set_local_id(subflow, local_id);
+
+ mptcp_pm_get_flags_and_ifindex_by_id(sock_net(sk), local_id,
+ &flags, &ifindex);
+@@ -1432,7 +1472,6 @@ int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_addr_info *loc,
+ pr_debug("msk=%p remote_token=%u local_id=%d remote_id=%d", msk,
+ remote_token, local_id, remote_id);
+ subflow->remote_token = remote_token;
+- subflow->local_id = local_id;
+ subflow->remote_id = remote_id;
+ subflow->request_join = 1;
+ subflow->request_bkup = !!(flags & MPTCP_PM_ADDR_FLAG_BACKUP);
+@@ -1737,15 +1776,22 @@ static void subflow_ulp_clone(const struct request_sock *req,
+ new_ctx->token = subflow_req->token;
+ new_ctx->ssn_offset = subflow_req->ssn_offset;
+ new_ctx->idsn = subflow_req->idsn;
++
++ /* this is the first subflow, id is always 0 */
++ new_ctx->local_id_valid = 1;
+ } else if (subflow_req->mp_join) {
+ new_ctx->ssn_offset = subflow_req->ssn_offset;
+ new_ctx->mp_join = 1;
+ new_ctx->fully_established = 1;
+ new_ctx->backup = subflow_req->backup;
+- new_ctx->local_id = subflow_req->local_id;
+ new_ctx->remote_id = subflow_req->remote_id;
+ new_ctx->token = subflow_req->token;
+ new_ctx->thmac = subflow_req->thmac;
++
++ /* the subflow req id is valid, fetched via subflow_check_req()
++ * and subflow_token_join_request()
++ */
++ subflow_set_local_id(new_ctx, subflow_req->local_id);
+ }
+ }
+
+@@ -1798,6 +1844,7 @@ void __init mptcp_subflow_init(void)
+ subflow_specific.conn_request = subflow_v4_conn_request;
+ subflow_specific.syn_recv_sock = subflow_syn_recv_sock;
+ subflow_specific.sk_rx_dst_set = subflow_finish_connect;
++ subflow_specific.rebuild_header = subflow_rebuild_header;
+
+ tcp_prot_override = tcp_prot;
+ tcp_prot_override.release_cb = tcp_release_cb_override;
+@@ -1810,6 +1857,7 @@ void __init mptcp_subflow_init(void)
+ subflow_v6_specific.conn_request = subflow_v6_conn_request;
+ subflow_v6_specific.syn_recv_sock = subflow_syn_recv_sock;
+ subflow_v6_specific.sk_rx_dst_set = subflow_finish_connect;
++ subflow_v6_specific.rebuild_header = subflow_v6_rebuild_header;
+
+ subflow_v6m_specific = subflow_v6_specific;
+ subflow_v6m_specific.queue_xmit = ipv4_specific.queue_xmit;
+@@ -1817,6 +1865,7 @@ void __init mptcp_subflow_init(void)
+ subflow_v6m_specific.net_header_len = ipv4_specific.net_header_len;
+ subflow_v6m_specific.mtu_reduced = ipv4_specific.mtu_reduced;
+ subflow_v6m_specific.net_frag_header_len = 0;
++ subflow_v6m_specific.rebuild_header = subflow_rebuild_header;
+
+ tcpv6_prot_override = tcpv6_prot;
+ tcpv6_prot_override.release_cb = tcp_release_cb_override;
+--
+2.35.1
+
diff --git a/queue-5.17/net-af_key-add-check-for-pfkey_broadcast-in-function.patch b/queue-5.17/net-af_key-add-check-for-pfkey_broadcast-in-function.patch
new file mode 100644
index 0000000..7c92f7c
--- /dev/null
+++ b/queue-5.17/net-af_key-add-check-for-pfkey_broadcast-in-function.patch
@@ -0,0 +1,42 @@
+From 4bb9576383ad1ee46094b25f016c58ddf735f223 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 17 May 2022 17:42:31 +0800
+Subject: net: af_key: add check for pfkey_broadcast in function pfkey_process
+
+From: Jiasheng Jiang <jiasheng@iscas.ac.cn>
+
+[ Upstream commit 4dc2a5a8f6754492180741facf2a8787f2c415d7 ]
+
+If skb_clone() returns a null pointer, pfkey_broadcast() will
+return an error.
+Therefore, it is better to check the return value of
+pfkey_broadcast() and return an error if it fails.
+
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Signed-off-by: Jiasheng Jiang <jiasheng@iscas.ac.cn>
+Signed-off-by: Steffen Klassert <steffen.klassert@secunet.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/key/af_key.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/net/key/af_key.c b/net/key/af_key.c
+index fd51db3be91c..92e9d75dba2f 100644
+--- a/net/key/af_key.c
++++ b/net/key/af_key.c
+@@ -2826,8 +2826,10 @@ static int pfkey_process(struct sock *sk, struct sk_buff *skb, const struct sadb
+ void *ext_hdrs[SADB_EXT_MAX];
+ int err;
+
+- pfkey_broadcast(skb_clone(skb, GFP_KERNEL), GFP_KERNEL,
+- BROADCAST_PROMISC_ONLY, NULL, sock_net(sk));
++ err = pfkey_broadcast(skb_clone(skb, GFP_KERNEL), GFP_KERNEL,
++ BROADCAST_PROMISC_ONLY, NULL, sock_net(sk));
++ if (err)
++ return err;
+
+ memset(ext_hdrs, 0, sizeof(ext_hdrs));
+ err = parse_exthdrs(skb, hdr, ext_hdrs);
+--
+2.35.1
+
diff --git a/queue-5.17/net-atlantic-add-check-for-max_skb_frags.patch b/queue-5.17/net-atlantic-add-check-for-max_skb_frags.patch
new file mode 100644
index 0000000..bb88fe1
--- /dev/null
+++ b/queue-5.17/net-atlantic-add-check-for-max_skb_frags.patch
@@ -0,0 +1,55 @@
+From f8253e9358afd00b506c956882d073a007c949ce Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 9 May 2022 19:28:25 -0700
+Subject: net: atlantic: add check for MAX_SKB_FRAGS
+
+From: Grant Grundler <grundler@chromium.org>
+
+[ Upstream commit 6aecbba12b5c90b26dc062af3b9de8c4b3a2f19f ]
+
+Enforce that the CPU can not get stuck in an infinite loop.
+
+Reported-by: Aashay Shringarpure <aashay@google.com>
+Reported-by: Yi Chou <yich@google.com>
+Reported-by: Shervin Oloumi <enlightened@google.com>
+Signed-off-by: Grant Grundler <grundler@chromium.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/aquantia/atlantic/aq_ring.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+index bc1952131799..8201ce7adb77 100644
+--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
++++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+@@ -363,6 +363,7 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
+ continue;
+
+ if (!buff->is_eop) {
++ unsigned int frag_cnt = 0U;
+ buff_ = buff;
+ do {
+ bool is_rsc_completed = true;
+@@ -371,6 +372,8 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
+ err = -EIO;
+ goto err_exit;
+ }
++
++ frag_cnt++;
+ next_ = buff_->next,
+ buff_ = &self->buff_ring[next_];
+ is_rsc_completed =
+@@ -378,7 +381,8 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
+ next_,
+ self->hw_head);
+
+- if (unlikely(!is_rsc_completed)) {
++ if (unlikely(!is_rsc_completed) ||
++ frag_cnt > MAX_SKB_FRAGS) {
+ err = 0;
+ goto err_exit;
+ }
+--
+2.35.1
+
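
The hardening idea above - never trust a hardware-provided descriptor chain to terminate - generalizes beyond this driver. A minimal standalone C sketch with hypothetical structure names (not the aq_ring types) that bounds the walk the same way:

#include <stdio.h>

#define RING_SIZE     32
#define MAX_FRAGS      8	/* stand-in for MAX_SKB_FRAGS */

struct desc {
	int next;		/* index of next descriptor in the chain */
	int is_eop;		/* end-of-packet marker */
};

/* Walk a descriptor chain provided by (untrusted) hardware.  Bail out
 * if the chain leaves the ring or grows past the fragment budget, so a
 * corrupted "next"/"is_eop" field cannot pin the CPU in this loop.
 */
static int walk_chain(const struct desc *ring, int head)
{
	int frags = 0;

	while (!ring[head].is_eop) {
		if (ring[head].next < 0 || ring[head].next >= RING_SIZE)
			return -1;		/* bad index */
		if (++frags > MAX_FRAGS)
			return -1;		/* runaway chain */
		head = ring[head].next;
	}
	return frags;
}

int main(void)
{
	struct desc ring[RING_SIZE] = { 0 };

	/* A corrupted 2-entry loop: 0 -> 1 -> 0 -> ... never hits EOP. */
	ring[0].next = 1;
	ring[1].next = 0;
	printf("frags: %d\n", walk_chain(ring, 0));	/* -1, loop detected */
	return 0;
}
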
diff --git a/queue-5.17/net-atlantic-fix-frag-0-not-initialized.patch b/queue-5.17/net-atlantic-fix-frag-0-not-initialized.patch
new file mode 100644
index 0000000..36f390d
--- /dev/null
+++ b/queue-5.17/net-atlantic-fix-frag-0-not-initialized.patch
@@ -0,0 +1,53 @@
+From b0475af0b123b7aec80cc671cb12cdedeec15b02 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 9 May 2022 19:28:23 -0700
+Subject: net: atlantic: fix "frag[0] not initialized"
+
+From: Grant Grundler <grundler@chromium.org>
+
+[ Upstream commit 62e0ae0f4020250f961cf8d0103a4621be74e077 ]
+
+In aq_ring_rx_clean(), if buff->is_eop is not set AND
+buff->len < AQ_CFG_RX_HDR_SIZE, then hdr_len remains equal to
+buff->len and skb_add_rx_frag(xxx, *0*, ...) is not called.
+
+The loop following this code starts calling skb_add_rx_frag() starting
+with i=1 and thus frag[0] is never initialized. Since i is initialized
+to zero at the top of the primary loop, we can just reference and
+post-increment i instead of hardcoding the 0 when calling
+skb_add_rx_frag() the first time.
+
+Reported-by: Aashay Shringarpure <aashay@google.com>
+Reported-by: Yi Chou <yich@google.com>
+Reported-by: Shervin Oloumi <enlightened@google.com>
+Signed-off-by: Grant Grundler <grundler@chromium.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/aquantia/atlantic/aq_ring.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+index 77e76c9efd32..440423b0e8ea 100644
+--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
++++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+@@ -446,7 +446,7 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
+ ALIGN(hdr_len, sizeof(long)));
+
+ if (buff->len - hdr_len > 0) {
+- skb_add_rx_frag(skb, 0, buff->rxdata.page,
++ skb_add_rx_frag(skb, i++, buff->rxdata.page,
+ buff->rxdata.pg_off + hdr_len,
+ buff->len - hdr_len,
+ AQ_CFG_RX_FRAME_MAX);
+@@ -455,7 +455,6 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
+
+ if (!buff->is_eop) {
+ buff_ = buff;
+- i = 1U;
+ do {
+ next_ = buff_->next;
+ buff_ = &self->buff_ring[next_];
+--
+2.35.1
+
diff --git a/queue-5.17/net-atlantic-reduce-scope-of-is_rsc_complete.patch b/queue-5.17/net-atlantic-reduce-scope-of-is_rsc_complete.patch
new file mode 100644
index 0000000..49a0a38
--- /dev/null
+++ b/queue-5.17/net-atlantic-reduce-scope-of-is_rsc_complete.patch
@@ -0,0 +1,68 @@
+From 26c2988c1a8aa774ffed68964aa189d093dafdb3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 9 May 2022 19:28:24 -0700
+Subject: net: atlantic: reduce scope of is_rsc_complete
+
+From: Grant Grundler <grundler@chromium.org>
+
+[ Upstream commit 79784d77ebbd3ec516b7a5ce555d979fb7946202 ]
+
+Don't defer handling the err case outside the loop. That's pointless.
+
+And since is_rsc_complete is only used inside this loop, declare
+it inside the loop to reduce its scope.
+
+Signed-off-by: Grant Grundler <grundler@chromium.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/aquantia/atlantic/aq_ring.c | 13 ++++++-------
+ 1 file changed, 6 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+index 440423b0e8ea..bc1952131799 100644
+--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
++++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+@@ -346,7 +346,6 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
+ int budget)
+ {
+ struct net_device *ndev = aq_nic_get_ndev(self->aq_nic);
+- bool is_rsc_completed = true;
+ int err = 0;
+
+ for (; (self->sw_head != self->hw_head) && budget;
+@@ -366,6 +365,8 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
+ if (!buff->is_eop) {
+ buff_ = buff;
+ do {
++ bool is_rsc_completed = true;
++
+ if (buff_->next >= self->size) {
+ err = -EIO;
+ goto err_exit;
+@@ -377,18 +378,16 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
+ next_,
+ self->hw_head);
+
+- if (unlikely(!is_rsc_completed))
+- break;
++ if (unlikely(!is_rsc_completed)) {
++ err = 0;
++ goto err_exit;
++ }
+
+ buff->is_error |= buff_->is_error;
+ buff->is_cso_err |= buff_->is_cso_err;
+
+ } while (!buff_->is_eop);
+
+- if (!is_rsc_completed) {
+- err = 0;
+- goto err_exit;
+- }
+ if (buff->is_error ||
+ (buff->is_lro && buff->is_cso_err)) {
+ buff_ = buff;
+--
+2.35.1
+
diff --git a/queue-5.17/net-atlantic-verify-hw_head_-lies-within-tx-buffer-r.patch b/queue-5.17/net-atlantic-verify-hw_head_-lies-within-tx-buffer-r.patch
new file mode 100644
index 0000000..339e6dc
--- /dev/null
+++ b/queue-5.17/net-atlantic-verify-hw_head_-lies-within-tx-buffer-r.patch
@@ -0,0 +1,43 @@
+From b20077c1b4ac38dcd05896fc988b980c73225e1f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 9 May 2022 19:28:26 -0700
+Subject: net: atlantic: verify hw_head_ lies within TX buffer ring
+
+From: Grant Grundler <grundler@chromium.org>
+
+[ Upstream commit 2120b7f4d128433ad8c5f503a9584deba0684901 ]
+
+Bounds check hw_head index provided by NIC to verify it lies
+within the TX buffer ring.
+
+Reported-by: Aashay Shringarpure <aashay@google.com>
+Reported-by: Yi Chou <yich@google.com>
+Reported-by: Shervin Oloumi <enlightened@google.com>
+Signed-off-by: Grant Grundler <grundler@chromium.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+index d875ce3ec759..15ede7285fb5 100644
+--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
++++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+@@ -889,6 +889,13 @@ int hw_atl_b0_hw_ring_tx_head_update(struct aq_hw_s *self,
+ err = -ENXIO;
+ goto err_exit;
+ }
++
++ /* Validate that the new hw_head_ is reasonable. */
++ if (hw_head_ >= ring->size) {
++ err = -ENXIO;
++ goto err_exit;
++ }
++
+ ring->hw_head = hw_head_;
+ err = aq_hw_err_from_flags(self);
+
+--
+2.35.1
+
diff --git a/queue-5.17/net-bridge-clear-offload_fwd_mark-when-passing-frame.patch b/queue-5.17/net-bridge-clear-offload_fwd_mark-when-passing-frame.patch
new file mode 100644
index 0000000..26f64ef
--- /dev/null
+++ b/queue-5.17/net-bridge-clear-offload_fwd_mark-when-passing-frame.patch
@@ -0,0 +1,72 @@
+From b7612f970fa2ea34e1537c0e375e207f40cfdef6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 18 May 2022 02:58:40 +0200
+Subject: net: bridge: Clear offload_fwd_mark when passing frame up bridge
+ interface.
+
+From: Andrew Lunn <andrew@lunn.ch>
+
+[ Upstream commit fbb3abdf2223cd0dfc07de85fe5a43ba7f435bdf ]
+
+It is possible to stack bridges on top of each other. Consider the
+following which makes use of an Ethernet switch:
+
+ br1
+ / \
+ / \
+ / \
+ br0.11 wlan0
+ |
+ br0
+ / | \
+p1 p2 p3
+
+br0 is offloaded to the switch. Above br0 is a vlan interface, for
+vlan 11. This vlan interface is then a slave of br1. br1 also has a
+wireless interface as a slave. This setup trunks wireless lan traffic
+over the copper network inside a VLAN.
+
+A frame received on p1 which is passed up to the bridge has the
+skb->offload_fwd_mark flag set to true, indicating that the switch has
+dealt with forwarding the frame out ports p2 and p3 as needed. This
+flag instructs the software bridge it does not need to pass the frame
+back down again. However, the flag is not getting reset when the frame
+is passed upwards. As a result br1 sees the flag, wrongly interprets
+it, and fails to forward the frame to wlan0.
+
+When passing a frame upwards, clear the flag. This is the Rx
+equivalent of br_switchdev_frame_unmark() in br_dev_xmit().
+
+Fixes: f1c2eddf4cb6 ("bridge: switchdev: Use an helper to clear forward mark")
+Signed-off-by: Andrew Lunn <andrew@lunn.ch>
+Reviewed-by: Ido Schimmel <idosch@nvidia.com>
+Tested-by: Ido Schimmel <idosch@nvidia.com>
+Acked-by: Nikolay Aleksandrov <razor@blackwall.org>
+Link: https://lore.kernel.org/r/20220518005840.771575-1-andrew@lunn.ch
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/bridge/br_input.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
+index b50382f957c1..6743c8a0fe8e 100644
+--- a/net/bridge/br_input.c
++++ b/net/bridge/br_input.c
+@@ -39,6 +39,13 @@ static int br_pass_frame_up(struct sk_buff *skb)
+ dev_sw_netstats_rx_add(brdev, skb->len);
+
+ vg = br_vlan_group_rcu(br);
++
++ /* Reset the offload_fwd_mark because there could be a stacked
++	 * bridge above, and it should not think this bridge is doing
++ * that bridge's work forwarding out its ports.
++ */
++ br_switchdev_frame_unmark(skb);
++
+ /* Bridge is just like any other port. Make sure the
+ * packet is allowed except in promisc mode when someone
+ * may be running packet capture.
+--
+2.35.1
+
diff --git a/queue-5.17/net-fix-dev_fill_forward_path-with-pppoe-bridge.patch b/queue-5.17/net-fix-dev_fill_forward_path-with-pppoe-bridge.patch
new file mode 100644
index 0000000..9b47690
--- /dev/null
+++ b/queue-5.17/net-fix-dev_fill_forward_path-with-pppoe-bridge.patch
@@ -0,0 +1,69 @@
+From 74418e577755a8e710b5609b49222cfd6f31da4b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 9 May 2022 14:26:15 +0200
+Subject: net: fix dev_fill_forward_path with pppoe + bridge
+
+From: Felix Fietkau <nbd@nbd.name>
+
+[ Upstream commit cf2df74e202d81b09f09d84c2d8903e0e87e9274 ]
+
+When calling dev_fill_forward_path on a pppoe device, the provided destination
+address is invalid. In order for the bridge fdb lookup to succeed, the pppoe
+code needs to update ctx->daddr to the correct value.
+Fix this by storing the address inside struct net_device_path_ctx
+
+Fixes: f6efc675c9dd ("net: ppp: resolve forwarding path for bridge pppoe devices")
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ppp/pppoe.c | 1 +
+ include/linux/netdevice.h | 2 +-
+ net/core/dev.c | 2 +-
+ 3 files changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
+index 3619520340b7..e172743948ed 100644
+--- a/drivers/net/ppp/pppoe.c
++++ b/drivers/net/ppp/pppoe.c
+@@ -988,6 +988,7 @@ static int pppoe_fill_forward_path(struct net_device_path_ctx *ctx,
+ path->encap.proto = htons(ETH_P_PPP_SES);
+ path->encap.id = be16_to_cpu(po->num);
+ memcpy(path->encap.h_dest, po->pppoe_pa.remote, ETH_ALEN);
++ memcpy(ctx->daddr, po->pppoe_pa.remote, ETH_ALEN);
+ path->dev = ctx->dev;
+ ctx->dev = dev;
+
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index f53ea7038441..dadd4d2f6d8a 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -891,7 +891,7 @@ struct net_device_path_stack {
+
+ struct net_device_path_ctx {
+ const struct net_device *dev;
+- const u8 *daddr;
++ u8 daddr[ETH_ALEN];
+
+ int num_vlans;
+ struct {
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 91cf709c98b3..5f1ac4812277 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -663,11 +663,11 @@ int dev_fill_forward_path(const struct net_device *dev, const u8 *daddr,
+ const struct net_device *last_dev;
+ struct net_device_path_ctx ctx = {
+ .dev = dev,
+- .daddr = daddr,
+ };
+ struct net_device_path *path;
+ int ret = 0;
+
++ memcpy(ctx.daddr, daddr, sizeof(ctx.daddr));
+ stack->num_paths = 0;
+ while (ctx.dev && ctx.dev->netdev_ops->ndo_fill_forward_path) {
+ last_dev = ctx.dev;
+--
+2.35.1
+
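
At its core the fix above is a data-structure change: the path-lookup context owns a copy of the destination address instead of borrowing the caller's pointer, so an intermediate layer (pppoe here) can rewrite it for the lookups below. A hedged standalone sketch of the two shapes, with hypothetical names rather than the real netdevice API:

#include <stdio.h>
#include <string.h>

#define ADDR_LEN 6

/* Before: the context only borrows the caller's buffer; a layer that
 * needs a different daddr for the next lookup has nowhere to put it.
 */
struct path_ctx_borrowed {
	const unsigned char *daddr;
};

/* After: the context owns a copy, so each resolver step may overwrite
 * it (as the pppoe handler does with the session's remote MAC).
 */
struct path_ctx_owned {
	unsigned char daddr[ADDR_LEN];
};

static void pppoe_step(struct path_ctx_owned *ctx,
		       const unsigned char *remote_mac)
{
	/* rewrite the lookup address for the bridge fdb step below */
	memcpy(ctx->daddr, remote_mac, ADDR_LEN);
}

int main(void)
{
	const unsigned char caller_daddr[ADDR_LEN] = { 1, 2, 3, 4, 5, 6 };
	const unsigned char remote_mac[ADDR_LEN]   = { 9, 9, 9, 9, 9, 9 };
	struct path_ctx_owned ctx;

	memcpy(ctx.daddr, caller_daddr, sizeof(ctx.daddr));
	pppoe_step(&ctx, remote_mac);
	printf("fdb lookup now uses %02x:...\n", ctx.daddr[0]);	/* 09 */
	return 0;
}
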
diff --git a/queue-5.17/net-fix-wrong-network-header-length.patch b/queue-5.17/net-fix-wrong-network-header-length.patch
new file mode 100644
index 0000000..89b4bcd
--- /dev/null
+++ b/queue-5.17/net-fix-wrong-network-header-length.patch
@@ -0,0 +1,71 @@
+From f054b20cd89fcc8876a786b8567f6e21078070d9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 5 May 2022 13:48:49 +0800
+Subject: net: fix wrong network header length
+
+From: Lina Wang <lina.wang@mediatek.com>
+
+[ Upstream commit cf3ab8d4a797960b4be20565abb3bcd227b18a68 ]
+
+When clatd starts with eBPF offloading and NETIF_F_GRO_FRAGLIST is enabled,
+several skbs are gathered in skb_shinfo(skb)->frag_list. The first skb's
+IPv6 header is changed to IPv4 after bpf_skb_proto_6_to_4, and its
+network_header/transport_header/mac_header are updated accordingly,
+but the other skbs in frag_list are not updated at all and stay IPv6 packets.
+
+udp_queue_rcv_skb will call skb_segment_list to traverse the other skbs in
+frag_list and make sure the right UDP payload is delivered to user space.
+Unfortunately, the other skbs in frag_list, which are still IPv6 packets,
+are updated like the first skb and end up with a wrong transport header length.
+
+E.g. before bpf_skb_proto_6_to_4, the first skb and the other skbs in frag_list
+have the same network_header (24) and transport_header (64); after
+bpf_skb_proto_6_to_4 the IPv6 protocol has been changed to IPv4, so the first
+skb's network_header is 44 and transport_header is 64, while the other skbs in
+frag_list did not change. After skb_segment_list, the other skbs in frag_list
+have a different network_header (24) and transport_header (44), so there is a
+20 byte difference from the original, i.e. the difference between the IPv6 and
+IPv4 header lengths. Just change transport_header to be the same as the original.
+
+Actually, there are two ways to fix it: one is traversing all skbs and
+changing every skb header in bpf_skb_proto_6_to_4, the other is modifying
+the frag_list skbs' headers in skb_segment_list. Considering efficiency,
+adopt the second one - when the first skb and the other skbs in frag_list
+have different network_header lengths, restore them to make sure the right
+UDP payload is delivered to user space.
+
+Signed-off-by: Lina Wang <lina.wang@mediatek.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/core/skbuff.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index 180fa6a26ad4..708cc9b1b176 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -3896,7 +3896,7 @@ struct sk_buff *skb_segment_list(struct sk_buff *skb,
+ unsigned int delta_len = 0;
+ struct sk_buff *tail = NULL;
+ struct sk_buff *nskb, *tmp;
+- int err;
++ int len_diff, err;
+
+ skb_push(skb, -skb_network_offset(skb) + offset);
+
+@@ -3936,9 +3936,11 @@ struct sk_buff *skb_segment_list(struct sk_buff *skb,
+ skb_push(nskb, -skb_network_offset(nskb) + offset);
+
+ skb_release_head_state(nskb);
++ len_diff = skb_network_header_len(nskb) - skb_network_header_len(skb);
+ __copy_skb_header(nskb, skb);
+
+ skb_headers_offset_update(nskb, skb_headroom(nskb) - skb_headroom(skb));
++ nskb->transport_header += len_diff;
+ skb_copy_from_linear_data_offset(skb, -tnl_hlen,
+ nskb->data - tnl_hlen,
+ offset + tnl_hlen);
+--
+2.35.1
+
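
The 20-byte discrepancy in the message above maps directly onto the len_diff adjustment the patch applies. A tiny standalone sketch of that arithmetic, using the illustrative offsets from the commit message (not kernel code):

#include <stdio.h>

int main(void)
{
	/* The head skb was translated to IPv4 (20-byte network header);
	 * the frag_list skb is still IPv6 (40-byte network header).
	 */
	int frag_net_hdr_len = 40;	/* IPv6 */
	int head_net_hdr_len = 20;	/* IPv4 */

	/* After skb_segment_list the fragment inherited the head's layout:
	 * network_header = 24, transport_header = 44 -- but its own IPv6
	 * header really ends at 24 + 40 = 64.
	 */
	int frag_network_header = 24;
	int frag_transport_header = 44;

	int len_diff = frag_net_hdr_len - head_net_hdr_len;	/* 20 */

	frag_transport_header += len_diff;
	printf("transport header restored to %d (== %d + %d)\n",
	       frag_transport_header, frag_network_header, frag_net_hdr_len);
	return 0;
}
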
diff --git a/queue-5.17/net-ipa-certain-dropped-packets-aren-t-accounted-for.patch b/queue-5.17/net-ipa-certain-dropped-packets-aren-t-accounted-for.patch
new file mode 100644
index 0000000..49752fc
--- /dev/null
+++ b/queue-5.17/net-ipa-certain-dropped-packets-aren-t-accounted-for.patch
@@ -0,0 +1,56 @@
+From 7feaa42b0b802e9f0b50eb77ae0bab95559a2b57 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 12 May 2022 10:10:31 -0500
+Subject: net: ipa: certain dropped packets aren't accounted for
+
+From: Alex Elder <elder@linaro.org>
+
+[ Upstream commit 30b338ff7998b6ed7a90815870cd5db725f87168 ]
+
+If an RX endpoint receives packets containing status headers, and a
+packet in the buffer is not dropped, ipa_endpoint_skb_copy() is
+responsible for wrapping the packet data in an SKB and forwarding it
+to ipa_modem_skb_rx() for further processing.
+
+If ipa_endpoint_skb_copy() gets a null pointer from build_skb(), it
+just returns early. But in the process it doesn't record that as a
+dropped packet in the network device statistics.
+
+Instead, call ipa_modem_skb_rx() whether or not the SKB pointer is
+NULL; that function ensures the statistics are properly updated.
+
+Fixes: 1b65bbcc9a710 ("net: ipa: skip SKB copy if no netdev")
+Signed-off-by: Alex Elder <elder@linaro.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ipa/ipa_endpoint.c | 13 ++++++-------
+ 1 file changed, 6 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c
+index 68291a3efd04..2ecfc17544a6 100644
+--- a/drivers/net/ipa/ipa_endpoint.c
++++ b/drivers/net/ipa/ipa_endpoint.c
+@@ -1169,13 +1169,12 @@ static void ipa_endpoint_skb_copy(struct ipa_endpoint *endpoint,
+ return;
+
+ skb = __dev_alloc_skb(len, GFP_ATOMIC);
+- if (!skb)
+- return;
+-
+- /* Copy the data into the socket buffer and receive it */
+- skb_put(skb, len);
+- memcpy(skb->data, data, len);
+- skb->truesize += extra;
++ if (skb) {
++ /* Copy the data into the socket buffer and receive it */
++ skb_put(skb, len);
++ memcpy(skb->data, data, len);
++ skb->truesize += extra;
++ }
+
+ ipa_modem_skb_rx(endpoint->netdev, skb);
+ }
+--
+2.35.1
+
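
The shape of the fix above is to funnel both the success path and the allocation-failure path through the single helper that updates the statistics, instead of returning early and losing the drop count. A standalone sketch of that pattern with hypothetical names (not the ipa API):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct stats {
	unsigned long rx_packets;
	unsigned long rx_dropped;
};

/* Single sink: counts a delivery when buf != NULL, a drop otherwise. */
static void receive_or_drop(struct stats *st, void *buf)
{
	if (buf) {
		st->rx_packets++;
		free(buf);
	} else {
		st->rx_dropped++;
	}
}

static void rx_copy(struct stats *st, const void *data, size_t len)
{
	void *buf = malloc(len);

	if (buf)
		memcpy(buf, data, len);

	/* Call the sink either way so a failed allocation is still
	 * accounted for, instead of returning early and losing the drop.
	 */
	receive_or_drop(st, buf);
}

int main(void)
{
	struct stats st = { 0 };
	char payload[16] = "hello";

	rx_copy(&st, payload, sizeof(payload));
	printf("rx=%lu dropped=%lu\n", st.rx_packets, st.rx_dropped);
	return 0;
}
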
diff --git a/queue-5.17/net-ipa-record-proper-rx-transaction-count.patch b/queue-5.17/net-ipa-record-proper-rx-transaction-count.patch
new file mode 100644
index 0000000..1faa6a5
--- /dev/null
+++ b/queue-5.17/net-ipa-record-proper-rx-transaction-count.patch
@@ -0,0 +1,61 @@
+From c8235b53fc4275203fed3fd21879d904f70650bc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 12 May 2022 10:10:32 -0500
+Subject: net: ipa: record proper RX transaction count
+
+From: Alex Elder <elder@linaro.org>
+
+[ Upstream commit d8290cbe1111105f92f0c8ab455bec8bf98d0630 ]
+
+Each time we are notified that some number of transactions on an RX
+channel has completed, we record the number of bytes that have been
+transferred since the previous notification. We also track the
+number of transactions completed, but that is not currently being
+calculated correctly; we're currently counting the number of such
+notifications, but each notification can represent many transaction
+completions. Fix this.
+
+Fixes: 650d1603825d8 ("soc: qcom: ipa: the generic software interface")
+Signed-off-by: Alex Elder <elder@linaro.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ipa/gsi.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/ipa/gsi.c b/drivers/net/ipa/gsi.c
+index bc981043cc80..a701178a1d13 100644
+--- a/drivers/net/ipa/gsi.c
++++ b/drivers/net/ipa/gsi.c
+@@ -1367,9 +1367,10 @@ static void gsi_evt_ring_rx_update(struct gsi_evt_ring *evt_ring, u32 index)
+ struct gsi_event *event_done;
+ struct gsi_event *event;
+ struct gsi_trans *trans;
++ u32 trans_count = 0;
+ u32 byte_count = 0;
+- u32 old_index;
+ u32 event_avail;
++ u32 old_index;
+
+ trans_info = &channel->trans_info;
+
+@@ -1390,6 +1391,7 @@ static void gsi_evt_ring_rx_update(struct gsi_evt_ring *evt_ring, u32 index)
+ do {
+ trans->len = __le16_to_cpu(event->len);
+ byte_count += trans->len;
++ trans_count++;
+
+ /* Move on to the next event and transaction */
+ if (--event_avail)
+@@ -1401,7 +1403,7 @@ static void gsi_evt_ring_rx_update(struct gsi_evt_ring *evt_ring, u32 index)
+
+ /* We record RX bytes when they are received */
+ channel->byte_count += byte_count;
+- channel->trans_count++;
++ channel->trans_count += trans_count;
+ }
+
+ /* Initialize a ring, including allocating DMA memory for its entries */
+--
+2.35.1
+
diff --git a/queue-5.17/net-lan966x-fix-assignment-of-the-mac-address.patch b/queue-5.17/net-lan966x-fix-assignment-of-the-mac-address.patch
new file mode 100644
index 0000000..4a5073c
--- /dev/null
+++ b/queue-5.17/net-lan966x-fix-assignment-of-the-mac-address.patch
@@ -0,0 +1,90 @@
+From 23ddbd55c2bd2ea8e4dce92db9f6ad661debf92a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 13 May 2022 20:00:30 +0200
+Subject: net: lan966x: Fix assignment of the MAC address
+
+From: Horatiu Vultur <horatiu.vultur@microchip.com>
+
+[ Upstream commit af8ca6eaa9b24a90484218e356f959a94bff22fa ]
+
+The following two scenarios were failing for lan966x.
+1. If the port had address X and the same address was then assigned
+   again, the HW simply removed this address, because it first tries to
+   learn the new address and then deletes the old one. As they are the
+   same, the HW removed it.
+2. If port eth0 was assigned the same address as one of the other
+   ports, e.g. eth1, then when assigning that address back to eth0 the
+   HW was deleting the address of eth1.
+
+Case 1 is fixed by checking whether the port already has the same
+address, while case 2 is fixed by checking whether the address is used
+by any other port.
+
+Fixes: e18aba8941b40b ("net: lan966x: add mactable support")
+Signed-off-by: Horatiu Vultur <horatiu.vultur@microchip.com>
+Link: https://lore.kernel.org/r/20220513180030.3076793-1-horatiu.vultur@microchip.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../ethernet/microchip/lan966x/lan966x_main.c | 28 +++++++++++++++++++
+ 1 file changed, 28 insertions(+)
+
+diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
+index 1f60fd125a1d..fee148bbf13e 100644
+--- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
++++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
+@@ -100,6 +100,24 @@ static int lan966x_create_targets(struct platform_device *pdev,
+ return 0;
+ }
+
++static bool lan966x_port_unique_address(struct net_device *dev)
++{
++ struct lan966x_port *port = netdev_priv(dev);
++ struct lan966x *lan966x = port->lan966x;
++ int p;
++
++ for (p = 0; p < lan966x->num_phys_ports; ++p) {
++ port = lan966x->ports[p];
++ if (!port || port->dev == dev)
++ continue;
++
++ if (ether_addr_equal(dev->dev_addr, port->dev->dev_addr))
++ return false;
++ }
++
++ return true;
++}
++
+ static int lan966x_port_set_mac_address(struct net_device *dev, void *p)
+ {
+ struct lan966x_port *port = netdev_priv(dev);
+@@ -107,16 +125,26 @@ static int lan966x_port_set_mac_address(struct net_device *dev, void *p)
+ const struct sockaddr *addr = p;
+ int ret;
+
++ if (ether_addr_equal(addr->sa_data, dev->dev_addr))
++ return 0;
++
+ /* Learn the new net device MAC address in the mac table. */
+ ret = lan966x_mac_cpu_learn(lan966x, addr->sa_data, HOST_PVID);
+ if (ret)
+ return ret;
+
++ /* If there is another port with the same address as the dev, then don't
++ * delete it from the MAC table
++ */
++ if (!lan966x_port_unique_address(dev))
++ goto out;
++
+ /* Then forget the previous one. */
+ ret = lan966x_mac_cpu_forget(lan966x, dev->dev_addr, HOST_PVID);
+ if (ret)
+ return ret;
+
++out:
+ eth_hw_addr_set(dev, addr->sa_data);
+ return ret;
+ }
+--
+2.35.1
+
diff --git a/queue-5.17/net-macb-increment-rx-bd-head-after-allocating-skb-a.patch b/queue-5.17/net-macb-increment-rx-bd-head-after-allocating-skb-a.patch
new file mode 100644
index 0000000..7bf2fb2
--- /dev/null
+++ b/queue-5.17/net-macb-increment-rx-bd-head-after-allocating-skb-a.patch
@@ -0,0 +1,53 @@
+From 5d75c47d7a8d39457c7a9046ed17eaefb8b676c5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 12 May 2022 22:49:00 +0530
+Subject: net: macb: Increment rx bd head after allocating skb and buffer
+
+From: Harini Katakam <harini.katakam@xilinx.com>
+
+[ Upstream commit 9500acc631dbb8b73166e25700e656b11f6007b6 ]
+
+In gem_rx_refill rx_prepared_head is incremented at the beginning of
+the while loop preparing the skb and data buffers. If the skb or data
+buffer allocation fails, this BD will remain unusable until the head
+loops back to the same BD (and buffer allocation obviously succeeds).
+In the unlikely event that there's a string of allocation failures,
+there will be an equal number of unusable BDs and an inconsistent RX
+BD chain. Hence increment the head at the end of the while loop to be
+clean.
+
+Fixes: 4df95131ea80 ("net/macb: change RX path for GEM")
+Signed-off-by: Harini Katakam <harini.katakam@xilinx.com>
+Signed-off-by: Michal Simek <michal.simek@xilinx.com>
+Signed-off-by: Radhey Shyam Pandey <radhey.shyam.pandey@xilinx.com>
+Reviewed-by: Claudiu Beznea <claudiu.beznea@microchip.com>
+Link: https://lore.kernel.org/r/20220512171900.32593-1-harini.katakam@xilinx.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/cadence/macb_main.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
+index c4f4b13ac469..c1100af5666b 100644
+--- a/drivers/net/ethernet/cadence/macb_main.c
++++ b/drivers/net/ethernet/cadence/macb_main.c
+@@ -1217,7 +1217,6 @@ static void gem_rx_refill(struct macb_queue *queue)
+ /* Make hw descriptor updates visible to CPU */
+ rmb();
+
+- queue->rx_prepared_head++;
+ desc = macb_rx_desc(queue, entry);
+
+ if (!queue->rx_skbuff[entry]) {
+@@ -1256,6 +1255,7 @@ static void gem_rx_refill(struct macb_queue *queue)
+ dma_wmb();
+ desc->addr &= ~MACB_BIT(RX_USED);
+ }
++ queue->rx_prepared_head++;
+ }
+
+ /* Make descriptor updates visible to hardware */
+--
+2.35.1
+
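
The refill ordering restored above - consume a ring slot only once its buffer is actually in place, so a failed allocation is simply retried on the next pass - can be sketched outside the driver. Hypothetical names, standalone C, little more than the loop skeleton:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define RING_SIZE 8

struct rx_ring {
	void *buf[RING_SIZE];
	unsigned int prepared_head;	/* next slot to (re)fill */
	unsigned int tail;		/* next slot hardware will use */
};

/* Refill empty slots.  The head is advanced only after the slot has a
 * buffer, so an allocation failure leaves a slot that will simply be
 * retried on the next refill pass instead of becoming unusable.
 */
static void ring_refill(struct rx_ring *r, bool (*alloc_ok)(void **buf))
{
	while (r->prepared_head - r->tail < RING_SIZE) {
		unsigned int entry = r->prepared_head % RING_SIZE;

		if (!r->buf[entry] && !alloc_ok(&r->buf[entry]))
			break;			/* retry this slot later */

		r->prepared_head++;		/* slot is ready: consume it */
	}
}

static bool flaky_alloc(void **buf)
{
	static int calls;

	if (++calls == 3)		/* simulate one allocation failure */
		return false;
	*buf = malloc(64);
	return *buf != NULL;
}

int main(void)
{
	struct rx_ring r = { 0 };

	ring_refill(&r, flaky_alloc);
	printf("prepared %u of %u slots\n", r.prepared_head, RING_SIZE);
	ring_refill(&r, flaky_alloc);	/* picks up where it left off */
	printf("prepared %u of %u slots\n", r.prepared_head, RING_SIZE);
	return 0;
}
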
diff --git a/queue-5.17/net-mlx5-dr-fix-missing-flow_source-when-creating-mu.patch b/queue-5.17/net-mlx5-dr-fix-missing-flow_source-when-creating-mu.patch
new file mode 100644
index 0000000..75603b3
--- /dev/null
+++ b/queue-5.17/net-mlx5-dr-fix-missing-flow_source-when-creating-mu.patch
@@ -0,0 +1,130 @@
+From 5a73835859bf34f79392b7753dd4e5d9f328e3fa Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 21 Mar 2022 10:07:44 +0200
+Subject: net/mlx5: DR, Fix missing flow_source when creating multi-destination
+ FW table
+
+From: Maor Dickman <maord@nvidia.com>
+
+[ Upstream commit 2c5fc6cd269ad3476da99dad02521d2af4a8e906 ]
+
+In order to support multiple destination FTEs with SW steering,
+a FW table is created with a single FTE with multiple actions, and
+the SW steering rule forwards to it. When creating this table, the flow
+source isn't set according to the original FTE.
+
+Fix this by passing the original FTE flow source to the created
+FW table.
+
+Fixes: 34583beea4b7 ("net/mlx5: DR, Create multi-destination table for SW-steering use")
+Signed-off-by: Maor Dickman <maord@nvidia.com>
+Reviewed-by: Yevgeny Kliteynik <kliteyn@nvidia.com>
+Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../net/ethernet/mellanox/mlx5/core/steering/dr_action.c | 6 ++++--
+ drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c | 4 +++-
+ drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h | 3 ++-
+ drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c | 4 +++-
+ drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h | 3 ++-
+ 5 files changed, 14 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
+index c61a5e83c78c..5d1caf97a8fc 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
+@@ -847,7 +847,8 @@ struct mlx5dr_action *
+ mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn,
+ struct mlx5dr_action_dest *dests,
+ u32 num_of_dests,
+- bool ignore_flow_level)
++ bool ignore_flow_level,
++ u32 flow_source)
+ {
+ struct mlx5dr_cmd_flow_destination_hw_info *hw_dests;
+ struct mlx5dr_action **ref_actions;
+@@ -919,7 +920,8 @@ mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn,
+ reformat_req,
+ &action->dest_tbl->fw_tbl.id,
+ &action->dest_tbl->fw_tbl.group_id,
+- ignore_flow_level);
++ ignore_flow_level,
++ flow_source);
+ if (ret)
+ goto free_action;
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c
+index 68a4c32d5f34..f05ef0cd54ba 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c
+@@ -104,7 +104,8 @@ int mlx5dr_fw_create_md_tbl(struct mlx5dr_domain *dmn,
+ bool reformat_req,
+ u32 *tbl_id,
+ u32 *group_id,
+- bool ignore_flow_level)
++ bool ignore_flow_level,
++ u32 flow_source)
+ {
+ struct mlx5dr_cmd_create_flow_table_attr ft_attr = {};
+ struct mlx5dr_cmd_fte_info fte_info = {};
+@@ -139,6 +140,7 @@ int mlx5dr_fw_create_md_tbl(struct mlx5dr_domain *dmn,
+ fte_info.val = val;
+ fte_info.dest_arr = dest;
+ fte_info.ignore_flow_level = ignore_flow_level;
++ fte_info.flow_context.flow_source = flow_source;
+
+ ret = mlx5dr_cmd_set_fte(dmn->mdev, 0, 0, &ft_info, *group_id, &fte_info);
+ if (ret) {
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
+index 55fcb751e24a..64f41e7938e1 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
+@@ -1463,7 +1463,8 @@ int mlx5dr_fw_create_md_tbl(struct mlx5dr_domain *dmn,
+ bool reformat_req,
+ u32 *tbl_id,
+ u32 *group_id,
+- bool ignore_flow_level);
++ bool ignore_flow_level,
++ u32 flow_source);
+ void mlx5dr_fw_destroy_md_tbl(struct mlx5dr_domain *dmn, u32 tbl_id,
+ u32 group_id);
+ #endif /* _DR_TYPES_H_ */
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
+index 3f311462bedf..05393fe11132 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
+@@ -520,6 +520,7 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
+ } else if (num_term_actions > 1) {
+ bool ignore_flow_level =
+ !!(fte->action.flags & FLOW_ACT_IGNORE_FLOW_LEVEL);
++ u32 flow_source = fte->flow_context.flow_source;
+
+ if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX ||
+ fs_dr_num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
+@@ -529,7 +530,8 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
+ tmp_action = mlx5dr_action_create_mult_dest_tbl(domain,
+ term_actions,
+ num_term_actions,
+- ignore_flow_level);
++ ignore_flow_level,
++ flow_source);
+ if (!tmp_action) {
+ err = -EOPNOTSUPP;
+ goto free_actions;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
+index dfa223415fe2..74a7a2f4d50d 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
+@@ -96,7 +96,8 @@ struct mlx5dr_action *
+ mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn,
+ struct mlx5dr_action_dest *dests,
+ u32 num_of_dests,
+- bool ignore_flow_level);
++ bool ignore_flow_level,
++ u32 flow_source);
+
+ struct mlx5dr_action *mlx5dr_action_create_drop(void);
+
+--
+2.35.1
+
diff --git a/queue-5.17/net-mlx5-dr-ignore-modify-ttl-on-rx-if-device-doesn-.patch b/queue-5.17/net-mlx5-dr-ignore-modify-ttl-on-rx-if-device-doesn-.patch
new file mode 100644
index 0000000..47ea6b4
--- /dev/null
+++ b/queue-5.17/net-mlx5-dr-ignore-modify-ttl-on-rx-if-device-doesn-.patch
@@ -0,0 +1,176 @@
+From 7d98679d33534ae54f697dbc34901f04c04108db Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 3 Apr 2022 23:18:10 +0300
+Subject: net/mlx5: DR, Ignore modify TTL on RX if device doesn't support it
+
+From: Yevgeny Kliteynik <kliteyn@nvidia.com>
+
+[ Upstream commit 785d7ed295513bd3374095304b7034fd65c123b0 ]
+
+When modifying TTL, the packet's csum has to be recalculated.
+Due to a HW issue in ConnectX-5, csum recalculation for modify
+TTL on RX is supported through a work-around that is specifically
+enabled by configuration.
+If the work-around isn't enabled, rather than adding an unsupported
+action, the modify TTL action on RX should be ignored.
+Ignoring the modify TTL action might result in zero actions, so in such
+cases we will not convert the match STE to a modify STE, as is done
+by FW in DMFS.
+
+This patch fixes an issue where the modify TTL action was ignored both
+on RX and TX instead of only on RX.
+
+Fixes: 4ff725e1d4ad ("net/mlx5: DR, Ignore modify TTL if device doesn't support it")
+Signed-off-by: Yevgeny Kliteynik <kliteyn@nvidia.com>
+Reviewed-by: Alex Vesker <valex@nvidia.com>
+Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../mellanox/mlx5/core/steering/dr_action.c | 65 +++++++++++++------
+ .../mellanox/mlx5/core/steering/dr_ste_v0.c | 4 +-
+ 2 files changed, 48 insertions(+), 21 deletions(-)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
+index 5d1caf97a8fc..8622af6d6bf8 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
+@@ -530,6 +530,37 @@ static int dr_action_handle_cs_recalc(struct mlx5dr_domain *dmn,
+ return 0;
+ }
+
++static void dr_action_modify_ttl_adjust(struct mlx5dr_domain *dmn,
++ struct mlx5dr_ste_actions_attr *attr,
++ bool rx_rule,
++ bool *recalc_cs_required)
++{
++ *recalc_cs_required = false;
++
++ /* if device supports csum recalculation - no adjustment needed */
++ if (mlx5dr_ste_supp_ttl_cs_recalc(&dmn->info.caps))
++ return;
++
++ /* no adjustment needed on TX rules */
++ if (!rx_rule)
++ return;
++
++ if (!MLX5_CAP_ESW_FLOWTABLE(dmn->mdev, fdb_ipv4_ttl_modify)) {
++ /* Ignore the modify TTL action.
++ * It is always kept as last HW action.
++ */
++ attr->modify_actions--;
++ return;
++ }
++
++ if (dmn->type == MLX5DR_DOMAIN_TYPE_FDB)
++ /* Due to a HW bug on some devices, modifying TTL on RX flows
++ * will cause an incorrect checksum calculation. In such cases
++ * we will use a FW table to recalculate the checksum.
++ */
++ *recalc_cs_required = true;
++}
++
+ static void dr_action_print_sequence(struct mlx5dr_domain *dmn,
+ struct mlx5dr_action *actions[],
+ int last_idx)
+@@ -649,8 +680,9 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
+ case DR_ACTION_TYP_MODIFY_HDR:
+ attr.modify_index = action->rewrite->index;
+ attr.modify_actions = action->rewrite->num_of_actions;
+- recalc_cs_required = action->rewrite->modify_ttl &&
+- !mlx5dr_ste_supp_ttl_cs_recalc(&dmn->info.caps);
++ if (action->rewrite->modify_ttl)
++ dr_action_modify_ttl_adjust(dmn, &attr, rx_rule,
++ &recalc_cs_required);
+ break;
+ case DR_ACTION_TYP_L2_TO_TNL_L2:
+ case DR_ACTION_TYP_L2_TO_TNL_L3:
+@@ -737,12 +769,7 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
+ *new_hw_ste_arr_sz = nic_matcher->num_of_builders;
+ last_ste = ste_arr + DR_STE_SIZE * (nic_matcher->num_of_builders - 1);
+
+- /* Due to a HW bug in some devices, modifying TTL on RX flows will
+- * cause an incorrect checksum calculation. In this case we will
+- * use a FW table to recalculate.
+- */
+- if (dmn->type == MLX5DR_DOMAIN_TYPE_FDB &&
+- rx_rule && recalc_cs_required && dest_action) {
++ if (recalc_cs_required && dest_action) {
+ ret = dr_action_handle_cs_recalc(dmn, dest_action, &attr.final_icm_addr);
+ if (ret) {
+ mlx5dr_err(dmn,
+@@ -1562,12 +1589,6 @@ dr_action_modify_check_is_ttl_modify(const void *sw_action)
+ return sw_field == MLX5_ACTION_IN_FIELD_OUT_IP_TTL;
+ }
+
+-static bool dr_action_modify_ttl_ignore(struct mlx5dr_domain *dmn)
+-{
+- return !mlx5dr_ste_supp_ttl_cs_recalc(&dmn->info.caps) &&
+- !MLX5_CAP_ESW_FLOWTABLE(dmn->mdev, fdb_ipv4_ttl_modify);
+-}
+-
+ static int dr_actions_convert_modify_header(struct mlx5dr_action *action,
+ u32 max_hw_actions,
+ u32 num_sw_actions,
+@@ -1579,6 +1600,7 @@ static int dr_actions_convert_modify_header(struct mlx5dr_action *action,
+ const struct mlx5dr_ste_action_modify_field *hw_dst_action_info;
+ const struct mlx5dr_ste_action_modify_field *hw_src_action_info;
+ struct mlx5dr_domain *dmn = action->rewrite->dmn;
++ __be64 *modify_ttl_sw_action = NULL;
+ int ret, i, hw_idx = 0;
+ __be64 *sw_action;
+ __be64 hw_action;
+@@ -1591,8 +1613,14 @@ static int dr_actions_convert_modify_header(struct mlx5dr_action *action,
+ action->rewrite->allow_rx = 1;
+ action->rewrite->allow_tx = 1;
+
+- for (i = 0; i < num_sw_actions; i++) {
+- sw_action = &sw_actions[i];
++ for (i = 0; i < num_sw_actions || modify_ttl_sw_action; i++) {
++ /* modify TTL is handled separately, as a last action */
++ if (i == num_sw_actions) {
++ sw_action = modify_ttl_sw_action;
++ modify_ttl_sw_action = NULL;
++ } else {
++ sw_action = &sw_actions[i];
++ }
+
+ ret = dr_action_modify_check_field_limitation(action,
+ sw_action);
+@@ -1601,10 +1629,9 @@ static int dr_actions_convert_modify_header(struct mlx5dr_action *action,
+
+ if (!(*modify_ttl) &&
+ dr_action_modify_check_is_ttl_modify(sw_action)) {
+- if (dr_action_modify_ttl_ignore(dmn))
+- continue;
+-
++ modify_ttl_sw_action = sw_action;
+ *modify_ttl = true;
++ continue;
+ }
+
+ /* Convert SW action to HW action */
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c
+index 2d62950f7a29..134c8484c901 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c
+@@ -419,7 +419,7 @@ dr_ste_v0_set_actions_tx(struct mlx5dr_domain *dmn,
+ * encapsulation. The reason for that is that we support
+ * modify headers for outer headers only
+ */
+- if (action_type_set[DR_ACTION_TYP_MODIFY_HDR]) {
++ if (action_type_set[DR_ACTION_TYP_MODIFY_HDR] && attr->modify_actions) {
+ dr_ste_v0_set_entry_type(last_ste, DR_STE_TYPE_MODIFY_PKT);
+ dr_ste_v0_set_rewrite_actions(last_ste,
+ attr->modify_actions,
+@@ -511,7 +511,7 @@ dr_ste_v0_set_actions_rx(struct mlx5dr_domain *dmn,
+ }
+ }
+
+- if (action_type_set[DR_ACTION_TYP_MODIFY_HDR]) {
++ if (action_type_set[DR_ACTION_TYP_MODIFY_HDR] && attr->modify_actions) {
+ if (dr_ste_v0_get_entry_type(last_ste) == DR_STE_TYPE_MODIFY_PKT)
+ dr_ste_v0_arr_init_next(&last_ste,
+ added_stes,
+--
+2.35.1
+
diff --git a/queue-5.17/net-mlx5-drain-fw_reset-when-removing-device.patch b/queue-5.17/net-mlx5-drain-fw_reset-when-removing-device.patch
new file mode 100644
index 0000000..9d903c4
--- /dev/null
+++ b/queue-5.17/net-mlx5-drain-fw_reset-when-removing-device.patch
@@ -0,0 +1,128 @@
+From 599bfae0c914e96ca4099df361d8342b9c7987b2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 4 Apr 2022 10:47:36 +0300
+Subject: net/mlx5: Drain fw_reset when removing device
+
+From: Shay Drory <shayd@nvidia.com>
+
+[ Upstream commit 16d42d313350946f4b9a8b74a13c99f0461a6572 ]
+
+In case fw sync reset is called in parallel to device removal, the device
+might get stuck in the following deadlock:
+ CPU 0 CPU 1
+ ----- -----
+ remove_one
+ uninit_one (locks intf_state_mutex)
+mlx5_sync_reset_now_event()
+work in fw_reset->wq.
+ mlx5_enter_error_state()
+ mutex_lock (intf_state_mutex)
+ cleanup_once
+ fw_reset_cleanup()
+ destroy_workqueue(fw_reset->wq)
+
+Drain the fw_reset WQ, and make sure no new work is being queued, before
+entering uninit_one().
+The drain is done before devlink_unregister() since fw_reset, in some
+flows, uses the devlink API devlink_remote_reload_actions_performed().
+
+Fixes: 38b9f903f22b ("net/mlx5: Handle sync reset request event")
+Signed-off-by: Shay Drory <shayd@nvidia.com>
+Reviewed-by: Moshe Shemesh <moshe@nvidia.com>
+Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../ethernet/mellanox/mlx5/core/fw_reset.c | 25 ++++++++++++++++---
+ .../ethernet/mellanox/mlx5/core/fw_reset.h | 1 +
+ .../net/ethernet/mellanox/mlx5/core/main.c | 4 +++
+ 3 files changed, 27 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
+index 862f5b7cb210..1c771287bee5 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
+@@ -8,7 +8,8 @@
+ enum {
+ MLX5_FW_RESET_FLAGS_RESET_REQUESTED,
+ MLX5_FW_RESET_FLAGS_NACK_RESET_REQUEST,
+- MLX5_FW_RESET_FLAGS_PENDING_COMP
++ MLX5_FW_RESET_FLAGS_PENDING_COMP,
++ MLX5_FW_RESET_FLAGS_DROP_NEW_REQUESTS
+ };
+
+ struct mlx5_fw_reset {
+@@ -165,7 +166,10 @@ static void poll_sync_reset(struct timer_list *t)
+
+ if (fatal_error) {
+ mlx5_core_warn(dev, "Got Device Reset\n");
+- queue_work(fw_reset->wq, &fw_reset->reset_reload_work);
++ if (!test_bit(MLX5_FW_RESET_FLAGS_DROP_NEW_REQUESTS, &fw_reset->reset_flags))
++ queue_work(fw_reset->wq, &fw_reset->reset_reload_work);
++ else
++ mlx5_core_err(dev, "Device is being removed, Drop new reset work\n");
+ return;
+ }
+
+@@ -390,9 +394,12 @@ static int fw_reset_event_notifier(struct notifier_block *nb, unsigned long acti
+ struct mlx5_fw_reset *fw_reset = mlx5_nb_cof(nb, struct mlx5_fw_reset, nb);
+ struct mlx5_eqe *eqe = data;
+
++ if (test_bit(MLX5_FW_RESET_FLAGS_DROP_NEW_REQUESTS, &fw_reset->reset_flags))
++ return NOTIFY_DONE;
++
+ switch (eqe->sub_type) {
+ case MLX5_GENERAL_SUBTYPE_FW_LIVE_PATCH_EVENT:
+- queue_work(fw_reset->wq, &fw_reset->fw_live_patch_work);
++ queue_work(fw_reset->wq, &fw_reset->fw_live_patch_work);
+ break;
+ case MLX5_GENERAL_SUBTYPE_PCI_SYNC_FOR_FW_UPDATE_EVENT:
+ mlx5_sync_reset_events_handle(fw_reset, eqe);
+@@ -436,6 +443,18 @@ void mlx5_fw_reset_events_stop(struct mlx5_core_dev *dev)
+ mlx5_eq_notifier_unregister(dev, &dev->priv.fw_reset->nb);
+ }
+
++void mlx5_drain_fw_reset(struct mlx5_core_dev *dev)
++{
++ struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
++
++ set_bit(MLX5_FW_RESET_FLAGS_DROP_NEW_REQUESTS, &fw_reset->reset_flags);
++ cancel_work_sync(&fw_reset->fw_live_patch_work);
++ cancel_work_sync(&fw_reset->reset_request_work);
++ cancel_work_sync(&fw_reset->reset_reload_work);
++ cancel_work_sync(&fw_reset->reset_now_work);
++ cancel_work_sync(&fw_reset->reset_abort_work);
++}
++
+ int mlx5_fw_reset_init(struct mlx5_core_dev *dev)
+ {
+ struct mlx5_fw_reset *fw_reset = kzalloc(sizeof(*fw_reset), GFP_KERNEL);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h
+index 7761ee5fc7d0..372046e173e7 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h
+@@ -15,6 +15,7 @@ int mlx5_fw_reset_set_live_patch(struct mlx5_core_dev *dev);
+ int mlx5_fw_reset_wait_reset_done(struct mlx5_core_dev *dev);
+ void mlx5_fw_reset_events_start(struct mlx5_core_dev *dev);
+ void mlx5_fw_reset_events_stop(struct mlx5_core_dev *dev);
++void mlx5_drain_fw_reset(struct mlx5_core_dev *dev);
+ int mlx5_fw_reset_init(struct mlx5_core_dev *dev);
+ void mlx5_fw_reset_cleanup(struct mlx5_core_dev *dev);
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+index f1437b6d4418..4e49dca94bc3 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+@@ -1628,6 +1628,10 @@ static void remove_one(struct pci_dev *pdev)
+ struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
+ struct devlink *devlink = priv_to_devlink(dev);
+
++ /* mlx5_drain_fw_reset() is using devlink APIs. Hence, we must drain
++ * fw_reset before unregistering the devlink.
++ */
++ mlx5_drain_fw_reset(dev);
+ devlink_unregister(devlink);
+ mlx5_crdump_disable(dev);
+ mlx5_drain_health_wq(dev);
+--
+2.35.1
+
diff --git a/queue-5.17/net-mlx5-initialize-flow-steering-during-driver-prob.patch b/queue-5.17/net-mlx5-initialize-flow-steering-during-driver-prob.patch
new file mode 100644
index 0000000..14c05af
--- /dev/null
+++ b/queue-5.17/net-mlx5-initialize-flow-steering-during-driver-prob.patch
@@ -0,0 +1,397 @@
+From a63eef70b4ed8584c74752ee6d38451d1982a731 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 9 Mar 2022 14:45:58 +0200
+Subject: net/mlx5: Initialize flow steering during driver probe
+
+From: Shay Drory <shayd@nvidia.com>
+
+[ Upstream commit b33886971dbc4a86d1ec5369a2aaefc60a7cd72d ]
+
+Currently, software objects of flow steering are created and destroyed
+during the reload flow. In case a device is unloaded, the following error
+is printed during the grace period:
+
+ mlx5_core 0000:00:0b.0: mlx5_fw_fatal_reporter_err_work:690:(pid 95):
+ Driver is in error state. Unloading
+
+As a solution, to fix use-after-free bugs where we try to access
+these objects when reading the value of the flow_steering_mode devlink
+param [1], let's split flow steering creation and destruction into two
+routines:
+ * init and cleanup: memory, cache, and pools allocation/free.
+ * create and destroy: namespaces initialization and cleanup.
+
+While at it, re-order the cleanup function to mirror the init function.
+
+[1]
+Kasan trace:
+
+[ 385.119849 ] BUG: KASAN: use-after-free in mlx5_devlink_fs_mode_get+0x3b/0xa0
+[ 385.119849 ] Read of size 4 at addr ffff888104b79308 by task bash/291
+[ 385.119849 ]
+[ 385.119849 ] CPU: 1 PID: 291 Comm: bash Not tainted 5.17.0-rc1+ #2
+[ 385.119849 ] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.13.0-2.fc32 04/01/2014
+[ 385.119849 ] Call Trace:
+[ 385.119849 ] <TASK>
+[ 385.119849 ] dump_stack_lvl+0x6e/0x91
+[ 385.119849 ] print_address_description.constprop.0+0x1f/0x160
+[ 385.119849 ] ? mlx5_devlink_fs_mode_get+0x3b/0xa0
+[ 385.119849 ] ? mlx5_devlink_fs_mode_get+0x3b/0xa0
+[ 385.119849 ] kasan_report.cold+0x83/0xdf
+[ 385.119849 ] ? devlink_param_notify+0x20/0x190
+[ 385.119849 ] ? mlx5_devlink_fs_mode_get+0x3b/0xa0
+[ 385.119849 ] mlx5_devlink_fs_mode_get+0x3b/0xa0
+[ 385.119849 ] devlink_nl_param_fill+0x18a/0xa50
+[ 385.119849 ] ? _raw_spin_lock_irqsave+0x8d/0xe0
+[ 385.119849 ] ? devlink_flash_update_timeout_notify+0xf0/0xf0
+[ 385.119849 ] ? __wake_up_common+0x4b/0x1e0
+[ 385.119849 ] ? preempt_count_sub+0x14/0xc0
+[ 385.119849 ] ? _raw_spin_unlock_irqrestore+0x28/0x40
+[ 385.119849 ] ? __wake_up_common_lock+0xe3/0x140
+[ 385.119849 ] ? __wake_up_common+0x1e0/0x1e0
+[ 385.119849 ] ? __sanitizer_cov_trace_const_cmp8+0x27/0x80
+[ 385.119849 ] ? __rcu_read_unlock+0x48/0x70
+[ 385.119849 ] ? kasan_unpoison+0x23/0x50
+[ 385.119849 ] ? __kasan_slab_alloc+0x2c/0x80
+[ 385.119849 ] ? memset+0x20/0x40
+[ 385.119849 ] ? __sanitizer_cov_trace_const_cmp4+0x25/0x80
+[ 385.119849 ] devlink_param_notify+0xce/0x190
+[ 385.119849 ] devlink_unregister+0x92/0x2b0
+[ 385.119849 ] remove_one+0x41/0x140
+[ 385.119849 ] pci_device_remove+0x68/0x140
+[ 385.119849 ] ? pcibios_free_irq+0x10/0x10
+[ 385.119849 ] __device_release_driver+0x294/0x3f0
+[ 385.119849 ] device_driver_detach+0x82/0x130
+[ 385.119849 ] unbind_store+0x193/0x1b0
+[ 385.119849 ] ? subsys_interface_unregister+0x270/0x270
+[ 385.119849 ] drv_attr_store+0x4e/0x70
+[ 385.119849 ] ? drv_attr_show+0x60/0x60
+[ 385.119849 ] sysfs_kf_write+0xa7/0xc0
+[ 385.119849 ] kernfs_fop_write_iter+0x23a/0x2f0
+[ 385.119849 ] ? sysfs_kf_bin_read+0x160/0x160
+[ 385.119849 ] new_sync_write+0x311/0x430
+[ 385.119849 ] ? new_sync_read+0x480/0x480
+[ 385.119849 ] ? _raw_spin_lock+0x87/0xe0
+[ 385.119849 ] ? __sanitizer_cov_trace_cmp4+0x25/0x80
+[ 385.119849 ] ? security_file_permission+0x94/0xa0
+[ 385.119849 ] vfs_write+0x4c7/0x590
+[ 385.119849 ] ksys_write+0xf6/0x1e0
+[ 385.119849 ] ? __x64_sys_read+0x50/0x50
+[ 385.119849 ] ? fpregs_assert_state_consistent+0x99/0xa0
+[ 385.119849 ] do_syscall_64+0x3d/0x90
+[ 385.119849 ] entry_SYSCALL_64_after_hwframe+0x44/0xae
+[ 385.119849 ] RIP: 0033:0x7fc36ef38504
+[ 385.119849 ] Code: 00 f7 d8 64 89 02 48 c7 c0 ff ff ff ff eb b3 0f 1f
+80 00 00 00 00 48 8d 05 f9 61 0d 00 8b 00 85 c0 75 13 b8 01 00 00 00 0f
+05 <48> 3d 00 f0 ff ff 77 54 c3 0f 1f 00 41 54 49 89 d4 55 48 89 f5 53
+[ 385.119849 ] RSP: 002b:00007ffde0ff3d08 EFLAGS: 00000246 ORIG_RAX: 0000000000000001
+[ 385.119849 ] RAX: ffffffffffffffda RBX: 000000000000000c RCX: 00007fc36ef38504
+[ 385.119849 ] RDX: 000000000000000c RSI: 00007fc370521040 RDI: 0000000000000001
+[ 385.119849 ] RBP: 00007fc370521040 R08: 00007fc36f00b8c0 R09: 00007fc36ee4b740
+[ 385.119849 ] R10: 0000000000000000 R11: 0000000000000246 R12: 00007fc36f00a760
+[ 385.119849 ] R13: 000000000000000c R14: 00007fc36f005760 R15: 000000000000000c
+[ 385.119849 ] </TASK>
+[ 385.119849 ]
+[ 385.119849 ] Allocated by task 65:
+[ 385.119849 ] kasan_save_stack+0x1e/0x40
+[ 385.119849 ] __kasan_kmalloc+0x81/0xa0
+[ 385.119849 ] mlx5_init_fs+0x11b/0x1160
+[ 385.119849 ] mlx5_load+0x13c/0x220
+[ 385.119849 ] mlx5_load_one+0xda/0x160
+[ 385.119849 ] mlx5_recover_device+0xb8/0x100
+[ 385.119849 ] mlx5_health_try_recover+0x2f9/0x3a1
+[ 385.119849 ] devlink_health_reporter_recover+0x75/0x100
+[ 385.119849 ] devlink_health_report+0x26c/0x4b0
+[ 385.275909 ] mlx5_fw_fatal_reporter_err_work+0x11e/0x1b0
+[ 385.275909 ] process_one_work+0x520/0x970
+[ 385.275909 ] worker_thread+0x378/0x950
+[ 385.275909 ] kthread+0x1bb/0x200
+[ 385.275909 ] ret_from_fork+0x1f/0x30
+[ 385.275909 ]
+[ 385.275909 ] Freed by task 65:
+[ 385.275909 ] kasan_save_stack+0x1e/0x40
+[ 385.275909 ] kasan_set_track+0x21/0x30
+[ 385.275909 ] kasan_set_free_info+0x20/0x30
+[ 385.275909 ] __kasan_slab_free+0xfc/0x140
+[ 385.275909 ] kfree+0xa5/0x3b0
+[ 385.275909 ] mlx5_unload+0x2e/0xb0
+[ 385.275909 ] mlx5_unload_one+0x86/0xb0
+[ 385.275909 ] mlx5_fw_fatal_reporter_err_work.cold+0xca/0xcf
+[ 385.275909 ] process_one_work+0x520/0x970
+[ 385.275909 ] worker_thread+0x378/0x950
+[ 385.275909 ] kthread+0x1bb/0x200
+[ 385.275909 ] ret_from_fork+0x1f/0x30
+[ 385.275909 ]
+[ 385.275909 ] The buggy address belongs to the object at ffff888104b79300
+[ 385.275909 ] which belongs to the cache kmalloc-128 of size 128
+[ 385.275909 ] The buggy address is located 8 bytes inside of
+[ 385.275909 ] 128-byte region [ffff888104b79300, ffff888104b79380)
+[ 385.275909 ] The buggy address belongs to the page:
+[ 385.275909 ] page:00000000de44dd39 refcount:1 mapcount:0 mapping:0000000000000000 index:0x0 pfn:0x104b78
+[ 385.275909 ] head:00000000de44dd39 order:1 compound_mapcount:0
+[ 385.275909 ] flags: 0x8000000000010200(slab|head|zone=2)
+[ 385.275909 ] raw: 8000000000010200 0000000000000000 dead000000000122 ffff8881000428c0
+[ 385.275909 ] raw: 0000000000000000 0000000080200020 00000001ffffffff 0000000000000000
+[ 385.275909 ] page dumped because: kasan: bad access detected
+[ 385.275909 ]
+[ 385.275909 ] Memory state around the buggy address:
+[ 385.275909 ] ffff888104b79200: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 fc fc
+[ 385.275909 ] ffff888104b79280: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
+[ 385.275909 ] >ffff888104b79300: fa fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
+[ 385.275909 ] ^
+[ 385.275909 ] ffff888104b79380: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
+[ 385.275909 ] ffff888104b79400: fa fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
+[ 385.275909 ]]
+
+Fixes: e890acd5ff18 ("net/mlx5: Add devlink flow_steering_mode parameter")
+Signed-off-by: Shay Drory <shayd@nvidia.com>
+Reviewed-by: Mark Bloch <mbloch@nvidia.com>
+Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../net/ethernet/mellanox/mlx5/core/fs_core.c | 131 ++++++++++--------
+ .../net/ethernet/mellanox/mlx5/core/fs_core.h | 6 +-
+ .../net/ethernet/mellanox/mlx5/core/main.c | 15 +-
+ 3 files changed, 91 insertions(+), 61 deletions(-)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+index 537c82b9aa53..b6f58d16d145 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+@@ -2656,28 +2656,6 @@ static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns)
+ clean_tree(&root_ns->ns.node);
+ }
+
+-void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
+-{
+- struct mlx5_flow_steering *steering = dev->priv.steering;
+-
+- cleanup_root_ns(steering->root_ns);
+- cleanup_root_ns(steering->fdb_root_ns);
+- steering->fdb_root_ns = NULL;
+- kfree(steering->fdb_sub_ns);
+- steering->fdb_sub_ns = NULL;
+- cleanup_root_ns(steering->port_sel_root_ns);
+- cleanup_root_ns(steering->sniffer_rx_root_ns);
+- cleanup_root_ns(steering->sniffer_tx_root_ns);
+- cleanup_root_ns(steering->rdma_rx_root_ns);
+- cleanup_root_ns(steering->rdma_tx_root_ns);
+- cleanup_root_ns(steering->egress_root_ns);
+- mlx5_cleanup_fc_stats(dev);
+- kmem_cache_destroy(steering->ftes_cache);
+- kmem_cache_destroy(steering->fgs_cache);
+- mlx5_ft_pool_destroy(dev);
+- kfree(steering);
+-}
+-
+ static int init_sniffer_tx_root_ns(struct mlx5_flow_steering *steering)
+ {
+ struct fs_prio *prio;
+@@ -3063,42 +3041,27 @@ static int init_egress_root_ns(struct mlx5_flow_steering *steering)
+ return err;
+ }
+
+-int mlx5_init_fs(struct mlx5_core_dev *dev)
++void mlx5_fs_core_cleanup(struct mlx5_core_dev *dev)
+ {
+- struct mlx5_flow_steering *steering;
+- int err = 0;
+-
+- err = mlx5_init_fc_stats(dev);
+- if (err)
+- return err;
+-
+- err = mlx5_ft_pool_init(dev);
+- if (err)
+- return err;
+-
+- steering = kzalloc(sizeof(*steering), GFP_KERNEL);
+- if (!steering) {
+- err = -ENOMEM;
+- goto err;
+- }
+-
+- steering->dev = dev;
+- dev->priv.steering = steering;
++ struct mlx5_flow_steering *steering = dev->priv.steering;
+
+- if (mlx5_fs_dr_is_supported(dev))
+- steering->mode = MLX5_FLOW_STEERING_MODE_SMFS;
+- else
+- steering->mode = MLX5_FLOW_STEERING_MODE_DMFS;
++ cleanup_root_ns(steering->root_ns);
++ cleanup_root_ns(steering->fdb_root_ns);
++ steering->fdb_root_ns = NULL;
++ kfree(steering->fdb_sub_ns);
++ steering->fdb_sub_ns = NULL;
++ cleanup_root_ns(steering->port_sel_root_ns);
++ cleanup_root_ns(steering->sniffer_rx_root_ns);
++ cleanup_root_ns(steering->sniffer_tx_root_ns);
++ cleanup_root_ns(steering->rdma_rx_root_ns);
++ cleanup_root_ns(steering->rdma_tx_root_ns);
++ cleanup_root_ns(steering->egress_root_ns);
++}
+
+- steering->fgs_cache = kmem_cache_create("mlx5_fs_fgs",
+- sizeof(struct mlx5_flow_group), 0,
+- 0, NULL);
+- steering->ftes_cache = kmem_cache_create("mlx5_fs_ftes", sizeof(struct fs_fte), 0,
+- 0, NULL);
+- if (!steering->ftes_cache || !steering->fgs_cache) {
+- err = -ENOMEM;
+- goto err;
+- }
++int mlx5_fs_core_init(struct mlx5_core_dev *dev)
++{
++ struct mlx5_flow_steering *steering = dev->priv.steering;
++ int err = 0;
+
+ if ((((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
+ (MLX5_CAP_GEN(dev, nic_flow_table))) ||
+@@ -3157,8 +3120,64 @@ int mlx5_init_fs(struct mlx5_core_dev *dev)
+ }
+
+ return 0;
++
++err:
++ mlx5_fs_core_cleanup(dev);
++ return err;
++}
++
++void mlx5_fs_core_free(struct mlx5_core_dev *dev)
++{
++ struct mlx5_flow_steering *steering = dev->priv.steering;
++
++ kmem_cache_destroy(steering->ftes_cache);
++ kmem_cache_destroy(steering->fgs_cache);
++ kfree(steering);
++ mlx5_ft_pool_destroy(dev);
++ mlx5_cleanup_fc_stats(dev);
++}
++
++int mlx5_fs_core_alloc(struct mlx5_core_dev *dev)
++{
++ struct mlx5_flow_steering *steering;
++ int err = 0;
++
++ err = mlx5_init_fc_stats(dev);
++ if (err)
++ return err;
++
++ err = mlx5_ft_pool_init(dev);
++ if (err)
++ goto err;
++
++ steering = kzalloc(sizeof(*steering), GFP_KERNEL);
++ if (!steering) {
++ err = -ENOMEM;
++ goto err;
++ }
++
++ steering->dev = dev;
++ dev->priv.steering = steering;
++
++ if (mlx5_fs_dr_is_supported(dev))
++ steering->mode = MLX5_FLOW_STEERING_MODE_SMFS;
++ else
++ steering->mode = MLX5_FLOW_STEERING_MODE_DMFS;
++
++ steering->fgs_cache = kmem_cache_create("mlx5_fs_fgs",
++ sizeof(struct mlx5_flow_group), 0,
++ 0, NULL);
++ steering->ftes_cache = kmem_cache_create("mlx5_fs_ftes", sizeof(struct fs_fte), 0,
++ 0, NULL);
++ if (!steering->ftes_cache || !steering->fgs_cache) {
++ err = -ENOMEM;
++ goto err;
++ }
++
++ return 0;
++
+ err:
+- mlx5_cleanup_fs(dev);
++ mlx5_fs_core_free(dev);
+ return err;
+ }
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+index 5469b08d635f..6366bf50a564 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+@@ -293,8 +293,10 @@ int mlx5_flow_namespace_set_peer(struct mlx5_flow_root_namespace *ns,
+ int mlx5_flow_namespace_set_mode(struct mlx5_flow_namespace *ns,
+ enum mlx5_flow_steering_mode mode);
+
+-int mlx5_init_fs(struct mlx5_core_dev *dev);
+-void mlx5_cleanup_fs(struct mlx5_core_dev *dev);
++int mlx5_fs_core_alloc(struct mlx5_core_dev *dev);
++void mlx5_fs_core_free(struct mlx5_core_dev *dev);
++int mlx5_fs_core_init(struct mlx5_core_dev *dev);
++void mlx5_fs_core_cleanup(struct mlx5_core_dev *dev);
+
+ int mlx5_fs_egress_acls_init(struct mlx5_core_dev *dev, int total_vports);
+ void mlx5_fs_egress_acls_cleanup(struct mlx5_core_dev *dev);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+index bba72b220cc3..f1437b6d4418 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+@@ -939,6 +939,12 @@ static int mlx5_init_once(struct mlx5_core_dev *dev)
+ goto err_sf_table_cleanup;
+ }
+
++ err = mlx5_fs_core_alloc(dev);
++ if (err) {
++ mlx5_core_err(dev, "Failed to alloc flow steering\n");
++ goto err_fs;
++ }
++
+ dev->dm = mlx5_dm_create(dev);
+ if (IS_ERR(dev->dm))
+ mlx5_core_warn(dev, "Failed to init device memory%d\n", err);
+@@ -949,6 +955,8 @@ static int mlx5_init_once(struct mlx5_core_dev *dev)
+
+ return 0;
+
++err_fs:
++ mlx5_sf_table_cleanup(dev);
+ err_sf_table_cleanup:
+ mlx5_sf_hw_table_cleanup(dev);
+ err_sf_hw_table_cleanup:
+@@ -986,6 +994,7 @@ static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
+ mlx5_hv_vhca_destroy(dev->hv_vhca);
+ mlx5_fw_tracer_destroy(dev->tracer);
+ mlx5_dm_cleanup(dev);
++ mlx5_fs_core_free(dev);
+ mlx5_sf_table_cleanup(dev);
+ mlx5_sf_hw_table_cleanup(dev);
+ mlx5_vhca_event_cleanup(dev);
+@@ -1192,7 +1201,7 @@ static int mlx5_load(struct mlx5_core_dev *dev)
+ goto err_tls_start;
+ }
+
+- err = mlx5_init_fs(dev);
++ err = mlx5_fs_core_init(dev);
+ if (err) {
+ mlx5_core_err(dev, "Failed to init flow steering\n");
+ goto err_fs;
+@@ -1237,7 +1246,7 @@ static int mlx5_load(struct mlx5_core_dev *dev)
+ err_vhca:
+ mlx5_vhca_event_stop(dev);
+ err_set_hca:
+- mlx5_cleanup_fs(dev);
++ mlx5_fs_core_cleanup(dev);
+ err_fs:
+ mlx5_accel_tls_cleanup(dev);
+ err_tls_start:
+@@ -1266,7 +1275,7 @@ static void mlx5_unload(struct mlx5_core_dev *dev)
+ mlx5_ec_cleanup(dev);
+ mlx5_sf_hw_table_destroy(dev);
+ mlx5_vhca_event_stop(dev);
+- mlx5_cleanup_fs(dev);
++ mlx5_fs_core_cleanup(dev);
+ mlx5_accel_ipsec_cleanup(dev);
+ mlx5_accel_tls_cleanup(dev);
+ mlx5_fpga_device_stop(dev);
+--
+2.35.1
+
diff --git a/queue-5.17/net-mlx5e-block-rx-gro-hw-feature-in-switchdev-mode.patch b/queue-5.17/net-mlx5e-block-rx-gro-hw-feature-in-switchdev-mode.patch
new file mode 100644
index 0000000..84d5d38
--- /dev/null
+++ b/queue-5.17/net-mlx5e-block-rx-gro-hw-feature-in-switchdev-mode.patch
@@ -0,0 +1,41 @@
+From 597bbccebfc6be8c463a77e0f5fe6d39dfe36d2a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 11 Apr 2022 17:29:08 +0300
+Subject: net/mlx5e: Block rx-gro-hw feature in switchdev mode
+
+From: Aya Levin <ayal@nvidia.com>
+
+[ Upstream commit 15a5078cab30d7aa02ad14bfadebf247d95fc239 ]
+
+When the driver is in switchdev mode and rx-gro-hw is set, the RQ needs
+special CQE handling. Until then, block setting the rx-gro-hw feature in
+switchdev mode, to avoid a failure while setting the feature due to a
+failure while opening the RQ.
+
+Fixes: f97d5c2a453e ("net/mlx5e: Add handle SHAMPO cqe support")
+Signed-off-by: Aya Levin <ayal@nvidia.com>
+Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
+Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+index 169e3524bb1c..d468daa7dc20 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -3829,6 +3829,10 @@ static netdev_features_t mlx5e_fix_uplink_rep_features(struct net_device *netdev
+ if (netdev->features & NETIF_F_NTUPLE)
+ netdev_warn(netdev, "Disabling ntuple, not supported in switchdev mode\n");
+
++ features &= ~NETIF_F_GRO_HW;
++ if (netdev->features & NETIF_F_GRO_HW)
++ netdev_warn(netdev, "Disabling HW_GRO, not supported in switchdev mode\n");
++
+ return features;
+ }
+
+--
+2.35.1
+
diff --git a/queue-5.17/net-mlx5e-properly-block-hw-gro-when-xdp-is-enabled.patch b/queue-5.17/net-mlx5e-properly-block-hw-gro-when-xdp-is-enabled.patch
new file mode 100644
index 0000000..e61e029
--- /dev/null
+++ b/queue-5.17/net-mlx5e-properly-block-hw-gro-when-xdp-is-enabled.patch
@@ -0,0 +1,52 @@
+From 583ffb01346f07604fe03b6fb417bd466d872c24 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 12 Apr 2022 18:54:26 +0300
+Subject: net/mlx5e: Properly block HW GRO when XDP is enabled
+
+From: Maxim Mikityanskiy <maximmi@nvidia.com>
+
+[ Upstream commit b0617e7b35001c92c8fa777e1a095d3e693813df ]
+
+HW GRO is incompatible and mutually exclusive with XDP and XSK. However,
+the needed checks are only made when enabling XDP. If HW GRO is enabled
+when XDP is already active, the command will succeed, and XDP will be
+skipped in the data path, although still enabled.
+
+This commit fixes the bug by checking the XDP and XSK status in
+mlx5e_fix_features and disabling HW GRO if XDP is enabled.
+
+Fixes: 83439f3c37aa ("net/mlx5e: Add HW-GRO offload")
+Signed-off-by: Maxim Mikityanskiy <maximmi@nvidia.com>
+Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
+Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 12 ++++++++++++
+ 1 file changed, 12 insertions(+)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+index 1f8fc8d77bc3..4b83dd05afcd 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -3870,6 +3870,18 @@ static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
+ netdev_warn(netdev, "LRO is incompatible with XDP\n");
+ features &= ~NETIF_F_LRO;
+ }
++ if (features & NETIF_F_GRO_HW) {
++ netdev_warn(netdev, "HW GRO is incompatible with XDP\n");
++ features &= ~NETIF_F_GRO_HW;
++ }
++ }
++
++ if (priv->xsk.refcnt) {
++ if (features & NETIF_F_GRO_HW) {
++ netdev_warn(netdev, "HW GRO is incompatible with AF_XDP (%u XSKs are active)\n",
++ priv->xsk.refcnt);
++ features &= ~NETIF_F_GRO_HW;
++ }
+ }
+
+ if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)) {
+--
+2.35.1
+
diff --git a/queue-5.17/net-mlx5e-properly-block-lro-when-xdp-is-enabled.patch b/queue-5.17/net-mlx5e-properly-block-lro-when-xdp-is-enabled.patch
new file mode 100644
index 0000000..e5a91af
--- /dev/null
+++ b/queue-5.17/net-mlx5e-properly-block-lro-when-xdp-is-enabled.patch
@@ -0,0 +1,47 @@
+From f20ddffd8d0a355eef3c43fbbf2c46125b135ae5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 12 Apr 2022 18:37:03 +0300
+Subject: net/mlx5e: Properly block LRO when XDP is enabled
+
+From: Maxim Mikityanskiy <maximmi@nvidia.com>
+
+[ Upstream commit cf6e34c8c22fba66bd21244b95ea47e235f68974 ]
+
+LRO is incompatible and mutually exclusive with XDP. However, the needed
+checks are only made when enabling XDP. If LRO is enabled when XDP is
+already active, the command will succeed, and XDP will be skipped in the
+data path, although still enabled.
+
+This commit fixes the bug by checking the XDP status in
+mlx5e_fix_features and disabling LRO if XDP is enabled.
+
+Fixes: 86994156c736 ("net/mlx5e: XDP fast RX drop bpf programs support")
+Signed-off-by: Maxim Mikityanskiy <maximmi@nvidia.com>
+Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
+Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+index d468daa7dc20..1f8fc8d77bc3 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -3865,6 +3865,13 @@ static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
+ }
+ }
+
++ if (params->xdp_prog) {
++ if (features & NETIF_F_LRO) {
++ netdev_warn(netdev, "LRO is incompatible with XDP\n");
++ features &= ~NETIF_F_LRO;
++ }
++ }
++
+ if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)) {
+ features &= ~NETIF_F_RXHASH;
+ if (netdev->features & NETIF_F_RXHASH)
+--
+2.35.1
+
diff --git a/queue-5.17/net-mlx5e-remove-hw-gro-from-reported-features.patch b/queue-5.17/net-mlx5e-remove-hw-gro-from-reported-features.patch
new file mode 100644
index 0000000..52743f9
--- /dev/null
+++ b/queue-5.17/net-mlx5e-remove-hw-gro-from-reported-features.patch
@@ -0,0 +1,39 @@
+From b0a858df9ac13ae4c22a1f85eeefd1b72fa84ee9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 13 Apr 2022 15:50:42 +0300
+Subject: net/mlx5e: Remove HW-GRO from reported features
+
+From: Gal Pressman <gal@nvidia.com>
+
+[ Upstream commit 6bbd723035badafe4a8eb17ccdecd96eae7a96d5 ]
+
+We got reports of certain HW-GRO flows causing kernel call traces, which
+might be related to firmware. To be on the safe side, disable the
+feature for now and re-enable it once a driver/firmware fix is found.
+
+Fixes: 83439f3c37aa ("net/mlx5e: Add HW-GRO offload")
+Signed-off-by: Gal Pressman <gal@nvidia.com>
+Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 4 ----
+ 1 file changed, 4 deletions(-)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+index 4b83dd05afcd..3500faf08671 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -4828,10 +4828,6 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
+ netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+ netdev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;
+
+- if (!!MLX5_CAP_GEN(mdev, shampo) &&
+- mlx5e_check_fragmented_striding_rq_cap(mdev))
+- netdev->hw_features |= NETIF_F_GRO_HW;
+-
+ if (mlx5e_tunnel_any_tx_proto_supported(mdev)) {
+ netdev->hw_enc_features |= NETIF_F_HW_CSUM;
+ netdev->hw_enc_features |= NETIF_F_TSO;
+--
+2.35.1
+
diff --git a/queue-5.17/net-qla3xxx-fix-a-test-in-ql_reset_work.patch b/queue-5.17/net-qla3xxx-fix-a-test-in-ql_reset_work.patch
new file mode 100644
index 0000000..f6f8800
--- /dev/null
+++ b/queue-5.17/net-qla3xxx-fix-a-test-in-ql_reset_work.patch
@@ -0,0 +1,48 @@
+From 39b6f8ba9533de8205e9a3f4151b0abafdca365b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 15 May 2022 20:07:02 +0200
+Subject: net/qla3xxx: Fix a test in ql_reset_work()
+
+From: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+
+[ Upstream commit 5361448e45fac6fb96738df748229432a62d78b6 ]
+
+test_bit() tests if one bit is set or not.
+Here the logic seems to check if bit QL_RESET_PER_SCSI (i.e. 4) OR bit
+QL_RESET_START (i.e. 3) is set.
+
+In fact, it checks if bit 7 (4 | 3 = 7) is set, that is to say
+QL_ADAPTER_UP.
+
+This looks harmless, because this bit is likely to be set, and when the
+ql_reset_work() delayed work is scheduled in ql3xxx_isr() (the only place
+that schedules this work), QL_RESET_START or QL_RESET_PER_SCSI is set.
+
+This has been spotted by smatch.
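+
+A minimal sketch of why OR-ing the two flag indices goes wrong (generic
+values, not code from this driver): test_bit() takes a bit index, not a
+mask, so combining two indices selects a third, unrelated bit:
+
+	unsigned long flags = 0;
+	bool wrong, right;
+
+	set_bit(3, &flags);		/* QL_RESET_START is bit 3 */
+	wrong = test_bit(3 | 4, &flags);	/* tests bit 7 (QL_ADAPTER_UP): false */
+	right = test_bit(3, &flags) || test_bit(4, &flags);	/* intended check: true */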
+
+Fixes: 5a4faa873782 ("[PATCH] qla3xxx NIC driver")
+Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+Link: https://lore.kernel.org/r/80e73e33f390001d9c0140ffa9baddf6466a41a2.1652637337.git.christophe.jaillet@wanadoo.fr
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/qlogic/qla3xxx.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
+index b30589a135c2..06f4d9a9e938 100644
+--- a/drivers/net/ethernet/qlogic/qla3xxx.c
++++ b/drivers/net/ethernet/qlogic/qla3xxx.c
+@@ -3614,7 +3614,8 @@ static void ql_reset_work(struct work_struct *work)
+ qdev->mem_map_registers;
+ unsigned long hw_flags;
+
+- if (test_bit((QL_RESET_PER_SCSI | QL_RESET_START), &qdev->flags)) {
++ if (test_bit(QL_RESET_PER_SCSI, &qdev->flags) ||
++ test_bit(QL_RESET_START, &qdev->flags)) {
+ clear_bit(QL_LINK_MASTER, &qdev->flags);
+
+ /*
+--
+2.35.1
+
diff --git a/queue-5.17/net-sched-act_pedit-sanitize-shift-argument-before-u.patch b/queue-5.17/net-sched-act_pedit-sanitize-shift-argument-before-u.patch
new file mode 100644
index 0000000..f3ece2a
--- /dev/null
+++ b/queue-5.17/net-sched-act_pedit-sanitize-shift-argument-before-u.patch
@@ -0,0 +1,84 @@
+From 253dcaab7f8f8f2ea29e8a0948f4b4b912bd2dc4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 13 May 2022 11:27:06 +0200
+Subject: net/sched: act_pedit: sanitize shift argument before usage
+
+From: Paolo Abeni <pabeni@redhat.com>
+
+[ Upstream commit 4d42d54a7d6aa6d29221d3fd4f2ae9503e94f011 ]
+
+syzbot was able to trigger an Out-of-Bound on the pedit action:
+
+UBSAN: shift-out-of-bounds in net/sched/act_pedit.c:238:43
+shift exponent 1400735974 is too large for 32-bit type 'unsigned int'
+CPU: 0 PID: 3606 Comm: syz-executor151 Not tainted 5.18.0-rc5-syzkaller-00165-g810c2f0a3f86 #0
+Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011
+Call Trace:
+ <TASK>
+ __dump_stack lib/dump_stack.c:88 [inline]
+ dump_stack_lvl+0xcd/0x134 lib/dump_stack.c:106
+ ubsan_epilogue+0xb/0x50 lib/ubsan.c:151
+ __ubsan_handle_shift_out_of_bounds.cold+0xb1/0x187 lib/ubsan.c:322
+ tcf_pedit_init.cold+0x1a/0x1f net/sched/act_pedit.c:238
+ tcf_action_init_1+0x414/0x690 net/sched/act_api.c:1367
+ tcf_action_init+0x530/0x8d0 net/sched/act_api.c:1432
+ tcf_action_add+0xf9/0x480 net/sched/act_api.c:1956
+ tc_ctl_action+0x346/0x470 net/sched/act_api.c:2015
+ rtnetlink_rcv_msg+0x413/0xb80 net/core/rtnetlink.c:5993
+ netlink_rcv_skb+0x153/0x420 net/netlink/af_netlink.c:2502
+ netlink_unicast_kernel net/netlink/af_netlink.c:1319 [inline]
+ netlink_unicast+0x543/0x7f0 net/netlink/af_netlink.c:1345
+ netlink_sendmsg+0x904/0xe00 net/netlink/af_netlink.c:1921
+ sock_sendmsg_nosec net/socket.c:705 [inline]
+ sock_sendmsg+0xcf/0x120 net/socket.c:725
+ ____sys_sendmsg+0x6e2/0x800 net/socket.c:2413
+ ___sys_sendmsg+0xf3/0x170 net/socket.c:2467
+ __sys_sendmsg+0xe5/0x1b0 net/socket.c:2496
+ do_syscall_x64 arch/x86/entry/common.c:50 [inline]
+ do_syscall_64+0x35/0xb0 arch/x86/entry/common.c:80
+ entry_SYSCALL_64_after_hwframe+0x44/0xae
+RIP: 0033:0x7fe36e9e1b59
+Code: 28 c3 e8 2a 14 00 00 66 2e 0f 1f 84 00 00 00 00 00 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 c7 c1 c0 ff ff ff f7 d8 64 89 01 48
+RSP: 002b:00007ffef796fe88 EFLAGS: 00000246 ORIG_RAX: 000000000000002e
+RAX: ffffffffffffffda RBX: 0000000000000000 RCX: 00007fe36e9e1b59
+RDX: 0000000000000000 RSI: 0000000020000300 RDI: 0000000000000003
+RBP: 00007fe36e9a5d00 R08: 0000000000000000 R09: 0000000000000000
+R10: 0000000000000000 R11: 0000000000000246 R12: 00007fe36e9a5d90
+R13: 0000000000000000 R14: 0000000000000000 R15: 0000000000000000
+ </TASK>
+
+The 'shift' field is not validated, and any value above 31 will
+trigger an out-of-bounds shift. The issue predates the git history, but
+syzbot was able to trigger it only after the commit mentioned in
+the fixes tag, and this change only applies on top of such commit.
+
+Address the issue by bounding the 'shift' value to the maximum allowed
+by the relevant operator.
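+
+As a quick sanity check of the bound (illustrative arithmetic only,
+using the macro from the hunk below): BITS_PER_TYPE(int) is 32, so the
+clamp evaluates to
+
+	min_t(size_t, 32 - 1, 1400735974) == 31
+
+which keeps any subsequent shift of a 32-bit value inside the valid
+0..31 range.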
+
+Reported-and-tested-by: syzbot+8ed8fc4c57e9dcf23ca6@syzkaller.appspotmail.com
+Fixes: 8b796475fd78 ("net/sched: act_pedit: really ensure the skb is writable")
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/sched/act_pedit.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
+index 0eaaf1f45de1..211c757bfc3c 100644
+--- a/net/sched/act_pedit.c
++++ b/net/sched/act_pedit.c
+@@ -232,6 +232,10 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
+ for (i = 0; i < p->tcfp_nkeys; ++i) {
+ u32 cur = p->tcfp_keys[i].off;
+
++ /* sanitize the shift value for any later use */
++ p->tcfp_keys[i].shift = min_t(size_t, BITS_PER_TYPE(int) - 1,
++ p->tcfp_keys[i].shift);
++
+ /* The AT option can read a single byte, we can bound the actual
+ * value with uchar max.
+ */
+--
+2.35.1
+
diff --git a/queue-5.17/net-stmmac-fix-missing-pci_disable_device-on-error-i.patch b/queue-5.17/net-stmmac-fix-missing-pci_disable_device-on-error-i.patch
new file mode 100644
index 0000000..3cecd0e
--- /dev/null
+++ b/queue-5.17/net-stmmac-fix-missing-pci_disable_device-on-error-i.patch
@@ -0,0 +1,46 @@
+From a1c583ea666642665252fe37010edfacf3b1d6e0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 10 May 2022 11:13:16 +0800
+Subject: net: stmmac: fix missing pci_disable_device() on error in
+ stmmac_pci_probe()
+
+From: Yang Yingliang <yangyingliang@huawei.com>
+
+[ Upstream commit 0807ce0b010418a191e0e4009803b2d74c3245d5 ]
+
+Switch to using pcim_enable_device() to avoid missing pci_disable_device().
+
+Reported-by: Hulk Robot <hulkci@huawei.com>
+Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
+Link: https://lore.kernel.org/r/20220510031316.1780409-1-yangyingliang@huawei.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c | 4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+index fcf17d8a0494..644bb54f5f02 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+@@ -181,7 +181,7 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
+ return -ENOMEM;
+
+ /* Enable pci device */
+- ret = pci_enable_device(pdev);
++ ret = pcim_enable_device(pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "%s: ERROR: failed to enable device\n",
+ __func__);
+@@ -241,8 +241,6 @@ static void stmmac_pci_remove(struct pci_dev *pdev)
+ pcim_iounmap_regions(pdev, BIT(i));
+ break;
+ }
+-
+- pci_disable_device(pdev);
+ }
+
+ static int __maybe_unused stmmac_pci_suspend(struct device *dev)
+--
+2.35.1
+
diff --git a/queue-5.17/net-systemport-fix-an-error-handling-path-in-bcm_sys.patch b/queue-5.17/net-systemport-fix-an-error-handling-path-in-bcm_sys.patch
new file mode 100644
index 0000000..446321d
--- /dev/null
+++ b/queue-5.17/net-systemport-fix-an-error-handling-path-in-bcm_sys.patch
@@ -0,0 +1,44 @@
+From ac5391cbdaf4b40666786587997e752785515f97 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 15 May 2022 19:01:56 +0200
+Subject: net: systemport: Fix an error handling path in bcm_sysport_probe()
+
+From: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+
+[ Upstream commit ef6b1cd11962aec21c58d137006ab122dbc8d6fd ]
+
+If devm_clk_get_optional() fails, we still need to go through the error
+handling path.
+
+Add the missing goto.
+
+Fixes: 6328a126896ea ("net: systemport: Manage Wake-on-LAN clock")
+Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+Acked-by: Florian Fainelli <f.fainelli@gmail.com>
+Link: https://lore.kernel.org/r/99d70634a81c229885ae9e4ee69b2035749f7edc.1652634040.git.christophe.jaillet@wanadoo.fr
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/broadcom/bcmsysport.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
+index 60dde29974bf..df51be3cbe06 100644
+--- a/drivers/net/ethernet/broadcom/bcmsysport.c
++++ b/drivers/net/ethernet/broadcom/bcmsysport.c
+@@ -2585,8 +2585,10 @@ static int bcm_sysport_probe(struct platform_device *pdev)
+ device_set_wakeup_capable(&pdev->dev, 1);
+
+ priv->wol_clk = devm_clk_get_optional(&pdev->dev, "sw_sysportwol");
+- if (IS_ERR(priv->wol_clk))
+- return PTR_ERR(priv->wol_clk);
++ if (IS_ERR(priv->wol_clk)) {
++ ret = PTR_ERR(priv->wol_clk);
++ goto err_deregister_fixed_link;
++ }
+
+ /* Set the needed headroom once and for all */
+ BUILD_BUG_ON(sizeof(struct bcm_tsb) != 8);
+--
+2.35.1
+
diff --git a/queue-5.17/net-vmxnet3-fix-possible-null-pointer-dereference-in.patch b/queue-5.17/net-vmxnet3-fix-possible-null-pointer-dereference-in.patch
new file mode 100644
index 0000000..c85445a
--- /dev/null
+++ b/queue-5.17/net-vmxnet3-fix-possible-null-pointer-dereference-in.patch
@@ -0,0 +1,68 @@
+From 93fe6d07b5c67364675191b0fe6bdc54c567258e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 14 May 2022 13:07:11 +0800
+Subject: net: vmxnet3: fix possible NULL pointer dereference in
+ vmxnet3_rq_cleanup()
+
+From: Zixuan Fu <r33s3n6@gmail.com>
+
+[ Upstream commit edf410cb74dc612fd47ef5be319c5a0bcd6e6ccd ]
+
+In vmxnet3_rq_create(), when dma_alloc_coherent() fails,
+vmxnet3_rq_destroy() is called. It sets rq->rx_ring[i].base to NULL. Then
+vmxnet3_rq_create() returns an error to its callers vmxnet3_rq_create_all()
+-> vmxnet3_change_mtu(). Then vmxnet3_change_mtu() calls
+vmxnet3_force_close() -> dev_close() in error handling code. And the driver
+calls vmxnet3_close() -> vmxnet3_quiesce_dev() -> vmxnet3_rq_cleanup_all()
+-> vmxnet3_rq_cleanup(). In vmxnet3_rq_cleanup(),
+rq->rx_ring[ring_idx].base is accessed, but this variable is NULL, causing
+a NULL pointer dereference.
+
+To fix this possible bug, an if statement is added to check whether
+rq->rx_ring[0].base is NULL in vmxnet3_rq_cleanup() and exit early if so.
+
+The error log in our fault-injection testing is shown as follows:
+
+[ 65.220135] BUG: kernel NULL pointer dereference, address: 0000000000000008
+...
+[ 65.222633] RIP: 0010:vmxnet3_rq_cleanup_all+0x396/0x4e0 [vmxnet3]
+...
+[ 65.227977] Call Trace:
+...
+[ 65.228262] vmxnet3_quiesce_dev+0x80f/0x8a0 [vmxnet3]
+[ 65.228580] vmxnet3_close+0x2c4/0x3f0 [vmxnet3]
+[ 65.228866] __dev_close_many+0x288/0x350
+[ 65.229607] dev_close_many+0xa4/0x480
+[ 65.231124] dev_close+0x138/0x230
+[ 65.231933] vmxnet3_force_close+0x1f0/0x240 [vmxnet3]
+[ 65.232248] vmxnet3_change_mtu+0x75d/0x920 [vmxnet3]
+...
+
+Fixes: d1a890fa37f27 ("net: VMware virtual Ethernet NIC driver: vmxnet3")
+Reported-by: TOTE Robot <oslab@tsinghua.edu.cn>
+Signed-off-by: Zixuan Fu <r33s3n6@gmail.com>
+Link: https://lore.kernel.org/r/20220514050711.2636709-1-r33s3n6@gmail.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/vmxnet3/vmxnet3_drv.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
+index 1154f1884212..93e8d119d45f 100644
+--- a/drivers/net/vmxnet3/vmxnet3_drv.c
++++ b/drivers/net/vmxnet3/vmxnet3_drv.c
+@@ -1668,6 +1668,10 @@ vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq,
+ u32 i, ring_idx;
+ struct Vmxnet3_RxDesc *rxd;
+
++ /* ring has already been cleaned up */
++ if (!rq->rx_ring[0].base)
++ return;
++
+ for (ring_idx = 0; ring_idx < 2; ring_idx++) {
+ for (i = 0; i < rq->rx_ring[ring_idx].size; i++) {
+ #ifdef __BIG_ENDIAN_BITFIELD
+--
+2.35.1
+
diff --git a/queue-5.17/net-vmxnet3-fix-possible-use-after-free-bugs-in-vmxn.patch b/queue-5.17/net-vmxnet3-fix-possible-use-after-free-bugs-in-vmxn.patch
new file mode 100644
index 0000000..7a96925
--- /dev/null
+++ b/queue-5.17/net-vmxnet3-fix-possible-use-after-free-bugs-in-vmxn.patch
@@ -0,0 +1,94 @@
+From bce0cc1981c0da5bcc087c346bc40e4487ca5406 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 14 May 2022 13:06:56 +0800
+Subject: net: vmxnet3: fix possible use-after-free bugs in
+ vmxnet3_rq_alloc_rx_buf()
+
+From: Zixuan Fu <r33s3n6@gmail.com>
+
+[ Upstream commit 9e7fef9521e73ca8afd7da9e58c14654b02dfad8 ]
+
+In vmxnet3_rq_alloc_rx_buf(), when dma_map_single() fails, rbi->skb is
+freed immediately. Similarly, in another branch, when dma_map_page() fails,
+rbi->page is also freed. In the two cases, vmxnet3_rq_alloc_rx_buf()
+returns an error to its callers vmxnet3_rq_init() -> vmxnet3_rq_init_all()
+-> vmxnet3_activate_dev(). Then vmxnet3_activate_dev() calls
+vmxnet3_rq_cleanup_all() in error handling code, and rbi->skb or rbi->page
+are freed again in vmxnet3_rq_cleanup_all(), causing use-after-free bugs.
+
+To fix these possible bugs, rbi->skb and rbi->page should be cleared after
+they are freed.
+
+The error log in our fault-injection testing is shown as follows:
+
+[ 14.319016] BUG: KASAN: use-after-free in consume_skb+0x2f/0x150
+...
+[ 14.321586] Call Trace:
+...
+[ 14.325357] consume_skb+0x2f/0x150
+[ 14.325671] vmxnet3_rq_cleanup_all+0x33a/0x4e0 [vmxnet3]
+[ 14.326150] vmxnet3_activate_dev+0xb9d/0x2ca0 [vmxnet3]
+[ 14.326616] vmxnet3_open+0x387/0x470 [vmxnet3]
+...
+[ 14.361675] Allocated by task 351:
+...
+[ 14.362688] __netdev_alloc_skb+0x1b3/0x6f0
+[ 14.362960] vmxnet3_rq_alloc_rx_buf+0x1b0/0x8d0 [vmxnet3]
+[ 14.363317] vmxnet3_activate_dev+0x3e3/0x2ca0 [vmxnet3]
+[ 14.363661] vmxnet3_open+0x387/0x470 [vmxnet3]
+...
+[ 14.367309]
+[ 14.367412] Freed by task 351:
+...
+[ 14.368932] __dev_kfree_skb_any+0xd2/0xe0
+[ 14.369193] vmxnet3_rq_alloc_rx_buf+0x71e/0x8d0 [vmxnet3]
+[ 14.369544] vmxnet3_activate_dev+0x3e3/0x2ca0 [vmxnet3]
+[ 14.369883] vmxnet3_open+0x387/0x470 [vmxnet3]
+[ 14.370174] __dev_open+0x28a/0x420
+[ 14.370399] __dev_change_flags+0x192/0x590
+[ 14.370667] dev_change_flags+0x7a/0x180
+[ 14.370919] do_setlink+0xb28/0x3570
+[ 14.371150] rtnl_newlink+0x1160/0x1740
+[ 14.371399] rtnetlink_rcv_msg+0x5bf/0xa50
+[ 14.371661] netlink_rcv_skb+0x1cd/0x3e0
+[ 14.371913] netlink_unicast+0x5dc/0x840
+[ 14.372169] netlink_sendmsg+0x856/0xc40
+[ 14.372420] ____sys_sendmsg+0x8a7/0x8d0
+[ 14.372673] __sys_sendmsg+0x1c2/0x270
+[ 14.372914] do_syscall_64+0x41/0x90
+[ 14.373145] entry_SYSCALL_64_after_hwframe+0x44/0xae
+...
+
+Fixes: 5738a09d58d5a ("vmxnet3: fix checks for dma mapping errors")
+Reported-by: TOTE Robot <oslab@tsinghua.edu.cn>
+Signed-off-by: Zixuan Fu <r33s3n6@gmail.com>
+Link: https://lore.kernel.org/r/20220514050656.2636588-1-r33s3n6@gmail.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/vmxnet3/vmxnet3_drv.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
+index d9d90baac72a..1154f1884212 100644
+--- a/drivers/net/vmxnet3/vmxnet3_drv.c
++++ b/drivers/net/vmxnet3/vmxnet3_drv.c
+@@ -589,6 +589,7 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
+ if (dma_mapping_error(&adapter->pdev->dev,
+ rbi->dma_addr)) {
+ dev_kfree_skb_any(rbi->skb);
++ rbi->skb = NULL;
+ rq->stats.rx_buf_alloc_failure++;
+ break;
+ }
+@@ -613,6 +614,7 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
+ if (dma_mapping_error(&adapter->pdev->dev,
+ rbi->dma_addr)) {
+ put_page(rbi->page);
++ rbi->page = NULL;
+ rq->stats.rx_buf_alloc_failure++;
+ break;
+ }
+--
+2.35.1
+
diff --git a/queue-5.17/netfilter-flowtable-fix-excessive-hw-offload-attempt.patch b/queue-5.17/netfilter-flowtable-fix-excessive-hw-offload-attempt.patch
new file mode 100644
index 0000000..b7792ca
--- /dev/null
+++ b/queue-5.17/netfilter-flowtable-fix-excessive-hw-offload-attempt.patch
@@ -0,0 +1,41 @@
+From 79a6349a75068045678bc4a482e19dc70c458db1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 9 May 2022 14:26:13 +0200
+Subject: netfilter: flowtable: fix excessive hw offload attempts after failure
+
+From: Felix Fietkau <nbd@nbd.name>
+
+[ Upstream commit 396ef64113a8ba01c46315d67a99db8dde3eef51 ]
+
+If a flow cannot be offloaded, the code currently repeatedly tries again as
+quickly as possible, which can significantly increase system load.
+Fix this by limiting flow timeout update and hardware offload retry to once
+per second.
+
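+For illustration, a small userspace sketch of the rate-limiting idea
+(made-up names and a fake HZ; not the flowtable code): the stored
+timeout is only pushed forward, and the expensive offload retry only
+attempted, when the new value has advanced by more than one second.
+
+  #include <stdio.h>
+  #include <stdint.h>
+
+  #define HZ 100
+
+  static uint32_t stored_timeout;
+
+  static int refresh(uint32_t now)
+  {
+      uint32_t timeout = now + 30 * HZ;        /* new expiry candidate */
+
+      if (timeout - stored_timeout <= HZ)      /* updated less than 1s ago */
+          return 0;                            /* skip the offload retry */
+      stored_timeout = timeout;
+      return 1;                                /* retry hardware offload */
+  }
+
+  int main(void)
+  {
+      for (uint32_t now = 0; now <= 3 * HZ; now += HZ / 4)
+          printf("now=%u retry=%d\n", (unsigned)now, refresh(now));
+      return 0;
+  }
+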
+Fixes: c07531c01d82 ("netfilter: flowtable: Remove redundant hw refresh bit")
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nf_flow_table_core.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
+index b90eca7a2f22..52e7f94d2450 100644
+--- a/net/netfilter/nf_flow_table_core.c
++++ b/net/netfilter/nf_flow_table_core.c
+@@ -329,8 +329,10 @@ void flow_offload_refresh(struct nf_flowtable *flow_table,
+ u32 timeout;
+
+ timeout = nf_flowtable_time_stamp + flow_offload_get_timeout(flow);
+- if (READ_ONCE(flow->timeout) != timeout)
++ if (timeout - READ_ONCE(flow->timeout) > HZ)
+ WRITE_ONCE(flow->timeout, timeout);
++ else
++ return;
+
+ if (likely(!nf_flowtable_hw_offload(flow_table)))
+ return;
+--
+2.35.1
+
diff --git a/queue-5.17/netfilter-flowtable-fix-tcp-flow-teardown.patch b/queue-5.17/netfilter-flowtable-fix-tcp-flow-teardown.patch
new file mode 100644
index 0000000..62d4576e
--- /dev/null
+++ b/queue-5.17/netfilter-flowtable-fix-tcp-flow-teardown.patch
@@ -0,0 +1,162 @@
+From b27a108487a65557916c53708728ad3aa8d2dbe6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 17 May 2022 10:44:14 +0200
+Subject: netfilter: flowtable: fix TCP flow teardown
+
+From: Pablo Neira Ayuso <pablo@netfilter.org>
+
+[ Upstream commit e5eaac2beb54f0a16ff851125082d9faeb475572 ]
+
+This patch addresses three possible problems:
+
+1. ct gc may race to undo the timeout adjustment of the packet path, leaving
+ the conntrack entry in place with the internal offload timeout (one day).
+
+2. ct gc removes the ct because the IPS_OFFLOAD_BIT is not set and the CLOSE
+ timeout is reached before the flow offload del.
+
+3. tcp ct is always set to ESTABLISHED with a very long timeout
+   in flow offload teardown/delete even though the state might already be
+   CLOSED. Also, as a remark, we cannot assume that the FIN or RST packet
+   hits flow table teardown, as the packet might get bumped to the
+   slow path in nftables.
+
+This patch resets IPS_OFFLOAD_BIT from flow_offload_teardown(), so
+conntrack handles the tcp rst/fin packet which triggers the CLOSE/FIN
+state transition.
+
+Moreover, return the connection's ownership to conntrack upon teardown
+by clearing the offload flag and fixing the established timeout value.
+The flow table GC thread will asynchronously free the flow table and
+hardware offload entries.
+
+Before this patch, the IPS_OFFLOAD_BIT remained set for expired flows,
+which is also misleading since the flow is back on the classic conntrack
+path.
+
+If nf_ct_delete() removes the entry from the conntrack table, then it
+calls nf_ct_put() which decrements the refcnt. This is not a problem
+because the flowtable holds a reference to the conntrack object from
+flow_offload_alloc() path which is released via flow_offload_free().
+
+This patch also updates nft_flow_offload to skip packets in SYN_RECV
+state. Since we might miss or bump packets to the slow path, we do not know
+what will happen there while we are still in SYN_RECV; this patch
+postpones offload until the next packet, which also aligns with the
+existing behaviour in tc-ct.
+
+flow_offload_teardown() no longer resets the existing tcp state from
+flow_offload_fixup_tcp() to ESTABLISHED; packets bumped to the slow
+path might have already updated the state to CLOSE/FIN.
+
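+For illustration, a userspace sketch of the ownership hand-back (made-up
+states, numbers and field names; not the conntrack code): teardown
+clears the offload flag and derives the remaining timeout from whatever
+TCP state the entry is in now, instead of forcing ESTABLISHED.
+
+  #include <stdio.h>
+
+  enum tcp_state { EST, FIN_WAIT, CLOSE, NR_STATES };
+
+  static const unsigned int state_timeout[NR_STATES] = {
+      [EST] = 432000, [FIN_WAIT] = 120, [CLOSE] = 10,
+  };
+
+  struct ct_entry {
+      enum tcp_state state;
+      int offloaded;               /* stand-in for IPS_OFFLOAD_BIT */
+      unsigned int timeout;
+  };
+
+  static void teardown(struct ct_entry *ct)
+  {
+      ct->offloaded = 0;                       /* conntrack owns it again */
+      ct->timeout = state_timeout[ct->state];  /* honour the current state */
+  }
+
+  int main(void)
+  {
+      struct ct_entry ct = { .state = CLOSE, .offloaded = 1, .timeout = 86400 };
+
+      teardown(&ct);
+      printf("offloaded=%d timeout=%u\n", ct.offloaded, ct.timeout);
+      return 0;
+  }
+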
+Joint work with Oz and Sven.
+
+Fixes: 1e5b2471bcc4 ("netfilter: nf_flow_table: teardown flow timeout race")
+Signed-off-by: Oz Shlomo <ozsh@nvidia.com>
+Signed-off-by: Sven Auhagen <sven.auhagen@voleatech.de>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nf_flow_table_core.c | 33 +++++++-----------------------
+ net/netfilter/nft_flow_offload.c | 3 ++-
+ 2 files changed, 9 insertions(+), 27 deletions(-)
+
+diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
+index 52e7f94d2450..58f3f77b3eb2 100644
+--- a/net/netfilter/nf_flow_table_core.c
++++ b/net/netfilter/nf_flow_table_core.c
+@@ -173,12 +173,11 @@ EXPORT_SYMBOL_GPL(flow_offload_route_init);
+
+ static void flow_offload_fixup_tcp(struct ip_ct_tcp *tcp)
+ {
+- tcp->state = TCP_CONNTRACK_ESTABLISHED;
+ tcp->seen[0].td_maxwin = 0;
+ tcp->seen[1].td_maxwin = 0;
+ }
+
+-static void flow_offload_fixup_ct_timeout(struct nf_conn *ct)
++static void flow_offload_fixup_ct(struct nf_conn *ct)
+ {
+ struct net *net = nf_ct_net(ct);
+ int l4num = nf_ct_protonum(ct);
+@@ -187,7 +186,9 @@ static void flow_offload_fixup_ct_timeout(struct nf_conn *ct)
+ if (l4num == IPPROTO_TCP) {
+ struct nf_tcp_net *tn = nf_tcp_pernet(net);
+
+- timeout = tn->timeouts[TCP_CONNTRACK_ESTABLISHED];
++ flow_offload_fixup_tcp(&ct->proto.tcp);
++
++ timeout = tn->timeouts[ct->proto.tcp.state];
+ timeout -= tn->offload_timeout;
+ } else if (l4num == IPPROTO_UDP) {
+ struct nf_udp_net *tn = nf_udp_pernet(net);
+@@ -205,18 +206,6 @@ static void flow_offload_fixup_ct_timeout(struct nf_conn *ct)
+ WRITE_ONCE(ct->timeout, nfct_time_stamp + timeout);
+ }
+
+-static void flow_offload_fixup_ct_state(struct nf_conn *ct)
+-{
+- if (nf_ct_protonum(ct) == IPPROTO_TCP)
+- flow_offload_fixup_tcp(&ct->proto.tcp);
+-}
+-
+-static void flow_offload_fixup_ct(struct nf_conn *ct)
+-{
+- flow_offload_fixup_ct_state(ct);
+- flow_offload_fixup_ct_timeout(ct);
+-}
+-
+ static void flow_offload_route_release(struct flow_offload *flow)
+ {
+ nft_flow_dst_release(flow, FLOW_OFFLOAD_DIR_ORIGINAL);
+@@ -355,22 +344,14 @@ static void flow_offload_del(struct nf_flowtable *flow_table,
+ rhashtable_remove_fast(&flow_table->rhashtable,
+ &flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].node,
+ nf_flow_offload_rhash_params);
+-
+- clear_bit(IPS_OFFLOAD_BIT, &flow->ct->status);
+-
+- if (nf_flow_has_expired(flow))
+- flow_offload_fixup_ct(flow->ct);
+- else
+- flow_offload_fixup_ct_timeout(flow->ct);
+-
+ flow_offload_free(flow);
+ }
+
+ void flow_offload_teardown(struct flow_offload *flow)
+ {
++ clear_bit(IPS_OFFLOAD_BIT, &flow->ct->status);
+ set_bit(NF_FLOW_TEARDOWN, &flow->flags);
+-
+- flow_offload_fixup_ct_state(flow->ct);
++ flow_offload_fixup_ct(flow->ct);
+ }
+ EXPORT_SYMBOL_GPL(flow_offload_teardown);
+
+@@ -460,7 +441,7 @@ static void nf_flow_offload_gc_step(struct flow_offload *flow, void *data)
+ if (nf_flow_has_expired(flow) ||
+ nf_ct_is_dying(flow->ct) ||
+ nf_flow_has_stale_dst(flow))
+- set_bit(NF_FLOW_TEARDOWN, &flow->flags);
++ flow_offload_teardown(flow);
+
+ if (test_bit(NF_FLOW_TEARDOWN, &flow->flags)) {
+ if (test_bit(NF_FLOW_HW, &flow->flags)) {
+diff --git a/net/netfilter/nft_flow_offload.c b/net/netfilter/nft_flow_offload.c
+index 12145a80ef03..aac6db8680d4 100644
+--- a/net/netfilter/nft_flow_offload.c
++++ b/net/netfilter/nft_flow_offload.c
+@@ -298,7 +298,8 @@ static void nft_flow_offload_eval(const struct nft_expr *expr,
+ case IPPROTO_TCP:
+ tcph = skb_header_pointer(pkt->skb, nft_thoff(pkt),
+ sizeof(_tcph), &_tcph);
+- if (unlikely(!tcph || tcph->fin || tcph->rst))
++ if (unlikely(!tcph || tcph->fin || tcph->rst ||
++ !nf_conntrack_tcp_established(ct)))
+ goto out;
+ break;
+ case IPPROTO_UDP:
+--
+2.35.1
+
diff --git a/queue-5.17/netfilter-flowtable-move-dst_check-to-packet-path.patch b/queue-5.17/netfilter-flowtable-move-dst_check-to-packet-path.patch
new file mode 100644
index 0000000..0fa2645
--- /dev/null
+++ b/queue-5.17/netfilter-flowtable-move-dst_check-to-packet-path.patch
@@ -0,0 +1,110 @@
+From 222bf14fd42ef0d6de2f4da0ae33cf59a8f84b89 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 17 May 2022 12:55:30 +0200
+Subject: netfilter: flowtable: move dst_check to packet path
+
+From: Ritaro Takenaka <ritarot634@gmail.com>
+
+[ Upstream commit 2738d9d963bd1f06d5114c2b4fa5771a95703991 ]
+
+Fixes sporadic IPv6 packet loss when flow offloading is enabled.
+
+IPv6 route GC and flowtable GC are not synchronized.
+When dst_cache becomes stale and a packet passes through the flow before
+the flowtable GC tears it down, the packet can be dropped.
+So, it is necessary to check the dst every time in the packet path.
+
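+For illustration, a tiny sketch of the per-packet check (made-up names;
+not the flowtable code): each packet first validates the cached route
+and, if it is stale, tears the fast-path entry down and lets the packet
+take the slow path instead of being dropped.
+
+  #include <stdio.h>
+  #include <stdbool.h>
+
+  struct flow { bool route_valid; bool torn_down; };
+
+  static const char *handle_packet(struct flow *f)
+  {
+      if (!f->route_valid) {        /* stands in for dst_check()   */
+          f->torn_down = true;      /* flow_offload_teardown()     */
+          return "slow path";       /* let the stack re-route it   */
+      }
+      return "fast path";
+  }
+
+  int main(void)
+  {
+      struct flow stale = { .route_valid = false };
+      struct flow fresh = { .route_valid = true };
+
+      printf("%s / %s\n", handle_packet(&stale), handle_packet(&fresh));
+      return 0;
+  }
+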
+Fixes: 227e1e4d0d6c ("netfilter: nf_flowtable: skip device lookup from interface index")
+Signed-off-by: Ritaro Takenaka <ritarot634@gmail.com>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nf_flow_table_core.c | 23 +----------------------
+ net/netfilter/nf_flow_table_ip.c | 19 +++++++++++++++++++
+ 2 files changed, 20 insertions(+), 22 deletions(-)
+
+diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
+index de783c9094d7..9fb407084c50 100644
+--- a/net/netfilter/nf_flow_table_core.c
++++ b/net/netfilter/nf_flow_table_core.c
+@@ -415,32 +415,11 @@ nf_flow_table_iterate(struct nf_flowtable *flow_table,
+ return err;
+ }
+
+-static bool flow_offload_stale_dst(struct flow_offload_tuple *tuple)
+-{
+- struct dst_entry *dst;
+-
+- if (tuple->xmit_type == FLOW_OFFLOAD_XMIT_NEIGH ||
+- tuple->xmit_type == FLOW_OFFLOAD_XMIT_XFRM) {
+- dst = tuple->dst_cache;
+- if (!dst_check(dst, tuple->dst_cookie))
+- return true;
+- }
+-
+- return false;
+-}
+-
+-static bool nf_flow_has_stale_dst(struct flow_offload *flow)
+-{
+- return flow_offload_stale_dst(&flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple) ||
+- flow_offload_stale_dst(&flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple);
+-}
+-
+ static void nf_flow_offload_gc_step(struct nf_flowtable *flow_table,
+ struct flow_offload *flow, void *data)
+ {
+ if (nf_flow_has_expired(flow) ||
+- nf_ct_is_dying(flow->ct) ||
+- nf_flow_has_stale_dst(flow))
++ nf_ct_is_dying(flow->ct))
+ flow_offload_teardown(flow);
+
+ if (test_bit(NF_FLOW_TEARDOWN, &flow->flags)) {
+diff --git a/net/netfilter/nf_flow_table_ip.c b/net/netfilter/nf_flow_table_ip.c
+index 6257d87c3a56..28026467b54c 100644
+--- a/net/netfilter/nf_flow_table_ip.c
++++ b/net/netfilter/nf_flow_table_ip.c
+@@ -227,6 +227,15 @@ static bool nf_flow_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
+ return true;
+ }
+
++static inline bool nf_flow_dst_check(struct flow_offload_tuple *tuple)
++{
++ if (tuple->xmit_type != FLOW_OFFLOAD_XMIT_NEIGH &&
++ tuple->xmit_type != FLOW_OFFLOAD_XMIT_XFRM)
++ return true;
++
++ return dst_check(tuple->dst_cache, tuple->dst_cookie);
++}
++
+ static unsigned int nf_flow_xmit_xfrm(struct sk_buff *skb,
+ const struct nf_hook_state *state,
+ struct dst_entry *dst)
+@@ -346,6 +355,11 @@ nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
+ if (nf_flow_state_check(flow, iph->protocol, skb, thoff))
+ return NF_ACCEPT;
+
++ if (!nf_flow_dst_check(&tuplehash->tuple)) {
++ flow_offload_teardown(flow);
++ return NF_ACCEPT;
++ }
++
+ if (skb_try_make_writable(skb, thoff + hdrsize))
+ return NF_DROP;
+
+@@ -582,6 +596,11 @@ nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
+ if (nf_flow_state_check(flow, ip6h->nexthdr, skb, thoff))
+ return NF_ACCEPT;
+
++ if (!nf_flow_dst_check(&tuplehash->tuple)) {
++ flow_offload_teardown(flow);
++ return NF_ACCEPT;
++ }
++
+ if (skb_try_make_writable(skb, thoff + hdrsize))
+ return NF_DROP;
+
+--
+2.35.1
+
diff --git a/queue-5.17/netfilter-flowtable-pass-flowtable-to-nf_flow_table_.patch b/queue-5.17/netfilter-flowtable-pass-flowtable-to-nf_flow_table_.patch
new file mode 100644
index 0000000..99f27eb
--- /dev/null
+++ b/queue-5.17/netfilter-flowtable-pass-flowtable-to-nf_flow_table_.patch
@@ -0,0 +1,90 @@
+From 240f631beee3dc1c8f3fddab6e048a8c85018c40 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 18 Mar 2022 13:11:24 +0100
+Subject: netfilter: flowtable: pass flowtable to nf_flow_table_iterate()
+
+From: Pablo Neira Ayuso <pablo@netfilter.org>
+
+[ Upstream commit 217cff36e885627c41a14e803fc44f9cbc945767 ]
+
+The flowtable object is already passed as an argument to
+nf_flow_table_iterate(), so do not use the data pointer to pass the flowtable.
+
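+For illustration, a userspace sketch of the calling convention (made-up
+types; not the flowtable code): the iterator hands the table it walks to
+the callback as a typed argument, leaving the void *data slot free for
+actual user data.
+
+  #include <stdio.h>
+
+  struct table { const char *name; int items[3]; };
+
+  static void iterate(struct table *t,
+                      void (*iter)(struct table *t, int item, void *data),
+                      void *data)
+  {
+      for (int i = 0; i < 3; i++)
+          iter(t, t->items[i], data);
+  }
+
+  static void print_item(struct table *t, int item, void *data)
+  {
+      (void)data;                  /* no longer abused to carry the table */
+      printf("%s: %d\n", t->name, item);
+  }
+
+  int main(void)
+  {
+      struct table t = { "demo", { 1, 2, 3 } };
+
+      iterate(&t, print_item, NULL);
+      return 0;
+  }
+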
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nf_flow_table_core.c | 20 ++++++++++----------
+ 1 file changed, 10 insertions(+), 10 deletions(-)
+
+diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
+index 58f3f77b3eb2..de783c9094d7 100644
+--- a/net/netfilter/nf_flow_table_core.c
++++ b/net/netfilter/nf_flow_table_core.c
+@@ -382,7 +382,8 @@ EXPORT_SYMBOL_GPL(flow_offload_lookup);
+
+ static int
+ nf_flow_table_iterate(struct nf_flowtable *flow_table,
+- void (*iter)(struct flow_offload *flow, void *data),
++ void (*iter)(struct nf_flowtable *flowtable,
++ struct flow_offload *flow, void *data),
+ void *data)
+ {
+ struct flow_offload_tuple_rhash *tuplehash;
+@@ -406,7 +407,7 @@ nf_flow_table_iterate(struct nf_flowtable *flow_table,
+
+ flow = container_of(tuplehash, struct flow_offload, tuplehash[0]);
+
+- iter(flow, data);
++ iter(flow_table, flow, data);
+ }
+ rhashtable_walk_stop(&hti);
+ rhashtable_walk_exit(&hti);
+@@ -434,10 +435,9 @@ static bool nf_flow_has_stale_dst(struct flow_offload *flow)
+ flow_offload_stale_dst(&flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple);
+ }
+
+-static void nf_flow_offload_gc_step(struct flow_offload *flow, void *data)
++static void nf_flow_offload_gc_step(struct nf_flowtable *flow_table,
++ struct flow_offload *flow, void *data)
+ {
+- struct nf_flowtable *flow_table = data;
+-
+ if (nf_flow_has_expired(flow) ||
+ nf_ct_is_dying(flow->ct) ||
+ nf_flow_has_stale_dst(flow))
+@@ -462,7 +462,7 @@ static void nf_flow_offload_work_gc(struct work_struct *work)
+ struct nf_flowtable *flow_table;
+
+ flow_table = container_of(work, struct nf_flowtable, gc_work.work);
+- nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step, flow_table);
++ nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step, NULL);
+ queue_delayed_work(system_power_efficient_wq, &flow_table->gc_work, HZ);
+ }
+
+@@ -578,7 +578,8 @@ int nf_flow_table_init(struct nf_flowtable *flowtable)
+ }
+ EXPORT_SYMBOL_GPL(nf_flow_table_init);
+
+-static void nf_flow_table_do_cleanup(struct flow_offload *flow, void *data)
++static void nf_flow_table_do_cleanup(struct nf_flowtable *flow_table,
++ struct flow_offload *flow, void *data)
+ {
+ struct net_device *dev = data;
+
+@@ -620,11 +621,10 @@ void nf_flow_table_free(struct nf_flowtable *flow_table)
+
+ cancel_delayed_work_sync(&flow_table->gc_work);
+ nf_flow_table_iterate(flow_table, nf_flow_table_do_cleanup, NULL);
+- nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step, flow_table);
++ nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step, NULL);
+ nf_flow_table_offload_flush(flow_table);
+ if (nf_flowtable_hw_offload(flow_table))
+- nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step,
+- flow_table);
++ nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step, NULL);
+ rhashtable_destroy(&flow_table->rhashtable);
+ }
+ EXPORT_SYMBOL_GPL(nf_flow_table_free);
+--
+2.35.1
+
diff --git a/queue-5.17/netfilter-nft_flow_offload-fix-offload-with-pppoe-vl.patch b/queue-5.17/netfilter-nft_flow_offload-fix-offload-with-pppoe-vl.patch
new file mode 100644
index 0000000..f7bbc23
--- /dev/null
+++ b/queue-5.17/netfilter-nft_flow_offload-fix-offload-with-pppoe-vl.patch
@@ -0,0 +1,38 @@
+From 83ae80890b58a2ea0ed64d2c49749d100adb7830 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 9 May 2022 14:26:16 +0200
+Subject: netfilter: nft_flow_offload: fix offload with pppoe + vlan
+
+From: Felix Fietkau <nbd@nbd.name>
+
+[ Upstream commit 2456074935003b66c40f78df6adfc722435d43ea ]
+
+When running a combination of PPPoE on top of a VLAN, we need to set
+info->outdev to the PPPoE device, otherwise PPPoE encap is skipped
+during software offload.
+
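+For illustration, a minimal sketch of the "first device wins" rule while
+walking the forward path stack (made-up device names; not the nft code):
+
+  #include <stdio.h>
+
+  int main(void)
+  {
+      /* path stack as discovered, top-most encapsulation first */
+      const char *path[] = { "pppoe0", "vlan100", "eth0" };
+      const char *outdev = NULL;
+
+      for (int i = 0; i < 3; i++)
+          if (!outdev)              /* keep the first entry, do not overwrite */
+              outdev = path[i];
+
+      printf("outdev=%s\n", outdev);    /* pppoe0, so encap is not skipped */
+      return 0;
+  }
+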
+Fixes: 72efd585f714 ("netfilter: flowtable: add pppoe support")
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nft_flow_offload.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/net/netfilter/nft_flow_offload.c b/net/netfilter/nft_flow_offload.c
+index dd824193c920..12145a80ef03 100644
+--- a/net/netfilter/nft_flow_offload.c
++++ b/net/netfilter/nft_flow_offload.c
+@@ -123,7 +123,8 @@ static void nft_dev_path_info(const struct net_device_path_stack *stack,
+ info->indev = NULL;
+ break;
+ }
+- info->outdev = path->dev;
++ if (!info->outdev)
++ info->outdev = path->dev;
+ info->encap[info->num_encaps].id = path->encap.id;
+ info->encap[info->num_encaps].proto = path->encap.proto;
+ info->num_encaps++;
+--
+2.35.1
+
diff --git a/queue-5.17/netfilter-nft_flow_offload-skip-dst-neigh-lookup-for.patch b/queue-5.17/netfilter-nft_flow_offload-skip-dst-neigh-lookup-for.patch
new file mode 100644
index 0000000..738d083
--- /dev/null
+++ b/queue-5.17/netfilter-nft_flow_offload-skip-dst-neigh-lookup-for.patch
@@ -0,0 +1,78 @@
+From 029bf5f7fed2d19250a34a8e9ddbaa20871e267c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 9 May 2022 14:26:14 +0200
+Subject: netfilter: nft_flow_offload: skip dst neigh lookup for ppp devices
+
+From: Felix Fietkau <nbd@nbd.name>
+
+[ Upstream commit 45ca3e61999e9a30ca2b7cfbf9da8a9f8d13be31 ]
+
+The dst entry does not contain a valid hardware address, so skip the lookup
+in order to avoid running into errors here.
+The proper hardware address is filled in from nft_dev_path_info.
+
+Fixes: 72efd585f714 ("netfilter: flowtable: add pppoe support")
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nft_flow_offload.c | 22 +++++++++++++---------
+ 1 file changed, 13 insertions(+), 9 deletions(-)
+
+diff --git a/net/netfilter/nft_flow_offload.c b/net/netfilter/nft_flow_offload.c
+index 0af34ad41479..dd824193c920 100644
+--- a/net/netfilter/nft_flow_offload.c
++++ b/net/netfilter/nft_flow_offload.c
+@@ -36,6 +36,15 @@ static void nft_default_forward_path(struct nf_flow_route *route,
+ route->tuple[dir].xmit_type = nft_xmit_type(dst_cache);
+ }
+
++static bool nft_is_valid_ether_device(const struct net_device *dev)
++{
++ if (!dev || (dev->flags & IFF_LOOPBACK) || dev->type != ARPHRD_ETHER ||
++ dev->addr_len != ETH_ALEN || !is_valid_ether_addr(dev->dev_addr))
++ return false;
++
++ return true;
++}
++
+ static int nft_dev_fill_forward_path(const struct nf_flow_route *route,
+ const struct dst_entry *dst_cache,
+ const struct nf_conn *ct,
+@@ -47,6 +56,9 @@ static int nft_dev_fill_forward_path(const struct nf_flow_route *route,
+ struct neighbour *n;
+ u8 nud_state;
+
++ if (!nft_is_valid_ether_device(dev))
++ goto out;
++
+ n = dst_neigh_lookup(dst_cache, daddr);
+ if (!n)
+ return -1;
+@@ -60,6 +72,7 @@ static int nft_dev_fill_forward_path(const struct nf_flow_route *route,
+ if (!(nud_state & NUD_VALID))
+ return -1;
+
++out:
+ return dev_fill_forward_path(dev, ha, stack);
+ }
+
+@@ -78,15 +91,6 @@ struct nft_forward_info {
+ enum flow_offload_xmit_type xmit_type;
+ };
+
+-static bool nft_is_valid_ether_device(const struct net_device *dev)
+-{
+- if (!dev || (dev->flags & IFF_LOOPBACK) || dev->type != ARPHRD_ETHER ||
+- dev->addr_len != ETH_ALEN || !is_valid_ether_addr(dev->dev_addr))
+- return false;
+-
+- return true;
+-}
+-
+ static void nft_dev_path_info(const struct net_device_path_stack *stack,
+ struct nft_forward_info *info,
+ unsigned char *ha, struct nf_flowtable *flowtable)
+--
+2.35.1
+
diff --git a/queue-5.17/nfc-nci-fix-sleep-in-atomic-context-bugs-caused-by-n.patch b/queue-5.17/nfc-nci-fix-sleep-in-atomic-context-bugs-caused-by-n.patch
new file mode 100644
index 0000000..11d7453
--- /dev/null
+++ b/queue-5.17/nfc-nci-fix-sleep-in-atomic-context-bugs-caused-by-n.patch
@@ -0,0 +1,81 @@
+From bb20d4ba7e65492e8b1df076f3d2ecd30c4d6ffe Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 17 May 2022 09:25:30 +0800
+Subject: NFC: nci: fix sleep in atomic context bugs caused by nci_skb_alloc
+
+From: Duoming Zhou <duoming@zju.edu.cn>
+
+[ Upstream commit 23dd4581350d4ffa23d58976ec46408f8f4c1e16 ]
+
+There are sleep-in-atomic-context bugs when the request to the secure
+element of st-nci times out. The root cause is that nci_skb_alloc
+with the GFP_KERNEL parameter is called in st_nci_se_wt_timeout, which is
+a timer handler. The call paths that could trigger the bugs are shown below:
+
+ (interrupt context 1)
+st_nci_se_wt_timeout
+ nci_hci_send_event
+ nci_hci_send_data
+ nci_skb_alloc(..., GFP_KERNEL) //may sleep
+
+ (interrupt context 2)
+st_nci_se_wt_timeout
+ nci_hci_send_event
+ nci_hci_send_data
+ nci_send_data
+ nci_queue_tx_data_frags
+ nci_skb_alloc(..., GFP_KERNEL) //may sleep
+
+This patch changes the allocation mode of nci_skb_alloc from GFP_KERNEL to
+GFP_ATOMIC in order to prevent sleeping in atomic context. The GFP_ATOMIC
+flag makes the memory allocation usable in atomic context.
+
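+For illustration, a minimal kernel-module sketch of the rule being
+applied (made-up names; not the NCI code): a timer callback runs in
+softirq context, so any allocation made from it has to use GFP_ATOMIC.
+
+  #include <linux/module.h>
+  #include <linux/timer.h>
+  #include <linux/jiffies.h>
+  #include <linux/skbuff.h>
+
+  static struct timer_list demo_timer;
+
+  static void demo_timer_fn(struct timer_list *t)
+  {
+      /* atomic context: GFP_KERNEL could sleep, GFP_ATOMIC cannot */
+      struct sk_buff *skb = alloc_skb(64, GFP_ATOMIC);
+
+      if (skb)
+          kfree_skb(skb);
+  }
+
+  static int __init demo_init(void)
+  {
+      timer_setup(&demo_timer, demo_timer_fn, 0);
+      mod_timer(&demo_timer, jiffies + HZ);
+      return 0;
+  }
+
+  static void __exit demo_exit(void)
+  {
+      del_timer_sync(&demo_timer);
+  }
+
+  module_init(demo_init);
+  module_exit(demo_exit);
+  MODULE_LICENSE("GPL");
+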
+Fixes: ed06aeefdac3 ("nfc: st-nci: Rename st21nfcb to st-nci")
+Signed-off-by: Duoming Zhou <duoming@zju.edu.cn>
+Reviewed-by: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+Link: https://lore.kernel.org/r/20220517012530.75714-1-duoming@zju.edu.cn
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/nfc/nci/data.c | 2 +-
+ net/nfc/nci/hci.c | 4 ++--
+ 2 files changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/net/nfc/nci/data.c b/net/nfc/nci/data.c
+index 6055dc9a82aa..aa5e712adf07 100644
+--- a/net/nfc/nci/data.c
++++ b/net/nfc/nci/data.c
+@@ -118,7 +118,7 @@ static int nci_queue_tx_data_frags(struct nci_dev *ndev,
+
+ skb_frag = nci_skb_alloc(ndev,
+ (NCI_DATA_HDR_SIZE + frag_len),
+- GFP_KERNEL);
++ GFP_ATOMIC);
+ if (skb_frag == NULL) {
+ rc = -ENOMEM;
+ goto free_exit;
+diff --git a/net/nfc/nci/hci.c b/net/nfc/nci/hci.c
+index 19703a649b5a..78c4b6addf15 100644
+--- a/net/nfc/nci/hci.c
++++ b/net/nfc/nci/hci.c
+@@ -153,7 +153,7 @@ static int nci_hci_send_data(struct nci_dev *ndev, u8 pipe,
+
+ i = 0;
+ skb = nci_skb_alloc(ndev, conn_info->max_pkt_payload_len +
+- NCI_DATA_HDR_SIZE, GFP_KERNEL);
++ NCI_DATA_HDR_SIZE, GFP_ATOMIC);
+ if (!skb)
+ return -ENOMEM;
+
+@@ -184,7 +184,7 @@ static int nci_hci_send_data(struct nci_dev *ndev, u8 pipe,
+ if (i < data_len) {
+ skb = nci_skb_alloc(ndev,
+ conn_info->max_pkt_payload_len +
+- NCI_DATA_HDR_SIZE, GFP_KERNEL);
++ NCI_DATA_HDR_SIZE, GFP_ATOMIC);
+ if (!skb)
+ return -ENOMEM;
+
+--
+2.35.1
+
diff --git a/queue-5.17/nl80211-fix-locking-in-nl80211_set_tx_bitrate_mask.patch b/queue-5.17/nl80211-fix-locking-in-nl80211_set_tx_bitrate_mask.patch
new file mode 100644
index 0000000..7f4a457
--- /dev/null
+++ b/queue-5.17/nl80211-fix-locking-in-nl80211_set_tx_bitrate_mask.patch
@@ -0,0 +1,53 @@
+From b1a0e7dff325444744e3c2290991a7012803c407 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 6 May 2022 10:21:38 +0200
+Subject: nl80211: fix locking in nl80211_set_tx_bitrate_mask()
+
+From: Johannes Berg <johannes.berg@intel.com>
+
+[ Upstream commit f971e1887fdb3ab500c9bebf4b98f62d49a20655 ]
+
+This accesses the wdev's chandef etc., so cannot safely
+be used without holding the lock.
+
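+For illustration, a userspace sketch of the resulting pattern (a pthread
+mutex standing in for wdev_lock()/wdev_unlock(), made-up helpers): both
+the parse step, which reads lock-protected state, and the set step sit
+in one critical section, and error and success paths share one unlock.
+
+  #include <pthread.h>
+  #include <stdio.h>
+
+  static pthread_mutex_t wdev_lock = PTHREAD_MUTEX_INITIALIZER;
+
+  static int parse_mask(int *mask) { *mask = 0xff; return 0; }
+  static int set_mask(int mask)    { printf("mask=%#x\n", mask); return 0; }
+
+  static int set_tx_bitrate_mask(void)
+  {
+      int mask, err;
+
+      pthread_mutex_lock(&wdev_lock);
+      err = parse_mask(&mask);      /* needs the lock held */
+      if (err)
+          goto out;
+      err = set_mask(mask);
+  out:
+      pthread_mutex_unlock(&wdev_lock);
+      return err;
+  }
+
+  int main(void)
+  {
+      return set_tx_bitrate_mask();
+  }
+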
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Link: https://lore.kernel.org/r/20220506102136.06b7205419e6.I2a87c05fbd8bc5e565e84d190d4cfd2e92695a90@changeid
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/wireless/nl80211.c | 9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
+
+diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
+index 06a35f1bec23..0c20df052db3 100644
+--- a/net/wireless/nl80211.c
++++ b/net/wireless/nl80211.c
+@@ -11573,18 +11573,23 @@ static int nl80211_set_tx_bitrate_mask(struct sk_buff *skb,
+ struct cfg80211_bitrate_mask mask;
+ struct cfg80211_registered_device *rdev = info->user_ptr[0];
+ struct net_device *dev = info->user_ptr[1];
++ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ int err;
+
+ if (!rdev->ops->set_bitrate_mask)
+ return -EOPNOTSUPP;
+
++ wdev_lock(wdev);
+ err = nl80211_parse_tx_bitrate_mask(info, info->attrs,
+ NL80211_ATTR_TX_RATES, &mask,
+ dev, true);
+ if (err)
+- return err;
++ goto out;
+
+- return rdev_set_bitrate_mask(rdev, dev, NULL, &mask);
++ err = rdev_set_bitrate_mask(rdev, dev, NULL, &mask);
++out:
++ wdev_unlock(wdev);
++ return err;
+ }
+
+ static int nl80211_register_mgmt(struct sk_buff *skb, struct genl_info *info)
+--
+2.35.1
+
diff --git a/queue-5.17/nl80211-validate-s1g-channel-width.patch b/queue-5.17/nl80211-validate-s1g-channel-width.patch
new file mode 100644
index 0000000..d67f21d
--- /dev/null
+++ b/queue-5.17/nl80211-validate-s1g-channel-width.patch
@@ -0,0 +1,44 @@
+From e64d735f33e05d8f5d080d6e8582d5405eb3f1e7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 20 Apr 2022 04:13:21 +0000
+Subject: nl80211: validate S1G channel width
+
+From: Kieran Frewen <kieran.frewen@morsemicro.com>
+
+[ Upstream commit 5d087aa759eb82b8208411913f6c2158bd85abc0 ]
+
+Validate the S1G channel width input by the user to ensure it matches
+that of the requested channel.
+
+Signed-off-by: Kieran Frewen <kieran.frewen@morsemicro.com>
+Signed-off-by: Bassem Dawood <bassem@morsemicro.com>
+Link: https://lore.kernel.org/r/20220420041321.3788789-2-kieran.frewen@morsemicro.com
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/wireless/nl80211.c | 9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
+index dc171ca0d1b1..06a35f1bec23 100644
+--- a/net/wireless/nl80211.c
++++ b/net/wireless/nl80211.c
+@@ -3128,6 +3128,15 @@ int nl80211_parse_chandef(struct cfg80211_registered_device *rdev,
+ } else if (attrs[NL80211_ATTR_CHANNEL_WIDTH]) {
+ chandef->width =
+ nla_get_u32(attrs[NL80211_ATTR_CHANNEL_WIDTH]);
++ if (chandef->chan->band == NL80211_BAND_S1GHZ) {
++ /* User input error for channel width doesn't match channel */
++ if (chandef->width != ieee80211_s1g_channel_width(chandef->chan)) {
++ NL_SET_ERR_MSG_ATTR(extack,
++ attrs[NL80211_ATTR_CHANNEL_WIDTH],
++ "bad channel width");
++ return -EINVAL;
++ }
++ }
+ if (attrs[NL80211_ATTR_CENTER_FREQ1]) {
+ chandef->center_freq1 =
+ nla_get_u32(attrs[NL80211_ATTR_CENTER_FREQ1]);
+--
+2.35.1
+
diff --git a/queue-5.17/perf-bench-numa-address-compiler-error-on-s390.patch b/queue-5.17/perf-bench-numa-address-compiler-error-on-s390.patch
new file mode 100644
index 0000000..ae0b19c
--- /dev/null
+++ b/queue-5.17/perf-bench-numa-address-compiler-error-on-s390.patch
@@ -0,0 +1,67 @@
+From 859f6624f033d62feaec818a9d6212dd14703236 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 20 May 2022 10:11:58 +0200
+Subject: perf bench numa: Address compiler error on s390
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Thomas Richter <tmricht@linux.ibm.com>
+
+[ Upstream commit f8ac1c478424a9a14669b8cef7389b1e14e5229d ]
+
+The compilation on s390 results in this error:
+
+ # make DEBUG=y bench/numa.o
+ ...
+ bench/numa.c: In function ‘__bench_numa’:
+ bench/numa.c:1749:81: error: ‘%d’ directive output may be truncated
+ writing between 1 and 11 bytes into a region of size between
+ 10 and 20 [-Werror=format-truncation=]
+ 1749 | snprintf(tname, sizeof(tname), "process%d:thread%d", p, t);
+ ^~
+ ...
+ bench/numa.c:1749:64: note: directive argument in the range
+ [-2147483647, 2147483646]
+ ...
+ #
+
+The maximum length of the %d replacement is 11 characters because of the
+negative sign. Therefore extend the array by two more characters.
+
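+For reference, a standalone check of the sizing (hypothetical driver,
+not the perf code): "%d" can expand to 11 characters (sign plus ten
+digits), and "process%d:thread%d" contributes 14 literal characters, so
+the buffer needs 14 + 2 * 11 + 1 bytes including the terminating NUL.
+
+  #include <stdio.h>
+  #include <limits.h>
+
+  int main(void)
+  {
+      char tname[14 + 2 * 11 + 1];
+      int n = snprintf(tname, sizeof(tname), "process%d:thread%d",
+                       INT_MIN, INT_MIN);
+
+      printf("worst case needs %d bytes, buffer has %zu\n",
+             n + 1, sizeof(tname));
+      return 0;
+  }
+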
+Output after:
+
+ # make DEBUG=y bench/numa.o > /dev/null 2>&1; ll bench/numa.o
+ -rw-r--r-- 1 root root 418320 May 19 09:11 bench/numa.o
+ #
+
+Fixes: 3aff8ba0a4c9c919 ("perf bench numa: Avoid possible truncation when using snprintf()")
+Suggested-by: Namhyung Kim <namhyung@gmail.com>
+Signed-off-by: Thomas Richter <tmricht@linux.ibm.com>
+Cc: Heiko Carstens <hca@linux.ibm.com>
+Cc: Sumanth Korikkar <sumanthk@linux.ibm.com>
+Cc: Sven Schnelle <svens@linux.ibm.com>
+Cc: Vasily Gorbik <gor@linux.ibm.com>
+Link: https://lore.kernel.org/r/20220520081158.2990006-1-tmricht@linux.ibm.com
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/perf/bench/numa.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/tools/perf/bench/numa.c b/tools/perf/bench/numa.c
+index f2640179ada9..c2c81567afa5 100644
+--- a/tools/perf/bench/numa.c
++++ b/tools/perf/bench/numa.c
+@@ -1672,7 +1672,7 @@ static int __bench_numa(const char *name)
+ "GB/sec,", "total-speed", "GB/sec total speed");
+
+ if (g->p.show_details >= 2) {
+- char tname[14 + 2 * 10 + 1];
++ char tname[14 + 2 * 11 + 1];
+ struct thread_data *td;
+ for (p = 0; p < g->p.nr_proc; p++) {
+ for (t = 0; t < g->p.nr_threads; t++) {
+--
+2.35.1
+
diff --git a/queue-5.17/perf-build-fix-check-for-btf__load_from_kernel_by_id.patch b/queue-5.17/perf-build-fix-check-for-btf__load_from_kernel_by_id.patch
new file mode 100644
index 0000000..ce94b1f
--- /dev/null
+++ b/queue-5.17/perf-build-fix-check-for-btf__load_from_kernel_by_id.patch
@@ -0,0 +1,186 @@
+From 785d8d20e567a9e0137c49ab187fa998cb0082df Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 19 May 2022 21:25:12 -0300
+Subject: perf build: Fix check for btf__load_from_kernel_by_id() in libbpf
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Arnaldo Carvalho de Melo <acme@redhat.com>
+
+[ Upstream commit 0ae065a5d265bc5ada13e350015458e0c5e5c351 ]
+
+Avi Kivity reported a problem where the __weak
+btf__load_from_kernel_by_id() in tools/perf/util/bpf-event.c was being
+used and it called btf__get_from_id() in tools/lib/bpf/btf.c that in
+turn called back to btf__load_from_kernel_by_id(), resulting in an
+endless loop.
+
+Fix this by adding a feature test to check if
+btf__load_from_kernel_by_id() is available when building perf with
+LIBBPF_DYNAMIC=1, and if not, provide the fallback to the old
+btf__get_from_id(), which doesn't call back to btf__load_from_kernel_by_id()
+since at that time it didn't exist at all.
+
+Tested on Fedora 35 where we have libbpf-devel 0.4.0 with LIBBPF_DYNAMIC
+where we don't have btf__load_from_kernel_by_id() and thus its feature
+test fails, not defining HAVE_LIBBPF_BTF__LOAD_FROM_KERNEL_BY_ID:
+
+ $ cat /tmp/build/perf-urgent/feature/test-libbpf-btf__load_from_kernel_by_id.make.output
+ test-libbpf-btf__load_from_kernel_by_id.c: In function ‘main’:
+ test-libbpf-btf__load_from_kernel_by_id.c:6:16: error: implicit declaration of function ‘btf__load_from_kernel_by_id’ [-Werror=implicit-function-declaration]
+ 6 | return btf__load_from_kernel_by_id(20151128, NULL);
+ | ^~~~~~~~~~~~~~~~~~~~~~~~~~~
+ cc1: all warnings being treated as errors
+ $
+
+ $ nm /tmp/build/perf-urgent/perf | grep btf__load_from_kernel_by_id
+ 00000000005ba180 T btf__load_from_kernel_by_id
+ $
+
+ $ objdump --disassemble=btf__load_from_kernel_by_id -S /tmp/build/perf-urgent/perf
+
+ /tmp/build/perf-urgent/perf: file format elf64-x86-64
+ <SNIP>
+ 00000000005ba180 <btf__load_from_kernel_by_id>:
+ #include "record.h"
+ #include "util/synthetic-events.h"
+
+ #ifndef HAVE_LIBBPF_BTF__LOAD_FROM_KERNEL_BY_ID
+ struct btf *btf__load_from_kernel_by_id(__u32 id)
+ {
+ 5ba180: 55 push %rbp
+ 5ba181: 48 89 e5 mov %rsp,%rbp
+ 5ba184: 48 83 ec 10 sub $0x10,%rsp
+ 5ba188: 64 48 8b 04 25 28 00 mov %fs:0x28,%rax
+ 5ba18f: 00 00
+ 5ba191: 48 89 45 f8 mov %rax,-0x8(%rbp)
+ 5ba195: 31 c0 xor %eax,%eax
+ struct btf *btf;
+ #pragma GCC diagnostic push
+ #pragma GCC diagnostic ignored "-Wdeprecated-declarations"
+ int err = btf__get_from_id(id, &btf);
+ 5ba197: 48 8d 75 f0 lea -0x10(%rbp),%rsi
+ 5ba19b: e8 a0 57 e5 ff call 40f940 <btf__get_from_id@plt>
+ 5ba1a0: 89 c2 mov %eax,%edx
+ #pragma GCC diagnostic pop
+
+ return err ? ERR_PTR(err) : btf;
+ 5ba1a2: 48 98 cltq
+ 5ba1a4: 85 d2 test %edx,%edx
+ 5ba1a6: 48 0f 44 45 f0 cmove -0x10(%rbp),%rax
+ }
+ <SNIP>
+
+Fixes: 218e7b775d368f38 ("perf bpf: Provide a weak btf__load_from_kernel_by_id() for older libbpf versions")
+Reported-by: Avi Kivity <avi@scylladb.com>
+Link: https://lore.kernel.org/linux-perf-users/f0add43b-3de5-20c5-22c4-70aff4af959f@scylladb.com
+Cc: Adrian Hunter <adrian.hunter@intel.com>
+Cc: Ian Rogers <irogers@google.com>
+Cc: Jiri Olsa <jolsa@kernel.org>
+Cc: Namhyung Kim <namhyung@kernel.org>
+Link: https://lore.kernel.org/linux-perf-users/YobjjFOblY4Xvwo7@kernel.org
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/build/Makefile.feature | 1 +
+ tools/build/feature/Makefile | 4 ++++
+ .../feature/test-libbpf-btf__load_from_kernel_by_id.c | 7 +++++++
+ tools/perf/Makefile.config | 7 +++++++
+ tools/perf/util/bpf-event.c | 4 +++-
+ 5 files changed, 22 insertions(+), 1 deletion(-)
+ create mode 100644 tools/build/feature/test-libbpf-btf__load_from_kernel_by_id.c
+
+diff --git a/tools/build/Makefile.feature b/tools/build/Makefile.feature
+index ae61f464043a..c6a48d0ef9ff 100644
+--- a/tools/build/Makefile.feature
++++ b/tools/build/Makefile.feature
+@@ -98,6 +98,7 @@ FEATURE_TESTS_EXTRA := \
+ llvm-version \
+ clang \
+ libbpf \
++ libbpf-btf__load_from_kernel_by_id \
+ libpfm4 \
+ libdebuginfod \
+ clang-bpf-co-re
+diff --git a/tools/build/feature/Makefile b/tools/build/feature/Makefile
+index de66e1cc0734..cb4a2a4fa2e4 100644
+--- a/tools/build/feature/Makefile
++++ b/tools/build/feature/Makefile
+@@ -57,6 +57,7 @@ FILES= \
+ test-lzma.bin \
+ test-bpf.bin \
+ test-libbpf.bin \
++ test-libbpf-btf__load_from_kernel_by_id.bin \
+ test-get_cpuid.bin \
+ test-sdt.bin \
+ test-cxx.bin \
+@@ -287,6 +288,9 @@ $(OUTPUT)test-bpf.bin:
+ $(OUTPUT)test-libbpf.bin:
+ $(BUILD) -lbpf
+
++$(OUTPUT)test-libbpf-btf__load_from_kernel_by_id.bin:
++ $(BUILD) -lbpf
++
+ $(OUTPUT)test-sdt.bin:
+ $(BUILD)
+
+diff --git a/tools/build/feature/test-libbpf-btf__load_from_kernel_by_id.c b/tools/build/feature/test-libbpf-btf__load_from_kernel_by_id.c
+new file mode 100644
+index 000000000000..f7c084428735
+--- /dev/null
++++ b/tools/build/feature/test-libbpf-btf__load_from_kernel_by_id.c
+@@ -0,0 +1,7 @@
++// SPDX-License-Identifier: GPL-2.0
++#include <bpf/libbpf.h>
++
++int main(void)
++{
++ return btf__load_from_kernel_by_id(20151128, NULL);
++}
+diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
+index f3bf9297bcc0..1bd64e7404b9 100644
+--- a/tools/perf/Makefile.config
++++ b/tools/perf/Makefile.config
+@@ -553,9 +553,16 @@ ifndef NO_LIBELF
+ ifeq ($(feature-libbpf), 1)
+ EXTLIBS += -lbpf
+ $(call detected,CONFIG_LIBBPF_DYNAMIC)
++
++ $(call feature_check,libbpf-btf__load_from_kernel_by_id)
++ ifeq ($(feature-libbpf-btf__load_from_kernel_by_id), 1)
++ CFLAGS += -DHAVE_LIBBPF_BTF__LOAD_FROM_KERNEL_BY_ID
++ endif
+ else
+ dummy := $(error Error: No libbpf devel library found, please install libbpf-devel);
+ endif
++ else
++ CFLAGS += -DHAVE_LIBBPF_BTF__LOAD_FROM_KERNEL_BY_ID
+ endif
+ endif
+
+diff --git a/tools/perf/util/bpf-event.c b/tools/perf/util/bpf-event.c
+index a517eaa51eb3..65dfd2c70246 100644
+--- a/tools/perf/util/bpf-event.c
++++ b/tools/perf/util/bpf-event.c
+@@ -22,7 +22,8 @@
+ #include "record.h"
+ #include "util/synthetic-events.h"
+
+-struct btf * __weak btf__load_from_kernel_by_id(__u32 id)
++#ifndef HAVE_LIBBPF_BTF__LOAD_FROM_KERNEL_BY_ID
++struct btf *btf__load_from_kernel_by_id(__u32 id)
+ {
+ struct btf *btf;
+ #pragma GCC diagnostic push
+@@ -32,6 +33,7 @@ struct btf * __weak btf__load_from_kernel_by_id(__u32 id)
+
+ return err ? ERR_PTR(err) : btf;
+ }
++#endif
+
+ struct bpf_program * __weak
+ bpf_object__next_program(const struct bpf_object *obj, struct bpf_program *prev)
+--
+2.35.1
+
diff --git a/queue-5.17/perf-regs-x86-fix-arch__intr_reg_mask-for-the-hybrid.patch b/queue-5.17/perf-regs-x86-fix-arch__intr_reg_mask-for-the-hybrid.patch
new file mode 100644
index 0000000..366fee8
--- /dev/null
+++ b/queue-5.17/perf-regs-x86-fix-arch__intr_reg_mask-for-the-hybrid.patch
@@ -0,0 +1,90 @@
+From 2bcd703c4666a75d9186d11e8317b2aa41e3fa72 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 18 May 2022 07:51:25 -0700
+Subject: perf regs x86: Fix arch__intr_reg_mask() for the hybrid platform
+
+From: Kan Liang <kan.liang@linux.intel.com>
+
+[ Upstream commit 01b28e4a58152e8906eeb5f1b55a0c404c48c7c8 ]
+
+The X86 specific arch__intr_reg_mask() is to check whether the kernel
+and hardware can collect XMM registers. But it doesn't work on some
+hybrid platform.
+
+Without the patch on ADL-N:
+
+ $ perf record -I?
+ available registers: AX BX CX DX SI DI BP SP IP FLAGS CS SS R8 R9 R10
+ R11 R12 R13 R14 R15
+
+The config of the test event doesn't contain the PMU information. The
+kernel may fail to initialize it on the correct hybrid PMU and return
+the wrong non-supported information.
+
+Add the PMU information into the config for the hybrid platform. The
+same register set is supported among different hybrid PMUs. Checking
+the first available one is good enough.
+
+With the patch on ADL-N:
+
+ $ perf record -I?
+ available registers: AX BX CX DX SI DI BP SP IP FLAGS CS SS R8 R9 R10
+ R11 R12 R13 R14 R15 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 XMM9
+ XMM10 XMM11 XMM12 XMM13 XMM14 XMM15
+
+Fixes: 6466ec14aaf44ff1 ("perf regs x86: Add X86 specific arch__intr_reg_mask()")
+Reported-by: Ammy Yi <ammy.yi@intel.com>
+Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
+Acked-by: Ian Rogers <irogers@google.com>
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: Jiri Olsa <jolsa@kernel.org>
+Cc: Kan Liang <kan.liang@linux.intel.com>
+Cc: Namhyung Kim <namhyung@kernel.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Xing Zhengjun <zhengjun.xing@linux.intel.com>
+Link: https://lore.kernel.org/r/20220518145125.1494156-1-kan.liang@linux.intel.com
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/perf/arch/x86/util/perf_regs.c | 12 ++++++++++++
+ 1 file changed, 12 insertions(+)
+
+diff --git a/tools/perf/arch/x86/util/perf_regs.c b/tools/perf/arch/x86/util/perf_regs.c
+index 207c56805c55..0ed177991ad0 100644
+--- a/tools/perf/arch/x86/util/perf_regs.c
++++ b/tools/perf/arch/x86/util/perf_regs.c
+@@ -9,6 +9,8 @@
+ #include "../../../util/perf_regs.h"
+ #include "../../../util/debug.h"
+ #include "../../../util/event.h"
++#include "../../../util/pmu.h"
++#include "../../../util/pmu-hybrid.h"
+
+ const struct sample_reg sample_reg_masks[] = {
+ SMPL_REG(AX, PERF_REG_X86_AX),
+@@ -284,12 +286,22 @@ uint64_t arch__intr_reg_mask(void)
+ .disabled = 1,
+ .exclude_kernel = 1,
+ };
++ struct perf_pmu *pmu;
+ int fd;
+ /*
+ * In an unnamed union, init it here to build on older gcc versions
+ */
+ attr.sample_period = 1;
+
++ if (perf_pmu__has_hybrid()) {
++ /*
++ * The same register set is supported among different hybrid PMUs.
++ * Only check the first available one.
++ */
++ pmu = list_first_entry(&perf_pmu__hybrid_pmus, typeof(*pmu), hybrid_list);
++ attr.config |= (__u64)pmu->type << PERF_PMU_TYPE_SHIFT;
++ }
++
+ event_attr_init(&attr);
+
+ fd = sys_perf_event_open(&attr, 0, -1, -1, 0);
+--
+2.35.1
+
diff --git a/queue-5.17/perf-stat-fix-and-validate-cpu-map-inputs-in-synthet.patch b/queue-5.17/perf-stat-fix-and-validate-cpu-map-inputs-in-synthet.patch
new file mode 100644
index 0000000..854688a
--- /dev/null
+++ b/queue-5.17/perf-stat-fix-and-validate-cpu-map-inputs-in-synthet.patch
@@ -0,0 +1,93 @@
+From f473626d9825cb552ac80d519489a96c1040d552 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 18 May 2022 20:20:01 -0700
+Subject: perf stat: Fix and validate CPU map inputs in synthetic
+ PERF_RECORD_STAT events
+
+From: Ian Rogers <irogers@google.com>
+
+[ Upstream commit 92d579ea3279aa87392b862df5810f0a7e30fcc6 ]
+
+Stat events can come from disk and so need a degree of validation. They
+contain a CPU which needs looking up via CPU map to access a counter.
+
+Add the CPU to index translation, alongside validity checking.
+
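+For illustration, a userspace sketch of the added validation (made-up
+names and a toy CPU map; not the perf code): the CPU number carried by
+the event is translated to a map index first, and an unknown CPU is
+rejected instead of being used to index the counts array directly.
+
+  #include <stdio.h>
+
+  static const int cpu_map[] = { 0, 2, 4, 6 };  /* CPUs the event runs on */
+  static long counts[4];
+
+  static int cpu_map_idx(int cpu)
+  {
+      for (int i = 0; i < 4; i++)
+          if (cpu_map[i] == cpu)
+              return i;
+      return -1;                                /* CPU not in the map */
+  }
+
+  static int process_stat_event(int cpu, long val)
+  {
+      int idx = cpu_map_idx(cpu);
+
+      if (idx == -1) {
+          fprintf(stderr, "Invalid CPU %d for stat event\n", cpu);
+          return -1;
+      }
+      counts[idx] = val;
+      return 0;
+  }
+
+  int main(void)
+  {
+      process_stat_event(4, 123);   /* ok: maps to index 2 */
+      process_stat_event(3, 456);   /* rejected: CPU 3 is not mapped */
+      printf("counts[2]=%ld\n", counts[2]);
+      return 0;
+  }
+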
+Discussion thread:
+
+ https://lore.kernel.org/linux-perf-users/CAP-5=fWQR=sCuiSMktvUtcbOLidEpUJLCybVF6=BRvORcDOq+g@mail.gmail.com/
+
+Fixes: 7ac0089d138f80dc ("perf evsel: Pass cpu not cpu map index to synthesize")
+Reported-by: Michael Petlan <mpetlan@redhat.com>
+Suggested-by: Michael Petlan <mpetlan@redhat.com>
+Signed-off-by: Ian Rogers <irogers@google.com>
+Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Cc: Alexei Starovoitov <ast@kernel.org>
+Cc: Andrii Nakryiko <andrii@kernel.org>
+Cc: Daniel Borkmann <daniel@iogearbox.net>
+Cc: Dave Marchevsky <davemarchevsky@fb.com>
+Cc: Ian Rogers <irogers@google.com>
+Cc: James Clark <james.clark@arm.com>
+Cc: Jiri Olsa <jolsa@kernel.org>
+Cc: John Fastabend <john.fastabend@gmail.com>
+Cc: Kan Liang <kan.liang@linux.intel.com>
+Cc: KP Singh <kpsingh@kernel.org>
+Cc: Lv Ruyi <lv.ruyi@zte.com.cn>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Martin KaFai Lau <kafai@fb.com>
+Cc: Michael Petlan <mpetlan@redhat.com>
+Cc: Namhyung Kim <namhyung@kernel.org>
+Cc: netdev@vger.kernel.org
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Quentin Monnet <quentin@isovalent.com>
+Cc: Song Liu <songliubraving@fb.com>
+Cc: Stephane Eranian <eranian@google.com>
+Cc: Xing Zhengjun <zhengjun.xing@linux.intel.com>
+Cc: Yonghong Song <yhs@fb.com>
+Link: http://lore.kernel.org/lkml/20220519032005.1273691-2-irogers@google.com
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/perf/util/stat.c | 17 ++++++++++++++---
+ 1 file changed, 14 insertions(+), 3 deletions(-)
+
+diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c
+index ee6f03481215..9c230b908b76 100644
+--- a/tools/perf/util/stat.c
++++ b/tools/perf/util/stat.c
+@@ -471,9 +471,10 @@ int perf_stat_process_counter(struct perf_stat_config *config,
+ int perf_event__process_stat_event(struct perf_session *session,
+ union perf_event *event)
+ {
+- struct perf_counts_values count;
++ struct perf_counts_values count, *ptr;
+ struct perf_record_stat *st = &event->stat;
+ struct evsel *counter;
++ int cpu_map_idx;
+
+ count.val = st->val;
+ count.ena = st->ena;
+@@ -484,8 +485,18 @@ int perf_event__process_stat_event(struct perf_session *session,
+ pr_err("Failed to resolve counter for stat event.\n");
+ return -EINVAL;
+ }
+-
+- *perf_counts(counter->counts, st->cpu, st->thread) = count;
++ cpu_map_idx = perf_cpu_map__idx(evsel__cpus(counter), (struct perf_cpu){.cpu = st->cpu});
++ if (cpu_map_idx == -1) {
++ pr_err("Invalid CPU %d for event %s.\n", st->cpu, evsel__name(counter));
++ return -EINVAL;
++ }
++ ptr = perf_counts(counter->counts, cpu_map_idx, st->thread);
++ if (ptr == NULL) {
++ pr_err("Failed to find perf count for CPU %d thread %d on event %s.\n",
++ st->cpu, st->thread, evsel__name(counter));
++ return -EINVAL;
++ }
++ *ptr = count;
+ counter->supported = true;
+ return 0;
+ }
+--
+2.35.1
+
diff --git a/queue-5.17/perf-test-bpf-skip-test-if-clang-is-not-present.patch b/queue-5.17/perf-test-bpf-skip-test-if-clang-is-not-present.patch
new file mode 100644
index 0000000..b3992a8
--- /dev/null
+++ b/queue-5.17/perf-test-bpf-skip-test-if-clang-is-not-present.patch
@@ -0,0 +1,127 @@
+From 4edaac11d7cc4bb9243e80bb13c7ff50eb7e1c2c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 11 May 2022 17:24:38 +0530
+Subject: perf test bpf: Skip test if clang is not present
+
+From: Athira Rajeev <atrajeev@linux.vnet.ibm.com>
+
+[ Upstream commit 8994e97be3eb3c3a7b59d6223018ffab8c272e2d ]
+
+The perf BPF filter test fails in environments where "clang" is not
+installed.
+
+Test failure logs:
+
+<<>>
+ 42: BPF filter :
+ 42.1: Basic BPF filtering : Skip
+ 42.2: BPF pinning : FAILED!
+ 42.3: BPF prologue generation : FAILED!
+<<>>
+
+Enabling the verbose option provided debug logs which say clang/llvm needs
+to be installed. Snippet of verbose logs:
+
+<<>>
+ 42.2: BPF pinning :
+ --- start ---
+test child forked, pid 61423
+ERROR: unable to find clang.
+Hint: Try to install latest clang/llvm to support BPF.
+ Check your $PATH
+
+<<logs_here>>
+
+Failed to compile test case: 'Basic BPF llvm compile'
+Unable to get BPF object, fix kbuild first
+test child finished with -1
+ ---- end ----
+BPF filter subtest 2: FAILED!
+<<>>
+
+Here the subtests "BPF pinning" and "BPF prologue generation" failed and
+the logs show clang/llvm is needed. After installing clang, the testcase
+passes.
+
+Reason why the subtest failure happens even though the logs have proper
+debug information:
+
+The main function __test__bpf calls test_llvm__fetch_bpf_obj,
+passing the 4th argument as true (the 4th argument maps to the parameter
+"force" in test_llvm__fetch_bpf_obj). But this will cause
+test_llvm__fetch_bpf_obj to skip the check for clang/llvm.
+
+Snippet of code part which checks for clang based on
+parameter "force" in test_llvm__fetch_bpf_obj:
+
+<<>>
+if (!force && (!llvm_param.user_set_param &&
+<<>>
+
+Since force is set to "true", the test won't get skipped and fails to
+compile the test case. The BPF code compilation needs clang, so pass the
+fourth argument as "false" and also skip the test if the reason for the
+return is "TEST_SKIP".
+
+After the patch:
+
+<<>>
+ 42: BPF filter :
+ 42.1: Basic BPF filtering : Skip
+ 42.2: BPF pinning : Skip
+ 42.3: BPF prologue generation : Skip
+<<>>
+
+Fixes: ba1fae431e74bb42 ("perf test: Add 'perf test BPF'")
+Reviewed-by: Kajol Jain <kjain@linux.ibm.com>
+Signed-off-by: Athira Jajeev <atrajeev@linux.vnet.ibm.com>
+Acked-by: Ian Rogers <irogers@google.com>
+Cc: Disha Goel <disgoel@linux.vnet.ibm.com>
+Cc: Jiri Olsa <jolsa@kernel.org>
+Cc: linuxppc-dev@lists.ozlabs.org
+Cc: Madhavan Srinivasan <maddy@linux.vnet.ibm.com>
+Cc: Michael Ellerman <mpe@ellerman.id.au>
+Cc: Nageswara R Sastry <rnsastry@linux.ibm.com>
+Cc: Wang Nan <wangnan0@huawei.com>
+Link: https://lore.kernel.org/r/20220511115438.84032-1-atrajeev@linux.vnet.ibm.com
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/perf/tests/bpf.c | 10 ++++++----
+ 1 file changed, 6 insertions(+), 4 deletions(-)
+
+diff --git a/tools/perf/tests/bpf.c b/tools/perf/tests/bpf.c
+index 573490530194..592ab02d5ba3 100644
+--- a/tools/perf/tests/bpf.c
++++ b/tools/perf/tests/bpf.c
+@@ -222,11 +222,11 @@ static int __test__bpf(int idx)
+
+ ret = test_llvm__fetch_bpf_obj(&obj_buf, &obj_buf_sz,
+ bpf_testcase_table[idx].prog_id,
+- true, NULL);
++ false, NULL);
+ if (ret != TEST_OK || !obj_buf || !obj_buf_sz) {
+ pr_debug("Unable to get BPF object, %s\n",
+ bpf_testcase_table[idx].msg_compile_fail);
+- if (idx == 0)
++ if ((idx == 0) || (ret == TEST_SKIP))
+ return TEST_SKIP;
+ else
+ return TEST_FAIL;
+@@ -370,9 +370,11 @@ static int test__bpf_prologue_test(struct test_suite *test __maybe_unused,
+ static struct test_case bpf_tests[] = {
+ #ifdef HAVE_LIBBPF_SUPPORT
+ TEST_CASE("Basic BPF filtering", basic_bpf_test),
+- TEST_CASE("BPF pinning", bpf_pinning),
++ TEST_CASE_REASON("BPF pinning", bpf_pinning,
++ "clang isn't installed or environment missing BPF support"),
+ #ifdef HAVE_BPF_PROLOGUE
+- TEST_CASE("BPF prologue generation", bpf_prologue_test),
++ TEST_CASE_REASON("BPF prologue generation", bpf_prologue_test,
++ "clang isn't installed or environment missing BPF support"),
+ #else
+ TEST_CASE_REASON("BPF prologue generation", bpf_prologue_test, "not compiled in"),
+ #endif
+--
+2.35.1
+
diff --git a/queue-5.17/perf-test-fix-all-pmu-test-to-skip-hv_24x7-hv_gpci-t.patch b/queue-5.17/perf-test-fix-all-pmu-test-to-skip-hv_24x7-hv_gpci-t.patch
new file mode 100644
index 0000000..ed78127
--- /dev/null
+++ b/queue-5.17/perf-test-fix-all-pmu-test-to-skip-hv_24x7-hv_gpci-t.patch
@@ -0,0 +1,66 @@
+From 773a7de040f954ab5165d8aca89d8a7047b00410 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 20 May 2022 15:42:36 +0530
+Subject: perf test: Fix "all PMU test" to skip hv_24x7/hv_gpci tests on
+ powerpc
+
+From: Athira Rajeev <atrajeev@linux.vnet.ibm.com>
+
+[ Upstream commit 451ed8058c69a3fee29fa9e2967a4e22a221fe75 ]
+
+"perf all PMU test" picks the input events from "perf list --raw-dump
+pmu" list and runs "perf stat -e" for each of the event in the list. In
+case of powerpc, the PowerVM environment supports events from hv_24x7
+and hv_gpci PMU which is of example format like below:
+
+- hv_24x7/CPM_ADJUNCT_INST,domain=?,core=?/
+- hv_gpci/event,partition_id=?/
+
+The value for "?" needs to be filled in depending on system and
+respective event. CPM_ADJUNCT_INST needs have core value and domain
+value. hv_gpci event needs partition_id. Similarly, there are other
+events for hv_24x7 and hv_gpci having "?" in event format. Hence skip
+these events on powerpc platform since values like partition_id, domain
+is specific to system and event.
+
+Fixes: 3d5ac9effcc640d5 ("perf test: Workload test of all PMUs")
+Signed-off-by: Athira Jajeev <atrajeev@linux.vnet.ibm.com>
+Acked-by: Ian Rogers <irogers@google.com>
+Cc: Disha Goel <disgoel@linux.vnet.ibm.com>
+Cc: Jiri Olsa <jolsa@kernel.org>
+Cc: Kajol Jain <kjain@linux.ibm.com>
+Cc: linuxppc-dev@lists.ozlabs.org
+Cc: Madhavan Srinivasan <maddy@linux.vnet.ibm.com>
+Cc: Michael Ellerman <mpe@ellerman.id.au>
+Cc: Nageswara R Sastry <rnsastry@linux.ibm.com>
+Link: https://lore.kernel.org/r/20220520101236.17249-1-atrajeev@linux.vnet.ibm.com
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/perf/tests/shell/stat_all_pmu.sh | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+diff --git a/tools/perf/tests/shell/stat_all_pmu.sh b/tools/perf/tests/shell/stat_all_pmu.sh
+index b30dba455f36..9c9ef33e0b3c 100755
+--- a/tools/perf/tests/shell/stat_all_pmu.sh
++++ b/tools/perf/tests/shell/stat_all_pmu.sh
+@@ -5,6 +5,16 @@
+ set -e
+
+ for p in $(perf list --raw-dump pmu); do
++ # In powerpc, skip the events for hv_24x7 and hv_gpci.
++ # These events needs input values to be filled in for
++ # core, chip, partition id based on system.
++ # Example: hv_24x7/CPM_ADJUNCT_INST,domain=?,core=?/
++ # hv_gpci/event,partition_id=?/
++ # Hence skip these events for ppc.
++ if echo "$p" |grep -Eq 'hv_24x7|hv_gpci' ; then
++ echo "Skipping: Event '$p' in powerpc"
++ continue
++ fi
+ echo "Testing $p"
+ result=$(perf stat -e "$p" true 2>&1)
+ if ! echo "$result" | grep -q "$p" && ! echo "$result" | grep -q "<not supported>" ; then
+--
+2.35.1
+
diff --git a/queue-5.17/pinctrl-mediatek-mt8365-fix-ies-control-pins.patch b/queue-5.17/pinctrl-mediatek-mt8365-fix-ies-control-pins.patch
new file mode 100644
index 0000000..66b85be
--- /dev/null
+++ b/queue-5.17/pinctrl-mediatek-mt8365-fix-ies-control-pins.patch
@@ -0,0 +1,45 @@
+From 859bc822a1eda18be1231b757e4973fa3f80a575 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 26 Apr 2022 14:57:14 +0200
+Subject: pinctrl: mediatek: mt8365: fix IES control pins
+
+From: Mattijs Korpershoek <mkorpershoek@baylibre.com>
+
+[ Upstream commit f680058f406863b55ac226d1c157701939c63db4 ]
+
+IES26 (BIT 16 of IES1_CFG_ADDR) controls the following pads:
+
+- PAD_I2S_DATA_IN (GPIO114)
+- PAD_I2S_LRCK (GPIO115)
+- PAD_I2S_BCK (GPIO116)
+
+The pinctrl table is wrong since it lists pins 114 to 112.
+
+Update the table with the correct values.
+
+Fixes: e94d8b6fb83a ("pinctrl: mediatek: add support for mt8365 SoC")
+Reported-by: Youngmin Han <Youngmin.Han@geappliances.com>
+Signed-off-by: Mattijs Korpershoek <mkorpershoek@baylibre.com>
+Link: https://lore.kernel.org/r/20220426125714.298907-1-mkorpershoek@baylibre.com
+Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pinctrl/mediatek/pinctrl-mt8365.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/pinctrl/mediatek/pinctrl-mt8365.c b/drivers/pinctrl/mediatek/pinctrl-mt8365.c
+index 79b1fee5a1eb..ddee0db72d26 100644
+--- a/drivers/pinctrl/mediatek/pinctrl-mt8365.c
++++ b/drivers/pinctrl/mediatek/pinctrl-mt8365.c
+@@ -259,7 +259,7 @@ static const struct mtk_pin_ies_smt_set mt8365_ies_set[] = {
+ MTK_PIN_IES_SMT_SPEC(104, 104, 0x420, 13),
+ MTK_PIN_IES_SMT_SPEC(105, 109, 0x420, 14),
+ MTK_PIN_IES_SMT_SPEC(110, 113, 0x420, 15),
+- MTK_PIN_IES_SMT_SPEC(114, 112, 0x420, 16),
++ MTK_PIN_IES_SMT_SPEC(114, 116, 0x420, 16),
+ MTK_PIN_IES_SMT_SPEC(117, 119, 0x420, 17),
+ MTK_PIN_IES_SMT_SPEC(120, 122, 0x420, 18),
+ MTK_PIN_IES_SMT_SPEC(123, 125, 0x420, 19),
+--
+2.35.1
+
diff --git a/queue-5.17/pinctrl-ocelot-fix-for-lan966x-alt-mode.patch b/queue-5.17/pinctrl-ocelot-fix-for-lan966x-alt-mode.patch
new file mode 100644
index 0000000..9d53778
--- /dev/null
+++ b/queue-5.17/pinctrl-ocelot-fix-for-lan966x-alt-mode.patch
@@ -0,0 +1,54 @@
+From 44b1774037c39c63dafae730fb688482f3d9885f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 13 Apr 2022 21:29:18 +0200
+Subject: pinctrl: ocelot: Fix for lan966x alt mode
+
+From: Horatiu Vultur <horatiu.vultur@microchip.com>
+
+[ Upstream commit d3683eeb9d2b4aa5256f830721655ef2ee97e324 ]
+
+For lan966x, the GPIO 35 has the wrong function for alternate mode 2.
+The mode is not none but is PTP sync.
+
+Fixes: 531d6ab36571c2 ("pinctrl: ocelot: Extend support for lan966x")
+Signed-off-by: Horatiu Vultur <horatiu.vultur@microchip.com>
+Reviewed-by: Kavyasree Kotagiri <kavyasree.kotagiri@microchip.com>
+Link: https://lore.kernel.org/r/20220413192918.3777234-1-horatiu.vultur@microchip.com
+Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pinctrl/pinctrl-ocelot.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/pinctrl/pinctrl-ocelot.c b/drivers/pinctrl/pinctrl-ocelot.c
+index 370459243007..61e3844cddbf 100644
+--- a/drivers/pinctrl/pinctrl-ocelot.c
++++ b/drivers/pinctrl/pinctrl-ocelot.c
+@@ -129,6 +129,7 @@ enum {
+ FUNC_PTP1,
+ FUNC_PTP2,
+ FUNC_PTP3,
++ FUNC_PTPSYNC_0,
+ FUNC_PTPSYNC_1,
+ FUNC_PTPSYNC_2,
+ FUNC_PTPSYNC_3,
+@@ -252,6 +253,7 @@ static const char *const ocelot_function_names[] = {
+ [FUNC_PTP1] = "ptp1",
+ [FUNC_PTP2] = "ptp2",
+ [FUNC_PTP3] = "ptp3",
++ [FUNC_PTPSYNC_0] = "ptpsync_0",
+ [FUNC_PTPSYNC_1] = "ptpsync_1",
+ [FUNC_PTPSYNC_2] = "ptpsync_2",
+ [FUNC_PTPSYNC_3] = "ptpsync_3",
+@@ -891,7 +893,7 @@ LAN966X_P(31, GPIO, FC3_c, CAN1, NONE, OB_TRG, RECO_b, NON
+ LAN966X_P(32, GPIO, FC3_c, NONE, SGPIO_a, NONE, MIIM_Sa, NONE, R);
+ LAN966X_P(33, GPIO, FC1_b, NONE, SGPIO_a, NONE, MIIM_Sa, MIIM_b, R);
+ LAN966X_P(34, GPIO, FC1_b, NONE, SGPIO_a, NONE, MIIM_Sa, MIIM_b, R);
+-LAN966X_P(35, GPIO, FC1_b, NONE, SGPIO_a, CAN0_b, NONE, NONE, R);
++LAN966X_P(35, GPIO, FC1_b, PTPSYNC_0, SGPIO_a, CAN0_b, NONE, NONE, R);
+ LAN966X_P(36, GPIO, NONE, PTPSYNC_1, NONE, CAN0_b, NONE, NONE, R);
+ LAN966X_P(37, GPIO, FC_SHRD0, PTPSYNC_2, TWI_SLC_GATE_AD, NONE, NONE, NONE, R);
+ LAN966X_P(38, GPIO, NONE, PTPSYNC_3, NONE, NONE, NONE, NONE, R);
+--
+2.35.1
+
diff --git a/queue-5.17/pinctrl-pinctrl-aspeed-g6-remove-fwqspid-group-in-pi.patch b/queue-5.17/pinctrl-pinctrl-aspeed-g6-remove-fwqspid-group-in-pi.patch
new file mode 100644
index 0000000..f7466d6
--- /dev/null
+++ b/queue-5.17/pinctrl-pinctrl-aspeed-g6-remove-fwqspid-group-in-pi.patch
@@ -0,0 +1,71 @@
+From 8fb23352e0542d28b0b5799f154e488b80ae2400 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 29 Mar 2022 10:39:27 -0700
+Subject: pinctrl: pinctrl-aspeed-g6: remove FWQSPID group in pinctrl
+
+From: Jae Hyun Yoo <quic_jaehyoo@quicinc.com>
+
+[ Upstream commit 3eef2f48ba0933ba995529f522554ad5c276c39b ]
+
+FWSPIDQ2 and FWSPIDQ3 are not part of the FWSPI18 interface, so remove
+the FWQSPID group in pinctrl. These pins must be used with the FWSPI
+pins that are dedicated to the boot SPI interface, which provides the
+same 3.3V logic level.
+
+Fixes: 2eda1cdec49f ("pinctrl: aspeed: Add AST2600 pinmux support")
+Signed-off-by: Jae Hyun Yoo <quic_jaehyoo@quicinc.com>
+Reviewed-by: Andrew Jeffery <andrew@aj.id.au>
+Link: https://lore.kernel.org/r/20220329173932.2588289-3-quic_jaehyoo@quicinc.com
+Signed-off-by: Joel Stanley <joel@jms.id.au>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c | 14 +++-----------
+ 1 file changed, 3 insertions(+), 11 deletions(-)
+
+diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c b/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
+index a3fa03bcd9a3..54064714d73f 100644
+--- a/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
++++ b/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
+@@ -1236,18 +1236,12 @@ FUNC_GROUP_DECL(SALT8, AA12);
+ FUNC_GROUP_DECL(WDTRST4, AA12);
+
+ #define AE12 196
+-SIG_EXPR_LIST_DECL_SEMG(AE12, FWSPIDQ2, FWQSPID, FWSPID,
+- SIG_DESC_SET(SCU438, 4));
+ SIG_EXPR_LIST_DECL_SESG(AE12, GPIOY4, GPIOY4);
+-PIN_DECL_(AE12, SIG_EXPR_LIST_PTR(AE12, FWSPIDQ2),
+- SIG_EXPR_LIST_PTR(AE12, GPIOY4));
++PIN_DECL_(AE12, SIG_EXPR_LIST_PTR(AE12, GPIOY4));
+
+ #define AF12 197
+-SIG_EXPR_LIST_DECL_SEMG(AF12, FWSPIDQ3, FWQSPID, FWSPID,
+- SIG_DESC_SET(SCU438, 5));
+ SIG_EXPR_LIST_DECL_SESG(AF12, GPIOY5, GPIOY5);
+-PIN_DECL_(AF12, SIG_EXPR_LIST_PTR(AF12, FWSPIDQ3),
+- SIG_EXPR_LIST_PTR(AF12, GPIOY5));
++PIN_DECL_(AF12, SIG_EXPR_LIST_PTR(AF12, GPIOY5));
+
+ #define AC12 198
+ SSSF_PIN_DECL(AC12, GPIOY6, FWSPIABR, SIG_DESC_SET(SCU438, 6));
+@@ -1520,9 +1514,8 @@ SIG_EXPR_LIST_DECL_SEMG(Y4, EMMCDAT7, EMMCG8, EMMC, SIG_DESC_SET(SCU404, 3));
+ PIN_DECL_3(Y4, GPIO18E3, FWSPIDMISO, VBMISO, EMMCDAT7);
+
+ GROUP_DECL(FWSPID, Y1, Y2, Y3, Y4);
+-GROUP_DECL(FWQSPID, Y1, Y2, Y3, Y4, AE12, AF12);
+ GROUP_DECL(EMMCG8, AB4, AA4, AC4, AA5, Y5, AB5, AB6, AC5, Y1, Y2, Y3, Y4);
+-FUNC_DECL_2(FWSPID, FWSPID, FWQSPID);
++FUNC_DECL_1(FWSPID, FWSPID);
+ FUNC_GROUP_DECL(VB, Y1, Y2, Y3, Y4);
+ FUNC_DECL_3(EMMC, EMMCG1, EMMCG4, EMMCG8);
+ /*
+@@ -1918,7 +1911,6 @@ static const struct aspeed_pin_group aspeed_g6_groups[] = {
+ ASPEED_PINCTRL_GROUP(FSI2),
+ ASPEED_PINCTRL_GROUP(FWSPIABR),
+ ASPEED_PINCTRL_GROUP(FWSPID),
+- ASPEED_PINCTRL_GROUP(FWQSPID),
+ ASPEED_PINCTRL_GROUP(FWSPIWP),
+ ASPEED_PINCTRL_GROUP(GPIT0),
+ ASPEED_PINCTRL_GROUP(GPIT1),
+--
+2.35.1
+
diff --git a/queue-5.17/platform-surface-gpe-add-support-for-surface-pro-8.patch b/queue-5.17/platform-surface-gpe-add-support-for-surface-pro-8.patch
new file mode 100644
index 0000000..7c251c5
--- /dev/null
+++ b/queue-5.17/platform-surface-gpe-add-support-for-surface-pro-8.patch
@@ -0,0 +1,43 @@
+From 2358a2f8fc09fe6c39c1f95d02abff4556c10f51 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 29 Apr 2022 20:00:49 +0200
+Subject: platform/surface: gpe: Add support for Surface Pro 8
+
+From: Maximilian Luz <luzmaximilian@gmail.com>
+
+[ Upstream commit ed13d4ac57474d959c40fd05d8860e2b1607becb ]
+
+The new Surface Pro 8 uses GPEs for lid events as well. Add an entry for
+it so that the lid can be used to wake the device. Note that this is a
+device with a keyboard type-cover, which acts as the "lid".
+
+Signed-off-by: Maximilian Luz <luzmaximilian@gmail.com>
+Link: https://lore.kernel.org/r/20220429180049.1282447-1-luzmaximilian@gmail.com
+Signed-off-by: Hans de Goede <hdegoede@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/platform/surface/surface_gpe.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+diff --git a/drivers/platform/surface/surface_gpe.c b/drivers/platform/surface/surface_gpe.c
+index c1775db29efb..ec66fde28e75 100644
+--- a/drivers/platform/surface/surface_gpe.c
++++ b/drivers/platform/surface/surface_gpe.c
+@@ -99,6 +99,14 @@ static const struct dmi_system_id dmi_lid_device_table[] = {
+ },
+ .driver_data = (void *)lid_device_props_l4D,
+ },
++ {
++ .ident = "Surface Pro 8",
++ .matches = {
++ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
++ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Pro 8"),
++ },
++ .driver_data = (void *)lid_device_props_l4B,
++ },
+ {
+ .ident = "Surface Book 1",
+ .matches = {
+--
+2.35.1
+
diff --git a/queue-5.17/platform-x86-intel-fix-rmmod-pmt_telemetry-panic.patch b/queue-5.17/platform-x86-intel-fix-rmmod-pmt_telemetry-panic.patch
new file mode 100644
index 0000000..e6d285c
--- /dev/null
+++ b/queue-5.17/platform-x86-intel-fix-rmmod-pmt_telemetry-panic.patch
@@ -0,0 +1,95 @@
+From de277bd10fd6156d82ee7452b19724b46b694ad9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 29 Apr 2022 08:23:22 -0400
+Subject: platform/x86/intel: Fix 'rmmod pmt_telemetry' panic
+
+From: Prarit Bhargava <prarit@redhat.com>
+
+[ Upstream commit 2cdfa0c20d58da3757054797c2974c967035926a ]
+
+'rmmod pmt_telemetry' panics with:
+
+ BUG: kernel NULL pointer dereference, address: 0000000000000040
+ #PF: supervisor read access in kernel mode
+ #PF: error_code(0x0000) - not-present page
+ PGD 0 P4D 0
+ Oops: 0000 [#1] PREEMPT SMP NOPTI
+ CPU: 4 PID: 1697 Comm: rmmod Tainted: G S W -------- --- 5.18.0-rc4 #3
+ Hardware name: Intel Corporation Alder Lake Client Platform/AlderLake-P DDR5 RVP, BIOS ADLPFWI1.R00.3056.B00.2201310233 01/31/2022
+ RIP: 0010:device_del+0x1b/0x3d0
+ Code: e8 1a d9 e9 ff e9 58 ff ff ff 48 8b 08 eb dc 0f 1f 44 00 00 41 56 41 55 41 54 55 48 8d af 80 00 00 00 53 48 89 fb 48 83 ec 18 <4c> 8b 67 40 48 89 ef 65 48 8b 04 25 28 00 00 00 48 89 44 24 10 31
+ RSP: 0018:ffffb520415cfd60 EFLAGS: 00010286
+ RAX: 0000000000000070 RBX: 0000000000000000 RCX: 0000000000000000
+ RDX: 0000000000000001 RSI: 0000000000000000 RDI: 0000000000000000
+ RBP: 0000000000000080 R08: ffffffffffffffff R09: ffffb520415cfd78
+ R10: 0000000000000002 R11: ffffb520415cfd78 R12: 0000000000000000
+ R13: 0000000000000000 R14: 0000000000000000 R15: 0000000000000000
+ FS: 00007f7e198e5740(0000) GS:ffff905c9f700000(0000) knlGS:0000000000000000
+ CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+ CR2: 0000000000000040 CR3: 000000010782a005 CR4: 0000000000770ee0
+ PKRU: 55555554
+ Call Trace:
+ <TASK>
+ ? __xa_erase+0x53/0xb0
+ device_unregister+0x13/0x50
+ intel_pmt_dev_destroy+0x34/0x60 [pmt_class]
+ pmt_telem_remove+0x40/0x50 [pmt_telemetry]
+ auxiliary_bus_remove+0x18/0x30
+ device_release_driver_internal+0xc1/0x150
+ driver_detach+0x44/0x90
+ bus_remove_driver+0x74/0xd0
+ auxiliary_driver_unregister+0x12/0x20
+ pmt_telem_exit+0xc/0xe4a [pmt_telemetry]
+ __x64_sys_delete_module+0x13a/0x250
+ ? syscall_trace_enter.isra.19+0x11e/0x1a0
+ do_syscall_64+0x58/0x80
+ ? syscall_exit_to_user_mode+0x12/0x30
+ ? do_syscall_64+0x67/0x80
+ ? syscall_exit_to_user_mode+0x12/0x30
+ ? do_syscall_64+0x67/0x80
+ ? syscall_exit_to_user_mode+0x12/0x30
+ ? do_syscall_64+0x67/0x80
+ ? exc_page_fault+0x64/0x140
+ entry_SYSCALL_64_after_hwframe+0x44/0xae
+ RIP: 0033:0x7f7e1803a05b
+ Code: 73 01 c3 48 8b 0d 2d 4e 38 00 f7 d8 64 89 01 48 83 c8 ff c3 66 2e 0f 1f 84 00 00 00 00 00 90 f3 0f 1e fa b8 b0 00 00 00 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 8b 0d fd 4d 38 00 f7 d8 64 89 01 48
+
+The probe function, pmt_telem_probe(), adds an entry for devices even if
+they have not been initialized. This results in the array of initialized
+devices containing both initialized and uninitialized entries. This
+causes a panic in the remove function, pmt_telem_remove(), which expects
+the array to contain only initialized entries.
+
+Only use an entry when a device is initialized.
+
+Cc: "David E. Box" <david.e.box@linux.intel.com>
+Cc: Hans de Goede <hdegoede@redhat.com>
+Cc: Mark Gross <markgross@kernel.org>
+Cc: platform-driver-x86@vger.kernel.org
+Signed-off-by: David Arcari <darcari@redhat.com>
+Signed-off-by: Prarit Bhargava <prarit@redhat.com>
+Reviewed-by: David E. Box <david.e.box@linux.intel.com>
+Link: https://lore.kernel.org/r/20220429122322.2550003-1-prarit@redhat.com
+Reviewed-by: Hans de Goede <hdegoede@redhat.com>
+Signed-off-by: Hans de Goede <hdegoede@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/platform/x86/intel/pmt/telemetry.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/platform/x86/intel/pmt/telemetry.c b/drivers/platform/x86/intel/pmt/telemetry.c
+index 6b6f3e2a617a..f73ecfd4a309 100644
+--- a/drivers/platform/x86/intel/pmt/telemetry.c
++++ b/drivers/platform/x86/intel/pmt/telemetry.c
+@@ -103,7 +103,7 @@ static int pmt_telem_probe(struct auxiliary_device *auxdev, const struct auxilia
+ auxiliary_set_drvdata(auxdev, priv);
+
+ for (i = 0; i < intel_vsec_dev->num_resources; i++) {
+- struct intel_pmt_entry *entry = &priv->entry[i];
++ struct intel_pmt_entry *entry = &priv->entry[priv->num_entries];
+
+ ret = intel_pmt_dev_create(entry, &pmt_telem_ns, intel_vsec_dev, i);
+ if (ret < 0)
+--
+2.35.1
+
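For context, the shape of the fix above is the usual "compact array of
initialized entries" pattern: a failed resource must not consume a slot, so
the teardown path can trust every slot it walks. A minimal userspace C sketch
of that pattern (all names and the failing resource below are made up for
illustration, this is not the driver code):

#include <stdio.h>

struct entry { int id; };

int main(void)
{
	struct entry entries[4];
	int num_entries = 0;
	int i;

	for (i = 0; i < 4; i++) {
		int created_ok = (i != 2);	/* pretend resource 2 fails to init */

		if (!created_ok)
			continue;		/* skip it without consuming a slot */

		/* index by num_entries (as the fix does), not by i */
		entries[num_entries].id = i;
		num_entries++;
	}

	/* the remove path can now assume every slot below num_entries is valid */
	for (i = 0; i < num_entries; i++)
		printf("tearing down entry for resource %d\n", entries[i].id);

	return 0;
}
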
diff --git a/queue-5.17/platform-x86-thinkpad_acpi-convert-btusb-dmi-list-to.patch b/queue-5.17/platform-x86-thinkpad_acpi-convert-btusb-dmi-list-to.patch
new file mode 100644
index 0000000..ad95642
--- /dev/null
+++ b/queue-5.17/platform-x86-thinkpad_acpi-convert-btusb-dmi-list-to.patch
@@ -0,0 +1,137 @@
+From acfcca17c46a4e95afaa792d54ee2f71148da2ed Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 28 Apr 2022 22:05:00 -0500
+Subject: platform/x86: thinkpad_acpi: Convert btusb DMI list to quirks
+
+From: Mario Limonciello <mario.limonciello@amd.com>
+
+[ Upstream commit c25d7f32e3e209462cd82e6e93e66b72dbb2308f ]
+
+DMI matching in thinkpad_acpi happens locally in one function, meaning
+quirks can only affect that function.
+
+Future changes to thinkpad_acpi may need to quirk other code, so
+change this to use a quirk infrastructure.
+
+Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
+Tested-by: Mark Pearson <markpearson@lenvo.com>
+Link: https://lore.kernel.org/r/20220429030501.1909-2-mario.limonciello@amd.com
+Reviewed-by: Hans de Goede <hdegoede@redhat.com>
+Signed-off-by: Hans de Goede <hdegoede@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/platform/x86/thinkpad_acpi.c | 26 ++++++++++++++++++++++++--
+ 1 file changed, 24 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
+index 3fb8cda31eb9..c43586f1cb4b 100644
+--- a/drivers/platform/x86/thinkpad_acpi.c
++++ b/drivers/platform/x86/thinkpad_acpi.c
+@@ -309,6 +309,15 @@ struct ibm_init_struct {
+ struct ibm_struct *data;
+ };
+
++/* DMI Quirks */
++struct quirk_entry {
++ bool btusb_bug;
++};
++
++static struct quirk_entry quirk_btusb_bug = {
++ .btusb_bug = true,
++};
++
+ static struct {
+ u32 bluetooth:1;
+ u32 hotkey:1;
+@@ -338,6 +347,7 @@ static struct {
+ u32 hotkey_poll_active:1;
+ u32 has_adaptive_kbd:1;
+ u32 kbd_lang:1;
++ struct quirk_entry *quirks;
+ } tp_features;
+
+ static struct {
+@@ -4361,9 +4371,10 @@ static void bluetooth_exit(void)
+ bluetooth_shutdown();
+ }
+
+-static const struct dmi_system_id bt_fwbug_list[] __initconst = {
++static const struct dmi_system_id fwbug_list[] __initconst = {
+ {
+ .ident = "ThinkPad E485",
++ .driver_data = &quirk_btusb_bug,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_BOARD_NAME, "20KU"),
+@@ -4371,6 +4382,7 @@ static const struct dmi_system_id bt_fwbug_list[] __initconst = {
+ },
+ {
+ .ident = "ThinkPad E585",
++ .driver_data = &quirk_btusb_bug,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_BOARD_NAME, "20KV"),
+@@ -4378,6 +4390,7 @@ static const struct dmi_system_id bt_fwbug_list[] __initconst = {
+ },
+ {
+ .ident = "ThinkPad A285 - 20MW",
++ .driver_data = &quirk_btusb_bug,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_BOARD_NAME, "20MW"),
+@@ -4385,6 +4398,7 @@ static const struct dmi_system_id bt_fwbug_list[] __initconst = {
+ },
+ {
+ .ident = "ThinkPad A285 - 20MX",
++ .driver_data = &quirk_btusb_bug,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_BOARD_NAME, "20MX"),
+@@ -4392,6 +4406,7 @@ static const struct dmi_system_id bt_fwbug_list[] __initconst = {
+ },
+ {
+ .ident = "ThinkPad A485 - 20MU",
++ .driver_data = &quirk_btusb_bug,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_BOARD_NAME, "20MU"),
+@@ -4399,6 +4414,7 @@ static const struct dmi_system_id bt_fwbug_list[] __initconst = {
+ },
+ {
+ .ident = "ThinkPad A485 - 20MV",
++ .driver_data = &quirk_btusb_bug,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_BOARD_NAME, "20MV"),
+@@ -4421,7 +4437,8 @@ static int __init have_bt_fwbug(void)
+ * Some AMD based ThinkPads have a firmware bug that calling
+ * "GBDC" will cause bluetooth on Intel wireless cards blocked
+ */
+- if (dmi_check_system(bt_fwbug_list) && pci_dev_present(fwbug_cards_ids)) {
++ if (tp_features.quirks && tp_features.quirks->btusb_bug &&
++ pci_dev_present(fwbug_cards_ids)) {
+ vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_RFKILL,
+ FW_BUG "disable bluetooth subdriver for Intel cards\n");
+ return 1;
+@@ -11438,6 +11455,7 @@ static void thinkpad_acpi_module_exit(void)
+
+ static int __init thinkpad_acpi_module_init(void)
+ {
++ const struct dmi_system_id *dmi_id;
+ int ret, i;
+
+ tpacpi_lifecycle = TPACPI_LIFE_INIT;
+@@ -11477,6 +11495,10 @@ static int __init thinkpad_acpi_module_init(void)
+ return -ENODEV;
+ }
+
++ dmi_id = dmi_first_match(fwbug_list);
++ if (dmi_id)
++ tp_features.quirks = dmi_id->driver_data;
++
+ /* Device initialization */
+ tpacpi_pdev = platform_device_register_simple(TPACPI_DRVR_NAME, -1,
+ NULL, 0);
+--
+2.35.1
+
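For context, the dmi_first_match() + driver_data scheme the patch above moves
to can be modelled in a few lines of plain C. Everything below (structure
names, board strings, the lookup helper) is illustrative only, not the
thinkpad_acpi code:

#include <stdio.h>
#include <string.h>

struct quirk_entry { int btusb_bug; };

struct match_entry {
	const char *board;			/* stand-in for DMI_BOARD_NAME */
	const struct quirk_entry *driver_data;
};

static const struct quirk_entry quirk_btusb_bug = { .btusb_bug = 1 };

static const struct match_entry fwbug_list[] = {
	{ "20KU", &quirk_btusb_bug },
	{ "20KV", &quirk_btusb_bug },
	{ NULL, NULL },				/* table terminator */
};

/* first match wins, and its driver_data becomes the active quirk set */
static const struct quirk_entry *first_match(const char *board)
{
	const struct match_entry *id;

	for (id = fwbug_list; id->board; id++)
		if (!strcmp(id->board, board))
			return id->driver_data;
	return NULL;
}

int main(void)
{
	const struct quirk_entry *quirks = first_match("20KV");

	printf("btusb quirk active: %s\n",
	       quirks && quirks->btusb_bug ? "yes" : "no");
	return 0;
}
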
diff --git a/queue-5.17/platform-x86-thinkpad_acpi-correct-dual-fan-probe.patch b/queue-5.17/platform-x86-thinkpad_acpi-correct-dual-fan-probe.patch
new file mode 100644
index 0000000..de4fa1c
--- /dev/null
+++ b/queue-5.17/platform-x86-thinkpad_acpi-correct-dual-fan-probe.patch
@@ -0,0 +1,71 @@
+From 2bd1c7e4632733c08cfb2aa43724a21685e7ec8a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 2 May 2022 15:12:00 -0400
+Subject: platform/x86: thinkpad_acpi: Correct dual fan probe
+
+From: Mark Pearson <markpearson@lenovo.com>
+
+[ Upstream commit aa2fef6f40e6ccc22e932b36898f260f0e5a021a ]
+
+There was an issue with the dual fan probe whereby the probe was
+failing because it assumed that second_fan support was not available.
+
+Corrected the logic so the probe works correctly. Cleaned up so the
+quirks are only used if the 2nd fan is not detected.
+
+Tested on X1 Carbon 10 (2 fans), X1 Carbon 9 (2 fans) and T490 (1 fan)
+
+Signed-off-by: Mark Pearson <markpearson@lenovo.com>
+Link: https://lore.kernel.org/r/20220502191200.63470-1-markpearson@lenovo.com
+Reviewed-by: Hans de Goede <hdegoede@redhat.com>
+Signed-off-by: Hans de Goede <hdegoede@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/platform/x86/thinkpad_acpi.c | 23 +++++++++++++----------
+ 1 file changed, 13 insertions(+), 10 deletions(-)
+
+diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
+index c43586f1cb4b..0ea71416d292 100644
+--- a/drivers/platform/x86/thinkpad_acpi.c
++++ b/drivers/platform/x86/thinkpad_acpi.c
+@@ -8766,24 +8766,27 @@ static int __init fan_init(struct ibm_init_struct *iibm)
+ fan_status_access_mode = TPACPI_FAN_RD_TPEC;
+ if (quirks & TPACPI_FAN_Q1)
+ fan_quirk1_setup();
+- if (quirks & TPACPI_FAN_2FAN) {
+- tp_features.second_fan = 1;
+- pr_info("secondary fan support enabled\n");
+- }
+- if (quirks & TPACPI_FAN_2CTL) {
+- tp_features.second_fan = 1;
+- tp_features.second_fan_ctl = 1;
+- pr_info("secondary fan control enabled\n");
+- }
+ /* Try and probe the 2nd fan */
++ tp_features.second_fan = 1; /* needed for get_speed to work */
+ res = fan2_get_speed(&speed);
+ if (res >= 0) {
+ /* It responded - so let's assume it's there */
+ tp_features.second_fan = 1;
+ tp_features.second_fan_ctl = 1;
+ pr_info("secondary fan control detected & enabled\n");
++ } else {
++ /* Fan not auto-detected */
++ tp_features.second_fan = 0;
++ if (quirks & TPACPI_FAN_2FAN) {
++ tp_features.second_fan = 1;
++ pr_info("secondary fan support enabled\n");
++ }
++ if (quirks & TPACPI_FAN_2CTL) {
++ tp_features.second_fan = 1;
++ tp_features.second_fan_ctl = 1;
++ pr_info("secondary fan control enabled\n");
++ }
+ }
+-
+ } else {
+ pr_err("ThinkPad ACPI EC access misbehaving, fan status and control unavailable\n");
+ return -ENODEV;
+--
+2.35.1
+
diff --git a/queue-5.17/ptp-ocp-have-adjtime-handle-negative-delta_ns-correc.patch b/queue-5.17/ptp-ocp-have-adjtime-handle-negative-delta_ns-correc.patch
new file mode 100644
index 0000000..914154e
--- /dev/null
+++ b/queue-5.17/ptp-ocp-have-adjtime-handle-negative-delta_ns-correc.patch
@@ -0,0 +1,50 @@
+From 0ec7ee6ee82f9d3aa4c4234283dea94e77df2867 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 13 May 2022 15:52:31 -0700
+Subject: ptp: ocp: have adjtime handle negative delta_ns correctly
+
+From: Jonathan Lemon <jonathan.lemon@gmail.com>
+
+[ Upstream commit da2172a9bfec858ceeb0271b9d444378490398c8 ]
+
+delta_ns is an s64, but it was being passed to ptp_ocp_adjtime_coarse()
+as a u64. Also, it turns out that timespec64_add_ns() only handles
+positive values, so perform the math with set_normalized_timespec64().
+
+Fixes: 90f8f4c0e3ce ("ptp: ocp: Add ptp_ocp_adjtime_coarse for large adjustments")
+Suggested-by: Vadim Fedorenko <vfedorenko@novek.ru>
+Signed-off-by: Jonathan Lemon <jonathan.lemon@gmail.com>
+Acked-by: Vadim Fedorenko <vfedorenko@novek.ru>
+Link: https://lore.kernel.org/r/20220513225231.1412-1-jonathan.lemon@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/ptp/ptp_ocp.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/ptp/ptp_ocp.c b/drivers/ptp/ptp_ocp.c
+index 17ad5f0d13b2..6585789ed695 100644
+--- a/drivers/ptp/ptp_ocp.c
++++ b/drivers/ptp/ptp_ocp.c
+@@ -625,7 +625,7 @@ __ptp_ocp_adjtime_locked(struct ptp_ocp *bp, u32 adj_val)
+ }
+
+ static void
+-ptp_ocp_adjtime_coarse(struct ptp_ocp *bp, u64 delta_ns)
++ptp_ocp_adjtime_coarse(struct ptp_ocp *bp, s64 delta_ns)
+ {
+ struct timespec64 ts;
+ unsigned long flags;
+@@ -634,7 +634,8 @@ ptp_ocp_adjtime_coarse(struct ptp_ocp *bp, u64 delta_ns)
+ spin_lock_irqsave(&bp->lock, flags);
+ err = __ptp_ocp_gettime_locked(bp, &ts, NULL);
+ if (likely(!err)) {
+- timespec64_add_ns(&ts, delta_ns);
++ set_normalized_timespec64(&ts, ts.tv_sec,
++ ts.tv_nsec + delta_ns);
+ __ptp_ocp_settime_locked(bp, &ts);
+ }
+ spin_unlock_irqrestore(&bp->lock, flags);
+--
+2.35.1
+
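For context on the fix above: timespec64_add_ns() only folds a positive
nanosecond count forward, which is why a signed delta needs the normalization
that set_normalized_timespec64() provides. A plain userspace C sketch of that
normalization (the struct and helper below are stand-ins, not the kernel
implementations):

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC 1000000000LL

struct ts64 { int64_t tv_sec; int64_t tv_nsec; };

/* fold an arbitrary (possibly negative) nanosecond value back into
 * the canonical 0 <= tv_nsec < NSEC_PER_SEC range */
static struct ts64 normalize(int64_t sec, int64_t nsec)
{
	while (nsec >= NSEC_PER_SEC) { nsec -= NSEC_PER_SEC; sec++; }
	while (nsec < 0)             { nsec += NSEC_PER_SEC; sec--; }
	return (struct ts64){ sec, nsec };
}

int main(void)
{
	struct ts64 t = { 100, 1000 };	/* 100.000001000 s */
	int64_t delta_ns = -5000;	/* a negative coarse adjustment */

	/* adding the signed delta and re-normalizing keeps the result
	 * well-formed even when tv_nsec would otherwise go negative */
	struct ts64 r = normalize(t.tv_sec, t.tv_nsec + delta_ns);

	printf("%lld.%09lld\n", (long long)r.tv_sec, (long long)r.tv_nsec);
	return 0;
}
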
diff --git a/queue-5.17/revert-fbdev-make-fb_release-return-enodev-if-fbdev-.patch b/queue-5.17/revert-fbdev-make-fb_release-return-enodev-if-fbdev-.patch
new file mode 100644
index 0000000..259a9c5
--- /dev/null
+++ b/queue-5.17/revert-fbdev-make-fb_release-return-enodev-if-fbdev-.patch
@@ -0,0 +1,57 @@
+From 5fa79a6d23839675f1bc1026be450c68b9bc4131 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 4 May 2022 13:59:17 +0200
+Subject: Revert "fbdev: Make fb_release() return -ENODEV if fbdev was
+ unregistered"
+
+From: Javier Martinez Canillas <javierm@redhat.com>
+
+[ Upstream commit 135332f34ba2662bc1e32b5c612e06a8cc41a053 ]
+
+This reverts commit aafa025c76dcc7d1a8c8f0bdefcbe4eb480b2f6a. That commit
+attempted to fix a NULL pointer dereference caused by the struct fb_info
+associated with a framebuffer device no longer being valid when the file
+descriptor was closed.
+
+The issue was exposed by commit 27599aacbaef ("fbdev: Hot-unplug firmware
+fb devices on forced removal"), which added a new path that goes through
+the struct device removal instead of directly unregistering the fb.
+
+Most fbdev drivers have issues with the fb_info lifetime, because they call
+framebuffer_release() from their driver's .remove callback rather than
+from the fbops.fb_destroy callback. This meant that, due to this switch,
+the fb_info was now destroyed too early, while references still existed,
+whereas before it was simply leaked.
+
+The patch we're reverting here reinstated that leak, hence "fixed" the
+regression. But the proper solution is to fix the drivers to not release
+the fb_info too soon.
+
+Suggested-by: Daniel Vetter <daniel.vetter@ffwll.ch>
+Signed-off-by: Javier Martinez Canillas <javierm@redhat.com>
+Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
+Link: https://patchwork.freedesktop.org/patch/msgid/20220504115917.758787-1-javierm@redhat.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/video/fbdev/core/fbmem.c | 5 +----
+ 1 file changed, 1 insertion(+), 4 deletions(-)
+
+diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
+index 10a9369c9dea..00f0f282e7a1 100644
+--- a/drivers/video/fbdev/core/fbmem.c
++++ b/drivers/video/fbdev/core/fbmem.c
+@@ -1438,10 +1438,7 @@ fb_release(struct inode *inode, struct file *file)
+ __acquires(&info->lock)
+ __releases(&info->lock)
+ {
+- struct fb_info * const info = file_fb_info(file);
+-
+- if (!info)
+- return -ENODEV;
++ struct fb_info * const info = file->private_data;
+
+ lock_fb_info(info);
+ if (info->fbops->fb_release)
+--
+2.35.1
+
diff --git a/queue-5.17/revert-pci-aardvark-rewrite-irq-code-to-chained-irq-.patch b/queue-5.17/revert-pci-aardvark-rewrite-irq-code-to-chained-irq-.patch
new file mode 100644
index 0000000..08884a8
--- /dev/null
+++ b/queue-5.17/revert-pci-aardvark-rewrite-irq-code-to-chained-irq-.patch
@@ -0,0 +1,135 @@
+From 1254058fad380e5e123541aa4337c2ca586dffe3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 15 May 2022 14:58:15 +0200
+Subject: Revert "PCI: aardvark: Rewrite IRQ code to chained IRQ handler"
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Pali Rohár <pali@kernel.org>
+
+[ Upstream commit a3b69dd0ad6265c29c4b6fb381cd76fb3bebdf8c ]
+
+This reverts commit 1571d67dc190e50c6c56e8f88cdc39f7cc53166e.
+
+This commit broke support for setting interrupt affinity. It looks like
+it is related to the chained IRQ handler. Revert this commit until the
+issue with setting interrupt affinity is fixed.
+
+Fixes: 1571d67dc190 ("PCI: aardvark: Rewrite IRQ code to chained IRQ handler")
+Link: https://lore.kernel.org/r/20220515125815.30157-1-pali@kernel.org
+Signed-off-by: Pali Rohár <pali@kernel.org>
+Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pci/controller/pci-aardvark.c | 48 ++++++++++++---------------
+ 1 file changed, 22 insertions(+), 26 deletions(-)
+
+diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c
+index 5be382b19d9a..27169c023180 100644
+--- a/drivers/pci/controller/pci-aardvark.c
++++ b/drivers/pci/controller/pci-aardvark.c
+@@ -272,7 +272,6 @@ struct advk_pcie {
+ u32 actions;
+ } wins[OB_WIN_COUNT];
+ u8 wins_count;
+- int irq;
+ struct irq_domain *rp_irq_domain;
+ struct irq_domain *irq_domain;
+ struct irq_chip irq_chip;
+@@ -1570,26 +1569,21 @@ static void advk_pcie_handle_int(struct advk_pcie *pcie)
+ }
+ }
+
+-static void advk_pcie_irq_handler(struct irq_desc *desc)
++static irqreturn_t advk_pcie_irq_handler(int irq, void *arg)
+ {
+- struct advk_pcie *pcie = irq_desc_get_handler_data(desc);
+- struct irq_chip *chip = irq_desc_get_chip(desc);
+- u32 val, mask, status;
++ struct advk_pcie *pcie = arg;
++ u32 status;
+
+- chained_irq_enter(chip, desc);
++ status = advk_readl(pcie, HOST_CTRL_INT_STATUS_REG);
++ if (!(status & PCIE_IRQ_CORE_INT))
++ return IRQ_NONE;
+
+- val = advk_readl(pcie, HOST_CTRL_INT_STATUS_REG);
+- mask = advk_readl(pcie, HOST_CTRL_INT_MASK_REG);
+- status = val & ((~mask) & PCIE_IRQ_ALL_MASK);
++ advk_pcie_handle_int(pcie);
+
+- if (status & PCIE_IRQ_CORE_INT) {
+- advk_pcie_handle_int(pcie);
++ /* Clear interrupt */
++ advk_writel(pcie, PCIE_IRQ_CORE_INT, HOST_CTRL_INT_STATUS_REG);
+
+- /* Clear interrupt */
+- advk_writel(pcie, PCIE_IRQ_CORE_INT, HOST_CTRL_INT_STATUS_REG);
+- }
+-
+- chained_irq_exit(chip, desc);
++ return IRQ_HANDLED;
+ }
+
+ static int advk_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+@@ -1671,7 +1665,7 @@ static int advk_pcie_probe(struct platform_device *pdev)
+ struct advk_pcie *pcie;
+ struct pci_host_bridge *bridge;
+ struct resource_entry *entry;
+- int ret;
++ int ret, irq;
+
+ bridge = devm_pci_alloc_host_bridge(dev, sizeof(struct advk_pcie));
+ if (!bridge)
+@@ -1757,9 +1751,17 @@ static int advk_pcie_probe(struct platform_device *pdev)
+ if (IS_ERR(pcie->base))
+ return PTR_ERR(pcie->base);
+
+- pcie->irq = platform_get_irq(pdev, 0);
+- if (pcie->irq < 0)
+- return pcie->irq;
++ irq = platform_get_irq(pdev, 0);
++ if (irq < 0)
++ return irq;
++
++ ret = devm_request_irq(dev, irq, advk_pcie_irq_handler,
++ IRQF_SHARED | IRQF_NO_THREAD, "advk-pcie",
++ pcie);
++ if (ret) {
++ dev_err(dev, "Failed to register interrupt\n");
++ return ret;
++ }
+
+ pcie->reset_gpio = devm_gpiod_get_from_of_node(dev, dev->of_node,
+ "reset-gpios", 0,
+@@ -1816,15 +1818,12 @@ static int advk_pcie_probe(struct platform_device *pdev)
+ return ret;
+ }
+
+- irq_set_chained_handler_and_data(pcie->irq, advk_pcie_irq_handler, pcie);
+-
+ bridge->sysdata = pcie;
+ bridge->ops = &advk_pcie_ops;
+ bridge->map_irq = advk_pcie_map_irq;
+
+ ret = pci_host_probe(bridge);
+ if (ret < 0) {
+- irq_set_chained_handler_and_data(pcie->irq, NULL, NULL);
+ advk_pcie_remove_rp_irq_domain(pcie);
+ advk_pcie_remove_msi_irq_domain(pcie);
+ advk_pcie_remove_irq_domain(pcie);
+@@ -1873,9 +1872,6 @@ static int advk_pcie_remove(struct platform_device *pdev)
+ advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_REG);
+ advk_writel(pcie, PCIE_IRQ_ALL_MASK, HOST_CTRL_INT_STATUS_REG);
+
+- /* Remove IRQ handler */
+- irq_set_chained_handler_and_data(pcie->irq, NULL, NULL);
+-
+ /* Remove IRQ domains */
+ advk_pcie_remove_rp_irq_domain(pcie);
+ advk_pcie_remove_msi_irq_domain(pcie);
+--
+2.35.1
+
diff --git a/queue-5.17/riscv-dts-sifive-fu540-c000-align-dma-node-name-with.patch b/queue-5.17/riscv-dts-sifive-fu540-c000-align-dma-node-name-with.patch
new file mode 100644
index 0000000..44d2ff2
--- /dev/null
+++ b/queue-5.17/riscv-dts-sifive-fu540-c000-align-dma-node-name-with.patch
@@ -0,0 +1,38 @@
+From e94f819bac2949208deb4851b10b8011a997209c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 7 Apr 2022 21:38:56 +0200
+Subject: riscv: dts: sifive: fu540-c000: align dma node name with dtschema
+
+From: Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+
+[ Upstream commit b17410182b6f98191fbf7f42d3b4a78512769d29 ]
+
+Fixes dtbs_check warnings like:
+
+ dma@3000000: $nodename:0: 'dma@3000000' does not match '^dma-controller(@.*)?$'
+
+Signed-off-by: Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+Link: https://lore.kernel.org/r/20220407193856.18223-1-krzysztof.kozlowski@linaro.org
+Fixes: c5ab54e9945b ("riscv: dts: add support for PDMA device of HiFive Unleashed Rev A00")
+Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/riscv/boot/dts/sifive/fu540-c000.dtsi | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/arch/riscv/boot/dts/sifive/fu540-c000.dtsi b/arch/riscv/boot/dts/sifive/fu540-c000.dtsi
+index 3eef52b1a59b..fd93fdadd28c 100644
+--- a/arch/riscv/boot/dts/sifive/fu540-c000.dtsi
++++ b/arch/riscv/boot/dts/sifive/fu540-c000.dtsi
+@@ -167,7 +167,7 @@ uart0: serial@10010000 {
+ clocks = <&prci PRCI_CLK_TLCLK>;
+ status = "disabled";
+ };
+- dma: dma@3000000 {
++ dma: dma-controller@3000000 {
+ compatible = "sifive,fu540-c000-pdma";
+ reg = <0x0 0x3000000 0x0 0x8000>;
+ interrupt-parent = <&plic0>;
+--
+2.35.1
+
diff --git a/queue-5.17/scsi-qla2xxx-fix-missed-dma-unmap-for-aborted-comman.patch b/queue-5.17/scsi-qla2xxx-fix-missed-dma-unmap-for-aborted-comman.patch
new file mode 100644
index 0000000..98b2843
--- /dev/null
+++ b/queue-5.17/scsi-qla2xxx-fix-missed-dma-unmap-for-aborted-comman.patch
@@ -0,0 +1,51 @@
+From f29c3587b51d999fbcf0e8039ab67ef35a64212b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 15 Apr 2022 12:42:29 +0000
+Subject: scsi: qla2xxx: Fix missed DMA unmap for aborted commands
+
+From: Gleb Chesnokov <Chesnokov.G@raidix.com>
+
+[ Upstream commit 26f9ce53817a8fd84b69a73473a7de852a24c897 ]
+
+Aborting commands that have already been sent to the firmware can
+cause a BUG in qlt_free_cmd(): BUG_ON(cmd->sg_mapped)
+
+For instance:
+
+ - Command passes rdx_to_xfer state, maps sgl, sends to the firmware
+
+ - Reset occurs, qla2xxx performs ISP error recovery, aborts the command
+
+ - Target stack calls qlt_abort_cmd() and then qlt_free_cmd()
+
+ - BUG_ON(cmd->sg_mapped) in qlt_free_cmd() occurs because sgl was not
+ unmapped
+
+Thus, unmap sgl in qlt_abort_cmd() for commands with the aborted flag set.
+
+Link: https://lore.kernel.org/r/AS8PR10MB4952D545F84B6B1DFD39EC1E9DEE9@AS8PR10MB4952.EURPRD10.PROD.OUTLOOK.COM
+Reviewed-by: Himanshu Madhani <himanshu.madhani@oracle.com>
+Signed-off-by: Gleb Chesnokov <Chesnokov.G@raidix.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/scsi/qla2xxx/qla_target.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
+index b109716d44fb..7ab3c9e4d478 100644
+--- a/drivers/scsi/qla2xxx/qla_target.c
++++ b/drivers/scsi/qla2xxx/qla_target.c
+@@ -3837,6 +3837,9 @@ int qlt_abort_cmd(struct qla_tgt_cmd *cmd)
+
+ spin_lock_irqsave(&cmd->cmd_lock, flags);
+ if (cmd->aborted) {
++ if (cmd->sg_mapped)
++ qlt_unmap_sg(vha, cmd);
++
+ spin_unlock_irqrestore(&cmd->cmd_lock, flags);
+ /*
+ * It's normal to see 2 calls in this path:
+--
+2.35.1
+
diff --git a/queue-5.17/scsi-scsi_dh_alua-properly-handle-the-alua-transitio.patch b/queue-5.17/scsi-scsi_dh_alua-properly-handle-the-alua-transitio.patch
new file mode 100644
index 0000000..b9ddb21
--- /dev/null
+++ b/queue-5.17/scsi-scsi_dh_alua-properly-handle-the-alua-transitio.patch
@@ -0,0 +1,59 @@
+From e2425df3796f1ebf7db0321820cb6e68b0a29f6a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 2 May 2022 08:09:17 -0700
+Subject: scsi: scsi_dh_alua: Properly handle the ALUA transitioning state
+
+From: Brian Bunker <brian@purestorage.com>
+
+[ Upstream commit 6056a92ceb2a7705d61df7ec5370548e96aee258 ]
+
+The handling of the ALUA transitioning state is currently broken. When a
+target goes into this state, it is expected that the target is allowed to
+stay in this state for the implicit transition timeout without a path
+failure. The handler has this logic, but it gets skipped currently.
+
+When the target transitions, there is in-flight I/O from the initiator. The
+first of these responses from the target will be a unit attention letting
+the initiator know that the ALUA state has changed. The remaining
+in-flight I/Os, before the initiator finds out that the portal state has
+changed, will return not ready, ALUA state is transitioning. The portal
+state will change to SCSI_ACCESS_STATE_TRANSITIONING. This will lead to all
+new I/O immediately failing the path unexpectedly. The path failure happens
+in less than a second, instead of I/O succeeding as expected until the
+transition timer is exceeded.
+
+Allow I/Os to continue while the path is in the ALUA transitioning
+state. The handler already takes care of a target that stays in the
+transitioning state for too long by changing the state to ALUA state
+standby once the transition timeout is exceeded at which point the path
+will fail.
+
+Link: https://lore.kernel.org/r/CAHZQxy+4sTPz9+pY3=7VJH+CLUJsDct81KtnR2be8ycN5mhqTg@mail.gmail.com
+Reviewed-by: Hannes Reinecke <hare@suse.de>
+Acked-by: Krishna Kant <krishna.kant@purestorage.com>
+Acked-by: Seamus Connor <sconnor@purestorage.com>
+Signed-off-by: Brian Bunker <brian@purestorage.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/scsi/device_handler/scsi_dh_alua.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
+index 37d06f993b76..1d9be771f3ee 100644
+--- a/drivers/scsi/device_handler/scsi_dh_alua.c
++++ b/drivers/scsi/device_handler/scsi_dh_alua.c
+@@ -1172,9 +1172,8 @@ static blk_status_t alua_prep_fn(struct scsi_device *sdev, struct request *req)
+ case SCSI_ACCESS_STATE_OPTIMAL:
+ case SCSI_ACCESS_STATE_ACTIVE:
+ case SCSI_ACCESS_STATE_LBA:
+- return BLK_STS_OK;
+ case SCSI_ACCESS_STATE_TRANSITIONING:
+- return BLK_STS_AGAIN;
++ return BLK_STS_OK;
+ default:
+ req->rq_flags |= RQF_QUIET;
+ return BLK_STS_IOERR;
+--
+2.35.1
+
diff --git a/queue-5.17/scsi-ufs-core-fix-referencing-invalid-rsp-field.patch b/queue-5.17/scsi-ufs-core-fix-referencing-invalid-rsp-field.patch
new file mode 100644
index 0000000..aeb27a6
--- /dev/null
+++ b/queue-5.17/scsi-ufs-core-fix-referencing-invalid-rsp-field.patch
@@ -0,0 +1,63 @@
+From 174ceeb3fed5abcc435dfee6ff0e87801dafcfcf Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 19 May 2022 15:05:29 +0900
+Subject: scsi: ufs: core: Fix referencing invalid rsp field
+
+From: Daejun Park <daejun7.park@samsung.com>
+
+[ Upstream commit d5d92b64408443e113b9742f8f1c35278910dd4d ]
+
+Fix referencing sense data when it is invalid. When the length of the data
+segment is 0, there is no valid information in the rsp field, so
+ufshpb_rsp_upiu() returns without any additional operation.
+
+Link: https://lore.kernel.org/r/252651381.41652940482659.JavaMail.epsvc@epcpadp4
+Fixes: 4b5f49079c52 ("scsi: ufs: ufshpb: L2P map management for HPB read")
+Acked-by: Avri Altman <avri.altman@wdc.com>
+Signed-off-by: Daejun Park <daejun7.park@samsung.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/scsi/ufs/ufshpb.c | 19 +++++++------------
+ 1 file changed, 7 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/scsi/ufs/ufshpb.c b/drivers/scsi/ufs/ufshpb.c
+index b34feba1f53d..8dc818b03939 100644
+--- a/drivers/scsi/ufs/ufshpb.c
++++ b/drivers/scsi/ufs/ufshpb.c
+@@ -1256,6 +1256,13 @@ void ufshpb_rsp_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
+ struct utp_hpb_rsp *rsp_field = &lrbp->ucd_rsp_ptr->hr;
+ int data_seg_len;
+
++ data_seg_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2)
++ & MASK_RSP_UPIU_DATA_SEG_LEN;
++
++ /* If data segment length is zero, rsp_field is not valid */
++ if (!data_seg_len)
++ return;
++
+ if (unlikely(lrbp->lun != rsp_field->lun)) {
+ struct scsi_device *sdev;
+ bool found = false;
+@@ -1290,18 +1297,6 @@ void ufshpb_rsp_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
+ return;
+ }
+
+- data_seg_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2)
+- & MASK_RSP_UPIU_DATA_SEG_LEN;
+-
+- /* To flush remained rsp_list, we queue the map_work task */
+- if (!data_seg_len) {
+- if (!ufshpb_is_general_lun(hpb->lun))
+- return;
+-
+- ufshpb_kick_map_work(hpb);
+- return;
+- }
+-
+ BUILD_BUG_ON(sizeof(struct utp_hpb_rsp) != UTP_HPB_RSP_SIZE);
+
+ if (!ufshpb_is_hpb_rsp_valid(hba, lrbp, rsp_field))
+--
+2.35.1
+
diff --git a/queue-5.17/selftests-add-ping-test-with-ping_group_range-tuned.patch b/queue-5.17/selftests-add-ping-test-with-ping_group_range-tuned.patch
new file mode 100644
index 0000000..9ba5370
--- /dev/null
+++ b/queue-5.17/selftests-add-ping-test-with-ping_group_range-tuned.patch
@@ -0,0 +1,67 @@
+From 32779761e91ba0ecc19000e5690a69df55e60135 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 4 May 2022 11:07:39 +0200
+Subject: selftests: add ping test with ping_group_range tuned
+
+From: Nicolas Dichtel <nicolas.dichtel@6wind.com>
+
+[ Upstream commit e71b7f1f44d3d88c677769c85ef0171caf9fc89f ]
+
+The 'ping' utility is able to manage two kinds of sockets (raw or icmp),
+depending on the sysctl ping_group_range. By default, ping_group_range is
+set to '1 0', which forces ping to use an ip raw socket.
+
+Let's replay the ping tests by allowing 'ping' to use the ip icmp socket.
+After the previous patch, ipv4 test results are the same with both kinds
+of socket. For ipv6, there are a lot of new failures (the previous patch
+fixes only two cases).
+
+Signed-off-by: Nicolas Dichtel <nicolas.dichtel@6wind.com>
+Reviewed-by: David Ahern <dsahern@kernel.org>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/testing/selftests/net/fcnal-test.sh | 12 ++++++++++++
+ 1 file changed, 12 insertions(+)
+
+diff --git a/tools/testing/selftests/net/fcnal-test.sh b/tools/testing/selftests/net/fcnal-test.sh
+index 3f4c8cfe7aca..7cd9b31d0307 100755
+--- a/tools/testing/selftests/net/fcnal-test.sh
++++ b/tools/testing/selftests/net/fcnal-test.sh
+@@ -810,10 +810,16 @@ ipv4_ping()
+ setup
+ set_sysctl net.ipv4.raw_l3mdev_accept=1 2>/dev/null
+ ipv4_ping_novrf
++ setup
++ set_sysctl net.ipv4.ping_group_range='0 2147483647' 2>/dev/null
++ ipv4_ping_novrf
+
+ log_subsection "With VRF"
+ setup "yes"
+ ipv4_ping_vrf
++ setup "yes"
++ set_sysctl net.ipv4.ping_group_range='0 2147483647' 2>/dev/null
++ ipv4_ping_vrf
+ }
+
+ ################################################################################
+@@ -2348,10 +2354,16 @@ ipv6_ping()
+ log_subsection "No VRF"
+ setup
+ ipv6_ping_novrf
++ setup
++ set_sysctl net.ipv4.ping_group_range='0 2147483647' 2>/dev/null
++ ipv6_ping_novrf
+
+ log_subsection "With VRF"
+ setup "yes"
+ ipv6_ping_vrf
++ setup "yes"
++ set_sysctl net.ipv4.ping_group_range='0 2147483647' 2>/dev/null
++ ipv6_ping_vrf
+ }
+
+ ################################################################################
+--
+2.35.1
+
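For context, the two socket types the commit message above refers to can be
probed directly from userspace. A minimal C sketch (whether each call succeeds
depends on the caller's privileges and on the running system's
net.ipv4.ping_group_range setting):

#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>

static void try_socket(const char *name, int type)
{
	int fd = socket(AF_INET, type, IPPROTO_ICMP);

	if (fd < 0)
		printf("%-12s refused: %s\n", name, strerror(errno));
	else
		printf("%-12s ok (fd %d)\n", name, fd);
}

int main(void)
{
	/* needs CAP_NET_RAW; what ping uses with the default '1 0' range */
	try_socket("raw icmp", SOCK_RAW);

	/* allowed only if the caller's group id falls inside
	 * net.ipv4.ping_group_range; what ping uses otherwise */
	try_socket("dgram icmp", SOCK_DGRAM);

	return 0;
}
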
diff --git a/queue-5.17/series b/queue-5.17/series
index d8b041d..b678f2b 100644
--- a/queue-5.17/series
+++ b/queue-5.17/series
@@ -65,3 +65,90 @@
drm-dp-mst-fix-a-possible-memory-leak-in-fetch_monitor_name.patch
dma-buf-fix-use-of-dma_buf_set_name_-a-b-in-userspace.patch
dma-buf-ensure-unique-directory-name-for-dmabuf-stats.patch
+arm64-dts-qcom-sm8250-don-t-enable-rx-tx-macro-by-de.patch
+arm-dts-aspeed-g6-remove-fwqspid-group-in-pinctrl-dt.patch
+pinctrl-pinctrl-aspeed-g6-remove-fwqspid-group-in-pi.patch
+arm-dts-aspeed-g6-fix-spi1-spi2-quad-pin-group.patch
+arm-dts-aspeed-add-video-engine-to-g6.patch
+pinctrl-ocelot-fix-for-lan966x-alt-mode.patch
+pinctrl-mediatek-mt8365-fix-ies-control-pins.patch
+alsa-hda-fix-unused-realtek-function-when-pm-is-not-.patch
+net-ipa-certain-dropped-packets-aren-t-accounted-for.patch
+net-ipa-record-proper-rx-transaction-count.patch
+block-mq-deadline-set-the-fifo_time-member-also-if-i.patch
+mptcp-fix-subflow-accounting-on-close.patch
+net-macb-increment-rx-bd-head-after-allocating-skb-a.patch
+i915-guc-reset-make-__guc_reset_context-aware-of-gui.patch
+xfrm-rework-default-policy-structure.patch
+xfrm-fix-disable_policy-flag-use-when-arriving-from-.patch
+net-sched-act_pedit-sanitize-shift-argument-before-u.patch
+netfilter-flowtable-fix-excessive-hw-offload-attempt.patch
+netfilter-nft_flow_offload-skip-dst-neigh-lookup-for.patch
+net-fix-dev_fill_forward_path-with-pppoe-bridge.patch
+netfilter-nft_flow_offload-fix-offload-with-pppoe-vl.patch
+ptp-ocp-have-adjtime-handle-negative-delta_ns-correc.patch
+revert-pci-aardvark-rewrite-irq-code-to-chained-irq-.patch
+net-lan966x-fix-assignment-of-the-mac-address.patch
+net-systemport-fix-an-error-handling-path-in-bcm_sys.patch
+net-vmxnet3-fix-possible-use-after-free-bugs-in-vmxn.patch
+net-vmxnet3-fix-possible-null-pointer-dereference-in.patch
+arm64-kexec-load-from-kimage-prior-to-clobbering.patch
+ice-fix-crash-when-writing-timestamp-on-rx-rings.patch
+ice-fix-possible-under-reporting-of-ethtool-tx-and-r.patch
+ice-fix-interrupt-moderation-settings-getting-cleare.patch
+clk-at91-generated-consider-range-when-calculating-b.patch
+net-qla3xxx-fix-a-test-in-ql_reset_work.patch
+nfc-nci-fix-sleep-in-atomic-context-bugs-caused-by-n.patch
+net-mlx5-dr-fix-missing-flow_source-when-creating-mu.patch
+net-mlx5-initialize-flow-steering-during-driver-prob.patch
+net-mlx5-dr-ignore-modify-ttl-on-rx-if-device-doesn-.patch
+net-mlx5e-block-rx-gro-hw-feature-in-switchdev-mode.patch
+net-mlx5e-properly-block-lro-when-xdp-is-enabled.patch
+net-mlx5e-properly-block-hw-gro-when-xdp-is-enabled.patch
+net-mlx5e-remove-hw-gro-from-reported-features.patch
+net-mlx5-drain-fw_reset-when-removing-device.patch
+net-af_key-add-check-for-pfkey_broadcast-in-function.patch
+arm-9196-1-spectre-bhb-enable-for-cortex-a15.patch
+arm-9197-1-spectre-bhb-fix-loop8-sequence-for-thumb2.patch
+mptcp-fix-checksum-byte-order.patch
+mptcp-strict-local-address-id-selection.patch
+mptcp-do-tcp-fallback-on-early-dss-checksum-failure.patch
+igb-skip-phy-status-check-where-unavailable.patch
+netfilter-flowtable-fix-tcp-flow-teardown.patch
+netfilter-flowtable-pass-flowtable-to-nf_flow_table_.patch
+netfilter-flowtable-move-dst_check-to-packet-path.patch
+vdpa-mlx5-use-consistent-rqt-size.patch
+net-bridge-clear-offload_fwd_mark-when-passing-frame.patch
+riscv-dts-sifive-fu540-c000-align-dma-node-name-with.patch
+scsi-ufs-core-fix-referencing-invalid-rsp-field.patch
+kvm-x86-pmu-fix-the-compare-function-used-by-the-pmu.patch
+perf-build-fix-check-for-btf__load_from_kernel_by_id.patch
+perf-stat-fix-and-validate-cpu-map-inputs-in-synthet.patch
+gpio-gpio-vf610-do-not-touch-other-bits-when-set-the.patch
+gpio-mvebu-pwm-refuse-requests-with-inverted-polarit.patch
+perf-test-fix-all-pmu-test-to-skip-hv_24x7-hv_gpci-t.patch
+perf-regs-x86-fix-arch__intr_reg_mask-for-the-hybrid.patch
+perf-bench-numa-address-compiler-error-on-s390.patch
+perf-test-bpf-skip-test-if-clang-is-not-present.patch
+scsi-scsi_dh_alua-properly-handle-the-alua-transitio.patch
+scsi-qla2xxx-fix-missed-dma-unmap-for-aborted-comman.patch
+mac80211-fix-rx-reordering-with-non-explicit-psmp-ac.patch
+nl80211-validate-s1g-channel-width.patch
+cfg80211-retrieve-s1g-operating-channel-number.patch
+selftests-add-ping-test-with-ping_group_range-tuned.patch
+revert-fbdev-make-fb_release-return-enodev-if-fbdev-.patch
+fbdev-prevent-possible-use-after-free-in-fb_release.patch
+platform-x86-thinkpad_acpi-convert-btusb-dmi-list-to.patch
+platform-x86-thinkpad_acpi-correct-dual-fan-probe.patch
+platform-x86-intel-fix-rmmod-pmt_telemetry-panic.patch
+platform-surface-gpe-add-support-for-surface-pro-8.patch
+drm-amd-display-undo-clearing-of-z10-related-functio.patch
+net-fix-wrong-network-header-length.patch
+nl80211-fix-locking-in-nl80211_set_tx_bitrate_mask.patch
+ethernet-tulip-fix-missing-pci_disable_device-on-err.patch
+net-stmmac-fix-missing-pci_disable_device-on-error-i.patch
+net-atlantic-fix-frag-0-not-initialized.patch
+net-atlantic-reduce-scope-of-is_rsc_complete.patch
+net-atlantic-add-check-for-max_skb_frags.patch
+net-atlantic-verify-hw_head_-lies-within-tx-buffer-r.patch
+arm64-enable-repeat-tlbi-workaround-on-kryo4xx-gold-.patch
diff --git a/queue-5.17/vdpa-mlx5-use-consistent-rqt-size.patch b/queue-5.17/vdpa-mlx5-use-consistent-rqt-size.patch
new file mode 100644
index 0000000..ec487f7
--- /dev/null
+++ b/queue-5.17/vdpa-mlx5-use-consistent-rqt-size.patch
@@ -0,0 +1,223 @@
+From 89d8069ddaafcc1310ba6d335ccf29f3e13de70a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 16 May 2022 11:47:35 +0300
+Subject: vdpa/mlx5: Use consistent RQT size
+
+From: Eli Cohen <elic@nvidia.com>
+
+[ Upstream commit acde3929492bcb9ceb0df1270230c422b1013798 ]
+
+The current code evaluates RQT size based on the configured number of
+virtqueues. This can raise an issue in the following scenario:
+
+Assume MQ was negotiated.
+1. mlx5_vdpa_set_map() gets called.
+2. handle_ctrl_mq() is called setting cur_num_vqs to some value, lower
+ than the configured max VQs.
+3. A second set_map gets called, but now a smaller number of VQs is used
+ to evaluate the size of the RQT.
+4. handle_ctrl_mq() is called with a value larger than what the RQT can
+ hold. This will emit errors and the driver state is compromised.
+
+To fix this, we use a new field in struct mlx5_vdpa_net to hold the
+required number of entries in the RQT. This value is evaluated in
+mlx5_vdpa_set_driver_features() where we have the negotiated features
+all set up.
+
+In addition to that, we take into consideration the max capability of RQT
+entries early when the device is added so we don't need to consider
+it when creating the RQT.
+
+Last, we remove the use of mlx5_vdpa_max_qps(), which just returns
+max_vqs / 2, and make the code clearer.
+
+Fixes: 52893733f2c5 ("vdpa/mlx5: Add multiqueue support")
+Acked-by: Jason Wang <jasowang@redhat.com>
+Signed-off-by: Eli Cohen <elic@nvidia.com>
+Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/vdpa/mlx5/net/mlx5_vnet.c | 61 +++++++++++--------------------
+ 1 file changed, 21 insertions(+), 40 deletions(-)
+
+diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
+index 1b5de3af1a62..9c45be8ab178 100644
+--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
++++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
+@@ -161,6 +161,7 @@ struct mlx5_vdpa_net {
+ struct mlx5_flow_handle *rx_rule_mcast;
+ bool setup;
+ u32 cur_num_vqs;
++ u32 rqt_size;
+ struct notifier_block nb;
+ struct vdpa_callback config_cb;
+ struct mlx5_vdpa_wq_ent cvq_ent;
+@@ -204,17 +205,12 @@ static __virtio16 cpu_to_mlx5vdpa16(struct mlx5_vdpa_dev *mvdev, u16 val)
+ return __cpu_to_virtio16(mlx5_vdpa_is_little_endian(mvdev), val);
+ }
+
+-static inline u32 mlx5_vdpa_max_qps(int max_vqs)
+-{
+- return max_vqs / 2;
+-}
+-
+ static u16 ctrl_vq_idx(struct mlx5_vdpa_dev *mvdev)
+ {
+ if (!(mvdev->actual_features & BIT_ULL(VIRTIO_NET_F_MQ)))
+ return 2;
+
+- return 2 * mlx5_vdpa_max_qps(mvdev->max_vqs);
++ return mvdev->max_vqs;
+ }
+
+ static bool is_ctrl_vq_idx(struct mlx5_vdpa_dev *mvdev, u16 idx)
+@@ -1236,25 +1232,13 @@ static void teardown_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *
+ static int create_rqt(struct mlx5_vdpa_net *ndev)
+ {
+ __be32 *list;
+- int max_rqt;
+ void *rqtc;
+ int inlen;
+ void *in;
+ int i, j;
+ int err;
+- int num;
+-
+- if (!(ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_MQ)))
+- num = 1;
+- else
+- num = ndev->cur_num_vqs / 2;
+
+- max_rqt = min_t(int, roundup_pow_of_two(num),
+- 1 << MLX5_CAP_GEN(ndev->mvdev.mdev, log_max_rqt_size));
+- if (max_rqt < 1)
+- return -EOPNOTSUPP;
+-
+- inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + max_rqt * MLX5_ST_SZ_BYTES(rq_num);
++ inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + ndev->rqt_size * MLX5_ST_SZ_BYTES(rq_num);
+ in = kzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+@@ -1263,12 +1247,12 @@ static int create_rqt(struct mlx5_vdpa_net *ndev)
+ rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);
+
+ MLX5_SET(rqtc, rqtc, list_q_type, MLX5_RQTC_LIST_Q_TYPE_VIRTIO_NET_Q);
+- MLX5_SET(rqtc, rqtc, rqt_max_size, max_rqt);
++ MLX5_SET(rqtc, rqtc, rqt_max_size, ndev->rqt_size);
+ list = MLX5_ADDR_OF(rqtc, rqtc, rq_num[0]);
+- for (i = 0, j = 0; i < max_rqt; i++, j += 2)
+- list[i] = cpu_to_be32(ndev->vqs[j % (2 * num)].virtq_id);
++ for (i = 0, j = 0; i < ndev->rqt_size; i++, j += 2)
++ list[i] = cpu_to_be32(ndev->vqs[j % ndev->cur_num_vqs].virtq_id);
+
+- MLX5_SET(rqtc, rqtc, rqt_actual_size, max_rqt);
++ MLX5_SET(rqtc, rqtc, rqt_actual_size, ndev->rqt_size);
+ err = mlx5_vdpa_create_rqt(&ndev->mvdev, in, inlen, &ndev->res.rqtn);
+ kfree(in);
+ if (err)
+@@ -1282,19 +1266,13 @@ static int create_rqt(struct mlx5_vdpa_net *ndev)
+ static int modify_rqt(struct mlx5_vdpa_net *ndev, int num)
+ {
+ __be32 *list;
+- int max_rqt;
+ void *rqtc;
+ int inlen;
+ void *in;
+ int i, j;
+ int err;
+
+- max_rqt = min_t(int, roundup_pow_of_two(ndev->cur_num_vqs / 2),
+- 1 << MLX5_CAP_GEN(ndev->mvdev.mdev, log_max_rqt_size));
+- if (max_rqt < 1)
+- return -EOPNOTSUPP;
+-
+- inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + max_rqt * MLX5_ST_SZ_BYTES(rq_num);
++ inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + ndev->rqt_size * MLX5_ST_SZ_BYTES(rq_num);
+ in = kzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+@@ -1305,10 +1283,10 @@ static int modify_rqt(struct mlx5_vdpa_net *ndev, int num)
+ MLX5_SET(rqtc, rqtc, list_q_type, MLX5_RQTC_LIST_Q_TYPE_VIRTIO_NET_Q);
+
+ list = MLX5_ADDR_OF(rqtc, rqtc, rq_num[0]);
+- for (i = 0, j = 0; i < max_rqt; i++, j += 2)
++ for (i = 0, j = 0; i < ndev->rqt_size; i++, j += 2)
+ list[i] = cpu_to_be32(ndev->vqs[j % num].virtq_id);
+
+- MLX5_SET(rqtc, rqtc, rqt_actual_size, max_rqt);
++ MLX5_SET(rqtc, rqtc, rqt_actual_size, ndev->rqt_size);
+ err = mlx5_vdpa_modify_rqt(&ndev->mvdev, in, inlen, ndev->res.rqtn);
+ kfree(in);
+ if (err)
+@@ -1582,7 +1560,7 @@ static virtio_net_ctrl_ack handle_ctrl_mq(struct mlx5_vdpa_dev *mvdev, u8 cmd)
+
+ newqps = mlx5vdpa16_to_cpu(mvdev, mq.virtqueue_pairs);
+ if (newqps < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
+- newqps > mlx5_vdpa_max_qps(mvdev->max_vqs))
++ newqps > ndev->rqt_size)
+ break;
+
+ if (ndev->cur_num_vqs == 2 * newqps) {
+@@ -1937,7 +1915,7 @@ static int setup_virtqueues(struct mlx5_vdpa_dev *mvdev)
+ int err;
+ int i;
+
+- for (i = 0; i < 2 * mlx5_vdpa_max_qps(mvdev->max_vqs); i++) {
++ for (i = 0; i < mvdev->max_vqs; i++) {
+ err = setup_vq(ndev, &ndev->vqs[i]);
+ if (err)
+ goto err_vq;
+@@ -2008,9 +1986,11 @@ static int mlx5_vdpa_set_driver_features(struct vdpa_device *vdev, u64 features)
+
+ ndev->mvdev.actual_features = features & ndev->mvdev.mlx_features;
+ if (ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_MQ))
+- ndev->cur_num_vqs = 2 * mlx5vdpa16_to_cpu(mvdev, ndev->config.max_virtqueue_pairs);
++ ndev->rqt_size = mlx5vdpa16_to_cpu(mvdev, ndev->config.max_virtqueue_pairs);
+ else
+- ndev->cur_num_vqs = 2;
++ ndev->rqt_size = 1;
++
++ ndev->cur_num_vqs = 2 * ndev->rqt_size;
+
+ update_cvq_info(mvdev);
+ return err;
+@@ -2463,7 +2443,7 @@ static void init_mvqs(struct mlx5_vdpa_net *ndev)
+ struct mlx5_vdpa_virtqueue *mvq;
+ int i;
+
+- for (i = 0; i < 2 * mlx5_vdpa_max_qps(ndev->mvdev.max_vqs); ++i) {
++ for (i = 0; i < ndev->mvdev.max_vqs; ++i) {
+ mvq = &ndev->vqs[i];
+ memset(mvq, 0, offsetof(struct mlx5_vdpa_virtqueue, ri));
+ mvq->index = i;
+@@ -2583,7 +2563,8 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
+ return -EOPNOTSUPP;
+ }
+
+- max_vqs = MLX5_CAP_DEV_VDPA_EMULATION(mdev, max_num_virtio_queues);
++ max_vqs = min_t(int, MLX5_CAP_DEV_VDPA_EMULATION(mdev, max_num_virtio_queues),
++ 1 << MLX5_CAP_GEN(mdev, log_max_rqt_size));
+ if (max_vqs < 2) {
+ dev_warn(mdev->device,
+ "%d virtqueues are supported. At least 2 are required\n",
+@@ -2647,7 +2628,7 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
+ ndev->mvdev.mlx_features |= BIT_ULL(VIRTIO_NET_F_MAC);
+ }
+
+- config->max_virtqueue_pairs = cpu_to_mlx5vdpa16(mvdev, mlx5_vdpa_max_qps(max_vqs));
++ config->max_virtqueue_pairs = cpu_to_mlx5vdpa16(mvdev, max_vqs / 2);
+ mvdev->vdev.dma_dev = &mdev->pdev->dev;
+ err = mlx5_vdpa_alloc_resources(&ndev->mvdev);
+ if (err)
+@@ -2674,7 +2655,7 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
+ ndev->nb.notifier_call = event_handler;
+ mlx5_notifier_register(mdev, &ndev->nb);
+ mvdev->vdev.mdev = &mgtdev->mgtdev;
+- err = _vdpa_register_device(&mvdev->vdev, 2 * mlx5_vdpa_max_qps(max_vqs) + 1);
++ err = _vdpa_register_device(&mvdev->vdev, max_vqs + 1);
+ if (err)
+ goto err_reg;
+
+--
+2.35.1
+
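For context, the sizing problem described above is plain arithmetic: if the
RQT is sized from the queues currently in use rather than from the negotiated
maximum, a later control-MQ request can exceed it. A toy C illustration with
made-up numbers (not the driver code):

#include <stdio.h>

int main(void)
{
	int max_virtqueue_pairs = 8;	/* negotiated with the guest */
	int cur_num_vqs = 4;		/* only 2 data queue pairs in use */

	int rqt_old = cur_num_vqs / 2;		/* sized from current usage */
	int rqt_new = max_virtqueue_pairs;	/* sized from negotiated max */

	int newqps = 8;			/* a later VIRTIO_NET_CTRL_MQ request */

	printf("old sizing: RQT holds %d entries, request for %d pairs %s\n",
	       rqt_old, newqps, newqps > rqt_old ? "overflows" : "fits");
	printf("new sizing: RQT holds %d entries, request for %d pairs %s\n",
	       rqt_new, newqps, newqps > rqt_new ? "overflows" : "fits");
	return 0;
}
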
diff --git a/queue-5.17/xfrm-fix-disable_policy-flag-use-when-arriving-from-.patch b/queue-5.17/xfrm-fix-disable_policy-flag-use-when-arriving-from-.patch
new file mode 100644
index 0000000..c9bbac4
--- /dev/null
+++ b/queue-5.17/xfrm-fix-disable_policy-flag-use-when-arriving-from-.patch
@@ -0,0 +1,183 @@
+From 306a7682e53557656e82564c04fc458dc4aeb81e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 13 May 2022 23:34:02 +0300
+Subject: xfrm: fix "disable_policy" flag use when arriving from different
+ devices
+
+From: Eyal Birger <eyal.birger@gmail.com>
+
+[ Upstream commit e6175a2ed1f18bf2f649625bf725e07adcfa6a28 ]
+
+In IPv4, setting the "disable_policy" flag on a device means no policy
+should be enforced for traffic originating from the device. This was
+implemented by setting the DST_NOPOLICY flag in the dst based on the
+originating device.
+
+However, dsts are cached in nexthops regardless of the originating
+devices, in which case, the DST_NOPOLICY flag value may be incorrect.
+
+Consider the following setup:
+
+ +------------------------------+
+ | ROUTER |
+ +-------------+ | +-----------------+ |
+ | ipsec src |----|-|ipsec0 | |
+ +-------------+ | |disable_policy=0 | +----+ |
+ | +-----------------+ |eth1|-|-----
+ +-------------+ | +-----------------+ +----+ |
+ | noipsec src |----|-|eth0 | |
+ +-------------+ | |disable_policy=1 | |
+ | +-----------------+ |
+ +------------------------------+
+
+Where ROUTER has a default route towards eth1.
+
+dst entries for traffic arriving from eth0 would have DST_NOPOLICY
+and would be cached and therefore can be reused by traffic originating
+from ipsec0, skipping policy check.
+
+Fix by setting an IPSKB_NOPOLICY flag in IPCB and observing it instead
+of the DST in IN/FWD IPv4 policy checks.
+
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Reported-by: Shmulik Ladkani <shmulik.ladkani@gmail.com>
+Signed-off-by: Eyal Birger <eyal.birger@gmail.com>
+Signed-off-by: Steffen Klassert <steffen.klassert@secunet.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/ip.h | 1 +
+ include/net/xfrm.h | 14 +++++++++++++-
+ net/ipv4/route.c | 23 ++++++++++++++++++-----
+ 3 files changed, 32 insertions(+), 6 deletions(-)
+
+diff --git a/include/net/ip.h b/include/net/ip.h
+index b51bae43b0dd..9fba950fdf12 100644
+--- a/include/net/ip.h
++++ b/include/net/ip.h
+@@ -56,6 +56,7 @@ struct inet_skb_parm {
+ #define IPSKB_DOREDIRECT BIT(5)
+ #define IPSKB_FRAG_PMTU BIT(6)
+ #define IPSKB_L3SLAVE BIT(7)
++#define IPSKB_NOPOLICY BIT(8)
+
+ u16 frag_max_size;
+ };
+diff --git a/include/net/xfrm.h b/include/net/xfrm.h
+index 6fb899ff5afc..d2efddce65d4 100644
+--- a/include/net/xfrm.h
++++ b/include/net/xfrm.h
+@@ -1093,6 +1093,18 @@ static inline bool __xfrm_check_nopolicy(struct net *net, struct sk_buff *skb,
+ return false;
+ }
+
++static inline bool __xfrm_check_dev_nopolicy(struct sk_buff *skb,
++ int dir, unsigned short family)
++{
++ if (dir != XFRM_POLICY_OUT && family == AF_INET) {
++ /* same dst may be used for traffic originating from
++ * devices with different policy settings.
++ */
++ return IPCB(skb)->flags & IPSKB_NOPOLICY;
++ }
++ return skb_dst(skb) && (skb_dst(skb)->flags & DST_NOPOLICY);
++}
++
+ static inline int __xfrm_policy_check2(struct sock *sk, int dir,
+ struct sk_buff *skb,
+ unsigned int family, int reverse)
+@@ -1104,7 +1116,7 @@ static inline int __xfrm_policy_check2(struct sock *sk, int dir,
+ return __xfrm_policy_check(sk, ndir, skb, family);
+
+ return __xfrm_check_nopolicy(net, skb, dir) ||
+- (skb_dst(skb) && (skb_dst(skb)->flags & DST_NOPOLICY)) ||
++ __xfrm_check_dev_nopolicy(skb, dir, family) ||
+ __xfrm_policy_check(sk, ndir, skb, family);
+ }
+
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index eef07b62b2d8..1cdfac733bd8 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -1721,6 +1721,7 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
+ struct in_device *in_dev = __in_dev_get_rcu(dev);
+ unsigned int flags = RTCF_MULTICAST;
+ struct rtable *rth;
++ bool no_policy;
+ u32 itag = 0;
+ int err;
+
+@@ -1731,8 +1732,12 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
+ if (our)
+ flags |= RTCF_LOCAL;
+
++ no_policy = IN_DEV_ORCONF(in_dev, NOPOLICY);
++ if (no_policy)
++ IPCB(skb)->flags |= IPSKB_NOPOLICY;
++
+ rth = rt_dst_alloc(dev_net(dev)->loopback_dev, flags, RTN_MULTICAST,
+- IN_DEV_ORCONF(in_dev, NOPOLICY), false);
++ no_policy, false);
+ if (!rth)
+ return -ENOBUFS;
+
+@@ -1791,7 +1796,7 @@ static int __mkroute_input(struct sk_buff *skb,
+ struct rtable *rth;
+ int err;
+ struct in_device *out_dev;
+- bool do_cache;
++ bool do_cache, no_policy;
+ u32 itag = 0;
+
+ /* get a working reference to the output device */
+@@ -1836,6 +1841,10 @@ static int __mkroute_input(struct sk_buff *skb,
+ }
+ }
+
++ no_policy = IN_DEV_ORCONF(in_dev, NOPOLICY);
++ if (no_policy)
++ IPCB(skb)->flags |= IPSKB_NOPOLICY;
++
+ fnhe = find_exception(nhc, daddr);
+ if (do_cache) {
+ if (fnhe)
+@@ -1848,8 +1857,7 @@ static int __mkroute_input(struct sk_buff *skb,
+ }
+ }
+
+- rth = rt_dst_alloc(out_dev->dev, 0, res->type,
+- IN_DEV_ORCONF(in_dev, NOPOLICY),
++ rth = rt_dst_alloc(out_dev->dev, 0, res->type, no_policy,
+ IN_DEV_ORCONF(out_dev, NOXFRM));
+ if (!rth) {
+ err = -ENOBUFS;
+@@ -2224,6 +2232,7 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
+ struct rtable *rth;
+ struct flowi4 fl4;
+ bool do_cache = true;
++ bool no_policy;
+
+ /* IP on this device is disabled. */
+
+@@ -2341,6 +2350,10 @@ out: return err;
+ RT_CACHE_STAT_INC(in_brd);
+
+ local_input:
++ no_policy = IN_DEV_ORCONF(in_dev, NOPOLICY);
++ if (no_policy)
++ IPCB(skb)->flags |= IPSKB_NOPOLICY;
++
+ do_cache &= res->fi && !itag;
+ if (do_cache) {
+ struct fib_nh_common *nhc = FIB_RES_NHC(*res);
+@@ -2355,7 +2368,7 @@ out: return err;
+
+ rth = rt_dst_alloc(ip_rt_get_dev(net, res),
+ flags | RTCF_LOCAL, res->type,
+- IN_DEV_ORCONF(in_dev, NOPOLICY), false);
++ no_policy, false);
+ if (!rth)
+ goto e_nobufs;
+
+--
+2.35.1
+
diff --git a/queue-5.17/xfrm-rework-default-policy-structure.patch b/queue-5.17/xfrm-rework-default-policy-structure.patch
new file mode 100644
index 0000000..659f1af
--- /dev/null
+++ b/queue-5.17/xfrm-rework-default-policy-structure.patch
@@ -0,0 +1,237 @@
+From 907016b293f6d252ee94fdfb1fad394fa65003f7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 14 Mar 2022 11:38:22 +0100
+Subject: xfrm: rework default policy structure
+
+From: Nicolas Dichtel <nicolas.dichtel@6wind.com>
+
+[ Upstream commit b58b1f563ab78955d37e9e43e02790a85c66ac05 ]
+
+This is a follow-up to commit f8d858e607b2 ("xfrm: make user policy API
+complete"). The goal is to align the userland API with the internal structures.
+
+Signed-off-by: Nicolas Dichtel <nicolas.dichtel@6wind.com>
+Reviewed-by: Antony Antony <antony.antony@secunet.com>
+Signed-off-by: Steffen Klassert <steffen.klassert@secunet.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/netns/xfrm.h | 6 +----
+ include/net/xfrm.h | 48 +++++++++++++++-------------------------
+ net/xfrm/xfrm_policy.c | 10 ++++++---
+ net/xfrm/xfrm_user.c | 43 +++++++++++++++--------------------
+ 4 files changed, 44 insertions(+), 63 deletions(-)
+
+diff --git a/include/net/netns/xfrm.h b/include/net/netns/xfrm.h
+index 947733a639a6..bd7c3be4af5d 100644
+--- a/include/net/netns/xfrm.h
++++ b/include/net/netns/xfrm.h
+@@ -66,11 +66,7 @@ struct netns_xfrm {
+ int sysctl_larval_drop;
+ u32 sysctl_acq_expires;
+
+- u8 policy_default;
+-#define XFRM_POL_DEFAULT_IN 1
+-#define XFRM_POL_DEFAULT_OUT 2
+-#define XFRM_POL_DEFAULT_FWD 4
+-#define XFRM_POL_DEFAULT_MASK 7
++ u8 policy_default[XFRM_POLICY_MAX];
+
+ #ifdef CONFIG_SYSCTL
+ struct ctl_table_header *sysctl_hdr;
+diff --git a/include/net/xfrm.h b/include/net/xfrm.h
+index 76aa6f11a540..6fb899ff5afc 100644
+--- a/include/net/xfrm.h
++++ b/include/net/xfrm.h
+@@ -1081,25 +1081,18 @@ xfrm_state_addr_cmp(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x, un
+ }
+
+ #ifdef CONFIG_XFRM
+-static inline bool
+-xfrm_default_allow(struct net *net, int dir)
+-{
+- u8 def = net->xfrm.policy_default;
+-
+- switch (dir) {
+- case XFRM_POLICY_IN:
+- return def & XFRM_POL_DEFAULT_IN ? false : true;
+- case XFRM_POLICY_OUT:
+- return def & XFRM_POL_DEFAULT_OUT ? false : true;
+- case XFRM_POLICY_FWD:
+- return def & XFRM_POL_DEFAULT_FWD ? false : true;
+- }
+- return false;
+-}
+-
+ int __xfrm_policy_check(struct sock *, int dir, struct sk_buff *skb,
+ unsigned short family);
+
++static inline bool __xfrm_check_nopolicy(struct net *net, struct sk_buff *skb,
++ int dir)
++{
++ if (!net->xfrm.policy_count[dir] && !secpath_exists(skb))
++ return net->xfrm.policy_default[dir] == XFRM_USERPOLICY_ACCEPT;
++
++ return false;
++}
++
+ static inline int __xfrm_policy_check2(struct sock *sk, int dir,
+ struct sk_buff *skb,
+ unsigned int family, int reverse)
+@@ -1110,13 +1103,9 @@ static inline int __xfrm_policy_check2(struct sock *sk, int dir,
+ if (sk && sk->sk_policy[XFRM_POLICY_IN])
+ return __xfrm_policy_check(sk, ndir, skb, family);
+
+- if (xfrm_default_allow(net, dir))
+- return (!net->xfrm.policy_count[dir] && !secpath_exists(skb)) ||
+- (skb_dst(skb) && (skb_dst(skb)->flags & DST_NOPOLICY)) ||
+- __xfrm_policy_check(sk, ndir, skb, family);
+- else
+- return (skb_dst(skb) && (skb_dst(skb)->flags & DST_NOPOLICY)) ||
+- __xfrm_policy_check(sk, ndir, skb, family);
++ return __xfrm_check_nopolicy(net, skb, dir) ||
++ (skb_dst(skb) && (skb_dst(skb)->flags & DST_NOPOLICY)) ||
++ __xfrm_policy_check(sk, ndir, skb, family);
+ }
+
+ static inline int xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, unsigned short family)
+@@ -1168,13 +1157,12 @@ static inline int xfrm_route_forward(struct sk_buff *skb, unsigned short family)
+ {
+ struct net *net = dev_net(skb->dev);
+
+- if (xfrm_default_allow(net, XFRM_POLICY_OUT))
+- return !net->xfrm.policy_count[XFRM_POLICY_OUT] ||
+- (skb_dst(skb)->flags & DST_NOXFRM) ||
+- __xfrm_route_forward(skb, family);
+- else
+- return (skb_dst(skb)->flags & DST_NOXFRM) ||
+- __xfrm_route_forward(skb, family);
++ if (!net->xfrm.policy_count[XFRM_POLICY_OUT] &&
++ net->xfrm.policy_default[XFRM_POLICY_OUT] == XFRM_USERPOLICY_ACCEPT)
++ return true;
++
++ return (skb_dst(skb)->flags & DST_NOXFRM) ||
++ __xfrm_route_forward(skb, family);
+ }
+
+ static inline int xfrm4_route_forward(struct sk_buff *skb)
+diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
+index 882526159d3a..19aa994f5d2c 100644
+--- a/net/xfrm/xfrm_policy.c
++++ b/net/xfrm/xfrm_policy.c
+@@ -3158,7 +3158,7 @@ struct dst_entry *xfrm_lookup_with_ifid(struct net *net,
+
+ nopol:
+ if (!(dst_orig->dev->flags & IFF_LOOPBACK) &&
+- !xfrm_default_allow(net, dir)) {
++ net->xfrm.policy_default[dir] == XFRM_USERPOLICY_BLOCK) {
+ err = -EPERM;
+ goto error;
+ }
+@@ -3569,7 +3569,7 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
+ }
+
+ if (!pol) {
+- if (!xfrm_default_allow(net, dir)) {
++ if (net->xfrm.policy_default[dir] == XFRM_USERPOLICY_BLOCK) {
+ XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOPOLS);
+ return 0;
+ }
+@@ -3629,7 +3629,8 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
+ }
+ xfrm_nr = ti;
+
+- if (!xfrm_default_allow(net, dir) && !xfrm_nr) {
++ if (net->xfrm.policy_default[dir] == XFRM_USERPOLICY_BLOCK &&
++ !xfrm_nr) {
+ XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOSTATES);
+ goto reject;
+ }
+@@ -4118,6 +4119,9 @@ static int __net_init xfrm_net_init(struct net *net)
+ spin_lock_init(&net->xfrm.xfrm_policy_lock);
+ seqcount_spinlock_init(&net->xfrm.xfrm_policy_hash_generation, &net->xfrm.xfrm_policy_lock);
+ mutex_init(&net->xfrm.xfrm_cfg_mutex);
++ net->xfrm.policy_default[XFRM_POLICY_IN] = XFRM_USERPOLICY_ACCEPT;
++ net->xfrm.policy_default[XFRM_POLICY_FWD] = XFRM_USERPOLICY_ACCEPT;
++ net->xfrm.policy_default[XFRM_POLICY_OUT] = XFRM_USERPOLICY_ACCEPT;
+
+ rv = xfrm_statistics_init(net);
+ if (rv < 0)
+diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
+index 72b2f173aac8..64fa8fdd6bbd 100644
+--- a/net/xfrm/xfrm_user.c
++++ b/net/xfrm/xfrm_user.c
+@@ -1994,12 +1994,9 @@ static int xfrm_notify_userpolicy(struct net *net)
+ }
+
+ up = nlmsg_data(nlh);
+- up->in = net->xfrm.policy_default & XFRM_POL_DEFAULT_IN ?
+- XFRM_USERPOLICY_BLOCK : XFRM_USERPOLICY_ACCEPT;
+- up->fwd = net->xfrm.policy_default & XFRM_POL_DEFAULT_FWD ?
+- XFRM_USERPOLICY_BLOCK : XFRM_USERPOLICY_ACCEPT;
+- up->out = net->xfrm.policy_default & XFRM_POL_DEFAULT_OUT ?
+- XFRM_USERPOLICY_BLOCK : XFRM_USERPOLICY_ACCEPT;
++ up->in = net->xfrm.policy_default[XFRM_POLICY_IN];
++ up->fwd = net->xfrm.policy_default[XFRM_POLICY_FWD];
++ up->out = net->xfrm.policy_default[XFRM_POLICY_OUT];
+
+ nlmsg_end(skb, nlh);
+
+@@ -2010,26 +2007,26 @@ static int xfrm_notify_userpolicy(struct net *net)
+ return err;
+ }
+
++static bool xfrm_userpolicy_is_valid(__u8 policy)
++{
++ return policy == XFRM_USERPOLICY_BLOCK ||
++ policy == XFRM_USERPOLICY_ACCEPT;
++}
++
+ static int xfrm_set_default(struct sk_buff *skb, struct nlmsghdr *nlh,
+ struct nlattr **attrs)
+ {
+ struct net *net = sock_net(skb->sk);
+ struct xfrm_userpolicy_default *up = nlmsg_data(nlh);
+
+- if (up->in == XFRM_USERPOLICY_BLOCK)
+- net->xfrm.policy_default |= XFRM_POL_DEFAULT_IN;
+- else if (up->in == XFRM_USERPOLICY_ACCEPT)
+- net->xfrm.policy_default &= ~XFRM_POL_DEFAULT_IN;
++ if (xfrm_userpolicy_is_valid(up->in))
++ net->xfrm.policy_default[XFRM_POLICY_IN] = up->in;
+
+- if (up->fwd == XFRM_USERPOLICY_BLOCK)
+- net->xfrm.policy_default |= XFRM_POL_DEFAULT_FWD;
+- else if (up->fwd == XFRM_USERPOLICY_ACCEPT)
+- net->xfrm.policy_default &= ~XFRM_POL_DEFAULT_FWD;
++ if (xfrm_userpolicy_is_valid(up->fwd))
++ net->xfrm.policy_default[XFRM_POLICY_FWD] = up->fwd;
+
+- if (up->out == XFRM_USERPOLICY_BLOCK)
+- net->xfrm.policy_default |= XFRM_POL_DEFAULT_OUT;
+- else if (up->out == XFRM_USERPOLICY_ACCEPT)
+- net->xfrm.policy_default &= ~XFRM_POL_DEFAULT_OUT;
++ if (xfrm_userpolicy_is_valid(up->out))
++ net->xfrm.policy_default[XFRM_POLICY_OUT] = up->out;
+
+ rt_genid_bump_all(net);
+
+@@ -2059,13 +2056,9 @@ static int xfrm_get_default(struct sk_buff *skb, struct nlmsghdr *nlh,
+ }
+
+ r_up = nlmsg_data(r_nlh);
+-
+- r_up->in = net->xfrm.policy_default & XFRM_POL_DEFAULT_IN ?
+- XFRM_USERPOLICY_BLOCK : XFRM_USERPOLICY_ACCEPT;
+- r_up->fwd = net->xfrm.policy_default & XFRM_POL_DEFAULT_FWD ?
+- XFRM_USERPOLICY_BLOCK : XFRM_USERPOLICY_ACCEPT;
+- r_up->out = net->xfrm.policy_default & XFRM_POL_DEFAULT_OUT ?
+- XFRM_USERPOLICY_BLOCK : XFRM_USERPOLICY_ACCEPT;
++ r_up->in = net->xfrm.policy_default[XFRM_POLICY_IN];
++ r_up->fwd = net->xfrm.policy_default[XFRM_POLICY_FWD];
++ r_up->out = net->xfrm.policy_default[XFRM_POLICY_OUT];
+ nlmsg_end(r_skb, r_nlh);
+
+ return nlmsg_unicast(net->xfrm.nlsk, r_skb, portid);
+--
+2.35.1
+