7.0-stable patches added patches: 9p-fix-access-mode-flags-being-ored-instead-of-replaced.patch amdgpu-jpeg-fix-deepsleep-register-for-jpeg-5_0_0-and-5_0_2.patch apparmor-use-target-task-s-context-in-apparmor_getprocattr.patch arm-9472-1-fix-race-condition-on-pg_dcache_clean-in-__sync_icache_dcache.patch bluetooth-hci_event-fix-potential-uaf-in-ssp-passkey-handlers.patch bus-mhi-host-pci_generic-switch-to-async-power-up-to-avoid-boot-delays.patch can-ucan-fix-devres-lifetime.patch ceph-fix-num_ops-off-by-one-when-crypto-allocation-fails.patch ceph-only-d_add-negative-dentries-when-they-are-unhashed.patch check-uapi-link-into-shared-objects.patch crypto-acomp-fix-wrong-pointer-stored-by-acomp_save_req.patch crypto-arm64-aes-fix-32-bit-aes_mac_update-arg-treated-as-64-bit.patch crypto-atmel-aes-fix-3-page-memory-leak-in-atmel_aes_buff_cleanup.patch crypto-atmel-ecc-release-client-on-allocation-failure.patch crypto-atmel-sha204a-fix-error-codes-in-otp-reads.patch crypto-atmel-sha204a-fix-potential-uaf-and-memory-leak-in-remove-path.patch crypto-atmel-sha204a-fix-uninitialized-data-access-on-otp-read-error.patch crypto-atmel-tdes-fix-dma-sync-direction.patch crypto-ccree-fix-a-memory-leak-in-cc_mac_digest.patch crypto-hisilicon-fix-dma_unmap_single-direction.patch crypto-nx-fix-bounce-buffer-leaks-in-nx842_crypto_-alloc-free-_ctx.patch crypto-nx-fix-context-leak-in-nx842_crypto_free_ctx.patch crypto-nx-fix-packed-layout-in-struct-nx842_crypto_header.patch dm-mirror-fix-integer-overflow-in-create_dirty_log.patch erofs-fix-unsigned-underflow-in-z_erofs_lz4_handle_overlap.patch ext4-fix-bounds-check-in-check_xattrs-to-prevent-out-of-bounds-access.patch ext4-fix-missing-brelse-in-ext4_xattr_inode_dec_ref_all.patch gtp-disable-bh-before-calling-udp_tunnel_xmit_skb.patch hid-apple-ensure-the-keyboard-backlight-is-off-if-suspending.patch ib-core-fix-zero-dmac-race-in-neighbor-resolution.patch inotify-fix-watch-count-leak-when-fsnotify_add_inode_mark_locked-fails.patch 
ktest-fix-the-month-in-the-name-of-the-failure-directory.patch md-md-llbitmap-raise-barrier-before-state-machine-transition.patch md-md-llbitmap-skip-reading-rdevs-that-are-not-in_sync.patch md-raid5-fix-soft-lockup-in-retry_aligned_read.patch md-raid5-validate-payload-size-before-accessing-journal-metadata.patch mfd-core-preserve-of-node-when-acpi-handle-is-present.patch mm-swap-speed-up-hibernation-allocation-and-writeout.patch mptcp-sync-the-msk-sndbuf-at-accept-time.patch mtd-spi-nor-sst-fix-write-enable-before-aai-sequence.patch mtd-spinand-winbond-declare-the-qe-bit-on-w25nxxjw.patch nfsv4.1-apply-session-size-limits-on-clone-path.patch ntfs3-add-buffer-boundary-checks-to-run_unpack.patch ntfs3-fix-integer-overflow-in-run_unpack-volume-boundary-check.patch ring-buffer-do-not-double-count-the-reader_page.patch rtmutex-use-waiter-task-instead-of-current-in-remove_waiter.patch rxgk-fix-potential-integer-overflow-in-length-check.patch sched_ext-documentation-clarify-ops.dispatch-role-in-task-lifecycle.patch scsi-sd-fix-missing-put_disk-when-device_add-disk_dev-fails.patch seg6-fix-seg6-lwtunnel-output-redirect-for-l2-reduced-encap-mode.patch taskstats-set-version-in-tgid-exit-notifications.patch tcp-call-sk_data_ready-after-listener-migration.patch udf-fix-partition-descriptor-append-bookkeeping.patch wifi-rtl8xxxu-fix-potential-use-of-uninitialized-value.patch x86-cpu-disable-fred-when-pti-is-forced-on.patch x86-shstk-prevent-deadlock-during-shstk-sigreturn.patch
diff --git a/queue-7.0/9p-fix-access-mode-flags-being-ored-instead-of-replaced.patch b/queue-7.0/9p-fix-access-mode-flags-being-ored-instead-of-replaced.patch new file mode 100644 index 0000000..67f4861 --- /dev/null +++ b/queue-7.0/9p-fix-access-mode-flags-being-ored-instead-of-replaced.patch
@@ -0,0 +1,53 @@ +From da2346a48a5a1fed86c3fe3d73c0b60e7b3027c9 Mon Sep 17 00:00:00 2001 +From: Pierre Barre <pierre@barre.sh> +Date: Thu, 2 Apr 2026 12:03:12 +0200 +Subject: 9p: fix access mode flags being ORed instead of replaced + +From: Pierre Barre <pierre@barre.sh> + +commit da2346a48a5a1fed86c3fe3d73c0b60e7b3027c9 upstream. + +Since commit 1f3e4142c0eb ("9p: convert to the new mount API"), +v9fs_apply_options() applies parsed mount flags with |= onto flags +already set by v9fs_session_init(). For 9P2000.L, session_init sets +V9FS_ACCESS_CLIENT as the default, so when the user mounts with +"access=user", both bits end up set. Access mode checks compare +against exact values, so having both bits set matches neither mode. + +This causes v9fs_fid_lookup() to fall through to the default switch +case, using INVALID_UID (nobody/65534) instead of current_fsuid() +for all fid lookups. Root is then unable to chown or perform other +privileged operations. + +Fix by clearing the access mask before applying the user's choice. + +Fixes: 1f3e4142c0eb ("9p: convert to the new mount API") +Signed-off-by: Pierre Barre <pierre@barre.sh> +Reviewed-by: Christian Schoenebeck <linux_oss@crudebyte.com> +Message-ID: <0ddc72da-d196-4f01-8755-0086f670e779@app.fastmail.com> +Cc: stable@vger.kernel.org +Signed-off-by: Dominique Martinet <asmadeus@codewreck.org> +Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> +--- + fs/9p/v9fs.c | 4 ++++ + 1 file changed, 4 insertions(+) + +diff --git a/fs/9p/v9fs.c b/fs/9p/v9fs.c +index 057487efaaeb..acda42499ca9 100644 +--- a/fs/9p/v9fs.c ++++ b/fs/9p/v9fs.c +@@ -413,7 +413,11 @@ static void v9fs_apply_options(struct v9fs_session_info *v9ses, + /* + * Note that we must |= flags here as session_init already + * set basic flags. This adds in flags from parsed options. ++ * Default access flags must be cleared if session options ++ * changes them to avoid mangling the setting. 
+ */ ++ if (ctx->session_opts.flags & V9FS_ACCESS_MASK) ++ v9ses->flags &= ~V9FS_ACCESS_MASK; + v9ses->flags |= ctx->session_opts.flags; + #ifdef CONFIG_9P_FSCACHE + v9ses->cachetag = ctx->session_opts.cachetag; +-- +2.54.0 +
diff --git a/queue-7.0/amdgpu-jpeg-fix-deepsleep-register-for-jpeg-5_0_0-and-5_0_2.patch b/queue-7.0/amdgpu-jpeg-fix-deepsleep-register-for-jpeg-5_0_0-and-5_0_2.patch new file mode 100644 index 0000000..c239075 --- /dev/null +++ b/queue-7.0/amdgpu-jpeg-fix-deepsleep-register-for-jpeg-5_0_0-and-5_0_2.patch
@@ -0,0 +1,104 @@ +From e90dc3b2d73986610476b02c29d0074aa4d92fb0 Mon Sep 17 00:00:00 2001 +From: "David (Ming Qiang) Wu" <David.Wu3@amd.com> +Date: Mon, 9 Mar 2026 18:48:37 -0400 +Subject: amdgpu/jpeg: fix deepsleep register for jpeg 5_0_0 and 5_0_2 + +From: David (Ming Qiang) Wu <David.Wu3@amd.com> + +commit e90dc3b2d73986610476b02c29d0074aa4d92fb0 upstream. + +PCTL0__MMHUB_DEEPSLEEP_IB is 0x69004 on MMHUB 4,1,0 and +and 0x60804 on MMHUB 4,2,0. 0x62a04 is on MMHUB 1,8,0/1. + +The DS bits are adjusted to cover more JPEG engines and MMHUB +version. + +Signed-off-by: David (Ming Qiang) Wu <David.Wu3@amd.com> +Reviewed-by: Alex Deucher <alexander.deucher@amd.com> +Signed-off-by: Alex Deucher <alexander.deucher@amd.com> +Cc: stable@vger.kernel.org +Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> +--- + drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c | 52 +++++++++++++++++++++++++++---- + 1 file changed, 46 insertions(+), 6 deletions(-) + +--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c ++++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c +@@ -736,15 +736,35 @@ static void jpeg_v4_0_3_dec_ring_set_wpt + */ + void jpeg_v4_0_3_dec_ring_insert_start(struct amdgpu_ring *ring) + { +- if (!amdgpu_sriov_vf(ring->adev)) { ++ struct amdgpu_device *adev = ring->adev; ++ ++ if (!amdgpu_sriov_vf(adev)) { ++ int jpeg_inst = GET_INST(JPEG, ring->me); ++ uint32_t value = 0x80004000; /* default DS14 */ ++ + amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET, + 0, 0, PACKETJ_TYPE0)); +- amdgpu_ring_write(ring, 0x62a04); /* PCTL0_MMHUB_DEEPSLEEP_IB */ ++ ++ /* PCTL0__MMHUB_DEEPSLEEP_IB could be different on different mmhub version */ ++ switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) { ++ case IP_VERSION(4, 1, 0): ++ amdgpu_ring_write(ring, 0x69004); ++ value = 0x80010000; ++ break; ++ case IP_VERSION(4, 2, 0): ++ amdgpu_ring_write(ring, 0x60804); ++ if (jpeg_inst & 1) ++ value = 0x80010000; ++ break; ++ default: ++ amdgpu_ring_write(ring, 0x62a04); ++ break; ++ } 
+ + amdgpu_ring_write(ring, + PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR, 0, + 0, PACKETJ_TYPE0)); +- amdgpu_ring_write(ring, 0x80004000); ++ amdgpu_ring_write(ring, value); + } + } + +@@ -757,15 +777,35 @@ void jpeg_v4_0_3_dec_ring_insert_start(s + */ + void jpeg_v4_0_3_dec_ring_insert_end(struct amdgpu_ring *ring) + { +- if (!amdgpu_sriov_vf(ring->adev)) { ++ struct amdgpu_device *adev = ring->adev; ++ ++ if (!amdgpu_sriov_vf(adev)) { ++ int jpeg_inst = GET_INST(JPEG, ring->me); ++ uint32_t value = 0x00004000; /* default DS14 */ ++ + amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET, + 0, 0, PACKETJ_TYPE0)); +- amdgpu_ring_write(ring, 0x62a04); ++ ++ /* PCTL0__MMHUB_DEEPSLEEP_IB could be different on different mmhub version */ ++ switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) { ++ case IP_VERSION(4, 1, 0): ++ amdgpu_ring_write(ring, 0x69004); ++ value = 0x00010000; ++ break; ++ case IP_VERSION(4, 2, 0): ++ amdgpu_ring_write(ring, 0x60804); ++ if (jpeg_inst & 1) ++ value = 0x00010000; ++ break; ++ default: ++ amdgpu_ring_write(ring, 0x62a04); ++ break; ++ } + + amdgpu_ring_write(ring, + PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR, 0, + 0, PACKETJ_TYPE0)); +- amdgpu_ring_write(ring, 0x00004000); ++ amdgpu_ring_write(ring, value); + } + } +
diff --git a/queue-7.0/apparmor-use-target-task-s-context-in-apparmor_getprocattr.patch b/queue-7.0/apparmor-use-target-task-s-context-in-apparmor_getprocattr.patch new file mode 100644 index 0000000..802dd8b --- /dev/null +++ b/queue-7.0/apparmor-use-target-task-s-context-in-apparmor_getprocattr.patch
@@ -0,0 +1,67 @@ +From 4afc61702bdcc3b9b519749ef966cf762a6e7051 Mon Sep 17 00:00:00 2001 +From: Cengiz Can <cengiz.can@canonical.com> +Date: Tue, 10 Feb 2026 11:17:14 +0300 +Subject: apparmor: use target task's context in apparmor_getprocattr() + +From: Cengiz Can <cengiz.can@canonical.com> + +commit 4afc61702bdcc3b9b519749ef966cf762a6e7051 upstream. + +apparmor_getprocattr() incorrectly calls task_ctx(current) instead of +task_ctx(task) when retrieving prev and exec attributes, returning the +caller's labels rather than the target's. + +Fix by passing task to task_ctx(). + +The issue can be reproduced when a process with an onexec transition +(e.g., configured by a container runtime) is inspected via +/proc/<pid>/attr/apparmor/exec. The reader's own value is returned +instead of the target's. + +Reported-by: Qualys Security Advisory <qsa@qualys.com> +Fixes: 3b529a7600d8 ("apparmor: move task domain change info to task security") +Cc: stable@vger.kernel.org +Co-developed-by: Cengiz Can <cengiz.can@canonical.com> +Signed-off-by: Cengiz Can <cengiz.can@canonical.com> +Co-developed-by: John Johansen <john.johansen@canonical.com> +Signed-off-by: John Johansen <john.johansen@canonical.com> +Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> +--- + security/apparmor/lsm.c | 16 +++++++--------- + 1 file changed, 7 insertions(+), 9 deletions(-) + +--- a/security/apparmor/lsm.c ++++ b/security/apparmor/lsm.c +@@ -822,25 +822,23 @@ static int apparmor_getprocattr(struct t + char **value) + { + int error = -ENOENT; +- /* released below */ +- const struct cred *cred = get_task_cred(task); +- struct aa_task_ctx *ctx = task_ctx(current); + struct aa_label *label = NULL; + ++ rcu_read_lock(); + if (strcmp(name, "current") == 0) +- label = aa_get_newest_label(cred_label(cred)); +- else if (strcmp(name, "prev") == 0 && ctx->previous) +- label = aa_get_newest_label(ctx->previous); +- else if (strcmp(name, "exec") == 0 && ctx->onexec) +- label = 
aa_get_newest_label(ctx->onexec); ++ label = aa_get_newest_cred_label(__task_cred(task)); ++ else if (strcmp(name, "prev") == 0 && task_ctx(task)->previous) ++ label = aa_get_newest_label(task_ctx(task)->previous); ++ else if (strcmp(name, "exec") == 0 && task_ctx(task)->onexec) ++ label = aa_get_newest_label(task_ctx(task)->onexec); + else + error = -EINVAL; ++ rcu_read_unlock(); + + if (label) + error = aa_getprocattr(label, value, true); + + aa_put_label(label); +- put_cred(cred); + + return error; + }
diff --git a/queue-7.0/arm-9472-1-fix-race-condition-on-pg_dcache_clean-in-__sync_icache_dcache.patch b/queue-7.0/arm-9472-1-fix-race-condition-on-pg_dcache_clean-in-__sync_icache_dcache.patch new file mode 100644 index 0000000..7d1dd81 --- /dev/null +++ b/queue-7.0/arm-9472-1-fix-race-condition-on-pg_dcache_clean-in-__sync_icache_dcache.patch
@@ -0,0 +1,55 @@ +From 75f9a484e817adea211c73f89ed938a2b2f90953 Mon Sep 17 00:00:00 2001 +From: Brian Ruley <brian.ruley@gehealthcare.com> +Date: Wed, 15 Apr 2026 18:12:48 +0100 +Subject: ARM: 9472/1: fix race condition on PG_dcache_clean in __sync_icache_dcache() + +From: Brian Ruley <brian.ruley@gehealthcare.com> + +commit 75f9a484e817adea211c73f89ed938a2b2f90953 upstream. + +This bug was already discovered and fixed for arm64 in +commit 588a513d3425 ("arm64: Fix race condition on PG_dcache_clean in +__sync_icache_dcache()"). + +Verified with added instrumentation to track dcache flushes in a ring +buffer, as shown by the (distilled) output: + + kernel: SIGILL at b6b80ac0 cpu 1 pid 32663 linux_pte=8eff659f + hw_pte=8eff6e7e young=1 exec=1 + kernel: dcache flush START cpu0 pfn=8eff6 ts=48629557020154 + kernel: dcache flush SKIPPED cpu1 pfn=8eff6 ts=48629557020154 + kernel: dcache flush FINISH cpu0 pfn=8eff6 ts=48629557036154 + audisp-syslog: comm="journalctl" exe="/usr/bin/journalctl" sig=4 [...] + +Discussions in the mailing list mentioned that arch/arm is also affected +but the fix was never applied to it [1][2]. Apply the change now, since +the race condition can cause sporadic SIGILL's and SEGV's especially +while under high memory pressure. 
+ +Link: https://lore.kernel.org/all/adzMOdySgMIePcue@willie-the-truck [1] +Link: https://lore.kernel.org/all/20210514095001.13236-1-catalin.marinas@arm.com [2] +Signed-off-by: Brian Ruley <brian.ruley@gehealthcare.com> +Reviewed-by: Will Deacon <will@kernel.org> +Cc: <stable@vger.kernel.org> +Fixes: 6012191aa9c6 ("ARM: 6380/1: Introduce __sync_icache_dcache() for VIPT caches") +Signed-off-by: Will Deacon <will@kernel.org> +Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk> +Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> +--- + arch/arm/mm/flush.c | 4 +++- + 1 file changed, 3 insertions(+), 1 deletion(-) + +--- a/arch/arm/mm/flush.c ++++ b/arch/arm/mm/flush.c +@@ -304,8 +304,10 @@ void __sync_icache_dcache(pte_t pteval) + else + mapping = NULL; + +- if (!test_and_set_bit(PG_dcache_clean, &folio->flags.f)) ++ if (!test_bit(PG_dcache_clean, &folio->flags.f)) { + __flush_dcache_folio(mapping, folio); ++ set_bit(PG_dcache_clean, &folio->flags.f); ++ } + + if (pte_exec(pteval)) + __flush_icache_all();
diff --git a/queue-7.0/bluetooth-hci_event-fix-potential-uaf-in-ssp-passkey-handlers.patch b/queue-7.0/bluetooth-hci_event-fix-potential-uaf-in-ssp-passkey-handlers.patch new file mode 100644 index 0000000..0fc3570 --- /dev/null +++ b/queue-7.0/bluetooth-hci_event-fix-potential-uaf-in-ssp-passkey-handlers.patch
@@ -0,0 +1,90 @@ +From 85fa3512048793076eef658f66489112dcc91993 Mon Sep 17 00:00:00 2001 +From: Shuvam Pandey <shuvampandey1@gmail.com> +Date: Thu, 9 Apr 2026 00:32:30 +0545 +Subject: Bluetooth: hci_event: fix potential UAF in SSP passkey handlers + +From: Shuvam Pandey <shuvampandey1@gmail.com> + +commit 85fa3512048793076eef658f66489112dcc91993 upstream. + +hci_conn lookup and field access must be covered by hdev lock in +hci_user_passkey_notify_evt() and hci_keypress_notify_evt(), otherwise +the connection can be freed concurrently. + +Extend the hci_dev_lock critical section to cover all conn usage in both +handlers. + +Keep the existing keypress notification behavior unchanged by routing +the early exits through a common unlock path. + +Fixes: 92a25256f142 ("Bluetooth: mgmt: Implement support for passkey notification") +Cc: stable@vger.kernel.org +Signed-off-by: Shuvam Pandey <shuvampandey1@gmail.com> +Signed-off-by: Luiz Augusto von Dentz <luiz.von.dentz@intel.com> +Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> +--- + net/bluetooth/hci_event.c | 18 ++++++++++++++---- + 1 file changed, 14 insertions(+), 4 deletions(-) + +--- a/net/bluetooth/hci_event.c ++++ b/net/bluetooth/hci_event.c +@@ -5498,9 +5498,11 @@ static void hci_user_passkey_notify_evt( + + bt_dev_dbg(hdev, ""); + ++ hci_dev_lock(hdev); ++ + conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); + if (!conn) +- return; ++ goto unlock; + + conn->passkey_notify = __le32_to_cpu(ev->passkey); + conn->passkey_entered = 0; +@@ -5509,6 +5511,9 @@ static void hci_user_passkey_notify_evt( + mgmt_user_passkey_notify(hdev, &conn->dst, conn->type, + conn->dst_type, conn->passkey_notify, + conn->passkey_entered); ++ ++unlock: ++ hci_dev_unlock(hdev); + } + + static void hci_keypress_notify_evt(struct hci_dev *hdev, void *data, +@@ -5519,14 +5524,16 @@ static void hci_keypress_notify_evt(stru + + bt_dev_dbg(hdev, ""); + ++ hci_dev_lock(hdev); ++ + conn = hci_conn_hash_lookup_ba(hdev, 
ACL_LINK, &ev->bdaddr); + if (!conn) +- return; ++ goto unlock; + + switch (ev->type) { + case HCI_KEYPRESS_STARTED: + conn->passkey_entered = 0; +- return; ++ goto unlock; + + case HCI_KEYPRESS_ENTERED: + conn->passkey_entered++; +@@ -5541,13 +5548,16 @@ static void hci_keypress_notify_evt(stru + break; + + case HCI_KEYPRESS_COMPLETED: +- return; ++ goto unlock; + } + + if (hci_dev_test_flag(hdev, HCI_MGMT)) + mgmt_user_passkey_notify(hdev, &conn->dst, conn->type, + conn->dst_type, conn->passkey_notify, + conn->passkey_entered); ++ ++unlock: ++ hci_dev_unlock(hdev); + } + + static void hci_simple_pair_complete_evt(struct hci_dev *hdev, void *data,
diff --git a/queue-7.0/bus-mhi-host-pci_generic-switch-to-async-power-up-to-avoid-boot-delays.patch b/queue-7.0/bus-mhi-host-pci_generic-switch-to-async-power-up-to-avoid-boot-delays.patch new file mode 100644 index 0000000..37276d3 --- /dev/null +++ b/queue-7.0/bus-mhi-host-pci_generic-switch-to-async-power-up-to-avoid-boot-delays.patch
@@ -0,0 +1,40 @@ +From cfdb41adf1c2822ad1b1791d4d11093edb5582b6 Mon Sep 17 00:00:00 2001 +From: Qiang Yu <qiang.yu@oss.qualcomm.com> +Date: Tue, 3 Mar 2026 01:02:13 -0800 +Subject: bus: mhi: host: pci_generic: Switch to async power up to avoid boot delays + +From: Qiang Yu <qiang.yu@oss.qualcomm.com> + +commit cfdb41adf1c2822ad1b1791d4d11093edb5582b6 upstream. + +Some modem devices can take significant time (up to 20 secs for sdx75) to +enter mission mode during initialization. Currently, mhi_sync_power_up() +waits for this entire process to complete, blocking other driver probes +and delaying system boot. + +Switch to mhi_async_power_up() so probe can return immediately while MHI +initialization continues in the background. This eliminates lengthy boot +delays and allows other drivers to probe in parallel, improving overall +system boot performance. + +Fixes: 5571519009d0 ("bus: mhi: host: pci_generic: Add SDX75 based modem support") +Signed-off-by: Qiang Yu <qiang.yu@oss.qualcomm.com> +Signed-off-by: Manivannan Sadhasivam <manivannan.sadhasivam@oss.qualcomm.com> +Cc: stable@vger.kernel.org +Link: https://patch.msgid.link/20260303-b4-async_power_on-v2-1-d3db81eb457d@oss.qualcomm.com +Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> +--- + drivers/bus/mhi/host/pci_generic.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/drivers/bus/mhi/host/pci_generic.c ++++ b/drivers/bus/mhi/host/pci_generic.c +@@ -1393,7 +1393,7 @@ static int mhi_pci_probe(struct pci_dev + goto err_unregister; + } + +- err = mhi_sync_power_up(mhi_cntrl); ++ err = mhi_async_power_up(mhi_cntrl); + if (err) { + dev_err(&pdev->dev, "failed to power up MHI controller\n"); + goto err_unprepare;
diff --git a/queue-7.0/can-ucan-fix-devres-lifetime.patch b/queue-7.0/can-ucan-fix-devres-lifetime.patch new file mode 100644 index 0000000..c468a0a --- /dev/null +++ b/queue-7.0/can-ucan-fix-devres-lifetime.patch
@@ -0,0 +1,40 @@ +From fed4626501c871890da287bec62a96e52da1af89 Mon Sep 17 00:00:00 2001 +From: Johan Hovold <johan@kernel.org> +Date: Fri, 27 Mar 2026 11:45:20 +0100 +Subject: can: ucan: fix devres lifetime + +From: Johan Hovold <johan@kernel.org> + +commit fed4626501c871890da287bec62a96e52da1af89 upstream. + +USB drivers bind to USB interfaces and any device managed resources +should have their lifetime tied to the interface rather than parent USB +device. This avoids issues like memory leaks when drivers are unbound +without their devices being physically disconnected (e.g. on probe +deferral or configuration changes). + +Fix the control message buffer lifetime so that it is released on driver +unbind. + +Fixes: 9f2d3eae88d2 ("can: ucan: add driver for Theobroma Systems UCAN devices") +Cc: stable@vger.kernel.org # 4.19 +Cc: Jakob Unterwurzacher <jakob.unterwurzacher@theobroma-systems.com> +Signed-off-by: Johan Hovold <johan@kernel.org> +Link: https://patch.msgid.link/20260327104520.1310158-1-johan@kernel.org +Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de> +Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> +--- + drivers/net/can/usb/ucan.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/drivers/net/can/usb/ucan.c ++++ b/drivers/net/can/usb/ucan.c +@@ -1397,7 +1397,7 @@ static int ucan_probe(struct usb_interfa + */ + + /* Prepare Memory for control transfers */ +- ctl_msg_buffer = devm_kzalloc(&udev->dev, ++ ctl_msg_buffer = devm_kzalloc(&intf->dev, + sizeof(union ucan_ctl_payload), + GFP_KERNEL); + if (!ctl_msg_buffer) {
diff --git a/queue-7.0/ceph-fix-num_ops-off-by-one-when-crypto-allocation-fails.patch b/queue-7.0/ceph-fix-num_ops-off-by-one-when-crypto-allocation-fails.patch new file mode 100644 index 0000000..bf5ab15 --- /dev/null +++ b/queue-7.0/ceph-fix-num_ops-off-by-one-when-crypto-allocation-fails.patch
@@ -0,0 +1,64 @@ +From a0d9555bf9eaeba34fe6b6bb86f442fe08ba3842 Mon Sep 17 00:00:00 2001 +From: Sam Edwards <cfsworks@gmail.com> +Date: Tue, 17 Mar 2026 19:37:33 -0700 +Subject: ceph: fix num_ops off-by-one when crypto allocation fails + +From: Sam Edwards <cfsworks@gmail.com> + +commit a0d9555bf9eaeba34fe6b6bb86f442fe08ba3842 upstream. + +move_dirty_folio_in_page_array() may fail if the file is encrypted, the +dirty folio is not the first in the batch, and it fails to allocate a +bounce buffer to hold the ciphertext. When that happens, +ceph_process_folio_batch() simply redirties the folio and flushes the +current batch -- it can retry that folio in a future batch. + +However, if this failed folio is not contiguous with the last folio that +did make it into the batch, then ceph_process_folio_batch() has already +incremented `ceph_wbc->num_ops`; because it doesn't follow through and +add the discontiguous folio to the array, ceph_submit_write() -- which +expects that `ceph_wbc->num_ops` accurately reflects the number of +contiguous ranges (and therefore the required number of "write extent" +ops) in the writeback -- will panic the kernel: + + BUG_ON(ceph_wbc->op_idx + 1 != req->r_num_ops); + +This issue can be reproduced on affected kernels by writing to +fscrypt-enabled CephFS file(s) with a 4KiB-written/4KiB-skipped/repeat +pattern (total filesize should not matter) and gradually increasing the +system's memory pressure until a bounce buffer allocation fails. + +Fix this crash by decrementing `ceph_wbc->num_ops` back to the correct +value when move_dirty_folio_in_page_array() fails, but the folio already +started counting a new (i.e. still-empty) extent. + +The defect corrected by this patch has existed since 2022 (see first +`Fixes:`), but another bug blocked multi-folio encrypted writeback until +recently (see second `Fixes:`). The second commit made it into 6.18.16, +6.19.6, and 7.0-rc1, unmasking the panic in those versions. 
This patch +therefore fixes a regression (panic) introduced by cac190c7674f. + +Cc: stable@vger.kernel.org +Fixes: d55207717ded ("ceph: add encryption support to writepage and writepages") +Fixes: cac190c7674f ("ceph: fix write storm on fscrypted files") +Signed-off-by: Sam Edwards <CFSworks@gmail.com> +Reviewed-by: Viacheslav Dubeyko <Slava.Dubeyko@ibm.com> +Signed-off-by: Ilya Dryomov <idryomov@gmail.com> +Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> +--- + fs/ceph/addr.c | 4 ++++ + 1 file changed, 4 insertions(+) + +--- a/fs/ceph/addr.c ++++ b/fs/ceph/addr.c +@@ -1365,6 +1365,10 @@ void ceph_process_folio_batch(struct add + rc = move_dirty_folio_in_page_array(mapping, wbc, ceph_wbc, + folio); + if (rc) { ++ /* Did we just begin a new contiguous op? Nevermind! */ ++ if (ceph_wbc->len == 0) ++ ceph_wbc->num_ops--; ++ + folio_redirty_for_writepage(wbc, folio); + folio_unlock(folio); + break;
diff --git a/queue-7.0/ceph-only-d_add-negative-dentries-when-they-are-unhashed.patch b/queue-7.0/ceph-only-d_add-negative-dentries-when-they-are-unhashed.patch new file mode 100644 index 0000000..75fad92 --- /dev/null +++ b/queue-7.0/ceph-only-d_add-negative-dentries-when-they-are-unhashed.patch
@@ -0,0 +1,108 @@ +From 803447f93d75ab6e40c85e6d12b5630d281d70d6 Mon Sep 17 00:00:00 2001 +From: Max Kellermann <max.kellermann@ionos.com> +Date: Fri, 27 Mar 2026 17:23:08 +0100 +Subject: ceph: only d_add() negative dentries when they are unhashed + +From: Max Kellermann <max.kellermann@ionos.com> + +commit 803447f93d75ab6e40c85e6d12b5630d281d70d6 upstream. + +Ceph can call d_add(dentry, NULL) on a negative dentry that is already +present in the primary dcache hash. + +In the current VFS that is not safe. d_add() goes through __d_add() +to __d_rehash(), which unconditionally reinserts dentry->d_hash into +the hlist_bl bucket. If the dentry is already hashed, reinserting the +same node can corrupt the bucket, including creating a self-loop. +Once that happens, __d_lookup() can spin forever in the hlist_bl walk, +typically looping only on the d_name.hash mismatch check and +eventually triggering RCU stall reports like this one: + + rcu: INFO: rcu_sched self-detected stall on CPU + rcu: 87-....: (2100 ticks this GP) idle=3a4c/1/0x4000000000000000 softirq=25003319/25003319 fqs=829 + rcu: (t=2101 jiffies g=79058445 q=698988 ncpus=192) + CPU: 87 UID: 2952868916 PID: 3933303 Comm: php-cgi8.3 Not tainted 6.18.17-i1-amd #950 NONE + Hardware name: Dell Inc. 
PowerEdge R7615/0G9DHV, BIOS 1.6.6 09/22/2023 + RIP: 0010:__d_lookup+0x46/0xb0 + Code: c1 e8 07 48 8d 04 c2 48 8b 00 49 89 fc 49 89 f5 48 89 c3 48 83 e3 fe 48 83 f8 01 77 0f eb 2d 0f 1f 44 00 00 48 8b 1b 48 85 db <74> 20 39 6b 18 75 f3 48 8d 7b 78 e8 ba 85 d0 00 4c 39 63 10 74 1f + RSP: 0018:ff745a70c8253898 EFLAGS: 00000282 + RAX: ff26e470054cb208 RBX: ff26e470054cb208 RCX: 000000006e958966 + RDX: ff26e48267340000 RSI: ff745a70c82539b0 RDI: ff26e458f74655c0 + RBP: 000000006e958966 R08: 0000000000000180 R09: 9cd08d909b919a89 + R10: ff26e458f74655c0 R11: 0000000000000000 R12: ff26e458f74655c0 + R13: ff745a70c82539b0 R14: d0d0d0d0d0d0d0d0 R15: 2f2f2f2f2f2f2f2f + FS: 00007f5770896980(0000) GS:ff26e482c5d88000(0000) knlGS:0000000000000000 + CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 + CR2: 00007f5764de50c0 CR3: 000000a72abb5001 CR4: 0000000000771ef0 + PKRU: 55555554 + Call Trace: + <TASK> + lookup_fast+0x9f/0x100 + walk_component+0x1f/0x150 + link_path_walk+0x20e/0x3d0 + path_lookupat+0x68/0x180 + filename_lookup+0xdc/0x1e0 + vfs_statx+0x6c/0x140 + vfs_fstatat+0x67/0xa0 + __do_sys_newfstatat+0x24/0x60 + do_syscall_64+0x6a/0x230 + entry_SYSCALL_64_after_hwframe+0x76/0x7e + +This is reachable with reused cached negative dentries. A Ceph lookup +or atomic_open can be handed a negative dentry that is already hashed, +and fs/ceph/dir.c then hits one of two paths that incorrectly assume +"negative" also means "unhashed": + + - ceph_finish_lookup(): + MDS reply is -ENOENT with no trace + -> d_add(dentry, NULL) + + - ceph_lookup(): + local ENOENT fast path for a complete directory with shared caps + -> d_add(dentry, NULL) + +Both paths can therefore re-add an already-hashed negative dentry. + +Ceph already uses the correct pattern elsewhere: ceph_fill_trace() only +calls d_add(dn, NULL) for a negative null-dentry reply when d_unhashed(dn) +is true. + +Fix both fs/ceph/dir.c sites the same way: only call d_add() for a +negative dentry when it is actually unhashed. 
If the negative dentry +is already hashed, leave it in place and reuse it as-is. + +This preserves the existing behavior for unhashed dentries while +avoiding d_hash list corruption for reused hashed negatives. + +Cc: stable@vger.kernel.org +Fixes: 2817b000b02c ("ceph: directory operations") +Signed-off-by: Max Kellermann <max.kellermann@ionos.com> +Reviewed-by: Viacheslav Dubeyko <Slava.Dubeyko@ibm.com> +Signed-off-by: Ilya Dryomov <idryomov@gmail.com> +Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> +--- + fs/ceph/dir.c | 6 ++++-- + 1 file changed, 4 insertions(+), 2 deletions(-) + +--- a/fs/ceph/dir.c ++++ b/fs/ceph/dir.c +@@ -769,7 +769,8 @@ struct dentry *ceph_finish_lookup(struct + d_drop(dentry); + err = -ENOENT; + } else { +- d_add(dentry, NULL); ++ if (d_unhashed(dentry)) ++ d_add(dentry, NULL); + } + } + } +@@ -840,7 +841,8 @@ static struct dentry *ceph_lookup(struct + spin_unlock(&ci->i_ceph_lock); + doutc(cl, " dir %llx.%llx complete, -ENOENT\n", + ceph_vinop(dir)); +- d_add(dentry, NULL); ++ if (d_unhashed(dentry)) ++ d_add(dentry, NULL); + di->lease_shared_gen = atomic_read(&ci->i_shared_gen); + return NULL; + }
diff --git a/queue-7.0/check-uapi-link-into-shared-objects.patch b/queue-7.0/check-uapi-link-into-shared-objects.patch new file mode 100644 index 0000000..fbb3c89 --- /dev/null +++ b/queue-7.0/check-uapi-link-into-shared-objects.patch
@@ -0,0 +1,49 @@ +From a261f6dff3c1653c19c065c3b3650c625447b8a7 Mon Sep 17 00:00:00 2001 +From: Arnd Bergmann <arnd@arndb.de> +Date: Fri, 6 Mar 2026 17:33:07 +0100 +Subject: check-uapi: link into shared objects +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +From: Arnd Bergmann <arnd@arndb.de> + +commit a261f6dff3c1653c19c065c3b3650c625447b8a7 upstream. + +While testing ABI changes across all architectures, I found that abidiff +sometimes produces nonsensical output. Further debugging identified +missing or broken libelf support for architecture specific relocations +in ET_REL binaries as the source of the problem[1]. + +Change the script to no longer produce a relocatable object file but +instead create a shared library for each header. This makes abidiff +work for all of the architectures in upstream linux kernels. + +Link: https://sourceware.org/bugzilla/show_bug.cgi?id=33869 +Cc: stable@vger.kernel.org +Signed-off-by: Arnd Bergmann <arnd@arndb.de> +Reviewed-by: Thomas Weißschuh <linux@weissschuh.net> +Acked-by: Nathan Chancellor <nathan@kernel.org> +Link: https://patch.msgid.link/20260306163309.2015837-2-arnd@kernel.org +Signed-off-by: Nicolas Schier <nsc@kernel.org> +Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> +--- + scripts/check-uapi.sh | 7 +++++-- + 1 file changed, 5 insertions(+), 2 deletions(-) + +--- a/scripts/check-uapi.sh ++++ b/scripts/check-uapi.sh +@@ -178,8 +178,11 @@ do_compile() { + local -r inc_dir="$1" + local -r header="$2" + local -r out="$3" +- printf "int main(void) { return 0; }\n" | \ +- "$CC" -c \ ++ printf "int f(void) { return 0; }\n" | \ ++ "$CC" \ ++ -shared \ ++ -nostdlib \ ++ -fPIC \ + -o "$out" \ + -x c \ + -O0 \
diff --git a/queue-7.0/crypto-acomp-fix-wrong-pointer-stored-by-acomp_save_req.patch b/queue-7.0/crypto-acomp-fix-wrong-pointer-stored-by-acomp_save_req.patch new file mode 100644 index 0000000..67ba74d --- /dev/null +++ b/queue-7.0/crypto-acomp-fix-wrong-pointer-stored-by-acomp_save_req.patch
@@ -0,0 +1,71 @@ +From d7e20b9bd6c990773cf0c09e2642250b8a70263d Mon Sep 17 00:00:00 2001 +From: Giovanni Cabiddu <giovanni.cabiddu@intel.com> +Date: Thu, 16 Apr 2026 18:07:00 +0100 +Subject: crypto: acomp - fix wrong pointer stored by acomp_save_req() + +From: Giovanni Cabiddu <giovanni.cabiddu@intel.com> + +commit d7e20b9bd6c990773cf0c09e2642250b8a70263d upstream. + +acomp_save_req() stores &req->chain in req->base.data. When +acomp_reqchain_done() is invoked on asynchronous completion, it receives +&req->chain as the data argument but casts it directly to struct +acomp_req. Since data points to the chain member, all subsequent field +accesses are at a wrong offset, resulting in memory corruption. + +The issue occurs when an asynchronous hardware implementation, such as +the QAT driver, completes a request that uses the DMA virtual address +interface (e.g. acomp_request_set_src_dma()). This combination causes +crypto_acomp_compress() to enter the acomp_do_req_chain() path, which +sets acomp_reqchain_done() as the completion callback via +acomp_save_req(). + +With KASAN enabled, this manifests as a general protection fault in +acomp_reqchain_done(): + + general protection fault, probably for non-canonical address 0xe000040000000000 + KASAN: probably user-memory-access in range [0x0000400000000000-0x0000400000000007] + RIP: 0010:acomp_reqchain_done+0x15b/0x4e0 + Call Trace: + <IRQ> + qat_comp_alg_callback+0x5d/0xa0 [intel_qat] + adf_ring_response_handler+0x376/0x8b0 [intel_qat] + adf_response_handler+0x60/0x170 [intel_qat] + tasklet_action_common+0x223/0x820 + handle_softirqs+0x1ab/0x640 + </IRQ> + +Fix this by storing the request itself in req->base.data instead of +&req->chain, so that acomp_reqchain_done() receives the correct pointer. +Simplify acomp_restore_req() accordingly to access req->chain directly. 
+ +Fixes: 64929fe8c0a4 ("crypto: acomp - Remove request chaining") +Cc: stable@vger.kernel.org +Signed-off-by: Giovanni Cabiddu <giovanni.cabiddu@intel.com> +Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au> +Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> +--- + crypto/acompress.c | 8 +++----- + 1 file changed, 3 insertions(+), 5 deletions(-) + +--- a/crypto/acompress.c ++++ b/crypto/acompress.c +@@ -169,15 +169,13 @@ static void acomp_save_req(struct acomp_ + state->compl = req->base.complete; + state->data = req->base.data; + req->base.complete = cplt; +- req->base.data = state; ++ req->base.data = req; + } + + static void acomp_restore_req(struct acomp_req *req) + { +- struct acomp_req_chain *state = req->base.data; +- +- req->base.complete = state->compl; +- req->base.data = state->data; ++ req->base.complete = req->chain.compl; ++ req->base.data = req->chain.data; + } + + static void acomp_reqchain_virt(struct acomp_req *req)
diff --git a/queue-7.0/crypto-arm64-aes-fix-32-bit-aes_mac_update-arg-treated-as-64-bit.patch b/queue-7.0/crypto-arm64-aes-fix-32-bit-aes_mac_update-arg-treated-as-64-bit.patch new file mode 100644 index 0000000..ffd3840 --- /dev/null +++ b/queue-7.0/crypto-arm64-aes-fix-32-bit-aes_mac_update-arg-treated-as-64-bit.patch
@@ -0,0 +1,44 @@ +From f8f08d7cc43237e91e3aedf7b67d015d24c38fcc Mon Sep 17 00:00:00 2001 +From: Eric Biggers <ebiggers@kernel.org> +Date: Wed, 18 Feb 2026 13:34:49 -0800 +Subject: crypto: arm64/aes - Fix 32-bit aes_mac_update() arg treated as 64-bit + +From: Eric Biggers <ebiggers@kernel.org> + +commit f8f08d7cc43237e91e3aedf7b67d015d24c38fcc upstream. + +Since the 'enc_after' argument to neon_aes_mac_update() and +ce_aes_mac_update() has type 'int', it needs to be accessed using the +corresponding 32-bit register, not the 64-bit register. The upper half +of the corresponding 64-bit register may contain garbage. + +Fixes: 4860620da7e5 ("crypto: arm64/aes - add NEON/Crypto Extensions CBCMAC/CMAC/XCBC driver") +Cc: stable@vger.kernel.org +Reviewed-by: Ard Biesheuvel <ardb@kernel.org> +Link: https://lore.kernel.org/r/20260218213501.136844-4-ebiggers@kernel.org +Signed-off-by: Eric Biggers <ebiggers@kernel.org> +Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> +--- + arch/arm64/crypto/aes-modes.S | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +--- a/arch/arm64/crypto/aes-modes.S ++++ b/arch/arm64/crypto/aes-modes.S +@@ -838,7 +838,7 @@ AES_FUNC_START(aes_mac_update) + encrypt_block v0, w2, x1, x7, w8 + eor v0.16b, v0.16b, v4.16b + cmp w3, wzr +- csinv x5, x6, xzr, eq ++ csinv w5, w6, wzr, eq + cbz w5, .Lmacout + encrypt_block v0, w2, x1, x7, w8 + st1 {v0.16b}, [x4] /* return dg */ +@@ -852,7 +852,7 @@ AES_FUNC_START(aes_mac_update) + eor v0.16b, v0.16b, v1.16b /* ..and xor with dg */ + + subs w3, w3, #1 +- csinv x5, x6, xzr, eq ++ csinv w5, w6, wzr, eq + cbz w5, .Lmacout + + .Lmacenc:
diff --git a/queue-7.0/crypto-atmel-aes-fix-3-page-memory-leak-in-atmel_aes_buff_cleanup.patch b/queue-7.0/crypto-atmel-aes-fix-3-page-memory-leak-in-atmel_aes_buff_cleanup.patch new file mode 100644 index 0000000..6cb0a8d --- /dev/null +++ b/queue-7.0/crypto-atmel-aes-fix-3-page-memory-leak-in-atmel_aes_buff_cleanup.patch
@@ -0,0 +1,34 @@ +From 3fcfff4ed35f963380a68741bcd52742baff7f76 Mon Sep 17 00:00:00 2001 +From: Thorsten Blum <thorsten.blum@linux.dev> +Date: Wed, 11 Mar 2026 03:07:35 +0100 +Subject: crypto: atmel-aes - Fix 3-page memory leak in atmel_aes_buff_cleanup + +From: Thorsten Blum <thorsten.blum@linux.dev> + +commit 3fcfff4ed35f963380a68741bcd52742baff7f76 upstream. + +atmel_aes_buff_init() allocates 4 pages using __get_free_pages() with +ATMEL_AES_BUFFER_ORDER, but atmel_aes_buff_cleanup() frees only the +first page using free_page(), leaking the remaining 3 pages. Use +free_pages() with ATMEL_AES_BUFFER_ORDER to fix the memory leak. + +Fixes: bbe628ed897d ("crypto: atmel-aes - improve performances of data transfer") +Cc: stable@vger.kernel.org +Signed-off-by: Thorsten Blum <thorsten.blum@linux.dev> +Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au> +Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> +--- + drivers/crypto/atmel-aes.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/drivers/crypto/atmel-aes.c ++++ b/drivers/crypto/atmel-aes.c +@@ -2131,7 +2131,7 @@ static int atmel_aes_buff_init(struct at + + static void atmel_aes_buff_cleanup(struct atmel_aes_dev *dd) + { +- free_page((unsigned long)dd->buf); ++ free_pages((unsigned long)dd->buf, ATMEL_AES_BUFFER_ORDER); + } + + static int atmel_aes_dma_init(struct atmel_aes_dev *dd)
diff --git a/queue-7.0/crypto-atmel-ecc-release-client-on-allocation-failure.patch b/queue-7.0/crypto-atmel-ecc-release-client-on-allocation-failure.patch new file mode 100644 index 0000000..b0bb356 --- /dev/null +++ b/queue-7.0/crypto-atmel-ecc-release-client-on-allocation-failure.patch
@@ -0,0 +1,32 @@ +From 095d50008d55d13f8fcf1bbeb7c6eba51779bc85 Mon Sep 17 00:00:00 2001 +From: Thorsten Blum <thorsten.blum@linux.dev> +Date: Fri, 20 Feb 2026 15:03:13 +0100 +Subject: crypto: atmel-ecc - Release client on allocation failure + +From: Thorsten Blum <thorsten.blum@linux.dev> + +commit 095d50008d55d13f8fcf1bbeb7c6eba51779bc85 upstream. + +Call atmel_ecc_i2c_client_free() to release the I2C client reserved by +atmel_ecc_i2c_client_alloc() when crypto_alloc_kpp() fails. Otherwise +->tfm_count will be out of sync. + +Fixes: 11105693fa05 ("crypto: atmel-ecc - introduce Microchip / Atmel ECC driver") +Cc: stable@vger.kernel.org +Signed-off-by: Thorsten Blum <thorsten.blum@linux.dev> +Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au> +Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> +--- + drivers/crypto/atmel-ecc.c | 1 + + 1 file changed, 1 insertion(+) + +--- a/drivers/crypto/atmel-ecc.c ++++ b/drivers/crypto/atmel-ecc.c +@@ -261,6 +261,7 @@ static int atmel_ecdh_init_tfm(struct cr + if (IS_ERR(fallback)) { + dev_err(&ctx->client->dev, "Failed to allocate transformation for '%s': %ld\n", + alg, PTR_ERR(fallback)); ++ atmel_ecc_i2c_client_free(ctx->client); + return PTR_ERR(fallback); + } +
diff --git a/queue-7.0/crypto-atmel-sha204a-fix-error-codes-in-otp-reads.patch b/queue-7.0/crypto-atmel-sha204a-fix-error-codes-in-otp-reads.patch new file mode 100644 index 0000000..0348a86 --- /dev/null +++ b/queue-7.0/crypto-atmel-sha204a-fix-error-codes-in-otp-reads.patch
@@ -0,0 +1,66 @@ +From 094c276da6a0d4971c3faae09a36b51d096659b2 Mon Sep 17 00:00:00 2001 +From: Thorsten Blum <thorsten.blum@linux.dev> +Date: Sun, 15 Feb 2026 21:51:53 +0100 +Subject: crypto: atmel-sha204a - Fix error codes in OTP reads + +From: Thorsten Blum <thorsten.blum@linux.dev> + +commit 094c276da6a0d4971c3faae09a36b51d096659b2 upstream. + +Return -EINVAL from atmel_i2c_init_read_otp_cmd() on invalid addresses +instead of -1. Since the OTP zone is accessed in 4-byte blocks, valid +addresses range from 0 to OTP_ZONE_SIZE / 4 - 1. Fix the bounds check +accordingly. + +In atmel_sha204a_otp_read(), propagate the actual error code from +atmel_i2c_init_read_otp_cmd() instead of -1. Also, return -EIO instead +of -EINVAL when the device is not ready. + +Cc: stable@vger.kernel.org +Fixes: e05ce444e9e5 ("crypto: atmel-sha204a - add reading from otp zone") +Signed-off-by: Thorsten Blum <thorsten.blum@linux.dev> +Reviewed-by: Lothar Rubusch <l.rubusch@gmail.com> +Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au> +Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> +--- + drivers/crypto/atmel-i2c.c | 4 ++-- + drivers/crypto/atmel-sha204a.c | 7 ++++--- + 2 files changed, 6 insertions(+), 5 deletions(-) + +--- a/drivers/crypto/atmel-i2c.c ++++ b/drivers/crypto/atmel-i2c.c +@@ -72,8 +72,8 @@ EXPORT_SYMBOL(atmel_i2c_init_read_config + + int atmel_i2c_init_read_otp_cmd(struct atmel_i2c_cmd *cmd, u16 addr) + { +- if (addr < 0 || addr > OTP_ZONE_SIZE) +- return -1; ++ if (addr >= OTP_ZONE_SIZE / 4) ++ return -EINVAL; + + cmd->word_addr = COMMAND; + cmd->opcode = OPCODE_READ; +--- a/drivers/crypto/atmel-sha204a.c ++++ b/drivers/crypto/atmel-sha204a.c +@@ -96,9 +96,10 @@ static int atmel_sha204a_rng_read(struct + static int atmel_sha204a_otp_read(struct i2c_client *client, u16 addr, u8 *otp) + { + struct atmel_i2c_cmd cmd; +- int ret = -1; ++ int ret; + +- if (atmel_i2c_init_read_otp_cmd(&cmd, addr) < 0) { ++ ret = atmel_i2c_init_read_otp_cmd(&cmd, addr); ++ if 
(ret < 0) { + dev_err(&client->dev, "failed, invalid otp address %04X\n", + addr); + return ret; +@@ -108,7 +109,7 @@ static int atmel_sha204a_otp_read(struct + + if (cmd.data[0] == 0xff) { + dev_err(&client->dev, "failed, device not ready\n"); +- return -EINVAL; ++ return -EIO; + } + + memcpy(otp, cmd.data+1, 4);
diff --git a/queue-7.0/crypto-atmel-sha204a-fix-potential-uaf-and-memory-leak-in-remove-path.patch b/queue-7.0/crypto-atmel-sha204a-fix-potential-uaf-and-memory-leak-in-remove-path.patch new file mode 100644 index 0000000..bb39d56 --- /dev/null +++ b/queue-7.0/crypto-atmel-sha204a-fix-potential-uaf-and-memory-leak-in-remove-path.patch
@@ -0,0 +1,40 @@ +From bab1adf3b87e4bfac92c4f5963c63db434d561c1 Mon Sep 17 00:00:00 2001 +From: Thorsten Blum <thorsten.blum@linux.dev> +Date: Sat, 14 Mar 2026 20:36:29 +0100 +Subject: crypto: atmel-sha204a - Fix potential UAF and memory leak in remove path + +From: Thorsten Blum <thorsten.blum@linux.dev> + +commit bab1adf3b87e4bfac92c4f5963c63db434d561c1 upstream. + +Unregister the hwrng to prevent new ->read() calls and flush the Atmel +I2C workqueue before teardown to prevent a potential UAF if a queued +callback runs while the device is being removed. + +Drop the early return to ensure sysfs entries are removed and +->hwrng.priv is freed, preventing a memory leak. + +Fixes: da001fb651b0 ("crypto: atmel-i2c - add support for SHA204A random number generator") +Cc: stable@vger.kernel.org +Signed-off-by: Thorsten Blum <thorsten.blum@linux.dev> +Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au> +Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> +--- + drivers/crypto/atmel-sha204a.c | 6 ++---- + 1 file changed, 2 insertions(+), 4 deletions(-) + +--- a/drivers/crypto/atmel-sha204a.c ++++ b/drivers/crypto/atmel-sha204a.c +@@ -194,10 +194,8 @@ static void atmel_sha204a_remove(struct + { + struct atmel_i2c_client_priv *i2c_priv = i2c_get_clientdata(client); + +- if (atomic_read(&i2c_priv->tfm_count)) { +- dev_emerg(&client->dev, "Device is busy, will remove it anyhow\n"); +- return; +- } ++ devm_hwrng_unregister(&client->dev, &i2c_priv->hwrng); ++ atmel_i2c_flush_queue(); + + sysfs_remove_group(&client->dev.kobj, &atmel_sha204a_groups); +
diff --git a/queue-7.0/crypto-atmel-sha204a-fix-uninitialized-data-access-on-otp-read-error.patch b/queue-7.0/crypto-atmel-sha204a-fix-uninitialized-data-access-on-otp-read-error.patch new file mode 100644 index 0000000..e6956a8 --- /dev/null +++ b/queue-7.0/crypto-atmel-sha204a-fix-uninitialized-data-access-on-otp-read-error.patch
@@ -0,0 +1,34 @@ +From de4e66b763d1e81188cb2803ec109466582fc9d1 Mon Sep 17 00:00:00 2001 +From: Thorsten Blum <thorsten.blum@linux.dev> +Date: Fri, 20 Feb 2026 14:31:36 +0100 +Subject: crypto: atmel-sha204a - Fix uninitialized data access on OTP read error + +From: Thorsten Blum <thorsten.blum@linux.dev> + +commit de4e66b763d1e81188cb2803ec109466582fc9d1 upstream. + +Return early if atmel_i2c_send_receive() fails to avoid checking +potentially uninitialized data in 'cmd.data'. + +Cc: stable@vger.kernel.org +Fixes: e05ce444e9e5 ("crypto: atmel-sha204a - add reading from otp zone") +Signed-off-by: Thorsten Blum <thorsten.blum@linux.dev> +Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au> +Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> +--- + drivers/crypto/atmel-sha204a.c | 4 ++++ + 1 file changed, 4 insertions(+) + +--- a/drivers/crypto/atmel-sha204a.c ++++ b/drivers/crypto/atmel-sha204a.c +@@ -106,6 +106,10 @@ static int atmel_sha204a_otp_read(struct + } + + ret = atmel_i2c_send_receive(client, &cmd); ++ if (ret < 0) { ++ dev_err(&client->dev, "failed to read otp at %04X\n", addr); ++ return ret; ++ } + + if (cmd.data[0] == 0xff) { + dev_err(&client->dev, "failed, device not ready\n");
diff --git a/queue-7.0/crypto-atmel-tdes-fix-dma-sync-direction.patch b/queue-7.0/crypto-atmel-tdes-fix-dma-sync-direction.patch new file mode 100644 index 0000000..c536d2b --- /dev/null +++ b/queue-7.0/crypto-atmel-tdes-fix-dma-sync-direction.patch
@@ -0,0 +1,48 @@ +From c8a9a647532f5c2a04180352693215e24e9dba03 Mon Sep 17 00:00:00 2001 +From: Thorsten Blum <thorsten.blum@linux.dev> +Date: Sat, 7 Mar 2026 16:31:10 +0100 +Subject: crypto: atmel-tdes - fix DMA sync direction + +From: Thorsten Blum <thorsten.blum@linux.dev> + +commit c8a9a647532f5c2a04180352693215e24e9dba03 upstream. + +Before DMA output is consumed by the CPU, ->dma_addr_out must be synced +with dma_sync_single_for_cpu() instead of dma_sync_single_for_device(). +Using the wrong direction can return stale cache data on non-coherent +platforms. + +Fixes: 13802005d8f2 ("crypto: atmel - add Atmel DES/TDES driver") +Fixes: 1f858040c2f7 ("crypto: atmel-tdes - add support for latest release of the IP (0x700)") +Cc: stable@vger.kernel.org +Signed-off-by: Thorsten Blum <thorsten.blum@linux.dev> +Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au> +Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> +--- + drivers/crypto/atmel-tdes.c | 8 ++++---- + 1 file changed, 4 insertions(+), 4 deletions(-) + +--- a/drivers/crypto/atmel-tdes.c ++++ b/drivers/crypto/atmel-tdes.c +@@ -294,8 +294,8 @@ static int atmel_tdes_crypt_pdc_stop(str + dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE); + dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE); + } else { +- dma_sync_single_for_device(dd->dev, dd->dma_addr_out, +- dd->dma_size, DMA_FROM_DEVICE); ++ dma_sync_single_for_cpu(dd->dev, dd->dma_addr_out, ++ dd->dma_size, DMA_FROM_DEVICE); + + /* copy data */ + count = atmel_tdes_sg_copy(&dd->out_sg, &dd->out_offset, +@@ -619,8 +619,8 @@ static int atmel_tdes_crypt_dma_stop(str + dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE); + dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE); + } else { +- dma_sync_single_for_device(dd->dev, dd->dma_addr_out, +- dd->dma_size, DMA_FROM_DEVICE); ++ dma_sync_single_for_cpu(dd->dev, dd->dma_addr_out, ++ dd->dma_size, DMA_FROM_DEVICE); + + /* copy data */ + count = atmel_tdes_sg_copy(&dd->out_sg, &dd->out_offset,
diff --git a/queue-7.0/crypto-ccree-fix-a-memory-leak-in-cc_mac_digest.patch b/queue-7.0/crypto-ccree-fix-a-memory-leak-in-cc_mac_digest.patch new file mode 100644 index 0000000..54319aa --- /dev/null +++ b/queue-7.0/crypto-ccree-fix-a-memory-leak-in-cc_mac_digest.patch
@@ -0,0 +1,31 @@ +From 02c64052fad03699b9c6d1df2f9b444d17e4ac50 Mon Sep 17 00:00:00 2001 +From: Haoxiang Li <lihaoxiang@isrc.iscas.ac.cn> +Date: Mon, 30 Mar 2026 11:34:02 +0800 +Subject: crypto: ccree - fix a memory leak in cc_mac_digest() + +From: Haoxiang Li <lihaoxiang@isrc.iscas.ac.cn> + +commit 02c64052fad03699b9c6d1df2f9b444d17e4ac50 upstream. + +Add cc_unmap_result() if cc_map_hash_request_final() +fails to prevent potential memory leak. + +Fixes: 63893811b0fc ("crypto: ccree - add ahash support") +Cc: stable@vger.kernel.org +Signed-off-by: Haoxiang Li <lihaoxiang@isrc.iscas.ac.cn> +Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au> +Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> +--- + drivers/crypto/ccree/cc_hash.c | 1 + + 1 file changed, 1 insertion(+) + +--- a/drivers/crypto/ccree/cc_hash.c ++++ b/drivers/crypto/ccree/cc_hash.c +@@ -1448,6 +1448,7 @@ static int cc_mac_digest(struct ahash_re + if (cc_map_hash_request_final(ctx->drvdata, state, req->src, + req->nbytes, 1, flags)) { + dev_err(dev, "map_ahash_request_final() failed\n"); ++ cc_unmap_result(dev, state, digestsize, req->result); + cc_unmap_req(dev, state, ctx); + return -ENOMEM; + }
diff --git a/queue-7.0/crypto-hisilicon-fix-dma_unmap_single-direction.patch b/queue-7.0/crypto-hisilicon-fix-dma_unmap_single-direction.patch new file mode 100644 index 0000000..f297085 --- /dev/null +++ b/queue-7.0/crypto-hisilicon-fix-dma_unmap_single-direction.patch
@@ -0,0 +1,35 @@ +From 1ee57ab93b75eb59f426aef37b5498a7ffc28278 Mon Sep 17 00:00:00 2001 +From: Thomas Fourier <fourier.thomas@gmail.com> +Date: Mon, 30 Mar 2026 17:19:32 +0200 +Subject: crypto: hisilicon - Fix dma_unmap_single() direction + +From: Thomas Fourier <fourier.thomas@gmail.com> + +commit 1ee57ab93b75eb59f426aef37b5498a7ffc28278 upstream. + +The direction used to map the buffer skreq->iv is DMA_TO_DEVICE but it is +unmapped with direction DMA_BIDIRECTIONAL in the error path. + +Change the unmap to match the mapping. + +Fixes: 915e4e8413da ("crypto: hisilicon - SEC security accelerator driver") +Cc: <stable@vger.kernel.org> +Signed-off-by: Thomas Fourier <fourier.thomas@gmail.com> +Reviewed-by: Thorsten Blum <thorsten.blum@linux.dev> +Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au> +Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> +--- + drivers/crypto/hisilicon/sec/sec_algs.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/drivers/crypto/hisilicon/sec/sec_algs.c ++++ b/drivers/crypto/hisilicon/sec/sec_algs.c +@@ -844,7 +844,7 @@ err_free_elements: + if (crypto_skcipher_ivsize(atfm)) + dma_unmap_single(info->dev, sec_req->dma_iv, + crypto_skcipher_ivsize(atfm), +- DMA_BIDIRECTIONAL); ++ DMA_TO_DEVICE); + err_unmap_out_sg: + if (split) + sec_unmap_sg_on_err(skreq->dst, steps, splits_out,
diff --git a/queue-7.0/crypto-nx-fix-bounce-buffer-leaks-in-nx842_crypto_-alloc-free-_ctx.patch b/queue-7.0/crypto-nx-fix-bounce-buffer-leaks-in-nx842_crypto_-alloc-free-_ctx.patch new file mode 100644 index 0000000..8451d45 --- /dev/null +++ b/queue-7.0/crypto-nx-fix-bounce-buffer-leaks-in-nx842_crypto_-alloc-free-_ctx.patch
@@ -0,0 +1,47 @@ +From adb3faf2db1a66d0f015b44ac909a32dfc7f2f9c Mon Sep 17 00:00:00 2001 +From: Thorsten Blum <thorsten.blum@linux.dev> +Date: Wed, 11 Mar 2026 16:56:47 +0100 +Subject: crypto: nx - fix bounce buffer leaks in nx842_crypto_{alloc,free}_ctx + +From: Thorsten Blum <thorsten.blum@linux.dev> + +commit adb3faf2db1a66d0f015b44ac909a32dfc7f2f9c upstream. + +The bounce buffers are allocated with __get_free_pages() using +BOUNCE_BUFFER_ORDER (order 2 = 4 pages), but both the allocation error +path and nx842_crypto_free_ctx() release the buffers with free_page(). +Use free_pages() with the matching order instead. + +Fixes: ed70b479c2c0 ("crypto: nx - add hardware 842 crypto comp alg") +Cc: stable@vger.kernel.org +Signed-off-by: Thorsten Blum <thorsten.blum@linux.dev> +Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au> +Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> +--- + drivers/crypto/nx/nx-842.c | 8 ++++---- + 1 file changed, 4 insertions(+), 4 deletions(-) + +--- a/drivers/crypto/nx/nx-842.c ++++ b/drivers/crypto/nx/nx-842.c +@@ -116,8 +116,8 @@ void *nx842_crypto_alloc_ctx(struct nx84 + ctx->dbounce = (u8 *)__get_free_pages(GFP_KERNEL, BOUNCE_BUFFER_ORDER); + if (!ctx->wmem || !ctx->sbounce || !ctx->dbounce) { + kfree(ctx->wmem); +- free_page((unsigned long)ctx->sbounce); +- free_page((unsigned long)ctx->dbounce); ++ free_pages((unsigned long)ctx->sbounce, BOUNCE_BUFFER_ORDER); ++ free_pages((unsigned long)ctx->dbounce, BOUNCE_BUFFER_ORDER); + kfree(ctx); + return ERR_PTR(-ENOMEM); + } +@@ -131,8 +131,8 @@ void nx842_crypto_free_ctx(void *p) + struct nx842_crypto_ctx *ctx = p; + + kfree(ctx->wmem); +- free_page((unsigned long)ctx->sbounce); +- free_page((unsigned long)ctx->dbounce); ++ free_pages((unsigned long)ctx->sbounce, BOUNCE_BUFFER_ORDER); ++ free_pages((unsigned long)ctx->dbounce, BOUNCE_BUFFER_ORDER); + } + EXPORT_SYMBOL_GPL(nx842_crypto_free_ctx); +
diff --git a/queue-7.0/crypto-nx-fix-context-leak-in-nx842_crypto_free_ctx.patch b/queue-7.0/crypto-nx-fix-context-leak-in-nx842_crypto_free_ctx.patch new file mode 100644 index 0000000..8068a81 --- /dev/null +++ b/queue-7.0/crypto-nx-fix-context-leak-in-nx842_crypto_free_ctx.patch
@@ -0,0 +1,46 @@ +From 344e6a4f7ff4756b9b3f75e0eb7eaec297e35540 Mon Sep 17 00:00:00 2001 +From: Thorsten Blum <thorsten.blum@linux.dev> +Date: Wed, 11 Mar 2026 16:56:49 +0100 +Subject: crypto: nx - fix context leak in nx842_crypto_free_ctx + +From: Thorsten Blum <thorsten.blum@linux.dev> + +commit 344e6a4f7ff4756b9b3f75e0eb7eaec297e35540 upstream. + +Since the scomp conversion, nx842_crypto_alloc_ctx() allocates the +context separately, but nx842_crypto_free_ctx() never releases it. Add +the missing kfree(ctx) to nx842_crypto_free_ctx(), and reuse +nx842_crypto_free_ctx() in the allocation error path. + +Fixes: 980b5705f4e7 ("crypto: nx - Migrate to scomp API") +Cc: stable@vger.kernel.org +Signed-off-by: Thorsten Blum <thorsten.blum@linux.dev> +Reviewed-by: Ard Biesheuvel <ardb@kernel.org> +Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au> +Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> +--- + drivers/crypto/nx/nx-842.c | 6 ++---- + 1 file changed, 2 insertions(+), 4 deletions(-) + +--- a/drivers/crypto/nx/nx-842.c ++++ b/drivers/crypto/nx/nx-842.c +@@ -115,10 +115,7 @@ void *nx842_crypto_alloc_ctx(struct nx84 + ctx->sbounce = (u8 *)__get_free_pages(GFP_KERNEL, BOUNCE_BUFFER_ORDER); + ctx->dbounce = (u8 *)__get_free_pages(GFP_KERNEL, BOUNCE_BUFFER_ORDER); + if (!ctx->wmem || !ctx->sbounce || !ctx->dbounce) { +- kfree(ctx->wmem); +- free_pages((unsigned long)ctx->sbounce, BOUNCE_BUFFER_ORDER); +- free_pages((unsigned long)ctx->dbounce, BOUNCE_BUFFER_ORDER); +- kfree(ctx); ++ nx842_crypto_free_ctx(ctx); + return ERR_PTR(-ENOMEM); + } + +@@ -133,6 +130,7 @@ void nx842_crypto_free_ctx(void *p) + kfree(ctx->wmem); + free_pages((unsigned long)ctx->sbounce, BOUNCE_BUFFER_ORDER); + free_pages((unsigned long)ctx->dbounce, BOUNCE_BUFFER_ORDER); ++ kfree(ctx); + } + EXPORT_SYMBOL_GPL(nx842_crypto_free_ctx); +
diff --git a/queue-7.0/crypto-nx-fix-packed-layout-in-struct-nx842_crypto_header.patch b/queue-7.0/crypto-nx-fix-packed-layout-in-struct-nx842_crypto_header.patch new file mode 100644 index 0000000..5352f69 --- /dev/null +++ b/queue-7.0/crypto-nx-fix-packed-layout-in-struct-nx842_crypto_header.patch
@@ -0,0 +1,81 @@ +From b0bfa49c03e3c65737eafa73d8a698eaf55379a6 Mon Sep 17 00:00:00 2001 +From: "Gustavo A. R. Silva" <gustavoars@kernel.org> +Date: Tue, 17 Mar 2026 17:40:02 -0600 +Subject: crypto: nx - Fix packed layout in struct nx842_crypto_header + +From: Gustavo A. R. Silva <gustavoars@kernel.org> + +commit b0bfa49c03e3c65737eafa73d8a698eaf55379a6 upstream. + +struct nx842_crypto_header is declared with the __packed attribute, +however the fields grouped with struct_group_tagged() were not packed. +This caused the grouped header portion of the structure to lose the +packed layout guarantees of the containing structure. + +Fix this by replacing struct_group_tagged() with __struct_group(..., +..., __packed, ...) so the grouped fields are packed, and the original +layout is preserved, restoring the intended packed layout of the +structure. + +Before changes: +struct nx842_crypto_header { + union { + struct { + __be16 magic; /* 0 2 */ + __be16 ignore; /* 2 2 */ + u8 groups; /* 4 1 */ + }; /* 0 6 */ + struct nx842_crypto_header_hdr hdr; /* 0 6 */ + }; /* 0 6 */ + struct nx842_crypto_header_group group[]; /* 6 0 */ + + /* size: 6, cachelines: 1, members: 2 */ + /* last cacheline: 6 bytes */ +} __attribute__((__packed__)); + +After changes: +struct nx842_crypto_header { + union { + struct { + __be16 magic; /* 0 2 */ + __be16 ignore; /* 2 2 */ + u8 groups; /* 4 1 */ + } __attribute__((__packed__)); /* 0 5 */ + struct nx842_crypto_header_hdr hdr; /* 0 5 */ + }; /* 0 5 */ + struct nx842_crypto_header_group group[]; /* 5 0 */ + + /* size: 5, cachelines: 1, members: 2 */ + /* last cacheline: 5 bytes */ +} __attribute__((__packed__)); + +Fixes: 1e6b251ce175 ("crypto: nx - Avoid -Wflex-array-member-not-at-end warning") +Cc: stable@vger.kernel.org +Signed-off-by: Gustavo A. R. 
Silva <gustavoars@kernel.org> +Reviewed-by: Thorsten Blum <thorsten.blum@linux.dev> +Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au> +Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> +--- + drivers/crypto/nx/nx-842.h | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +--- a/drivers/crypto/nx/nx-842.h ++++ b/drivers/crypto/nx/nx-842.h +@@ -159,7 +159,7 @@ struct nx842_crypto_header_group { + + struct nx842_crypto_header { + /* New members MUST be added within the struct_group() macro below. */ +- struct_group_tagged(nx842_crypto_header_hdr, hdr, ++ __struct_group(nx842_crypto_header_hdr, hdr, __packed, + __be16 magic; /* NX842_CRYPTO_MAGIC */ + __be16 ignore; /* decompressed end bytes to ignore */ + u8 groups; /* total groups in this header */ +@@ -167,7 +167,7 @@ struct nx842_crypto_header { + struct nx842_crypto_header_group group[]; + } __packed; + static_assert(offsetof(struct nx842_crypto_header, group) == sizeof(struct nx842_crypto_header_hdr), +- "struct member likely outside of struct_group_tagged()"); ++ "struct member likely outside of __struct_group()"); + + #define NX842_CRYPTO_GROUP_MAX (0x20) +
diff --git a/queue-7.0/dm-mirror-fix-integer-overflow-in-create_dirty_log.patch b/queue-7.0/dm-mirror-fix-integer-overflow-in-create_dirty_log.patch new file mode 100644 index 0000000..a172733 --- /dev/null +++ b/queue-7.0/dm-mirror-fix-integer-overflow-in-create_dirty_log.patch
@@ -0,0 +1,53 @@ +From 4c788c6f921b22f9b6c3f316c4a071c05683e7de Mon Sep 17 00:00:00 2001 +From: Junrui Luo <moonafterrain@outlook.com> +Date: Sun, 1 Mar 2026 21:10:58 +0800 +Subject: dm mirror: fix integer overflow in create_dirty_log() + +From: Junrui Luo <moonafterrain@outlook.com> + +commit 4c788c6f921b22f9b6c3f316c4a071c05683e7de upstream. + +The argument count calculation in create_dirty_log() performs +`*args_used = 2 + param_count` before validating against argc. When a +user provides a param_count close to UINT_MAX via the device mapper +table string, this unsigned addition wraps around to a small value, +causing the subsequent `argc < *args_used` check to be bypassed. + +The overflowed param_count is then passed as argc to dm_dirty_log_create(), +where it can cause out-of-bounds reads on the argv array. + +Fix by comparing param_count against argc - 2 before performing the +addition, following the same pattern used by parse_features() in the +same file. Since argc >= 2 is already guaranteed, the subtraction is +safe. + +Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2") +Cc: stable@vger.kernel.org +Reported-by: Yuhao Jiang <danisjiang@gmail.com> +Signed-off-by: Junrui Luo <moonafterrain@outlook.com> +Reviewed-by: Benjamin Marzinski <bmarzins@redhat.com> +Signed-off-by: Mikulas Patocka <mpatocka@redhat.com> +Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> +--- + drivers/md/dm-raid1.c | 6 +++--- + 1 file changed, 3 insertions(+), 3 deletions(-) + +--- a/drivers/md/dm-raid1.c ++++ b/drivers/md/dm-raid1.c +@@ -993,13 +993,13 @@ static struct dm_dirty_log *create_dirty + return NULL; + } + +- *args_used = 2 + param_count; +- +- if (argc < *args_used) { ++ if (param_count > argc - 2) { + ti->error = "Insufficient mirror log arguments"; + return NULL; + } + ++ *args_used = 2 + param_count; ++ + dl = dm_dirty_log_create(argv[0], ti, mirror_flush, param_count, + argv + 2); + if (!dl) {
diff --git a/queue-7.0/erofs-fix-unsigned-underflow-in-z_erofs_lz4_handle_overlap.patch b/queue-7.0/erofs-fix-unsigned-underflow-in-z_erofs_lz4_handle_overlap.patch new file mode 100644 index 0000000..5d14207 --- /dev/null +++ b/queue-7.0/erofs-fix-unsigned-underflow-in-z_erofs_lz4_handle_overlap.patch
@@ -0,0 +1,53 @@ +From 21e161de2dc660b1bb70ef5b156ab8e6e1cca3ab Mon Sep 17 00:00:00 2001 +From: Junrui Luo <moonafterrain@outlook.com> +Date: Thu, 9 Apr 2026 21:59:39 +0800 +Subject: erofs: fix unsigned underflow in z_erofs_lz4_handle_overlap() + +From: Junrui Luo <moonafterrain@outlook.com> + +commit 21e161de2dc660b1bb70ef5b156ab8e6e1cca3ab upstream. + +Some crafted images can have illegal (!partial_decoding && +m_llen < m_plen) extents, and the LZ4 inplace decompression path +can be wrongly hit, but it cannot handle (outpages < inpages) +properly: "outpages - inpages" wraps to a large value and +the subsequent rq->out[] access reads past the decompressed_pages +array. + +However, such crafted cases can correctly result in a corruption +report in the normal LZ4 non-inplace path. + +Let's add an additional check to fix this for backporting. + +Reproducible image (base64-encoded gzipped blob): + +H4sIAJGR12kCA+3SPUoDQRgG4MkmkkZk8QRbRFIIi9hbpEjrHQI5ghfwCN5BLCzTGtLbBI+g +dilSJo1CnIm7GEXFxhT6PDDwfrs73/ywIQD/1ePD4r7Ou6ETsrq4mu7XcWfj++Pb58nJU/9i +PNtbjhan04/9GtX4qVYc814WDqt6FaX5s+ZwXXeq52lndT6IuVvlblytLMvh4Gzwaf90nsvz +2DF/21+20T/ldgp5s1jXRaN4t/8izsy/OUB6e/Qa79r+JwAAAAAAAL52vQVuGQAAAP6+my1w +ywAAAAAAAADwu14ATsEYtgBQAAA= + +$ mount -t erofs -o cache_strategy=disabled foo.erofs /mnt +$ dd if=/mnt/data of=/dev/null bs=4096 count=1 + +Fixes: 598162d05080 ("erofs: support decompress big pcluster for lz4 backend") +Reported-by: Yuhao Jiang <danisjiang@gmail.com> +Cc: stable@vger.kernel.org +Signed-off-by: Junrui Luo <moonafterrain@outlook.com> +Reviewed-by: Gao Xiang <hsiangkao@linux.alibaba.com> +Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com> +Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> +--- + fs/erofs/decompressor.c | 1 + + 1 file changed, 1 insertion(+) + +--- a/fs/erofs/decompressor.c ++++ b/fs/erofs/decompressor.c +@@ -145,6 +145,7 @@ static void *z_erofs_lz4_handle_overlap( + oend = rq->pageofs_out + rq->outputsize; + omargin = PAGE_ALIGN(oend) 
- oend; + if (!rq->partial_decoding && may_inplace && ++ rq->outpages >= rq->inpages && + omargin >= LZ4_DECOMPRESS_INPLACE_MARGIN(rq->inputsize)) { + for (i = 0; i < rq->inpages; ++i) + if (rq->out[rq->outpages - rq->inpages + i] !=
diff --git a/queue-7.0/ext4-fix-bounds-check-in-check_xattrs-to-prevent-out-of-bounds-access.patch b/queue-7.0/ext4-fix-bounds-check-in-check_xattrs-to-prevent-out-of-bounds-access.patch new file mode 100644 index 0000000..9235b37 --- /dev/null +++ b/queue-7.0/ext4-fix-bounds-check-in-check_xattrs-to-prevent-out-of-bounds-access.patch
@@ -0,0 +1,44 @@ +From eceafc31ea7b42c984ece10d79d505c0bb6615d5 Mon Sep 17 00:00:00 2001 +From: Deepanshu Kartikey <kartikey406@gmail.com> +Date: Sat, 28 Mar 2026 20:30:38 +0530 +Subject: ext4: fix bounds check in check_xattrs() to prevent out-of-bounds access + +From: Deepanshu Kartikey <kartikey406@gmail.com> + +commit eceafc31ea7b42c984ece10d79d505c0bb6615d5 upstream. + +The bounds check for the next xattr entry in check_xattrs() uses +(void *)next >= end, which allows next to point within sizeof(u32) +bytes of end. On the next loop iteration, IS_LAST_ENTRY() reads 4 +bytes via *(__u32 *)(entry), which can overrun the valid xattr region. + +For example, if next lands at end - 1, the check passes since +next < end, but IS_LAST_ENTRY() reads 4 bytes starting at end - 1, +accessing 3 bytes beyond the valid region. + +Fix this by changing the check to (void *)next + sizeof(u32) > end, +ensuring there is always enough space for the IS_LAST_ENTRY() read +on the subsequent iteration. + +Fixes: 3478c83cf26b ("ext4: improve xattr consistency checking and error reporting") +Cc: stable@vger.kernel.org +Link: https://lore.kernel.org/all/20260224231429.31361-1-kartikey406@gmail.com/T/ [v1] +Signed-off-by: Deepanshu Kartikey <kartikey406@gmail.com> +Link: https://patch.msgid.link/20260328150038.349497-1-kartikey406@gmail.com +Signed-off-by: Theodore Ts'o <tytso@mit.edu> +Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> +--- + fs/ext4/xattr.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/fs/ext4/xattr.c ++++ b/fs/ext4/xattr.c +@@ -226,7 +226,7 @@ check_xattrs(struct inode *inode, struct + /* Find the end of the names list */ + while (!IS_LAST_ENTRY(e)) { + struct ext4_xattr_entry *next = EXT4_XATTR_NEXT(e); +- if ((void *)next >= end) { ++ if ((void *)next + sizeof(u32) > end) { + err_str = "e_name out of bounds"; + goto errout; + }
diff --git a/queue-7.0/ext4-fix-missing-brelse-in-ext4_xattr_inode_dec_ref_all.patch b/queue-7.0/ext4-fix-missing-brelse-in-ext4_xattr_inode_dec_ref_all.patch new file mode 100644 index 0000000..814b0b6 --- /dev/null +++ b/queue-7.0/ext4-fix-missing-brelse-in-ext4_xattr_inode_dec_ref_all.patch
@@ -0,0 +1,49 @@ +From 77d059519382bd66283e6a4e83ee186e87e7708f Mon Sep 17 00:00:00 2001 +From: Sohei Koyama <skoyama@ddn.com> +Date: Mon, 6 Apr 2026 16:48:30 +0900 +Subject: ext4: fix missing brelse() in ext4_xattr_inode_dec_ref_all() + +From: Sohei Koyama <skoyama@ddn.com> + +commit 77d059519382bd66283e6a4e83ee186e87e7708f upstream. + +The commit c8e008b60492 ("ext4: ignore xattrs past end") +introduced a refcount leak in when block_csum is false. + +ext4_xattr_inode_dec_ref_all() calls ext4_get_inode_loc() to +get iloc.bh, but never releases it with brelse(). + +Fixes: c8e008b60492 ("ext4: ignore xattrs past end") +Signed-off-by: Sohei Koyama <skoyama@ddn.com> +Reviewed-by: Andreas Dilger <adilger@dilger.ca> +Reviewed-by: Ritesh Harjani (IBM) <ritesh.list@gmail.com> +Cc: stable@vger.kernel.org +Reviewed-by: Zhang Yi <yi.zhang@huawei.com> +Reviewed-by: Baokun Li <libaokun@linux.alibaba.com> +Link: https://patch.msgid.link/20260406074830.8480-1-skoyama@ddn.com +Signed-off-by: Theodore Ts'o <tytso@mit.edu> +Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> +--- + fs/ext4/xattr.c | 4 +++- + 1 file changed, 3 insertions(+), 1 deletion(-) + +--- a/fs/ext4/xattr.c ++++ b/fs/ext4/xattr.c +@@ -1165,7 +1165,7 @@ ext4_xattr_inode_dec_ref_all(handle_t *h + { + struct inode *ea_inode; + struct ext4_xattr_entry *entry; +- struct ext4_iloc iloc; ++ struct ext4_iloc iloc = { .bh = NULL }; + bool dirty = false; + unsigned int ea_ino; + int err; +@@ -1260,6 +1260,8 @@ ext4_xattr_inode_dec_ref_all(handle_t *h + ext4_warning_inode(parent, + "handle dirty metadata err=%d", err); + } ++ ++ brelse(iloc.bh); + } + + /*
diff --git a/queue-7.0/gtp-disable-bh-before-calling-udp_tunnel_xmit_skb.patch b/queue-7.0/gtp-disable-bh-before-calling-udp_tunnel_xmit_skb.patch new file mode 100644 index 0000000..3917394 --- /dev/null +++ b/queue-7.0/gtp-disable-bh-before-calling-udp_tunnel_xmit_skb.patch
@@ -0,0 +1,59 @@ +From 5638504a2aa9e1b9d72af9060df1a160cce2d379 Mon Sep 17 00:00:00 2001 +From: David Carlier <devnexen@gmail.com> +Date: Fri, 17 Apr 2026 06:54:08 +0100 +Subject: gtp: disable BH before calling udp_tunnel_xmit_skb() +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +From: David Carlier <devnexen@gmail.com> + +commit 5638504a2aa9e1b9d72af9060df1a160cce2d379 upstream. + +gtp_genl_send_echo_req() runs as a generic netlink doit handler in +process context with BH not disabled. It calls udp_tunnel_xmit_skb(), +which eventually invokes iptunnel_xmit() — that uses __this_cpu_inc/dec +on softnet_data.xmit.recursion to track the tunnel xmit recursion level. + +Without local_bh_disable(), the task may migrate between +dev_xmit_recursion_inc() and dev_xmit_recursion_dec(), breaking the +per-CPU counter pairing. The result is stale or negative recursion +levels that can later produce false-positive +SKB_DROP_REASON_RECURSION_LIMIT drops on either CPU. + +The other udp_tunnel_xmit_skb() call sites in gtp.c are unaffected: +the data path runs under ndo_start_xmit and the echo response handlers +run from the UDP encap rx softirq, both with BH already disabled. + +Fix it by disabling BH around the udp_tunnel_xmit_skb() call, mirroring +commit 2cd7e6971fc2 ("sctp: disable BH before calling +udp_tunnel_xmit_skb()"). 
+ +Fixes: 6f1a9140ecda ("net: add xmit recursion limit to tunnel xmit functions") +Cc: stable@vger.kernel.org +Signed-off-by: David Carlier <devnexen@gmail.com> +Link: https://patch.msgid.link/20260417055408.4667-1-devnexen@gmail.com +Signed-off-by: Jakub Kicinski <kuba@kernel.org> +Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> +--- + drivers/net/gtp.c | 2 ++ + 1 file changed, 2 insertions(+) + +--- a/drivers/net/gtp.c ++++ b/drivers/net/gtp.c +@@ -2400,6 +2400,7 @@ static int gtp_genl_send_echo_req(struct + return -ENODEV; + } + ++ local_bh_disable(); + udp_tunnel_xmit_skb(rt, sk, skb_to_send, + fl4.saddr, fl4.daddr, + inet_dscp_to_dsfield(fl4.flowi4_dscp), +@@ -2409,6 +2410,7 @@ static int gtp_genl_send_echo_req(struct + !net_eq(sock_net(sk), + dev_net(gtp->dev)), + false, 0); ++ local_bh_enable(); + return 0; + } +
diff --git a/queue-7.0/hid-apple-ensure-the-keyboard-backlight-is-off-if-suspending.patch b/queue-7.0/hid-apple-ensure-the-keyboard-backlight-is-off-if-suspending.patch new file mode 100644 index 0000000..d4d9fa0 --- /dev/null +++ b/queue-7.0/hid-apple-ensure-the-keyboard-backlight-is-off-if-suspending.patch
@@ -0,0 +1,45 @@ +From 1f95a6cd5ad78ed27a31a20cbd1facff6f10b33d Mon Sep 17 00:00:00 2001 +From: Aditya Garg <gargaditya08@live.com> +Date: Sat, 4 Apr 2026 15:14:34 +0530 +Subject: HID: apple: ensure the keyboard backlight is off if suspending +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +From: Aditya Garg <gargaditya08@live.com> + +commit 1f95a6cd5ad78ed27a31a20cbd1facff6f10b33d upstream. + +Some users reported that upon suspending their keyboard backlight +remained on. Fix this by adding the missing LED_CORE_SUSPENDRESUME flag. + +Cc: stable@vger.kernel.org +Fixes: 394ba612f941 ("HID: apple: Add support for magic keyboard backlight on T2 Macs") +Fixes: 9018eacbe623 ("HID: apple: Add support for keyboard backlight on certain T2 Macs.") +Reported-by: André Eikmeyer <andre.eikmeyer@gmail.com> +Tested-by: André Eikmeyer <andre.eikmeyer@gmail.com> +Signed-off-by: Aditya Garg <gargaditya08@live.com> +Signed-off-by: Jiri Kosina <jkosina@suse.com> +Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> +--- + drivers/hid/hid-apple.c | 2 ++ + 1 file changed, 2 insertions(+) + +--- a/drivers/hid/hid-apple.c ++++ b/drivers/hid/hid-apple.c +@@ -858,6 +858,7 @@ static int apple_backlight_init(struct h + asc->backlight->cdev.name = "apple::kbd_backlight"; + asc->backlight->cdev.max_brightness = rep->backlight_on_max; + asc->backlight->cdev.brightness_set_blocking = apple_backlight_led_set; ++ asc->backlight->cdev.flags = LED_CORE_SUSPENDRESUME; + + ret = apple_backlight_set(hdev, 0, 0); + if (ret < 0) { +@@ -926,6 +927,7 @@ static int apple_magic_backlight_init(st + backlight->cdev.name = ":white:" LED_FUNCTION_KBD_BACKLIGHT; + backlight->cdev.max_brightness = backlight->brightness->field[0]->logical_maximum; + backlight->cdev.brightness_set_blocking = apple_magic_backlight_led_set; ++ backlight->cdev.flags = LED_CORE_SUSPENDRESUME; + + apple_magic_backlight_set(backlight, 0, 0); +
diff --git a/queue-7.0/ib-core-fix-zero-dmac-race-in-neighbor-resolution.patch b/queue-7.0/ib-core-fix-zero-dmac-race-in-neighbor-resolution.patch new file mode 100644 index 0000000..2029b21 --- /dev/null +++ b/queue-7.0/ib-core-fix-zero-dmac-race-in-neighbor-resolution.patch
@@ -0,0 +1,74 @@ +From 5e6de34d82b49cab9d8a42063e9cd0f22a4f31e5 Mon Sep 17 00:00:00 2001 +From: Chen Zhao <chezhao@nvidia.com> +Date: Sun, 5 Apr 2026 18:44:55 +0300 +Subject: IB/core: Fix zero dmac race in neighbor resolution + +From: Chen Zhao <chezhao@nvidia.com> + +commit 5e6de34d82b49cab9d8a42063e9cd0f22a4f31e5 upstream. + +dst_fetch_ha() checks nud_state without holding the neighbor lock, then +copies ha under the seqlock. A race in __neigh_update() where nud_state +is set to NUD_REACHABLE before ha is written allows dst_fetch_ha() to +read a zero MAC address while the seqlock reports no concurrent writer. + +netevent_callback amplifies this by waking ALL pending addr_req workers +when ANY neighbor becomes NUD_VALID. At scale (N peers resolving ARP +concurrently), the hit probability scales as N^2, making it near-certain +for large RDMA workloads. + +N(A): neigh_update(A) W(A): addr_resolve(A) + | [sleep] + | write_lock_bh(&A->lock) | + | A->nud_state = NUD_REACHABLE | + | // A->ha is still 0 | + | [woken by netevent_cb() of + | another neighbour] + | | dst_fetch_ha(A) + | | A->nud_state & NUD_VALID + | | read_seqbegin(&A->ha_lock) + | | snapshot = A->ha /* 0 */ + | | read_seqretry(&A->ha_lock) + | | return snapshot + | seqlock(&A->ha_lock) + | A->ha = mac_A /* too late */ + | sequnlock(&A->ha_lock) + | write_unlock_bh(&A->lock) + +The incorrect/zero mac is read and programmed in the device QP while it +was not yet updated. This causes silent packet loss and eventual +RETRY_EXC_ERR. + +Fix by holding the neighbor read lock across the nud_state check and +ha copy in dst_fetch_ha(), ensuring it synchronizes with +__neigh_update() which is updating while holding the write lock. 
+ +Cc: stable@vger.kernel.org +Fixes: 92ebb6a0a13a ("IB/cm: Remove now useless rcu_lock in dst_fetch_ha") +Link: https://patch.msgid.link/r/20260405-fix-dmac-race-v1-1-cfa1ec2ce54a@nvidia.com +Signed-off-by: Chen Zhao <chezhao@nvidia.com> +Reviewed-by: Parav Pandit <parav@nvidia.com> +Signed-off-by: Leon Romanovsky <leonro@nvidia.com> +Signed-off-by: Jason Gunthorpe <jgg@nvidia.com> +Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> +--- + drivers/infiniband/core/addr.c | 3 +++ + 1 file changed, 3 insertions(+) + +--- a/drivers/infiniband/core/addr.c ++++ b/drivers/infiniband/core/addr.c +@@ -321,11 +321,14 @@ static int dst_fetch_ha(const struct dst + if (!n) + return -ENODATA; + ++ read_lock_bh(&n->lock); + if (!(n->nud_state & NUD_VALID)) { ++ read_unlock_bh(&n->lock); + neigh_event_send(n, NULL); + ret = -ENODATA; + } else { + neigh_ha_snapshot(dev_addr->dst_dev_addr, n, dst->dev); ++ read_unlock_bh(&n->lock); + } + + neigh_release(n);
diff --git a/queue-7.0/inotify-fix-watch-count-leak-when-fsnotify_add_inode_mark_locked-fails.patch b/queue-7.0/inotify-fix-watch-count-leak-when-fsnotify_add_inode_mark_locked-fails.patch new file mode 100644 index 0000000..00a6e86 --- /dev/null +++ b/queue-7.0/inotify-fix-watch-count-leak-when-fsnotify_add_inode_mark_locked-fails.patch
@@ -0,0 +1,45 @@ +From 6a320935fa4293e9e599ec9f85dc9eb3be7029f8 Mon Sep 17 00:00:00 2001 +From: Chia-Ming Chang <chiamingc@synology.com> +Date: Tue, 24 Feb 2026 17:34:42 +0800 +Subject: inotify: fix watch count leak when fsnotify_add_inode_mark_locked() fails + +From: Chia-Ming Chang <chiamingc@synology.com> + +commit 6a320935fa4293e9e599ec9f85dc9eb3be7029f8 upstream. + +When fsnotify_add_inode_mark_locked() fails in inotify_new_watch(), +the error path calls inotify_remove_from_idr() but does not call +dec_inotify_watches() to undo the preceding inc_inotify_watches(). +This leaks a watch count, and repeated failures can exhaust the +max_user_watches limit with -ENOSPC even when no watches are active. + +Prior to commit 1cce1eea0aff ("inotify: Convert to using per-namespace +limits"), the watch count was incremented after fsnotify_add_mark_locked() +succeeded, so this path was not affected. The conversion moved +inc_inotify_watches() before the mark insertion without adding the +corresponding rollback. + +Add the missing dec_inotify_watches() call in the error path. + +Fixes: 1cce1eea0aff ("inotify: Convert to using per-namespace limits") +Cc: stable@vger.kernel.org +Signed-off-by: Chia-Ming Chang <chiamingc@synology.com> +Signed-off-by: robbieko <robbieko@synology.com> +Reviewed-by: Nikolay Borisov <nik.borisov@suse.com> +Link: https://patch.msgid.link/20260224093442.3076294-1-chiamingc@synology.com +Signed-off-by: Jan Kara <jack@suse.cz> +Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> +--- + fs/notify/inotify/inotify_user.c | 1 + + 1 file changed, 1 insertion(+) + +--- a/fs/notify/inotify/inotify_user.c ++++ b/fs/notify/inotify/inotify_user.c +@@ -621,6 +621,7 @@ static int inotify_new_watch(struct fsno + if (ret) { + /* we failed to get on the inode, get off the idr */ + inotify_remove_from_idr(group, tmp_i_mark); ++ dec_inotify_watches(group->inotify_data.ucounts); + goto out_err; + } +
diff --git a/queue-7.0/ktest-fix-the-month-in-the-name-of-the-failure-directory.patch b/queue-7.0/ktest-fix-the-month-in-the-name-of-the-failure-directory.patch new file mode 100644 index 0000000..041a361 --- /dev/null +++ b/queue-7.0/ktest-fix-the-month-in-the-name-of-the-failure-directory.patch
@@ -0,0 +1,42 @@ +From 768059ede35f197575a38b10797b52402d9d4d2f Mon Sep 17 00:00:00 2001 +From: Steven Rostedt <rostedt@goodmis.org> +Date: Mon, 20 Apr 2026 14:24:26 -0400 +Subject: ktest: Fix the month in the name of the failure directory + +From: Steven Rostedt <rostedt@goodmis.org> + +commit 768059ede35f197575a38b10797b52402d9d4d2f upstream. + +The Perl localtime() function returns the month starting at 0 not 1. This +caused the date produced to create the directory for saving files of a +failed run to have the month off by one. + + machine-test-useconfig-fail-20260314073628 + +The above happened in April, not March. The correct name should have been: + + machine-test-useconfig-fail-20260414073628 + +This was somewhat confusing. + +Cc: stable@vger.kernel.org +Cc: John 'Warthog9' Hawley <warthog9@kernel.org> +Link: https://patch.msgid.link/20260420142426.33ad0293@fedora +Fixes: 7faafbd69639b ("ktest: Add open and close console and start stop monitor") +Signed-off-by: Steven Rostedt <rostedt@goodmis.org> +Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> +--- + tools/testing/ktest/ktest.pl | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/tools/testing/ktest/ktest.pl ++++ b/tools/testing/ktest/ktest.pl +@@ -1815,7 +1815,7 @@ sub save_logs { + my ($result, $basedir) = @_; + my @t = localtime; + my $date = sprintf "%04d%02d%02d%02d%02d%02d", +- 1900+$t[5],$t[4],$t[3],$t[2],$t[1],$t[0]; ++ 1900+$t[5],$t[4]+1,$t[3],$t[2],$t[1],$t[0]; + + my $type = $build_type; + if ($type =~ /useconfig/) {
diff --git a/queue-7.0/md-md-llbitmap-raise-barrier-before-state-machine-transition.patch b/queue-7.0/md-md-llbitmap-raise-barrier-before-state-machine-transition.patch new file mode 100644 index 0000000..2c7d37f --- /dev/null +++ b/queue-7.0/md-md-llbitmap-raise-barrier-before-state-machine-transition.patch
@@ -0,0 +1,56 @@ +From ef4ca3d4bf09716cff9ba00eb0351deadc8417ab Mon Sep 17 00:00:00 2001 +From: Yu Kuai <yukuai@fnnas.com> +Date: Mon, 23 Feb 2026 10:40:35 +0800 +Subject: md/md-llbitmap: raise barrier before state machine transition + +From: Yu Kuai <yukuai@fnnas.com> + +commit ef4ca3d4bf09716cff9ba00eb0351deadc8417ab upstream. + +Move the barrier raise operation before calling llbitmap_state_machine() +in both llbitmap_start_write() and llbitmap_start_discard(). This +ensures the barrier is in place before any state transitions occur, +preventing potential race conditions where the state machine could +complete before the barrier is properly raised. + +Cc: stable@vger.kernel.org +Fixes: 5ab829f1971d ("md/md-llbitmap: introduce new lockless bitmap") +Link: https://lore.kernel.org/linux-raid/20260223024038.3084853-3-yukuai@fnnas.com +Signed-off-by: Yu Kuai <yukuai@fnnas.com> +Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> +--- + drivers/md/md-llbitmap.c | 8 ++++---- + 1 file changed, 4 insertions(+), 4 deletions(-) + +--- a/drivers/md/md-llbitmap.c ++++ b/drivers/md/md-llbitmap.c +@@ -1070,12 +1070,12 @@ static void llbitmap_start_write(struct + int page_start = (start + BITMAP_DATA_OFFSET) >> PAGE_SHIFT; + int page_end = (end + BITMAP_DATA_OFFSET) >> PAGE_SHIFT; + +- llbitmap_state_machine(llbitmap, start, end, BitmapActionStartwrite); +- + while (page_start <= page_end) { + llbitmap_raise_barrier(llbitmap, page_start); + page_start++; + } ++ ++ llbitmap_state_machine(llbitmap, start, end, BitmapActionStartwrite); + } + + static void llbitmap_end_write(struct mddev *mddev, sector_t offset, +@@ -1102,12 +1102,12 @@ static void llbitmap_start_discard(struc + int page_start = (start + BITMAP_DATA_OFFSET) >> PAGE_SHIFT; + int page_end = (end + BITMAP_DATA_OFFSET) >> PAGE_SHIFT; + +- llbitmap_state_machine(llbitmap, start, end, BitmapActionDiscard); +- + while (page_start <= page_end) { + llbitmap_raise_barrier(llbitmap, page_start); + page_start++; + } 
++ ++ llbitmap_state_machine(llbitmap, start, end, BitmapActionDiscard); + } + + static void llbitmap_end_discard(struct mddev *mddev, sector_t offset,
diff --git a/queue-7.0/md-md-llbitmap-skip-reading-rdevs-that-are-not-in_sync.patch b/queue-7.0/md-md-llbitmap-skip-reading-rdevs-that-are-not-in_sync.patch new file mode 100644 index 0000000..760b77e --- /dev/null +++ b/queue-7.0/md-md-llbitmap-skip-reading-rdevs-that-are-not-in_sync.patch
@@ -0,0 +1,44 @@ +From 7701e68b5072faa03a8f30b4081dc16df9092381 Mon Sep 17 00:00:00 2001 +From: Yu Kuai <yukuai@fnnas.com> +Date: Mon, 23 Feb 2026 10:40:34 +0800 +Subject: md/md-llbitmap: skip reading rdevs that are not in_sync + +From: Yu Kuai <yukuai@fnnas.com> + +commit 7701e68b5072faa03a8f30b4081dc16df9092381 upstream. + +When reading bitmap pages from member disks, the code iterates through +all rdevs and attempts to read from the first available one. However, +it only checks for raid_disk assignment and Faulty flag, missing the +In_sync flag check. + +This can cause bitmap data to be read from spare disks that are still +being rebuilt and don't have valid bitmap information yet. Reading +stale or uninitialized bitmap data from such disks can lead to +incorrect dirty bit tracking, potentially causing data corruption +during recovery or normal operation. + +Add the In_sync flag check to ensure bitmap pages are only read from +fully synchronized member disks that have valid bitmap data. + +Cc: stable@vger.kernel.org +Fixes: 5ab829f1971d ("md/md-llbitmap: introduce new lockless bitmap") +Link: https://lore.kernel.org/linux-raid/20260223024038.3084853-2-yukuai@fnnas.com +Signed-off-by: Yu Kuai <yukuai@fnnas.com> +Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> +--- + drivers/md/md-llbitmap.c | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +--- a/drivers/md/md-llbitmap.c ++++ b/drivers/md/md-llbitmap.c +@@ -459,7 +459,8 @@ static struct page *llbitmap_read_page(s + rdev_for_each(rdev, mddev) { + sector_t sector; + +- if (rdev->raid_disk < 0 || test_bit(Faulty, &rdev->flags)) ++ if (rdev->raid_disk < 0 || test_bit(Faulty, &rdev->flags) || ++ !test_bit(In_sync, &rdev->flags)) + continue; + + sector = mddev->bitmap_info.offset +
diff --git a/queue-7.0/md-raid5-fix-soft-lockup-in-retry_aligned_read.patch b/queue-7.0/md-raid5-fix-soft-lockup-in-retry_aligned_read.patch new file mode 100644 index 0000000..b6f56dc --- /dev/null +++ b/queue-7.0/md-raid5-fix-soft-lockup-in-retry_aligned_read.patch
@@ -0,0 +1,53 @@ +From 7f9f7c697474268d9ef9479df3ddfe7cdcfbbffc Mon Sep 17 00:00:00 2001 +From: Chia-Ming Chang <chiamingc@synology.com> +Date: Thu, 2 Apr 2026 14:14:06 +0800 +Subject: md/raid5: fix soft lockup in retry_aligned_read() + +From: Chia-Ming Chang <chiamingc@synology.com> + +commit 7f9f7c697474268d9ef9479df3ddfe7cdcfbbffc upstream. + +When retry_aligned_read() encounters an overlapped stripe, it releases +the stripe via raid5_release_stripe() which puts it on the lockless +released_stripes llist. In the next raid5d loop iteration, +release_stripe_list() drains the stripe onto handle_list (since +STRIPE_HANDLE is set by the original IO), but retry_aligned_read() +runs before handle_active_stripes() and removes the stripe from +handle_list via find_get_stripe() -> list_del_init(). This prevents +handle_stripe() from ever processing the stripe to resolve the +overlap, causing an infinite loop and soft lockup. + +Fix this by using __release_stripe() with temp_inactive_list instead +of raid5_release_stripe() in the failure path, so the stripe does not +go through the released_stripes llist. This allows raid5d to break out +of its loop, and the overlap will be resolved when the stripe is +eventually processed by handle_stripe(). 
+ +Fixes: 773ca82fa1ee ("raid5: make release_stripe lockless") +Cc: stable@vger.kernel.org +Signed-off-by: FengWei Shih <dannyshih@synology.com> +Signed-off-by: Chia-Ming Chang <chiamingc@synology.com> +Link: https://lore.kernel.org/linux-raid/20260402061406.455755-1-chiamingc@synology.com/ +Signed-off-by: Yu Kuai <yukuai@fnnas.com> +Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> +--- + drivers/md/raid5.c | 8 +++++++- + 1 file changed, 7 insertions(+), 1 deletion(-) + +--- a/drivers/md/raid5.c ++++ b/drivers/md/raid5.c +@@ -6641,7 +6641,13 @@ static int retry_aligned_read(struct r5 + } + + if (!add_stripe_bio(sh, raid_bio, dd_idx, 0, 0)) { +- raid5_release_stripe(sh); ++ int hash; ++ ++ spin_lock_irq(&conf->device_lock); ++ hash = sh->hash_lock_index; ++ __release_stripe(conf, sh, ++ &conf->temp_inactive_list[hash]); ++ spin_unlock_irq(&conf->device_lock); + conf->retry_read_aligned = raid_bio; + conf->retry_read_offset = scnt; + return handled;
diff --git a/queue-7.0/md-raid5-validate-payload-size-before-accessing-journal-metadata.patch b/queue-7.0/md-raid5-validate-payload-size-before-accessing-journal-metadata.patch new file mode 100644 index 0000000..b86b335 --- /dev/null +++ b/queue-7.0/md-raid5-validate-payload-size-before-accessing-journal-metadata.patch
@@ -0,0 +1,142 @@ +From b0cc3ae97e893bf54bbce447f4e9fd2e0b88bff9 Mon Sep 17 00:00:00 2001 +From: Junrui Luo <moonafterrain@outlook.com> +Date: Sat, 4 Apr 2026 15:44:35 +0800 +Subject: md/raid5: validate payload size before accessing journal metadata + +From: Junrui Luo <moonafterrain@outlook.com> + +commit b0cc3ae97e893bf54bbce447f4e9fd2e0b88bff9 upstream. + +r5c_recovery_analyze_meta_block() and +r5l_recovery_verify_data_checksum_for_mb() iterate over payloads in a +journal metadata block using on-disk payload size fields without +validating them against the remaining space in the metadata block. + +A corrupted journal contains payload sizes extending beyond the PAGE_SIZE +boundary can cause out-of-bounds reads when accessing payload fields or +computing offsets. + +Add bounds validation for each payload type to ensure the full payload +fits within meta_size before processing. + +Fixes: b4c625c67362 ("md/r5cache: r5cache recovery: part 1") +Cc: stable@vger.kernel.org +Signed-off-by: Junrui Luo <moonafterrain@outlook.com> +Link: https://lore.kernel.org/linux-raid/SYBPR01MB78815E78D829BB86CD7C8015AF5FA@SYBPR01MB7881.ausprd01.prod.outlook.com/ +Signed-off-by: Yu Kuai <yukuai@fnnas.com> +Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> +--- + drivers/md/raid5-cache.c | 48 ++++++++++++++++++++++++++++++++--------------- + 1 file changed, 33 insertions(+), 15 deletions(-) + +--- a/drivers/md/raid5-cache.c ++++ b/drivers/md/raid5-cache.c +@@ -2002,15 +2002,27 @@ r5l_recovery_verify_data_checksum_for_mb + return -ENOMEM; + + while (mb_offset < le32_to_cpu(mb->meta_size)) { ++ sector_t payload_len; ++ + payload = (void *)mb + mb_offset; + payload_flush = (void *)mb + mb_offset; + + if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_DATA) { ++ payload_len = sizeof(struct r5l_payload_data_parity) + ++ (sector_t)sizeof(__le32) * ++ (le32_to_cpu(payload->size) >> (PAGE_SHIFT - 9)); ++ if (mb_offset + payload_len > le32_to_cpu(mb->meta_size)) ++ goto mismatch; 
+ if (r5l_recovery_verify_data_checksum( + log, ctx, page, log_offset, + payload->checksum[0]) < 0) + goto mismatch; + } else if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_PARITY) { ++ payload_len = sizeof(struct r5l_payload_data_parity) + ++ (sector_t)sizeof(__le32) * ++ (le32_to_cpu(payload->size) >> (PAGE_SHIFT - 9)); ++ if (mb_offset + payload_len > le32_to_cpu(mb->meta_size)) ++ goto mismatch; + if (r5l_recovery_verify_data_checksum( + log, ctx, page, log_offset, + payload->checksum[0]) < 0) +@@ -2023,22 +2035,18 @@ r5l_recovery_verify_data_checksum_for_mb + payload->checksum[1]) < 0) + goto mismatch; + } else if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_FLUSH) { +- /* nothing to do for R5LOG_PAYLOAD_FLUSH here */ ++ payload_len = sizeof(struct r5l_payload_flush) + ++ (sector_t)le32_to_cpu(payload_flush->size); ++ if (mb_offset + payload_len > le32_to_cpu(mb->meta_size)) ++ goto mismatch; + } else /* not R5LOG_PAYLOAD_DATA/PARITY/FLUSH */ + goto mismatch; + +- if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_FLUSH) { +- mb_offset += sizeof(struct r5l_payload_flush) + +- le32_to_cpu(payload_flush->size); +- } else { +- /* DATA or PARITY payload */ ++ if (le16_to_cpu(payload->header.type) != R5LOG_PAYLOAD_FLUSH) { + log_offset = r5l_ring_add(log, log_offset, + le32_to_cpu(payload->size)); +- mb_offset += sizeof(struct r5l_payload_data_parity) + +- sizeof(__le32) * +- (le32_to_cpu(payload->size) >> (PAGE_SHIFT - 9)); + } +- ++ mb_offset += payload_len; + } + + put_page(page); +@@ -2089,6 +2097,7 @@ r5c_recovery_analyze_meta_block(struct r + log_offset = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS); + + while (mb_offset < le32_to_cpu(mb->meta_size)) { ++ sector_t payload_len; + int dd; + + payload = (void *)mb + mb_offset; +@@ -2097,6 +2106,12 @@ r5c_recovery_analyze_meta_block(struct r + if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_FLUSH) { + int i, count; + ++ payload_len = sizeof(struct r5l_payload_flush) + ++ 
(sector_t)le32_to_cpu(payload_flush->size); ++ if (mb_offset + payload_len > ++ le32_to_cpu(mb->meta_size)) ++ return -EINVAL; ++ + count = le32_to_cpu(payload_flush->size) / sizeof(__le64); + for (i = 0; i < count; ++i) { + stripe_sect = le64_to_cpu(payload_flush->flush_stripes[i]); +@@ -2110,12 +2125,17 @@ r5c_recovery_analyze_meta_block(struct r + } + } + +- mb_offset += sizeof(struct r5l_payload_flush) + +- le32_to_cpu(payload_flush->size); ++ mb_offset += payload_len; + continue; + } + + /* DATA or PARITY payload */ ++ payload_len = sizeof(struct r5l_payload_data_parity) + ++ (sector_t)sizeof(__le32) * ++ (le32_to_cpu(payload->size) >> (PAGE_SHIFT - 9)); ++ if (mb_offset + payload_len > le32_to_cpu(mb->meta_size)) ++ return -EINVAL; ++ + stripe_sect = (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_DATA) ? + raid5_compute_sector( + conf, le64_to_cpu(payload->location), 0, &dd, +@@ -2180,9 +2200,7 @@ r5c_recovery_analyze_meta_block(struct r + log_offset = r5l_ring_add(log, log_offset, + le32_to_cpu(payload->size)); + +- mb_offset += sizeof(struct r5l_payload_data_parity) + +- sizeof(__le32) * +- (le32_to_cpu(payload->size) >> (PAGE_SHIFT - 9)); ++ mb_offset += payload_len; + } + + return 0;
diff --git a/queue-7.0/mfd-core-preserve-of-node-when-acpi-handle-is-present.patch b/queue-7.0/mfd-core-preserve-of-node-when-acpi-handle-is-present.patch new file mode 100644 index 0000000..7b14806 --- /dev/null +++ b/queue-7.0/mfd-core-preserve-of-node-when-acpi-handle-is-present.patch
@@ -0,0 +1,47 @@ +From caa5a5d44d8ae4fd13b744857d66c9313b712d1f Mon Sep 17 00:00:00 2001 +From: Brian Mak <makb@juniper.net> +Date: Wed, 25 Mar 2026 15:30:24 -0700 +Subject: mfd: core: Preserve OF node when ACPI handle is present + +From: Brian Mak <makb@juniper.net> + +commit caa5a5d44d8ae4fd13b744857d66c9313b712d1f upstream. + +Switch device_set_node to set_primary_fwnode, so that the ACPI fwnode +does not overwrite the of_node with NULL. + +This allows MFD children with both OF nodes and ACPI handles to have OF +nodes again. + +Cc: stable@vger.kernel.org +Fixes: 51e3b257099d ("mfd: core: Make use of device_set_node()") +Signed-off-by: Brian Mak <makb@juniper.net> +Reviewed-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com> +Link: https://patch.msgid.link/20260325223024.35992-1-makb@juniper.net +Signed-off-by: Lee Jones <lee@kernel.org> +Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> +--- + drivers/mfd/mfd-core.c | 12 +++++++++++- + 1 file changed, 11 insertions(+), 1 deletion(-) + +--- a/drivers/mfd/mfd-core.c ++++ b/drivers/mfd/mfd-core.c +@@ -88,7 +88,17 @@ static void mfd_acpi_add_device(const st + } + } + +- device_set_node(&pdev->dev, acpi_fwnode_handle(adev ?: parent)); ++ /* ++ * NOTE: The fwnode design doesn't allow proper stacking/sharing. This ++ * should eventually turn into a device fwnode API call that will allow ++ * prepending to a list of fwnodes (with ACPI taking precedence). ++ * ++ * set_primary_fwnode() is used here, instead of device_set_node(), as ++ * device_set_node() will overwrite the existing fwnode, which may be an ++ * OF node that was populated earlier. To support a use case where ACPI ++ * and OF is used in conjunction, we call set_primary_fwnode() instead. ++ */ ++ set_primary_fwnode(&pdev->dev, acpi_fwnode_handle(adev ?: parent)); + } + #else + static inline void mfd_acpi_add_device(const struct mfd_cell *cell,
diff --git a/queue-7.0/mm-swap-speed-up-hibernation-allocation-and-writeout.patch b/queue-7.0/mm-swap-speed-up-hibernation-allocation-and-writeout.patch new file mode 100644 index 0000000..5971f37 --- /dev/null +++ b/queue-7.0/mm-swap-speed-up-hibernation-allocation-and-writeout.patch
@@ -0,0 +1,79 @@ +From 396f57b5720024638dbb503f6a4abd988a49d815 Mon Sep 17 00:00:00 2001 +From: Kairui Song <kasong@tencent.com> +Date: Mon, 16 Feb 2026 22:58:02 +0800 +Subject: mm, swap: speed up hibernation allocation and writeout + +From: Kairui Song <kasong@tencent.com> + +commit 396f57b5720024638dbb503f6a4abd988a49d815 upstream. + +Since commit 0ff67f990bd4 ("mm, swap: remove swap slot cache"), +hibernation has been using the swap slot slow allocation path for +simplification, which turns out might cause regression for some devices +because the allocator now rotates clusters too often, leading to slower +allocation and more random distribution of data. + +Fast allocation is not complex, so implement hibernation support as well. + +Test result with Samsung SSD 830 Series (SATA II, 3.0 Gbps) shows the +performance is several times better [1]: +6.19: 324 seconds +After this series: 35 seconds + +Link: https://lkml.kernel.org/r/20260216-hibernate-perf-v4-1-1ba9f0bf1ec9@tencent.com +Link: https://lore.kernel.org/linux-mm/8b4bdcfa-ce3f-4e23-839f-31367df7c18f@gmx.de/ [1] +Signed-off-by: Kairui Song <kasong@tencent.com> +Fixes: 0ff67f990bd4 ("mm, swap: remove swap slot cache") +Reported-by: Carsten Grohmann <mail@carstengrohmann.de> +Closes: https://lore.kernel.org/linux-mm/20260206121151.dea3633d1f0ded7bbf49c22e@linux-foundation.org/ +Cc: Baoquan He <bhe@redhat.com> +Cc: Barry Song <baohua@kernel.org> +Cc: Chris Li <chrisl@kernel.org> +Cc: Kemeng Shi <shikemeng@huaweicloud.com> +Cc: Nhat Pham <nphamcs@gmail.com> +Cc: <stable@vger.kernel.org> +Signed-off-by: Andrew Morton <akpm@linux-foundation.org> +Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> +--- + mm/swapfile.c | 21 ++++++++++++++++----- + 1 file changed, 16 insertions(+), 5 deletions(-) + +--- a/mm/swapfile.c ++++ b/mm/swapfile.c +@@ -1926,8 +1926,9 @@ out: + /* Allocate a slot for hibernation */ + swp_entry_t swap_alloc_hibernation_slot(int type) + { +- struct swap_info_struct *si = 
swap_type_to_info(type); +- unsigned long offset; ++ struct swap_info_struct *pcp_si, *si = swap_type_to_info(type); ++ unsigned long pcp_offset, offset = SWAP_ENTRY_INVALID; ++ struct swap_cluster_info *ci; + swp_entry_t entry = {0}; + + if (!si) +@@ -1937,11 +1938,21 @@ swp_entry_t swap_alloc_hibernation_slot( + if (get_swap_device_info(si)) { + if (si->flags & SWP_WRITEOK) { + /* +- * Grab the local lock to be compliant +- * with swap table allocation. ++ * Try the local cluster first if it matches the device. If ++ * not, try grab a new cluster and override local cluster. + */ + local_lock(&percpu_swap_cluster.lock); +- offset = cluster_alloc_swap_entry(si, NULL); ++ pcp_si = this_cpu_read(percpu_swap_cluster.si[0]); ++ pcp_offset = this_cpu_read(percpu_swap_cluster.offset[0]); ++ if (pcp_si == si && pcp_offset) { ++ ci = swap_cluster_lock(si, pcp_offset); ++ if (cluster_is_usable(ci, 0)) ++ offset = alloc_swap_scan_cluster(si, ci, NULL, pcp_offset); ++ else ++ swap_cluster_unlock(ci); ++ } ++ if (!offset) ++ offset = cluster_alloc_swap_entry(si, NULL); + local_unlock(&percpu_swap_cluster.lock); + if (offset) + entry = swp_entry(si->type, offset);
diff --git a/queue-7.0/mptcp-sync-the-msk-sndbuf-at-accept-time.patch b/queue-7.0/mptcp-sync-the-msk-sndbuf-at-accept-time.patch new file mode 100644 index 0000000..c3af1bd --- /dev/null +++ b/queue-7.0/mptcp-sync-the-msk-sndbuf-at-accept-time.patch
@@ -0,0 +1,62 @@ +From fcf04b14334641f4b0b8647824480935e9416d52 Mon Sep 17 00:00:00 2001 +From: Gang Yan <yangang@kylinos.cn> +Date: Mon, 20 Apr 2026 18:19:23 +0200 +Subject: mptcp: sync the msk->sndbuf at accept() time + +From: Gang Yan <yangang@kylinos.cn> + +commit fcf04b14334641f4b0b8647824480935e9416d52 upstream. + +On passive MPTCP connections, the msk sndbuf is not updated correctly. + +The root cause is an order issue in the accept path: + +- tcp_check_req() -> subflow_syn_recv_sock() -> mptcp_sk_clone_init() + calls __mptcp_propagate_sndbuf() to copy the ssk sndbuf into msk + +- Later, tcp_child_process() -> tcp_init_transfer() -> + tcp_sndbuf_expand() grows the ssk sndbuf. + +So __mptcp_propagate_sndbuf() runs before the ssk sndbuf has been +expanded and the msk ends up with a much smaller sndbuf than the +subflow: + + MPTCP: msk->sndbuf:20480, msk->first->sndbuf:2626560 + +Fix this by moving the __mptcp_propagate_sndbuf() call from +mptcp_sk_clone_init() -- the ssk sndbuf is not yet finalized there -- to +mptcp_stream_accept() at accept() time, when the ssk sndbuf has +been fully expanded by tcp_sndbuf_expand(). 
+ +Fixes: 8005184fd1ca ("mptcp: refactor sndbuf auto-tuning") +Cc: stable@vger.kernel.org +Closes: https://github.com/multipath-tcp/mptcp_net-next/issues/602 +Signed-off-by: Gang Yan <yangang@kylinos.cn> +Acked-by: Paolo Abeni <pabeni@redhat.com> +Reviewed-by: Matthieu Baerts (NGI0) <matttbe@kernel.org> +Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org> +Link: https://patch.msgid.link/20260420-net-mptcp-sync-sndbuf-accept-v1-1-e3523e3aeb44@kernel.org +Signed-off-by: Paolo Abeni <pabeni@redhat.com> +Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> +--- + net/mptcp/protocol.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/net/mptcp/protocol.c ++++ b/net/mptcp/protocol.c +@@ -3570,7 +3570,6 @@ struct sock *mptcp_sk_clone_init(const s + * uses the correct data + */ + mptcp_copy_inaddrs(nsk, ssk); +- __mptcp_propagate_sndbuf(nsk, ssk); + + mptcp_rcv_space_init(msk, ssk); + msk->rcvq_space.time = mptcp_stamp(); +@@ -4228,6 +4227,7 @@ static int mptcp_stream_accept(struct so + + mptcp_graft_subflows(newsk); + mptcp_rps_record_subflows(msk); ++ __mptcp_propagate_sndbuf(newsk, mptcp_subflow_tcp_sock(subflow)); + + /* Do late cleanup for the first subflow as necessary. Also + * deal with bad peers not doing a complete shutdown.
diff --git a/queue-7.0/mtd-spi-nor-sst-fix-write-enable-before-aai-sequence.patch b/queue-7.0/mtd-spi-nor-sst-fix-write-enable-before-aai-sequence.patch new file mode 100644 index 0000000..8d42e64 --- /dev/null +++ b/queue-7.0/mtd-spi-nor-sst-fix-write-enable-before-aai-sequence.patch
@@ -0,0 +1,61 @@ +From a0f64241d3566a49c0a9b33ba7ae458ae22003a9 Mon Sep 17 00:00:00 2001 +From: Sanjaikumar V S <sanjaikumar.vs@dicortech.com> +Date: Wed, 11 Mar 2026 10:30:56 +0000 +Subject: mtd: spi-nor: sst: Fix write enable before AAI sequence + +From: Sanjaikumar V S <sanjaikumar.vs@dicortech.com> + +commit a0f64241d3566a49c0a9b33ba7ae458ae22003a9 upstream. + +When writing to SST flash starting at an odd address, a single byte is +first programmed using the byte program (BP) command. After this +operation completes, the flash hardware automatically clears the Write +Enable Latch (WEL) bit. + +If an AAI (Auto Address Increment) word program sequence follows, it +requires WEL to be set. Without re-enabling writes, the AAI sequence +fails. + +Add spi_nor_write_enable() after the odd-address byte program when more +data needs to be written. Use a local boolean for clarity. + +Fixes: b199489d37b2 ("mtd: spi-nor: add the framework for SPI NOR") +Cc: stable@vger.kernel.org +Signed-off-by: Sanjaikumar V S <sanjaikumar.vs@dicortech.com> +Tested-by: Hendrik Donner <hd@os-cillation.de> +Reviewed-by: Hendrik Donner <hd@os-cillation.de> +Signed-off-by: Pratyush Yadav (Google) <pratyush@kernel.org> +Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> +--- + drivers/mtd/spi-nor/sst.c | 13 +++++++++++++ + 1 file changed, 13 insertions(+) + +--- a/drivers/mtd/spi-nor/sst.c ++++ b/drivers/mtd/spi-nor/sst.c +@@ -203,6 +203,8 @@ static int sst_nor_write(struct mtd_info + + /* Start write from odd address. */ + if (to % 2) { ++ bool needs_write_enable = (len > 1); ++ + /* write one byte. */ + ret = sst_nor_write_data(nor, to, 1, buf); + if (ret < 0) +@@ -210,6 +212,17 @@ static int sst_nor_write(struct mtd_info + + to++; + actual++; ++ ++ /* ++ * Byte program clears the write enable latch. If more ++ * data needs to be written using the AAI sequence, ++ * re-enable writes. 
++ */ ++ if (needs_write_enable) { ++ ret = spi_nor_write_enable(nor); ++ if (ret) ++ goto out; ++ } + } + + /* Write out most of the data here. */
diff --git a/queue-7.0/mtd-spinand-winbond-declare-the-qe-bit-on-w25nxxjw.patch b/queue-7.0/mtd-spinand-winbond-declare-the-qe-bit-on-w25nxxjw.patch new file mode 100644 index 0000000..69928cd --- /dev/null +++ b/queue-7.0/mtd-spinand-winbond-declare-the-qe-bit-on-w25nxxjw.patch
@@ -0,0 +1,41 @@ +From 7866ce992cf0d3c3b50fe8bf4acb1dbb173a2304 Mon Sep 17 00:00:00 2001 +From: Miquel Raynal <miquel.raynal@bootlin.com> +Date: Wed, 25 Mar 2026 18:04:50 +0100 +Subject: mtd: spinand: winbond: Declare the QE bit on W25NxxJW + +From: Miquel Raynal <miquel.raynal@bootlin.com> + +commit 7866ce992cf0d3c3b50fe8bf4acb1dbb173a2304 upstream. + +Factory default for this bit is "set" (at least on the chips I have), +but we must make sure it is actually set by Linux explicitly, as the +bit is writable by an earlier stage. + +Fixes: 6a804fb72de5 ("mtd: spinand: winbond: add support for serial NAND flash") +Cc: stable@vger.kernel.org +Signed-off-by: Miquel Raynal <miquel.raynal@bootlin.com> +Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> +--- + drivers/mtd/nand/spi/winbond.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +--- a/drivers/mtd/nand/spi/winbond.c ++++ b/drivers/mtd/nand/spi/winbond.c +@@ -485,7 +485,7 @@ static const struct spinand_info winbond + SPINAND_INFO_OP_VARIANTS(&read_cache_dual_quad_dtr_variants, + &write_cache_variants, + &update_cache_variants), +- 0, ++ SPINAND_HAS_QE_BIT, + SPINAND_ECCINFO(&w25n01jw_ooblayout, NULL), + SPINAND_CONFIGURE_CHIP(w25n0xjw_hs_cfg)), + SPINAND_INFO("W25N01KV", /* 3.3V */ +@@ -549,7 +549,7 @@ static const struct spinand_info winbond + SPINAND_INFO_OP_VARIANTS(&read_cache_dual_quad_dtr_variants, + &write_cache_variants, + &update_cache_variants), +- 0, ++ SPINAND_HAS_QE_BIT, + SPINAND_ECCINFO(&w25m02gv_ooblayout, NULL), + SPINAND_CONFIGURE_CHIP(w25n0xjw_hs_cfg)), + SPINAND_INFO("W25N02KV", /* 3.3V */
diff --git a/queue-7.0/nfsv4.1-apply-session-size-limits-on-clone-path.patch b/queue-7.0/nfsv4.1-apply-session-size-limits-on-clone-path.patch new file mode 100644 index 0000000..0f6fcea --- /dev/null +++ b/queue-7.0/nfsv4.1-apply-session-size-limits-on-clone-path.patch
@@ -0,0 +1,74 @@ +From 8c787b286f39c7584440b97b92f87cbe934c13ff Mon Sep 17 00:00:00 2001 +From: Tushar Sariya <tushar.97@hotmail.com> +Date: Sat, 4 Apr 2026 11:58:03 -0230 +Subject: NFSv4.1: Apply session size limits on clone path + +From: Tushar Sariya <tushar.97@hotmail.com> + +commit 8c787b286f39c7584440b97b92f87cbe934c13ff upstream. + +nfs4_clone_server() builds a child nfs_server for same-server +automounted submounts but never calls nfs4_session_limit_rwsize() +or nfs4_session_limit_xasize() after nfs_clone_server(). This means +the child mount can end up with rsize/wsize values that exceed the +negotiated session channel limits, causing NFS4ERR_REQ_TOO_BIG and +EIO on servers that enforce tight max_request_size budgets. + +Top-level mounts go through nfs4_server_common_setup() which calls +these limiters after nfs_probe_server(). Apply the same clamping on +the clone path for consistency. + +Fixes: 2b092175f5e3 ("NFS: Fix inheritance of the block sizes when automounting") +Cc: stable@vger.kernel.org +Signed-off-by: Tushar Sariya <tushar.97@hotmail.com> +Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com> +Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> +--- + fs/nfs/internal.h | 2 ++ + fs/nfs/nfs4client.c | 4 ++-- + fs/nfs/nfs4proc.c | 3 +++ + 3 files changed, 7 insertions(+), 2 deletions(-) + +--- a/fs/nfs/internal.h ++++ b/fs/nfs/internal.h +@@ -253,6 +253,8 @@ extern struct nfs_client *nfs4_set_ds_cl + u32 minor_version); + extern struct rpc_clnt *nfs4_find_or_create_ds_client(struct nfs_client *, + struct inode *); ++extern void nfs4_session_limit_rwsize(struct nfs_server *server); ++extern void nfs4_session_limit_xasize(struct nfs_server *server); + extern struct nfs_client *nfs3_set_ds_client(struct nfs_server *mds_srv, + const struct sockaddr_storage *ds_addr, int ds_addrlen, + int ds_proto, unsigned int ds_timeo, +--- a/fs/nfs/nfs4client.c ++++ b/fs/nfs/nfs4client.c +@@ -855,7 +855,7 @@ 
EXPORT_SYMBOL_GPL(nfs4_set_ds_client); + * Limit the mount rsize, wsize and dtsize using negotiated fore + * channel attributes. + */ +-static void nfs4_session_limit_rwsize(struct nfs_server *server) ++void nfs4_session_limit_rwsize(struct nfs_server *server) + { + struct nfs4_session *sess; + u32 server_resp_sz; +@@ -878,7 +878,7 @@ static void nfs4_session_limit_rwsize(st + /* + * Limit xattr sizes using the channel attributes. + */ +-static void nfs4_session_limit_xasize(struct nfs_server *server) ++void nfs4_session_limit_xasize(struct nfs_server *server) + { + #ifdef CONFIG_NFS_V4_2 + struct nfs4_session *sess; +--- a/fs/nfs/nfs4proc.c ++++ b/fs/nfs/nfs4proc.c +@@ -10618,6 +10618,9 @@ static struct nfs_server *nfs4_clone_ser + if (IS_ERR(server)) + return server; + ++ nfs4_session_limit_rwsize(server); ++ nfs4_session_limit_xasize(server); ++ + error = nfs4_delegation_hash_alloc(server); + if (error) { + nfs_free_server(server);
diff --git a/queue-7.0/ntfs3-add-buffer-boundary-checks-to-run_unpack.patch b/queue-7.0/ntfs3-add-buffer-boundary-checks-to-run_unpack.patch new file mode 100644 index 0000000..dfff5fd --- /dev/null +++ b/queue-7.0/ntfs3-add-buffer-boundary-checks-to-run_unpack.patch
@@ -0,0 +1,51 @@ +From b62567bca47408e6739dee75f02a2113548af875 Mon Sep 17 00:00:00 2001 +From: Tobias Gaertner <tob.gaertner@me.com> +Date: Sun, 29 Mar 2026 04:17:02 -0700 +Subject: ntfs3: add buffer boundary checks to run_unpack() + +From: Tobias Gaertner <tob.gaertner@me.com> + +commit b62567bca47408e6739dee75f02a2113548af875 upstream. + +run_unpack() checks `run_buf < run_last` at the top of the while loop +but then reads size_size and offset_size bytes via run_unpack_s64() +without verifying they fit within the remaining buffer. A crafted NTFS +image with truncated run data in an MFT attribute triggers an OOB heap +read of up to 15 bytes when the filesystem is mounted. + +Add boundary checks before each run_unpack_s64() call to ensure the +declared field size does not exceed the remaining buffer. + +Found by fuzzing with a source-patched harness (LibAFL + QEMU). + +Fixes: 82cae269cfa95 ("fs/ntfs3: Add initialization of super block") +Cc: stable@vger.kernel.org +Signed-off-by: Tobias Gaertner <tob.gaertner@me.com> +Signed-off-by: Konstantin Komarov <almaz.alexandrovich@paragon-software.com> +Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> +--- + fs/ntfs3/run.c | 6 ++++++ + 1 file changed, 6 insertions(+) + +--- a/fs/ntfs3/run.c ++++ b/fs/ntfs3/run.c +@@ -1008,6 +1008,9 @@ int run_unpack(struct runs_tree *run, st + if (size_size > sizeof(len)) + return -EINVAL; + ++ if (run_buf + size_size > run_last) ++ return -EINVAL; ++ + len = run_unpack_s64(run_buf, size_size, 0); + /* Skip size_size. */ + run_buf += size_size; +@@ -1020,6 +1023,9 @@ int run_unpack(struct runs_tree *run, st + else if (offset_size <= sizeof(s64)) { + s64 dlcn; + ++ if (run_buf + offset_size > run_last) ++ return -EINVAL; ++ + /* Initial value of dlcn is -1 or 0. */ + dlcn = (run_buf[offset_size - 1] & 0x80) ? (s64)-1 : 0; + dlcn = run_unpack_s64(run_buf, offset_size, dlcn);
diff --git a/queue-7.0/ntfs3-fix-integer-overflow-in-run_unpack-volume-boundary-check.patch b/queue-7.0/ntfs3-fix-integer-overflow-in-run_unpack-volume-boundary-check.patch new file mode 100644 index 0000000..a61de7a --- /dev/null +++ b/queue-7.0/ntfs3-fix-integer-overflow-in-run_unpack-volume-boundary-check.patch
@@ -0,0 +1,47 @@ +From 984a415f019536ea2d24de9010744e5302a9a948 Mon Sep 17 00:00:00 2001 +From: Tobias Gaertner <tob.gaertner@me.com> +Date: Sun, 29 Mar 2026 04:17:03 -0700 +Subject: ntfs3: fix integer overflow in run_unpack() volume boundary check + +From: Tobias Gaertner <tob.gaertner@me.com> + +commit 984a415f019536ea2d24de9010744e5302a9a948 upstream. + +The volume boundary check `lcn + len > sbi->used.bitmap.nbits` uses raw +addition which can wrap around for large lcn and len values, bypassing +the validation. Use check_add_overflow() as is already done for the +adjacent prev_lcn + dlcn and vcn64 + len checks added by commit +3ac37e100385 ("ntfs3: Fix integer overflow in run_unpack()"). + +Found by fuzzing with a source-patched harness (LibAFL + QEMU). + +Fixes: 82cae269cfa95 ("fs/ntfs3: Add initialization of super block") +Cc: stable@vger.kernel.org +Signed-off-by: Tobias Gaertner <tob.gaertner@me.com> +Signed-off-by: Konstantin Komarov <almaz.alexandrovich@paragon-software.com> +Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> +--- + fs/ntfs3/run.c | 12 +++++++++--- + 1 file changed, 9 insertions(+), 3 deletions(-) + +--- a/fs/ntfs3/run.c ++++ b/fs/ntfs3/run.c +@@ -1065,9 +1065,15 @@ int run_unpack(struct runs_tree *run, st + return -EOPNOTSUPP; + } + #endif +- if (lcn != SPARSE_LCN64 && lcn + len > sbi->used.bitmap.nbits) { +- /* LCN range is out of volume. */ +- return -EINVAL; ++ if (lcn != SPARSE_LCN64) { ++ u64 lcn_end; ++ ++ if (check_add_overflow(lcn, len, &lcn_end)) ++ return -EINVAL; ++ if (lcn_end > sbi->used.bitmap.nbits) { ++ /* LCN range is out of volume. */ ++ return -EINVAL; ++ } + } + + if (!run)
diff --git a/queue-7.0/ring-buffer-do-not-double-count-the-reader_page.patch b/queue-7.0/ring-buffer-do-not-double-count-the-reader_page.patch new file mode 100644 index 0000000..719bbb0 --- /dev/null +++ b/queue-7.0/ring-buffer-do-not-double-count-the-reader_page.patch
@@ -0,0 +1,71 @@ +From 92d5a606721f759ebebf448b3bd2b7a781d50bd0 Mon Sep 17 00:00:00 2001 +From: "Masami Hiramatsu (Google)" <mhiramat@kernel.org> +Date: Fri, 24 Apr 2026 15:52:10 +0900 +Subject: ring-buffer: Do not double count the reader_page + +From: Masami Hiramatsu (Google) <mhiramat@kernel.org> + +commit 92d5a606721f759ebebf448b3bd2b7a781d50bd0 upstream. + +Since the cpu_buffer->reader_page is updated if there are unwound +pages. After that update, we should skip the page if it is the +original reader_page, because the original reader_page is already +checked. + +Cc: stable@vger.kernel.org +Cc: Catalin Marinas <catalin.marinas@arm.com> +Cc: Will Deacon <will@kernel.org> +Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com> +Cc: Ian Rogers <irogers@google.com> +Link: https://patch.msgid.link/177701353063.2223789.1471163147644103306.stgit@mhiramat.tok.corp.google.com +Fixes: ca296d32ece3 ("tracing: ring_buffer: Rewind persistent ring buffer on reboot") +Signed-off-by: Masami Hiramatsu (Google) <mhiramat@kernel.org> +Signed-off-by: Steven Rostedt <rostedt@goodmis.org> +Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> +--- + kernel/trace/ring_buffer.c | 13 +++++++------ + 1 file changed, 7 insertions(+), 6 deletions(-) + +--- a/kernel/trace/ring_buffer.c ++++ b/kernel/trace/ring_buffer.c +@@ -1913,7 +1913,7 @@ static int rb_validate_buffer(struct buf + static void rb_meta_validate_events(struct ring_buffer_per_cpu *cpu_buffer) + { + struct ring_buffer_cpu_meta *meta = cpu_buffer->ring_meta; +- struct buffer_page *head_page, *orig_head; ++ struct buffer_page *head_page, *orig_head, *orig_reader; + unsigned long entry_bytes = 0; + unsigned long entries = 0; + int ret; +@@ -1924,16 +1924,17 @@ static void rb_meta_validate_events(stru + return; + + orig_head = head_page = cpu_buffer->head_page; ++ orig_reader = cpu_buffer->reader_page; + + /* Do the reader page first */ +- ret = rb_validate_buffer(cpu_buffer->reader_page->page, cpu_buffer->cpu); ++ ret 
= rb_validate_buffer(orig_reader->page, cpu_buffer->cpu); + if (ret < 0) { + pr_info("Ring buffer reader page is invalid\n"); + goto invalid; + } + entries += ret; +- entry_bytes += local_read(&cpu_buffer->reader_page->page->commit); +- local_set(&cpu_buffer->reader_page->entries, ret); ++ entry_bytes += local_read(&orig_reader->page->commit); ++ local_set(&orig_reader->entries, ret); + + ts = head_page->page->time_stamp; + +@@ -2036,8 +2037,8 @@ static void rb_meta_validate_events(stru + /* Iterate until finding the commit page */ + for (i = 0; i < meta->nr_subbufs + 1; i++, rb_inc_page(&head_page)) { + +- /* Reader page has already been done */ +- if (head_page == cpu_buffer->reader_page) ++ /* The original reader page has already been checked/counted. */ ++ if (head_page == orig_reader) + continue; + + ret = rb_validate_buffer(head_page->page, cpu_buffer->cpu);
diff --git a/queue-7.0/rtmutex-use-waiter-task-instead-of-current-in-remove_waiter.patch b/queue-7.0/rtmutex-use-waiter-task-instead-of-current-in-remove_waiter.patch new file mode 100644 index 0000000..2de3fde --- /dev/null +++ b/queue-7.0/rtmutex-use-waiter-task-instead-of-current-in-remove_waiter.patch
@@ -0,0 +1,83 @@ +From 3bfdc63936dd4773109b7b8c280c0f3b5ae7d349 Mon Sep 17 00:00:00 2001 +From: Keenan Dong <keenanat2000@gmail.com> +Date: Wed, 8 Apr 2026 16:46:00 +0800 +Subject: rtmutex: Use waiter::task instead of current in remove_waiter() + +From: Keenan Dong <keenanat2000@gmail.com> + +commit 3bfdc63936dd4773109b7b8c280c0f3b5ae7d349 upstream. + +remove_waiter() is used by the slowlock paths, but it is also used for +proxy-lock rollback in rt_mutex_start_proxy_lock() when invoked from +futex_requeue(). + +In the latter case waiter::task is not current, but remove_waiter() +operates on current for the dequeue operation. That results in several +problems: + + 1) the rbtree dequeue happens without waiter::task::pi_lock being held + + 2) the waiter task's pi_blocked_on state is not cleared, which leaves a + dangling pointer primed for UAF around. + + 3) rt_mutex_adjust_prio_chain() operates on the wrong top priority waiter + task + +Use waiter::task instead of current in all related operations in +remove_waiter() to cure those problems. + +[ tglx: Fixup rt_mutex_adjust_prio_chain(), add a comment and amend the + changelog ] + +Fixes: 8161239a8bcc ("rtmutex: Simplify PI algorithm and make highest prio task get lock") +Reported-by: Yuan Tan <yuantan098@gmail.com> +Reported-by: Yifan Wu <yifanwucs@gmail.com> +Reported-by: Juefei Pu <tomapufckgml@gmail.com> +Reported-by: Xin Liu <bird@lzu.edu.cn> +Signed-off-by: Keenan Dong <keenanat2000@gmail.com> +Signed-off-by: Thomas Gleixner <tglx@kernel.org> +Cc: stable@vger.kernel.org +Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> +--- + kernel/locking/rtmutex.c | 13 ++++++++----- + 1 file changed, 8 insertions(+), 5 deletions(-) + +--- a/kernel/locking/rtmutex.c ++++ b/kernel/locking/rtmutex.c +@@ -1535,20 +1535,23 @@ static bool rtmutex_spin_on_owner(struct + * + * Must be called with lock->wait_lock held and interrupts disabled. It must + * have just failed to try_to_take_rt_mutex(). 
++ * ++ * When invoked from rt_mutex_start_proxy_lock() waiter::task != current ! + */ + static void __sched remove_waiter(struct rt_mutex_base *lock, + struct rt_mutex_waiter *waiter) + { + bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock)); + struct task_struct *owner = rt_mutex_owner(lock); ++ struct task_struct *waiter_task = waiter->task; + struct rt_mutex_base *next_lock; + + lockdep_assert_held(&lock->wait_lock); + +- raw_spin_lock(¤t->pi_lock); +- rt_mutex_dequeue(lock, waiter); +- current->pi_blocked_on = NULL; +- raw_spin_unlock(¤t->pi_lock); ++ scoped_guard(raw_spinlock, &waiter_task->pi_lock) { ++ rt_mutex_dequeue(lock, waiter); ++ waiter_task->pi_blocked_on = NULL; ++ } + + /* + * Only update priority if the waiter was the highest priority +@@ -1584,7 +1587,7 @@ static void __sched remove_waiter(struct + raw_spin_unlock_irq(&lock->wait_lock); + + rt_mutex_adjust_prio_chain(owner, RT_MUTEX_MIN_CHAINWALK, lock, +- next_lock, NULL, current); ++ next_lock, NULL, waiter_task); + + raw_spin_lock_irq(&lock->wait_lock); + }
diff --git a/queue-7.0/rxgk-fix-potential-integer-overflow-in-length-check.patch b/queue-7.0/rxgk-fix-potential-integer-overflow-in-length-check.patch new file mode 100644 index 0000000..5f8a2c8 --- /dev/null +++ b/queue-7.0/rxgk-fix-potential-integer-overflow-in-length-check.patch
@@ -0,0 +1,50 @@ +From 6929350080f4da292d111a3b33e53138fee51cec Mon Sep 17 00:00:00 2001 +From: David Howells <dhowells@redhat.com> +Date: Wed, 22 Apr 2026 17:14:34 +0100 +Subject: rxgk: Fix potential integer overflow in length check + +From: David Howells <dhowells@redhat.com> + +commit 6929350080f4da292d111a3b33e53138fee51cec upstream. + +Fix potential integer overflow in rxgk_extract_token() when checking the +length of the ticket. Rather than rounding up the value to be tested +(which might overflow), round down the size of the available data. + +Fixes: 2429a1976481 ("rxrpc: Fix untrusted unsigned subtract") +Closes: https://sashiko.dev/#/patchset/20260408121252.2249051-1-dhowells%40redhat.com +Signed-off-by: David Howells <dhowells@redhat.com> +cc: Marc Dionne <marc.dionne@auristor.com> +cc: Jeffrey Altman <jaltman@auristor.com> +cc: Simon Horman <horms@kernel.org> +cc: linux-afs@lists.infradead.org +cc: stable@kernel.org +Link: https://patch.msgid.link/20260422161438.2593376-6-dhowells@redhat.com +Signed-off-by: Jakub Kicinski <kuba@kernel.org> +Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> +--- + net/rxrpc/rxgk_app.c | 2 +- + net/rxrpc/rxgk_common.h | 1 + + 2 files changed, 2 insertions(+), 1 deletion(-) + +--- a/net/rxrpc/rxgk_app.c ++++ b/net/rxrpc/rxgk_app.c +@@ -214,7 +214,7 @@ int rxgk_extract_token(struct rxrpc_conn + ticket_len = ntohl(container.token_len); + ticket_offset = token_offset + sizeof(container); + +- if (xdr_round_up(ticket_len) > token_len - sizeof(container)) ++ if (ticket_len > xdr_round_down(token_len - sizeof(container))) + goto short_packet; + + _debug("KVNO %u", kvno); +--- a/net/rxrpc/rxgk_common.h ++++ b/net/rxrpc/rxgk_common.h +@@ -34,6 +34,7 @@ struct rxgk_context { + }; + + #define xdr_round_up(x) (round_up((x), sizeof(__be32))) ++#define xdr_round_down(x) (round_down((x), sizeof(__be32))) + #define xdr_object_len(x) (4 + xdr_round_up(x)) + + /*
diff --git a/queue-7.0/sched_ext-documentation-clarify-ops.dispatch-role-in-task-lifecycle.patch b/queue-7.0/sched_ext-documentation-clarify-ops.dispatch-role-in-task-lifecycle.patch new file mode 100644 index 0000000..a2f7501 --- /dev/null +++ b/queue-7.0/sched_ext-documentation-clarify-ops.dispatch-role-in-task-lifecycle.patch
@@ -0,0 +1,53 @@ +From a313357a346839d40b3a4dec393c71bf30cbb34c Mon Sep 17 00:00:00 2001 +From: Andrea Righi <arighi@nvidia.com> +Date: Wed, 25 Mar 2026 22:21:00 +0100 +Subject: sched_ext: Documentation: Clarify ops.dispatch() role in task lifecycle + +From: Andrea Righi <arighi@nvidia.com> + +commit a313357a346839d40b3a4dec393c71bf30cbb34c upstream. + +ops.dispatch() is invoked when a CPU becomes available. This can occur +when a task voluntarily yields the CPU, exhausts its time slice, or is +preempted for other reasons. + +If the task is still runnable, refilling its time slice in +ops.dispatch() (either by the BPF scheduler or the sched_ext core) +allows it to continue running without triggering ops.stopping(). +However, this behavior is not clearly reflected in the current task +lifecycle diagram. + +Update the diagram to better represent this interaction. + +Fixes: 9465f44d2df2 ("sched_ext: Documentation: Clarify time slice handling in task lifecycle") +Cc: stable@vger.kernel.org # v6.17+ +Signed-off-by: Andrea Righi <arighi@nvidia.com> +Signed-off-by: Tejun Heo <tj@kernel.org> +Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> +--- + Documentation/scheduler/sched-ext.rst | 12 +++++++----- + 1 file changed, 7 insertions(+), 5 deletions(-) + +--- a/Documentation/scheduler/sched-ext.rst ++++ b/Documentation/scheduler/sched-ext.rst +@@ -320,13 +320,15 @@ by a sched_ext scheduler: + ops.dispatch(); /* Task is moved to a local DSQ */ + } + ops.running(); /* Task starts running on its assigned CPU */ +- while (task->scx.slice > 0 && task is runnable) +- ops.tick(); /* Called every 1/HZ seconds */ +- ops.stopping(); /* Task stops running (time slice expires or wait) */ + +- /* Task's CPU becomes available */ ++ while task_is_runnable(p) { ++ while (task->scx.slice > 0 && task_is_runnable(p)) ++ ops.tick(); /* Called every 1/HZ seconds */ ++ ++ ops.dispatch(); /* task->scx.slice can be refilled */ ++ } + +- ops.dispatch(); /* task->scx.slice can be 
refilled */ ++ ops.stopping(); /* Task stops running (time slice expires or wait) */ + } + + ops.quiescent(); /* Task releases its assigned CPU (wait) */
diff --git a/queue-7.0/scsi-sd-fix-missing-put_disk-when-device_add-disk_dev-fails.patch b/queue-7.0/scsi-sd-fix-missing-put_disk-when-device_add-disk_dev-fails.patch new file mode 100644 index 0000000..2159e14 --- /dev/null +++ b/queue-7.0/scsi-sd-fix-missing-put_disk-when-device_add-disk_dev-fails.patch
@@ -0,0 +1,35 @@ +From 1e111c4b3a726df1254670a5cc4868cedb946d37 Mon Sep 17 00:00:00 2001 +From: Yang Xiuwei <yangxiuwei@kylinos.cn> +Date: Mon, 30 Mar 2026 09:49:52 +0800 +Subject: scsi: sd: fix missing put_disk() when device_add(&disk_dev) fails + +From: Yang Xiuwei <yangxiuwei@kylinos.cn> + +commit 1e111c4b3a726df1254670a5cc4868cedb946d37 upstream. + +If device_add(&sdkp->disk_dev) fails, put_device() runs +scsi_disk_release(), which frees the scsi_disk but leaves the gendisk +referenced. The device_add_disk() error path in sd_probe() calls +put_disk(gd); call put_disk(gd) here to mirror that cleanup. + +Fixes: 265dfe8ebbab ("scsi: sd: Free scsi_disk device via put_device()") +Cc: stable@vger.kernel.org +Reviewed-by: John Garry <john.g.garry@oracle.com> +Signed-off-by: Yang Xiuwei <yangxiuwei@kylinos.cn> +Link: https://patch.msgid.link/20260330014952.152776-1-yangxiuwei@kylinos.cn +Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com> +Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> +--- + drivers/scsi/sd.c | 1 + + 1 file changed, 1 insertion(+) + +--- a/drivers/scsi/sd.c ++++ b/drivers/scsi/sd.c +@@ -4018,6 +4018,7 @@ static int sd_probe(struct scsi_device * + error = device_add(&sdkp->disk_dev); + if (error) { + put_device(&sdkp->disk_dev); ++ put_disk(gd); + goto out; + } +
diff --git a/queue-7.0/seg6-fix-seg6-lwtunnel-output-redirect-for-l2-reduced-encap-mode.patch b/queue-7.0/seg6-fix-seg6-lwtunnel-output-redirect-for-l2-reduced-encap-mode.patch new file mode 100644 index 0000000..0df78a9 --- /dev/null +++ b/queue-7.0/seg6-fix-seg6-lwtunnel-output-redirect-for-l2-reduced-encap-mode.patch
@@ -0,0 +1,42 @@ +From ade67d5f588832c7ba131aadd4215a94ce0a15c8 Mon Sep 17 00:00:00 2001 +From: Andrea Mayer <andrea.mayer@uniroma2.it> +Date: Sat, 18 Apr 2026 18:28:38 +0200 +Subject: seg6: fix seg6 lwtunnel output redirect for L2 reduced encap mode + +From: Andrea Mayer <andrea.mayer@uniroma2.it> + +commit ade67d5f588832c7ba131aadd4215a94ce0a15c8 upstream. + +When SEG6_IPTUN_MODE_L2ENCAP_RED (L2ENCAP_RED) was introduced, the +condition in seg6_build_state() that excludes L2 encap modes from +setting LWTUNNEL_STATE_OUTPUT_REDIRECT was not updated to account for +the new mode. +As a consequence, L2ENCAP_RED routes incorrectly trigger seg6_output() +on the output path, where the packet is silently dropped because +skb_mac_header_was_set() fails on L3 packets. + +Extend the check to also exclude L2ENCAP_RED, consistent with L2ENCAP. + +Fixes: 13f0296be8ec ("seg6: add support for SRv6 H.L2Encaps.Red behavior") +Cc: stable@vger.kernel.org +Signed-off-by: Andrea Mayer <andrea.mayer@uniroma2.it> +Reviewed-by: Justin Iurman <justin.iurman@gmail.com> +Link: https://patch.msgid.link/20260418162838.31979-1-andrea.mayer@uniroma2.it +Signed-off-by: Jakub Kicinski <kuba@kernel.org> +Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> +--- + net/ipv6/seg6_iptunnel.c | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +--- a/net/ipv6/seg6_iptunnel.c ++++ b/net/ipv6/seg6_iptunnel.c +@@ -715,7 +715,8 @@ static int seg6_build_state(struct net * + newts->type = LWTUNNEL_ENCAP_SEG6; + newts->flags |= LWTUNNEL_STATE_INPUT_REDIRECT; + +- if (tuninfo->mode != SEG6_IPTUN_MODE_L2ENCAP) ++ if (tuninfo->mode != SEG6_IPTUN_MODE_L2ENCAP && ++ tuninfo->mode != SEG6_IPTUN_MODE_L2ENCAP_RED) + newts->flags |= LWTUNNEL_STATE_OUTPUT_REDIRECT; + + newts->headroom = seg6_lwt_headroom(tuninfo);
diff --git a/queue-7.0/series b/queue-7.0/series index 5258dab..2a7948d 100644 --- a/queue-7.0/series +++ b/queue-7.0/series
@@ -226,3 +226,59 @@ kvm-nsvm-add-missing-consistency-check-for-ncr3-validity.patch kvm-nsvm-raise-ud-if-unhandled-vmmcall-isn-t-intercepted-by-l1.patch kvm-nsvm-always-intercept-vmmcall-when-l2-is-active.patch +arm-9472-1-fix-race-condition-on-pg_dcache_clean-in-__sync_icache_dcache.patch +ring-buffer-do-not-double-count-the-reader_page.patch +ext4-fix-bounds-check-in-check_xattrs-to-prevent-out-of-bounds-access.patch +ext4-fix-missing-brelse-in-ext4_xattr_inode_dec_ref_all.patch +udf-fix-partition-descriptor-append-bookkeeping.patch +mtd-spi-nor-sst-fix-write-enable-before-aai-sequence.patch +mtd-spinand-winbond-declare-the-qe-bit-on-w25nxxjw.patch +amdgpu-jpeg-fix-deepsleep-register-for-jpeg-5_0_0-and-5_0_2.patch +md-md-llbitmap-skip-reading-rdevs-that-are-not-in_sync.patch +md-md-llbitmap-raise-barrier-before-state-machine-transition.patch +md-raid5-fix-soft-lockup-in-retry_aligned_read.patch +md-raid5-validate-payload-size-before-accessing-journal-metadata.patch +check-uapi-link-into-shared-objects.patch +mm-swap-speed-up-hibernation-allocation-and-writeout.patch +hid-apple-ensure-the-keyboard-backlight-is-off-if-suspending.patch +inotify-fix-watch-count-leak-when-fsnotify_add_inode_mark_locked-fails.patch +x86-cpu-disable-fred-when-pti-is-forced-on.patch +x86-shstk-prevent-deadlock-during-shstk-sigreturn.patch +wifi-rtl8xxxu-fix-potential-use-of-uninitialized-value.patch +tcp-call-sk_data_ready-after-listener-migration.patch +taskstats-set-version-in-tgid-exit-notifications.patch +mptcp-sync-the-msk-sndbuf-at-accept-time.patch +mfd-core-preserve-of-node-when-acpi-handle-is-present.patch +9p-fix-access-mode-flags-being-ored-instead-of-replaced.patch +apparmor-use-target-task-s-context-in-apparmor_getprocattr.patch +bluetooth-hci_event-fix-potential-uaf-in-ssp-passkey-handlers.patch +bus-mhi-host-pci_generic-switch-to-async-power-up-to-avoid-boot-delays.patch +can-ucan-fix-devres-lifetime.patch +crypto-acomp-fix-wrong-pointer-stored-by-acomp_save_req.patch 
+crypto-arm64-aes-fix-32-bit-aes_mac_update-arg-treated-as-64-bit.patch +crypto-atmel-aes-fix-3-page-memory-leak-in-atmel_aes_buff_cleanup.patch +crypto-atmel-ecc-release-client-on-allocation-failure.patch +crypto-hisilicon-fix-dma_unmap_single-direction.patch +crypto-ccree-fix-a-memory-leak-in-cc_mac_digest.patch +crypto-atmel-tdes-fix-dma-sync-direction.patch +crypto-atmel-sha204a-fix-error-codes-in-otp-reads.patch +crypto-atmel-sha204a-fix-potential-uaf-and-memory-leak-in-remove-path.patch +crypto-atmel-sha204a-fix-uninitialized-data-access-on-otp-read-error.patch +crypto-nx-fix-bounce-buffer-leaks-in-nx842_crypto_-alloc-free-_ctx.patch +crypto-nx-fix-context-leak-in-nx842_crypto_free_ctx.patch +crypto-nx-fix-packed-layout-in-struct-nx842_crypto_header.patch +dm-mirror-fix-integer-overflow-in-create_dirty_log.patch +erofs-fix-unsigned-underflow-in-z_erofs_lz4_handle_overlap.patch +ceph-fix-num_ops-off-by-one-when-crypto-allocation-fails.patch +ceph-only-d_add-negative-dentries-when-they-are-unhashed.patch +gtp-disable-bh-before-calling-udp_tunnel_xmit_skb.patch +ib-core-fix-zero-dmac-race-in-neighbor-resolution.patch +ktest-fix-the-month-in-the-name-of-the-failure-directory.patch +nfsv4.1-apply-session-size-limits-on-clone-path.patch +ntfs3-add-buffer-boundary-checks-to-run_unpack.patch +ntfs3-fix-integer-overflow-in-run_unpack-volume-boundary-check.patch +rtmutex-use-waiter-task-instead-of-current-in-remove_waiter.patch +rxgk-fix-potential-integer-overflow-in-length-check.patch +sched_ext-documentation-clarify-ops.dispatch-role-in-task-lifecycle.patch +scsi-sd-fix-missing-put_disk-when-device_add-disk_dev-fails.patch +seg6-fix-seg6-lwtunnel-output-redirect-for-l2-reduced-encap-mode.patch
diff --git a/queue-7.0/taskstats-set-version-in-tgid-exit-notifications.patch b/queue-7.0/taskstats-set-version-in-tgid-exit-notifications.patch new file mode 100644 index 0000000..37a9471 --- /dev/null +++ b/queue-7.0/taskstats-set-version-in-tgid-exit-notifications.patch
@@ -0,0 +1,74 @@ +From 16c4f0211aaa1ec1422b11b59f64f1abe9009fc0 Mon Sep 17 00:00:00 2001 +From: Yiyang Chen <cyyzero16@gmail.com> +Date: Mon, 30 Mar 2026 03:00:40 +0800 +Subject: taskstats: set version in TGID exit notifications + +From: Yiyang Chen <cyyzero16@gmail.com> + +commit 16c4f0211aaa1ec1422b11b59f64f1abe9009fc0 upstream. + +delay accounting started populating taskstats records with a valid version +field via fill_pid() and fill_tgid(). + +Later, commit ad4ecbcba728 ("[PATCH] delay accounting taskstats interface +send tgid once") changed the TGID exit path to send the cached +signal->stats aggregate directly instead of building the outgoing record +through fill_tgid(). Unlike fill_tgid(), fill_tgid_exit() only +accumulates accounting data and never initializes stats->version. + +As a result, TGID exit notifications can reach userspace with version == 0 +even though PID exit notifications and TASKSTATS_CMD_GET replies carry a +valid taskstats version. + +This is easy to reproduce with `tools/accounting/getdelays.c`. + +I have a small follow-up patch for that tool which: + +1. increases the receive buffer/message size so the pid+tgid + combined exit notification is not dropped/truncated + +2. prints `stats->version`. + +With that patch, the reproducer is: + + Terminal 1: + ./getdelays -d -v -l -m 0 + + Terminal 2: + taskset -c 0 python3 -c 'import threading,time; t=threading.Thread(target=time.sleep,args=(0.1,)); t.start(); t.join()' + +That produces both PID and TGID exit notifications for the same +process. The PID exit record reports a valid taskstats version, while +the TGID exit record reports `version 0`. + + +This patch (of 2): + +Set stats->version = TASKSTATS_VERSION after copying the cached TGID +aggregate into the outgoing netlink payload so all taskstats records are +self-describing again. 
+ +Link: https://lkml.kernel.org/r/ba83d934e59edd431b693607de573eb9ca059309.1774810498.git.cyyzero16@gmail.com +Fixes: ad4ecbcba728 ("[PATCH] delay accounting taskstats interface send tgid once") +Signed-off-by: Yiyang Chen <cyyzero16@gmail.com> +Cc: Balbir Singh <bsingharora@gmail.com> +Cc: Dr. Thomas Orgis <thomas.orgis@uni-hamburg.de> +Cc: Fan Yu <fan.yu9@zte.com.cn> +Cc: Wang Yaxin <wang.yaxin@zte.com.cn> +Cc: <stable@vger.kernel.org> +Signed-off-by: Andrew Morton <akpm@linux-foundation.org> +Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> +--- + kernel/taskstats.c | 1 + + 1 file changed, 1 insertion(+) + +--- a/kernel/taskstats.c ++++ b/kernel/taskstats.c +@@ -649,6 +649,7 @@ void taskstats_exit(struct task_struct * + goto err; + + memcpy(stats, tsk->signal->stats, sizeof(*stats)); ++ stats->version = TASKSTATS_VERSION; + + send: + send_cpu_listeners(rep_skb, listeners);
diff --git a/queue-7.0/tcp-call-sk_data_ready-after-listener-migration.patch b/queue-7.0/tcp-call-sk_data_ready-after-listener-migration.patch new file mode 100644 index 0000000..46162c4 --- /dev/null +++ b/queue-7.0/tcp-call-sk_data_ready-after-listener-migration.patch
@@ -0,0 +1,69 @@ +From 3864c6ba1e041bc75342353a70fa2a2c6f909923 Mon Sep 17 00:00:00 2001 +From: Zhenzhong Wu <jt26wzz@gmail.com> +Date: Wed, 22 Apr 2026 10:45:53 +0800 +Subject: tcp: call sk_data_ready() after listener migration + +From: Zhenzhong Wu <jt26wzz@gmail.com> + +commit 3864c6ba1e041bc75342353a70fa2a2c6f909923 upstream. + +When inet_csk_listen_stop() migrates an established child socket from +a closing listener to another socket in the same SO_REUSEPORT group, +the target listener gets a new accept-queue entry via +inet_csk_reqsk_queue_add(), but that path never notifies the target +listener's waiters. A nonblocking accept() still works because it +checks the queue directly, but poll()/epoll_wait() waiters and +blocking accept() callers can also remain asleep indefinitely. + +Call READ_ONCE(nsk->sk_data_ready)(nsk) after a successful migration +in inet_csk_listen_stop(). + +However, after inet_csk_reqsk_queue_add() succeeds, the ref acquired +in reuseport_migrate_sock() is effectively transferred to +nreq->rsk_listener. Another CPU can then dequeue nreq via accept() +or listener shutdown, hit reqsk_put(), and drop that listener ref. +Since listeners are SOCK_RCU_FREE, wrap the post-queue_add() +dereferences of nsk in rcu_read_lock()/rcu_read_unlock(), which also +covers the existing sock_net(nsk) access in that path. + +The reqsk_timer_handler() path does not need the same changes for two +reasons: half-open requests become readable only after the final ACK, +where tcp_child_process() already wakes the listener; and once nreq is +visible via inet_ehash_insert(), the success path no longer touches +nsk directly. 
+ +Fixes: 54b92e841937 ("tcp: Migrate TCP_ESTABLISHED/TCP_SYN_RECV sockets in accept queues.") +Cc: stable@vger.kernel.org +Suggested-by: Eric Dumazet <edumazet@google.com> +Reviewed-by: Kuniyuki Iwashima <kuniyu@google.com> +Signed-off-by: Zhenzhong Wu <jt26wzz@gmail.com> +Reviewed-by: Eric Dumazet <edumazet@google.com> +Link: https://patch.msgid.link/20260422024554.130346-2-jt26wzz@gmail.com +Signed-off-by: Jakub Kicinski <kuba@kernel.org> +Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> +--- + net/ipv4/inet_connection_sock.c | 3 +++ + 1 file changed, 3 insertions(+) + +--- a/net/ipv4/inet_connection_sock.c ++++ b/net/ipv4/inet_connection_sock.c +@@ -1482,16 +1482,19 @@ void inet_csk_listen_stop(struct sock *s + if (nreq) { + refcount_set(&nreq->rsk_refcnt, 1); + ++ rcu_read_lock(); + if (inet_csk_reqsk_queue_add(nsk, nreq, child)) { + __NET_INC_STATS(sock_net(nsk), + LINUX_MIB_TCPMIGRATEREQSUCCESS); + reqsk_migrate_reset(req); ++ READ_ONCE(nsk->sk_data_ready)(nsk); + } else { + __NET_INC_STATS(sock_net(nsk), + LINUX_MIB_TCPMIGRATEREQFAILURE); + reqsk_migrate_reset(nreq); + __reqsk_free(nreq); + } ++ rcu_read_unlock(); + + /* inet_csk_reqsk_queue_add() has already + * called inet_child_forget() on failure case.
diff --git a/queue-7.0/udf-fix-partition-descriptor-append-bookkeeping.patch b/queue-7.0/udf-fix-partition-descriptor-append-bookkeeping.patch new file mode 100644 index 0000000..c0b6a7c --- /dev/null +++ b/queue-7.0/udf-fix-partition-descriptor-append-bookkeeping.patch
@@ -0,0 +1,56 @@ +From 08841b06fa64d8edbd1a21ca6e613420c90cc4b8 Mon Sep 17 00:00:00 2001 +From: Seohyeon Maeng <bioloidgp@gmail.com> +Date: Tue, 10 Mar 2026 17:16:52 +0900 +Subject: udf: fix partition descriptor append bookkeeping + +From: Seohyeon Maeng <bioloidgp@gmail.com> + +commit 08841b06fa64d8edbd1a21ca6e613420c90cc4b8 upstream. + +Mounting a crafted UDF image with repeated partition descriptors can +trigger a heap out-of-bounds write in part_descs_loc[]. + +handle_partition_descriptor() deduplicates entries by partition number, +but appended slots never record partnum. As a result duplicate +Partition Descriptors are appended repeatedly and num_part_descs keeps +growing. + +Once the table is full, the growth path still sizes the allocation from +partnum even though inserts are indexed by num_part_descs. If partnum is +already aligned to PART_DESC_ALLOC_STEP, ALIGN(partnum, step) can keep +the old capacity and the next append writes past the end of the table. + +Store partnum in the appended slot and size growth from the next append +count so deduplication and capacity tracking follow the same model. 
+ +Fixes: ee4af50ca94f ("udf: Fix mounting of Win7 created UDF filesystems") +Cc: stable@vger.kernel.org +Signed-off-by: Seohyeon Maeng <bioloidgp@gmail.com> +Link: https://patch.msgid.link/20260310081652.21220-1-bioloidgp@gmail.com +Signed-off-by: Jan Kara <jack@suse.cz> +Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> +--- + fs/udf/super.c | 4 +++- + 1 file changed, 3 insertions(+), 1 deletion(-) + +--- a/fs/udf/super.c ++++ b/fs/udf/super.c +@@ -1694,8 +1694,9 @@ static struct udf_vds_record *handle_par + return &(data->part_descs_loc[i].rec); + if (data->num_part_descs >= data->size_part_descs) { + struct part_desc_seq_scan_data *new_loc; +- unsigned int new_size = ALIGN(partnum, PART_DESC_ALLOC_STEP); ++ unsigned int new_size; + ++ new_size = data->num_part_descs + PART_DESC_ALLOC_STEP; + new_loc = kzalloc_objs(*new_loc, new_size); + if (!new_loc) + return ERR_PTR(-ENOMEM); +@@ -1705,6 +1706,7 @@ static struct udf_vds_record *handle_par + data->part_descs_loc = new_loc; + data->size_part_descs = new_size; + } ++ data->part_descs_loc[data->num_part_descs].partnum = partnum; + return &(data->part_descs_loc[data->num_part_descs++].rec); + } +
diff --git a/queue-7.0/wifi-rtl8xxxu-fix-potential-use-of-uninitialized-value.patch b/queue-7.0/wifi-rtl8xxxu-fix-potential-use-of-uninitialized-value.patch new file mode 100644 index 0000000..3dcdc2e --- /dev/null +++ b/queue-7.0/wifi-rtl8xxxu-fix-potential-use-of-uninitialized-value.patch
@@ -0,0 +1,90 @@ +From f8a2fc809bfeb49130709b31a4d357a049f28547 Mon Sep 17 00:00:00 2001 +From: Yi Cong <yicong@kylinos.cn> +Date: Fri, 6 Mar 2026 15:16:27 +0800 +Subject: wifi: rtl8xxxu: fix potential use of uninitialized value + +From: Yi Cong <yicong@kylinos.cn> + +commit f8a2fc809bfeb49130709b31a4d357a049f28547 upstream. + +The local variables 'mcs' and 'nss' in rtl8xxxu_update_ra_report() are +passed to rtl8xxxu_desc_to_mcsrate() as output parameters. If the helper +function encounters an unhandled rate index, it may return without setting +these values, leading to the use of uninitialized stack data. + +Remove the helper rtl8xxxu_desc_to_mcsrate() and inline the logic into +rtl8xxxu_update_ra_report(). This fixes the use of uninitialized 'mcs' +and 'nss' variables for legacy rates. + +The new implementation explicitly handles: +- Legacy rates: Set bitrate only. +- HT rates (MCS0-15): Set MCS flags, index, and NSS (1 or 2) directly. +- Invalid rates: Return early. + +Fixes: 7de16123d9e2 ("wifi: rtl8xxxu: Introduce rtl8xxxu_update_ra_report") +Cc: stable@vger.kernel.org +Suggested-by: Ping-Ke Shih <pkshih@realtek.com> +Signed-off-by: Yi Cong <yicong@kylinos.cn> +Link: https://lore.kernel.org/all/96e31963da0c42dcb52ce44f818963d7@realtek.com/ +Signed-off-by: Ping-Ke Shih <pkshih@realtek.com> +Link: https://patch.msgid.link/20260306071627.56501-1-cong.yi@linux.dev +Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> +--- + drivers/net/wireless/realtek/rtl8xxxu/core.c | 28 +++++++-------------------- + 1 file changed, 8 insertions(+), 20 deletions(-) + +--- a/drivers/net/wireless/realtek/rtl8xxxu/core.c ++++ b/drivers/net/wireless/realtek/rtl8xxxu/core.c +@@ -4697,20 +4697,6 @@ static const struct ieee80211_rate rtl8x + {.bitrate = 540, .hw_value = 0x0b,}, + }; + +-static void rtl8xxxu_desc_to_mcsrate(u16 rate, u8 *mcs, u8 *nss) +-{ +- if (rate <= DESC_RATE_54M) +- return; +- +- if (rate >= DESC_RATE_MCS0 && rate <= DESC_RATE_MCS15) { +- if (rate < 
DESC_RATE_MCS8) +- *nss = 1; +- else +- *nss = 2; +- *mcs = rate - DESC_RATE_MCS0; +- } +-} +- + static void rtl8xxxu_set_basic_rates(struct rtl8xxxu_priv *priv, u32 rate_cfg) + { + struct ieee80211_hw *hw = priv->hw; +@@ -4820,23 +4806,25 @@ static void rtl8xxxu_set_aifs(struct rtl + void rtl8xxxu_update_ra_report(struct rtl8xxxu_ra_report *rarpt, + u8 rate, u8 sgi, u8 bw) + { +- u8 mcs, nss; +- + rarpt->txrate.flags = 0; + + if (rate <= DESC_RATE_54M) { + rarpt->txrate.legacy = rtl8xxxu_legacy_ratetable[rate].bitrate; +- } else { +- rtl8xxxu_desc_to_mcsrate(rate, &mcs, &nss); ++ } else if (rate >= DESC_RATE_MCS0 && rate <= DESC_RATE_MCS15) { + rarpt->txrate.flags |= RATE_INFO_FLAGS_MCS; ++ if (rate < DESC_RATE_MCS8) ++ rarpt->txrate.nss = 1; ++ else ++ rarpt->txrate.nss = 2; + +- rarpt->txrate.mcs = mcs; +- rarpt->txrate.nss = nss; ++ rarpt->txrate.mcs = rate - DESC_RATE_MCS0; + + if (sgi) + rarpt->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI; + + rarpt->txrate.bw = bw; ++ } else { ++ return; + } + + rarpt->bit_rate = cfg80211_calculate_bitrate(&rarpt->txrate);
diff --git a/queue-7.0/x86-cpu-disable-fred-when-pti-is-forced-on.patch b/queue-7.0/x86-cpu-disable-fred-when-pti-is-forced-on.patch new file mode 100644 index 0000000..98fe57c --- /dev/null +++ b/queue-7.0/x86-cpu-disable-fred-when-pti-is-forced-on.patch
@@ -0,0 +1,69 @@ +From 932d922285ef4d0d655a6f5def2779ae86ca0d73 Mon Sep 17 00:00:00 2001 +From: Dave Hansen <dave.hansen@linux.intel.com> +Date: Tue, 21 Apr 2026 09:31:36 -0700 +Subject: x86/cpu: Disable FRED when PTI is forced on + +From: Dave Hansen <dave.hansen@linux.intel.com> + +commit 932d922285ef4d0d655a6f5def2779ae86ca0d73 upstream. + +FRED and PTI were never intended to work together. No FRED hardware is +vulnerable to Meltdown and all of it should have LASS anyway. +Nevertheless, if you boot a system with pti=on and fred=on, the kernel +tries to do what is asked of it and dies a horrible death on the first +attempt to run userspace (since it never switches to the user page +tables). + +Disable FRED when PTI is forced on, and print a warning about it. + +A quick brain dump about what a FRED+PTI implementation would look like +is below. I'm not sure it would make any sense to do it, but never say +never. All I know is that it's way too complicated to be worth it today. + +<brain dump> +The SWITCH_TO_USER/KERNEL_CR3 bits are simple to fix (or at least we +have the assembly tools to do it already), as is sticking the FRED entry +text in .entry.text (it's not in there today). + +The nasty part is the stacks. Today, the CPU pops into the kernel on +MSR_IA32_FRED_RSP0 which is normal old kernel memory and not mapped to +userspace. The hardware pushes gunk on to MSR_IA32_FRED_RSP0, which is +currently the task stacks. MSR_IA32_FRED_RSP0 would need to point +elsewhere, probably cpu_entry_stack(). Then, start playing games with +stacks on entry/exit, including copying gunk to and from the task stack. + +While I'd *like* to have PTI everywhere, I'm not sure it's worth mucking +up the FRED code with PTI kludges. If a user wants fast entry/exit, they +use FRED. If you want PTI (and sekuritay), you certainly don't care +about fast entry and FRED isn't going to help you *all* that much, so +you can just stay with the IDT. 
+ +Plus, FRED hardware should have LASS which gives you a similar security +profile to PTI without the CR3 munging. +</brain dump> + +Reported-by: Gayatri Kammela <Gayatri.Kammela@amd.com> +Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com> +Reviewed-by: Borislav Petkov (AMD) <bp@alien8.de> +Tested-by: Maciej Wieczor-Retman <maciej.wieczor-retman@intel.com> +Cc: stable@vger.kernel.org +Link: https://patch.msgid.link/20260421163136.E7C6788A@davehans-spike.ostc.intel.com +Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> +--- + arch/x86/mm/pti.c | 5 +++++ + 1 file changed, 5 insertions(+) + +--- a/arch/x86/mm/pti.c ++++ b/arch/x86/mm/pti.c +@@ -105,6 +105,11 @@ void __init pti_check_boottime_disable(v + pr_debug("PTI enabled, disabling INVLPGB\n"); + setup_clear_cpu_cap(X86_FEATURE_INVLPGB); + } ++ ++ if (cpu_feature_enabled(X86_FEATURE_FRED)) { ++ pr_debug("PTI enabled, disabling FRED\n"); ++ setup_clear_cpu_cap(X86_FEATURE_FRED); ++ } + } + + static int __init pti_parse_cmdline(char *arg)
diff --git a/queue-7.0/x86-shstk-prevent-deadlock-during-shstk-sigreturn.patch b/queue-7.0/x86-shstk-prevent-deadlock-during-shstk-sigreturn.patch new file mode 100644 index 0000000..2d05716 --- /dev/null +++ b/queue-7.0/x86-shstk-prevent-deadlock-during-shstk-sigreturn.patch
@@ -0,0 +1,134 @@ +From 9874b2917b9fbc30956fee209d3c4aa47201c64e Mon Sep 17 00:00:00 2001 +From: Rick Edgecombe <rick.p.edgecombe@intel.com> +Date: Thu, 9 Apr 2026 11:43:30 -0700 +Subject: x86/shstk: Prevent deadlock during shstk sigreturn + +From: Rick Edgecombe <rick.p.edgecombe@intel.com> + +commit 9874b2917b9fbc30956fee209d3c4aa47201c64e upstream. + +During sigreturn the shadow stack signal frame is popped. The kernel does +this by reading the shadow stack using normal read accesses. When it can't +assume the memory is shadow stack, it takes extra steps to makes sure it is +reading actual shadow stack memory and not other normal readable memory. It +does this by holding the mmap read lock while doing the access and checking +the flags of the VMA. + +Unfortunately that is not safe. If the read of the shadow stack sigframe +hits a page fault, the fault handler will try to recursively grab another +mmap read lock. This normally works ok, but if a writer on another CPU is +also waiting, the second read lock could fail and cause a deadlock. + +Fix this by not holding mmap lock during the read access to userspace. + +Instead use mmap_lock_speculate_...() to watch for changes between dropping +mmap lock and the userspace access. Retry if anything grabbed an mmap write +lock in between and could have changed the VMA. + +These mmap_lock_speculate_...() helpers use mm::mm_lock_seq, which is only +available when PER_VMA_LOCK is configured. So make X86_USER_SHADOW_STACK +depend on it. On x86, PER_VMA_LOCK is a default configuration for SMP +kernels. So drop support for the other configs under the assumption that +the !SMP shadow stack user base does not exist. + +Currently there is a check that skips the lookup work when the SSP can be +assumed to be on a shadow stack. While reorganizing the function, remove +the optimization to make the tricky code flows more common, such that +issues like this cannot escape detection for so long. 
+ +Fixes: 7fad2a432cd3 ("x86/shstk: Check that signal frame is shadow stack mem") +Suggested-by: Linus Torvalds <torvalds@linux-foundation.org> +Signed-off-by: Rick Edgecombe <rick.p.edgecombe@intel.com> +Signed-off-by: Thomas Gleixner <tglx@kernel.org> +Reviewed-by: Dave Hansen <dave.hansen@intel.com> +Reviewed-by: Thomas Gleixner <tglx@kernel.org> +Cc: stable@vger.kernel.org +Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> +--- + arch/x86/Kconfig | 1 + + arch/x86/kernel/shstk.c | 42 +++++++++++++++++++++++------------------- + 2 files changed, 24 insertions(+), 19 deletions(-) + +--- a/arch/x86/Kconfig ++++ b/arch/x86/Kconfig +@@ -1889,6 +1889,7 @@ config X86_USER_SHADOW_STACK + bool "X86 userspace shadow stack" + depends on AS_WRUSS + depends on X86_64 ++ depends on PER_VMA_LOCK + select ARCH_USES_HIGH_VMA_FLAGS + select ARCH_HAS_USER_SHADOW_STACK + select X86_CET +--- a/arch/x86/kernel/shstk.c ++++ b/arch/x86/kernel/shstk.c +@@ -334,10 +334,8 @@ static int shstk_push_sigframe(unsigned + + static int shstk_pop_sigframe(unsigned long *ssp) + { +- struct vm_area_struct *vma; + unsigned long token_addr; +- bool need_to_check_vma; +- int err = 1; ++ unsigned int seq; + + /* + * It is possible for the SSP to be off the end of a shadow stack by 4 +@@ -348,25 +346,35 @@ static int shstk_pop_sigframe(unsigned l + if (!IS_ALIGNED(*ssp, 8)) + return -EINVAL; + +- need_to_check_vma = PAGE_ALIGN(*ssp) == *ssp; ++ do { ++ struct vm_area_struct *vma; ++ bool valid_vma; ++ int err; + +- if (need_to_check_vma) + if (mmap_read_lock_killable(current->mm)) + return -EINTR; + +- err = get_shstk_data(&token_addr, (unsigned long __user *)*ssp); +- if (unlikely(err)) +- goto out_err; +- +- if (need_to_check_vma) { + vma = find_vma(current->mm, *ssp); +- if (!vma || !(vma->vm_flags & VM_SHADOW_STACK)) { +- err = -EFAULT; +- goto out_err; +- } ++ valid_vma = vma && (vma->vm_flags & VM_SHADOW_STACK); + ++ /* ++ * VMAs can change between get_shstk_data() and find_vma(). 
++ * Watch for changes and ensure that 'token_addr' comes from ++ * 'vma' by recording a seqcount. ++ * ++ * Ignore the return value of mmap_lock_speculate_try_begin() ++ * because the mmap lock excludes the possibility of writers. ++ */ ++ mmap_lock_speculate_try_begin(current->mm, &seq); + mmap_read_unlock(current->mm); +- } ++ ++ if (!valid_vma) ++ return -EINVAL; ++ ++ err = get_shstk_data(&token_addr, (unsigned long __user *)*ssp); ++ if (err) ++ return err; ++ } while (mmap_lock_speculate_retry(current->mm, seq)); + + /* Restore SSP aligned? */ + if (unlikely(!IS_ALIGNED(token_addr, 8))) +@@ -379,10 +387,6 @@ static int shstk_pop_sigframe(unsigned l + *ssp = token_addr; + + return 0; +-out_err: +- if (need_to_check_vma) +- mmap_read_unlock(current->mm); +- return err; + } + + int setup_signal_shadow_stack(struct ksignal *ksig)