| From: "Mike Rapoport (Microsoft)" <rppt@kernel.org> |
| Subject: kho: drop notifiers |
| Date: Tue, 7 Oct 2025 03:30:56 +0000 |
| |
| The KHO framework uses a notifier chain as the mechanism for clients to |
| participate in the finalization process. While this works for a single, |
| central state machine, it is too restrictive for kernel-internal |
| components like pstore/reserve_mem or IMA. These components need a |
| simpler, direct way to register their state for preservation (e.g., during |
| their initcall) without being part of a complex, shutdown-time notifier |
| sequence. The notifier model forces all participants into a single |
| finalization flow and makes direct preservation from an arbitrary context |
| difficult. Refactor the client participation model by removing the |
| notifier chain and introducing a direct API for managing FDT subtrees. |
| |
| The core kho_finalize() and kho_abort() state machine remains, but clients |
| now register their data with KHO beforehand. |
| |
| Link: https://lkml.kernel.org/r/20251007033100.836886-4-pasha.tatashin@soleen.com |
| Signed-off-by: Mike Rapoport (Microsoft) <rppt@kernel.org> |
| Signed-off-by: Pasha Tatashin <pasha.tatashin@soleen.com> |
| Cc: Alexander Graf <graf@amazon.com> |
| Cc: Christian Brauner <brauner@kernel.org> |
| Cc: Jason Gunthorpe <jgg@nvidia.com> |
| Cc: Jason Gunthorpe <jgg@ziepe.ca> |
| Cc: Jonathan Corbet <corbet@lwn.net> |
| Cc: Masahiro Yamada <masahiroy@kernel.org> |
| Cc: Miguel Ojeda <ojeda@kernel.org> |
| Cc: Pratyush Yadav <pratyush@kernel.org> |
| Cc: Randy Dunlap <rdunlap@infradead.org> |
| Cc: Tejun Heo <tj@kernel.org> |
| Signed-off-by: Andrew Morton <akpm@linux-foundation.org> |
| --- |
| |
| include/linux/kexec_handover.h | 28 ---- |
| kernel/kexec_handover.c | 164 ++++++++++++++++------------- |
| kernel/kexec_handover_debug.c | 15 +- |
| kernel/kexec_handover_internal.h | 5 |
| lib/test_kho.c | 30 ----- |
| mm/memblock.c | 60 +--------- |
| 6 files changed, 124 insertions(+), 178 deletions(-) |
| |
| --- a/include/linux/kexec_handover.h~kho-drop-notifiers |
| +++ a/include/linux/kexec_handover.h |
| @@ -10,14 +10,7 @@ struct kho_scratch { |
| phys_addr_t size; |
| }; |
| |
| -/* KHO Notifier index */ |
| -enum kho_event { |
| - KEXEC_KHO_FINALIZE = 0, |
| - KEXEC_KHO_ABORT = 1, |
| -}; |
| - |
| struct folio; |
| -struct notifier_block; |
| struct page; |
| |
| #define DECLARE_KHOSER_PTR(name, type) \ |
| @@ -37,8 +30,6 @@ struct page; |
| (typeof((s).ptr))((s).phys ? phys_to_virt((s).phys) : NULL); \ |
| }) |
| |
| -struct kho_serialization; |
| - |
| struct kho_vmalloc_chunk; |
| struct kho_vmalloc { |
| DECLARE_KHOSER_PTR(first, struct kho_vmalloc_chunk *); |
| @@ -57,12 +48,10 @@ int kho_preserve_vmalloc(void *ptr, stru |
| struct folio *kho_restore_folio(phys_addr_t phys); |
| struct page *kho_restore_pages(phys_addr_t phys, unsigned int nr_pages); |
| void *kho_restore_vmalloc(const struct kho_vmalloc *preservation); |
| -int kho_add_subtree(struct kho_serialization *ser, const char *name, void *fdt); |
| +int kho_add_subtree(const char *name, void *fdt); |
| +void kho_remove_subtree(void *fdt); |
| int kho_retrieve_subtree(const char *name, phys_addr_t *phys); |
| |
| -int register_kho_notifier(struct notifier_block *nb); |
| -int unregister_kho_notifier(struct notifier_block *nb); |
| - |
| void kho_memory_init(void); |
| |
| void kho_populate(phys_addr_t fdt_phys, u64 fdt_len, phys_addr_t scratch_phys, |
| @@ -114,23 +103,16 @@ static inline void *kho_restore_vmalloc( |
| return NULL; |
| } |
| |
| -static inline int kho_add_subtree(struct kho_serialization *ser, |
| - const char *name, void *fdt) |
| +static inline int kho_add_subtree(const char *name, void *fdt) |
| { |
| return -EOPNOTSUPP; |
| } |
| |
| -static inline int kho_retrieve_subtree(const char *name, phys_addr_t *phys) |
| +static inline void kho_remove_subtree(void *fdt) |
| { |
| - return -EOPNOTSUPP; |
| } |
| |
| -static inline int register_kho_notifier(struct notifier_block *nb) |
| -{ |
| - return -EOPNOTSUPP; |
| -} |
| - |
| -static inline int unregister_kho_notifier(struct notifier_block *nb) |
| +static inline int kho_retrieve_subtree(const char *name, phys_addr_t *phys) |
| { |
| return -EOPNOTSUPP; |
| } |
| --- a/kernel/kexec_handover.c~kho-drop-notifiers |
| +++ a/kernel/kexec_handover.c |
| @@ -15,7 +15,6 @@ |
| #include <linux/libfdt.h> |
| #include <linux/list.h> |
| #include <linux/memblock.h> |
| -#include <linux/notifier.h> |
| #include <linux/page-isolation.h> |
| #include <linux/vmalloc.h> |
| |
| @@ -99,29 +98,34 @@ struct kho_mem_track { |
| |
| struct khoser_mem_chunk; |
| |
| -struct kho_serialization { |
| - struct page *fdt; |
| - struct kho_mem_track track; |
| - /* First chunk of serialized preserved memory map */ |
| - struct khoser_mem_chunk *preserved_mem_map; |
| +struct kho_sub_fdt { |
| + struct list_head l; |
| + const char *name; |
| + void *fdt; |
| }; |
| |
| struct kho_out { |
| - struct blocking_notifier_head chain_head; |
| - struct mutex lock; /* protects KHO FDT finalization */ |
| - struct kho_serialization ser; |
| + void *fdt; |
| bool finalized; |
| + struct mutex lock; /* protects KHO FDT finalization */ |
| + |
| + struct list_head sub_fdts; |
| + struct mutex fdts_lock; |
| + |
| + struct kho_mem_track track; |
| + /* First chunk of serialized preserved memory map */ |
| + struct khoser_mem_chunk *preserved_mem_map; |
| + |
| struct kho_debugfs dbg; |
| }; |
| |
| static struct kho_out kho_out = { |
| - .chain_head = BLOCKING_NOTIFIER_INIT(kho_out.chain_head), |
| .lock = __MUTEX_INITIALIZER(kho_out.lock), |
| - .ser = { |
| - .track = { |
| - .orders = XARRAY_INIT(kho_out.ser.track.orders, 0), |
| - }, |
| + .track = { |
| + .orders = XARRAY_INIT(kho_out.track.orders, 0), |
| }, |
| + .sub_fdts = LIST_HEAD_INIT(kho_out.sub_fdts), |
| + .fdts_lock = __MUTEX_INITIALIZER(kho_out.fdts_lock), |
| .finalized = false, |
| }; |
| |
| @@ -362,14 +366,14 @@ static void kho_mem_ser_free(struct khos |
| } |
| } |
| |
| -static int kho_mem_serialize(struct kho_serialization *ser) |
| +static int kho_mem_serialize(struct kho_out *kho_out) |
| { |
| struct khoser_mem_chunk *first_chunk = NULL; |
| struct khoser_mem_chunk *chunk = NULL; |
| struct kho_mem_phys *physxa; |
| unsigned long order; |
| |
| - xa_for_each(&ser->track.orders, order, physxa) { |
| + xa_for_each(&kho_out->track.orders, order, physxa) { |
| struct kho_mem_phys_bits *bits; |
| unsigned long phys; |
| |
| @@ -397,7 +401,7 @@ static int kho_mem_serialize(struct kho_ |
| } |
| } |
| |
| - ser->preserved_mem_map = first_chunk; |
| + kho_out->preserved_mem_map = first_chunk; |
| |
| return 0; |
| |
| @@ -658,7 +662,6 @@ err_disable_kho: |
| |
| /** |
| * kho_add_subtree - record the physical address of a sub FDT in KHO root tree. |
| - * @ser: serialization control object passed by KHO notifiers. |
| * @name: name of the sub tree. |
| * @fdt: the sub tree blob. |
| * |
| @@ -672,34 +675,45 @@ err_disable_kho: |
| * |
| * Return: 0 on success, error code on failure |
| */ |
| -int kho_add_subtree(struct kho_serialization *ser, const char *name, void *fdt) |
| +int kho_add_subtree(const char *name, void *fdt) |
| { |
| - int err = 0; |
| - u64 phys = (u64)virt_to_phys(fdt); |
| - void *root = page_to_virt(ser->fdt); |
| + struct kho_sub_fdt *sub_fdt; |
| + int err; |
| |
| - err |= fdt_begin_node(root, name); |
| - err |= fdt_property(root, PROP_SUB_FDT, &phys, sizeof(phys)); |
| - err |= fdt_end_node(root); |
| + sub_fdt = kmalloc(sizeof(*sub_fdt), GFP_KERNEL); |
| + if (!sub_fdt) |
| + return -ENOMEM; |
| |
| - if (err) |
| - return err; |
| + INIT_LIST_HEAD(&sub_fdt->l); |
| + sub_fdt->name = name; |
| + sub_fdt->fdt = fdt; |
| + |
| + mutex_lock(&kho_out.fdts_lock); |
| + list_add_tail(&sub_fdt->l, &kho_out.sub_fdts); |
| + err = kho_debugfs_fdt_add(&kho_out.dbg, name, fdt, false); |
| + mutex_unlock(&kho_out.fdts_lock); |
| |
| - return kho_debugfs_fdt_add(&kho_out.dbg, name, fdt, false); |
| + return err; |
| } |
| EXPORT_SYMBOL_GPL(kho_add_subtree); |
| |
| -int register_kho_notifier(struct notifier_block *nb) |
| +void kho_remove_subtree(void *fdt) |
| { |
| - return blocking_notifier_chain_register(&kho_out.chain_head, nb); |
| -} |
| -EXPORT_SYMBOL_GPL(register_kho_notifier); |
| + struct kho_sub_fdt *sub_fdt; |
| + |
| + mutex_lock(&kho_out.fdts_lock); |
| + list_for_each_entry(sub_fdt, &kho_out.sub_fdts, l) { |
| + if (sub_fdt->fdt == fdt) { |
| + list_del(&sub_fdt->l); |
| + kfree(sub_fdt); |
| + kho_debugfs_fdt_remove(&kho_out.dbg, fdt); |
| + break; |
| + } |
| + } |
| + mutex_unlock(&kho_out.fdts_lock); |
| |
| -int unregister_kho_notifier(struct notifier_block *nb) |
| -{ |
| - return blocking_notifier_chain_unregister(&kho_out.chain_head, nb); |
| } |
| -EXPORT_SYMBOL_GPL(unregister_kho_notifier); |
| +EXPORT_SYMBOL_GPL(kho_remove_subtree); |
| |
| /** |
| * kho_preserve_folio - preserve a folio across kexec. |
| @@ -714,7 +728,7 @@ int kho_preserve_folio(struct folio *fol |
| { |
| const unsigned long pfn = folio_pfn(folio); |
| const unsigned int order = folio_order(folio); |
| - struct kho_mem_track *track = &kho_out.ser.track; |
| + struct kho_mem_track *track = &kho_out.track; |
| |
| return __kho_preserve_order(track, pfn, order); |
| } |
| @@ -732,7 +746,7 @@ EXPORT_SYMBOL_GPL(kho_preserve_folio); |
| */ |
| int kho_preserve_pages(struct page *page, unsigned int nr_pages) |
| { |
| - struct kho_mem_track *track = &kho_out.ser.track; |
| + struct kho_mem_track *track = &kho_out.track; |
| const unsigned long start_pfn = page_to_pfn(page); |
| const unsigned long end_pfn = start_pfn + nr_pages; |
| unsigned long pfn = start_pfn; |
| @@ -828,7 +842,7 @@ err_free: |
| |
| static void kho_vmalloc_unpreserve_chunk(struct kho_vmalloc_chunk *chunk) |
| { |
| - struct kho_mem_track *track = &kho_out.ser.track; |
| + struct kho_mem_track *track = &kho_out.track; |
| unsigned long pfn = PHYS_PFN(virt_to_phys(chunk)); |
| |
| __kho_unpreserve(track, pfn, pfn + 1); |
| @@ -1010,11 +1024,11 @@ EXPORT_SYMBOL_GPL(kho_restore_vmalloc); |
| |
| static int __kho_abort(void) |
| { |
| - int err; |
| + int err = 0; |
| unsigned long order; |
| struct kho_mem_phys *physxa; |
| |
| - xa_for_each(&kho_out.ser.track.orders, order, physxa) { |
| + xa_for_each(&kho_out.track.orders, order, physxa) { |
| struct kho_mem_phys_bits *bits; |
| unsigned long phys; |
| |
| @@ -1024,17 +1038,13 @@ static int __kho_abort(void) |
| xa_destroy(&physxa->phys_bits); |
| kfree(physxa); |
| } |
| - xa_destroy(&kho_out.ser.track.orders); |
| + xa_destroy(&kho_out.track.orders); |
| |
| - if (kho_out.ser.preserved_mem_map) { |
| - kho_mem_ser_free(kho_out.ser.preserved_mem_map); |
| - kho_out.ser.preserved_mem_map = NULL; |
| + if (kho_out.preserved_mem_map) { |
| + kho_mem_ser_free(kho_out.preserved_mem_map); |
| + kho_out.preserved_mem_map = NULL; |
| } |
| |
| - err = blocking_notifier_call_chain(&kho_out.chain_head, KEXEC_KHO_ABORT, |
| - NULL); |
| - err = notifier_to_errno(err); |
| - |
| if (err) |
| pr_err("Failed to abort KHO finalization: %d\n", err); |
| |
| @@ -1057,7 +1067,8 @@ int kho_abort(void) |
| return ret; |
| |
| kho_out.finalized = false; |
| - kho_debugfs_cleanup(&kho_out.dbg); |
| + |
| + kho_debugfs_fdt_remove(&kho_out.dbg, kho_out.fdt); |
| |
| return 0; |
| } |
| @@ -1066,41 +1077,46 @@ static int __kho_finalize(void) |
| { |
| int err = 0; |
| u64 *preserved_mem_map; |
| - void *fdt = page_to_virt(kho_out.ser.fdt); |
| + void *root = kho_out.fdt; |
| + struct kho_sub_fdt *fdt; |
| |
| - err |= fdt_create(fdt, PAGE_SIZE); |
| - err |= fdt_finish_reservemap(fdt); |
| - err |= fdt_begin_node(fdt, ""); |
| - err |= fdt_property_string(fdt, "compatible", KHO_FDT_COMPATIBLE); |
| + err |= fdt_create(root, PAGE_SIZE); |
| + err |= fdt_finish_reservemap(root); |
| + err |= fdt_begin_node(root, ""); |
| + err |= fdt_property_string(root, "compatible", KHO_FDT_COMPATIBLE); |
| /** |
| * Reserve the preserved-memory-map property in the root FDT, so |
| * that all property definitions will precede subnodes created by |
| * KHO callers. |
| */ |
| - err |= fdt_property_placeholder(fdt, PROP_PRESERVED_MEMORY_MAP, |
| + err |= fdt_property_placeholder(root, PROP_PRESERVED_MEMORY_MAP, |
| sizeof(*preserved_mem_map), |
| (void **)&preserved_mem_map); |
| if (err) |
| goto abort; |
| |
| - err = kho_preserve_folio(page_folio(kho_out.ser.fdt)); |
| + err = kho_preserve_folio(virt_to_folio(kho_out.fdt)); |
| if (err) |
| goto abort; |
| |
| - err = blocking_notifier_call_chain(&kho_out.chain_head, |
| - KEXEC_KHO_FINALIZE, &kho_out.ser); |
| - err = notifier_to_errno(err); |
| + err = kho_mem_serialize(&kho_out); |
| if (err) |
| goto abort; |
| |
| - err = kho_mem_serialize(&kho_out.ser); |
| - if (err) |
| - goto abort; |
| + *preserved_mem_map = (u64)virt_to_phys(kho_out.preserved_mem_map); |
| |
| - *preserved_mem_map = (u64)virt_to_phys(kho_out.ser.preserved_mem_map); |
| + mutex_lock(&kho_out.fdts_lock); |
| + list_for_each_entry(fdt, &kho_out.sub_fdts, l) { |
| + phys_addr_t phys = virt_to_phys(fdt->fdt); |
| + |
| + err |= fdt_begin_node(root, fdt->name); |
| + err |= fdt_property(root, PROP_SUB_FDT, &phys, sizeof(phys)); |
| + err |= fdt_end_node(root); |
| + } |
| + mutex_unlock(&kho_out.fdts_lock); |
| |
| - err |= fdt_end_node(fdt); |
| - err |= fdt_finish(fdt); |
| + err |= fdt_end_node(root); |
| + err |= fdt_finish(root); |
| |
| abort: |
| if (err) { |
| @@ -1129,7 +1145,7 @@ int kho_finalize(void) |
| kho_out.finalized = true; |
| |
| return kho_debugfs_fdt_add(&kho_out.dbg, "fdt", |
| - page_to_virt(kho_out.ser.fdt), true); |
| + kho_out.fdt, true); |
| } |
| |
| bool kho_finalized(void) |
| @@ -1212,15 +1228,17 @@ static __init int kho_init(void) |
| { |
| int err = 0; |
| const void *fdt = kho_get_fdt(); |
| + struct page *fdt_page; |
| |
| if (!kho_enable) |
| return 0; |
| |
| - kho_out.ser.fdt = alloc_page(GFP_KERNEL); |
| - if (!kho_out.ser.fdt) { |
| + fdt_page = alloc_page(GFP_KERNEL); |
| + if (!fdt_page) { |
| err = -ENOMEM; |
| goto err_free_scratch; |
| } |
| + kho_out.fdt = page_to_virt(fdt_page); |
| |
| err = kho_debugfs_init(); |
| if (err) |
| @@ -1248,8 +1266,8 @@ static __init int kho_init(void) |
| return 0; |
| |
| err_free_fdt: |
| - put_page(kho_out.ser.fdt); |
| - kho_out.ser.fdt = NULL; |
| + put_page(fdt_page); |
| + kho_out.fdt = NULL; |
| err_free_scratch: |
| for (int i = 0; i < kho_scratch_cnt; i++) { |
| void *start = __va(kho_scratch[i].addr); |
| @@ -1260,7 +1278,7 @@ err_free_scratch: |
| kho_enable = false; |
| return err; |
| } |
| -late_initcall(kho_init); |
| +fs_initcall(kho_init); |
| |
| static void __init kho_release_scratch(void) |
| { |
| @@ -1396,7 +1414,7 @@ int kho_fill_kimage(struct kimage *image |
| if (!kho_out.finalized) |
| return 0; |
| |
| - image->kho.fdt = page_to_phys(kho_out.ser.fdt); |
| + image->kho.fdt = virt_to_phys(kho_out.fdt); |
| |
| scratch_size = sizeof(*kho_scratch) * kho_scratch_cnt; |
| scratch = (struct kexec_buf){ |
| --- a/kernel/kexec_handover_debug.c~kho-drop-notifiers |
| +++ a/kernel/kexec_handover_debug.c |
| @@ -61,14 +61,17 @@ int kho_debugfs_fdt_add(struct kho_debug |
| return __kho_debugfs_fdt_add(&dbg->fdt_list, dir, name, fdt); |
| } |
| |
| -void kho_debugfs_cleanup(struct kho_debugfs *dbg) |
| +void kho_debugfs_fdt_remove(struct kho_debugfs *dbg, void *fdt) |
| { |
| - struct fdt_debugfs *ff, *tmp; |
| + struct fdt_debugfs *ff; |
| |
| - list_for_each_entry_safe(ff, tmp, &dbg->fdt_list, list) { |
| - debugfs_remove(ff->file); |
| - list_del(&ff->list); |
| - kfree(ff); |
| + list_for_each_entry(ff, &dbg->fdt_list, list) { |
| + if (ff->wrapper.data == fdt) { |
| + debugfs_remove(ff->file); |
| + list_del(&ff->list); |
| + kfree(ff); |
| + break; |
| + } |
| } |
| } |
| |
| --- a/kernel/kexec_handover_internal.h~kho-drop-notifiers |
| +++ a/kernel/kexec_handover_internal.h |
| @@ -30,7 +30,7 @@ void kho_in_debugfs_init(struct kho_debu |
| int kho_out_debugfs_init(struct kho_debugfs *dbg); |
| int kho_debugfs_fdt_add(struct kho_debugfs *dbg, const char *name, |
| const void *fdt, bool root); |
| -void kho_debugfs_cleanup(struct kho_debugfs *dbg); |
| +void kho_debugfs_fdt_remove(struct kho_debugfs *dbg, void *fdt); |
| #else |
| static inline int kho_debugfs_init(void) { return 0; } |
| static inline void kho_in_debugfs_init(struct kho_debugfs *dbg, |
| @@ -38,7 +38,8 @@ static inline void kho_in_debugfs_init(s |
| static inline int kho_out_debugfs_init(struct kho_debugfs *dbg) { return 0; } |
| static inline int kho_debugfs_fdt_add(struct kho_debugfs *dbg, const char *name, |
| const void *fdt, bool root) { return 0; } |
| -static inline void kho_debugfs_cleanup(struct kho_debugfs *dbg) {} |
| +static inline void kho_debugfs_fdt_remove(struct kho_debugfs *dbg, |
| + void *fdt) { } |
| #endif /* CONFIG_KEXEC_HANDOVER_DEBUGFS */ |
| |
| #endif /* LINUX_KEXEC_HANDOVER_INTERNAL_H */ |
| --- a/lib/test_kho.c~kho-drop-notifiers |
| +++ a/lib/test_kho.c |
| @@ -39,33 +39,17 @@ struct kho_test_state { |
| |
| static struct kho_test_state kho_test_state; |
| |
| -static int kho_test_notifier(struct notifier_block *self, unsigned long cmd, |
| - void *v) |
| +static int kho_test(void) |
| { |
| struct kho_test_state *state = &kho_test_state; |
| - struct kho_serialization *ser = v; |
| int err = 0; |
| |
| - switch (cmd) { |
| - case KEXEC_KHO_ABORT: |
| - return NOTIFY_DONE; |
| - case KEXEC_KHO_FINALIZE: |
| - /* Handled below */ |
| - break; |
| - default: |
| - return NOTIFY_BAD; |
| - } |
| - |
| err |= kho_preserve_folio(state->fdt); |
| - err |= kho_add_subtree(ser, KHO_TEST_FDT, folio_address(state->fdt)); |
| + err |= kho_add_subtree(KHO_TEST_FDT, folio_address(state->fdt)); |
| |
| return err ? NOTIFY_BAD : NOTIFY_DONE; |
| } |
| |
| -static struct notifier_block kho_test_nb = { |
| - .notifier_call = kho_test_notifier, |
| -}; |
| - |
| static int kho_test_save_data(struct kho_test_state *state, void *fdt) |
| { |
| phys_addr_t *folios_info __free(kvfree) = NULL; |
| @@ -102,6 +86,9 @@ static int kho_test_save_data(struct kho |
| if (!err) |
| state->folios_info = no_free_ptr(folios_info); |
| |
| + if (!err) |
| + err = kho_test(); |
| + |
| return err; |
| } |
| |
| @@ -203,14 +190,8 @@ static int kho_test_save(void) |
| if (err) |
| goto err_free_folios; |
| |
| - err = register_kho_notifier(&kho_test_nb); |
| - if (err) |
| - goto err_free_fdt; |
| - |
| return 0; |
| |
| -err_free_fdt: |
| - folio_put(state->fdt); |
| err_free_folios: |
| kvfree(folios); |
| return err; |
| @@ -326,7 +307,6 @@ static void kho_test_cleanup(void) |
| |
| static void __exit kho_test_exit(void) |
| { |
| - unregister_kho_notifier(&kho_test_nb); |
| kho_test_cleanup(); |
| } |
| module_exit(kho_test_exit); |
| --- a/mm/memblock.c~kho-drop-notifiers |
| +++ a/mm/memblock.c |
| @@ -2444,53 +2444,18 @@ int reserve_mem_release_by_name(const ch |
| #define MEMBLOCK_KHO_FDT "memblock" |
| #define MEMBLOCK_KHO_NODE_COMPATIBLE "memblock-v1" |
| #define RESERVE_MEM_KHO_NODE_COMPATIBLE "reserve-mem-v1" |
| -static struct page *kho_fdt; |
| - |
| -static int reserve_mem_kho_finalize(struct kho_serialization *ser) |
| -{ |
| - int err = 0, i; |
| - |
| - for (i = 0; i < reserved_mem_count; i++) { |
| - struct reserve_mem_table *map = &reserved_mem_table[i]; |
| - struct page *page = phys_to_page(map->start); |
| - unsigned int nr_pages = map->size >> PAGE_SHIFT; |
| - |
| - err |= kho_preserve_pages(page, nr_pages); |
| - } |
| - |
| - err |= kho_preserve_folio(page_folio(kho_fdt)); |
| - err |= kho_add_subtree(ser, MEMBLOCK_KHO_FDT, page_to_virt(kho_fdt)); |
| - |
| - return notifier_from_errno(err); |
| -} |
| - |
| -static int reserve_mem_kho_notifier(struct notifier_block *self, |
| - unsigned long cmd, void *v) |
| -{ |
| - switch (cmd) { |
| - case KEXEC_KHO_FINALIZE: |
| - return reserve_mem_kho_finalize((struct kho_serialization *)v); |
| - case KEXEC_KHO_ABORT: |
| - return NOTIFY_DONE; |
| - default: |
| - return NOTIFY_BAD; |
| - } |
| -} |
| - |
| -static struct notifier_block reserve_mem_kho_nb = { |
| - .notifier_call = reserve_mem_kho_notifier, |
| -}; |
| |
| static int __init prepare_kho_fdt(void) |
| { |
| int err = 0, i; |
| + struct page *fdt_page; |
| void *fdt; |
| |
| - kho_fdt = alloc_page(GFP_KERNEL); |
| - if (!kho_fdt) |
| + fdt_page = alloc_page(GFP_KERNEL); |
| + if (!fdt_page) |
| return -ENOMEM; |
| |
| - fdt = page_to_virt(kho_fdt); |
| + fdt = page_to_virt(fdt_page); |
| |
| err |= fdt_create(fdt, PAGE_SIZE); |
| err |= fdt_finish_reservemap(fdt); |
| @@ -2499,7 +2464,10 @@ static int __init prepare_kho_fdt(void) |
| err |= fdt_property_string(fdt, "compatible", MEMBLOCK_KHO_NODE_COMPATIBLE); |
| for (i = 0; i < reserved_mem_count; i++) { |
| struct reserve_mem_table *map = &reserved_mem_table[i]; |
| + struct page *page = phys_to_page(map->start); |
| + unsigned int nr_pages = map->size >> PAGE_SHIFT; |
| |
| + err |= kho_preserve_pages(page, nr_pages); |
| err |= fdt_begin_node(fdt, map->name); |
| err |= fdt_property_string(fdt, "compatible", RESERVE_MEM_KHO_NODE_COMPATIBLE); |
| err |= fdt_property(fdt, "start", &map->start, sizeof(map->start)); |
| @@ -2507,13 +2475,14 @@ static int __init prepare_kho_fdt(void) |
| err |= fdt_end_node(fdt); |
| } |
| err |= fdt_end_node(fdt); |
| - |
| err |= fdt_finish(fdt); |
| |
| + err |= kho_preserve_folio(page_folio(fdt_page)); |
| + err |= kho_add_subtree(MEMBLOCK_KHO_FDT, fdt); |
| + |
| if (err) { |
| pr_err("failed to prepare memblock FDT for KHO: %d\n", err); |
| - put_page(kho_fdt); |
| - kho_fdt = NULL; |
| + put_page(fdt_page); |
| } |
| |
| return err; |
| @@ -2529,13 +2498,6 @@ static int __init reserve_mem_init(void) |
| err = prepare_kho_fdt(); |
| if (err) |
| return err; |
| - |
| - err = register_kho_notifier(&reserve_mem_kho_nb); |
| - if (err) { |
| - put_page(kho_fdt); |
| - kho_fdt = NULL; |
| - } |
| - |
| return err; |
| } |
| late_initcall(reserve_mem_init); |
| _ |