| From 0af462f19e635ad522f28981238334620881badc Mon Sep 17 00:00:00 2001 |
| From: Thomas Gleixner <tglx@linutronix.de> |
| Date: Mon, 1 May 2023 17:42:06 +0200 |
| Subject: debugobject: Ensure pool refill (again) |
| |
| From: Thomas Gleixner <tglx@linutronix.de> |
| |
| commit 0af462f19e635ad522f28981238334620881badc upstream. |
| |
| The recent fix to ensure atomicity of lookup and allocation inadvertently |
| broke the pool refill mechanism. |
| |
| Prior to that change debug_object_activate() and debug_object_assert_init() |
| invoked debug_object_init() to set up the tracking object for statically |
| initialized objects. That's no longer the case and debug_object_init() is |
| now the only place which does pool refills. |
| |
| Depending on the number of statically initialized objects this can be |
| enough to actually deplete the pool, which was observed by Ido via a |
| debugobjects OOM warning. |
| |
| Restore the old behaviour by adding explicit refill opportunities to |
| debug_object_activate() and debug_object_assert_init(). |
| |
| Fixes: 63a759694eed ("debugobject: Prevent init race with static objects") |
| Reported-by: Ido Schimmel <idosch@nvidia.com> |
| Signed-off-by: Thomas Gleixner <tglx@linutronix.de> |
| Tested-by: Ido Schimmel <idosch@nvidia.com> |
| Link: https://lore.kernel.org/r/871qk05a9d.ffs@tglx |
| Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> |
| --- |
| lib/debugobjects.c | 21 +++++++++++++++------ |
| 1 file changed, 15 insertions(+), 6 deletions(-) |
| |
| --- a/lib/debugobjects.c |
| +++ b/lib/debugobjects.c |
| @@ -587,6 +587,16 @@ static struct debug_obj *lookup_object_o |
| return NULL; |
| } |
| |
| +static void debug_objects_fill_pool(void) |
| +{ |
| + /* |
| + * On RT enabled kernels the pool refill must happen in preemptible |
| + * context: |
| + */ |
| + if (!IS_ENABLED(CONFIG_PREEMPT_RT) || preemptible()) |
| + fill_pool(); |
| +} |
| + |
| static void |
| __debug_object_init(void *addr, const struct debug_obj_descr *descr, int onstack) |
| { |
| @@ -595,12 +605,7 @@ __debug_object_init(void *addr, const st |
| struct debug_obj *obj; |
| unsigned long flags; |
| |
| - /* |
| - * On RT enabled kernels the pool refill must happen in preemptible |
| - * context: |
| - */ |
| - if (!IS_ENABLED(CONFIG_PREEMPT_RT) || preemptible()) |
| - fill_pool(); |
| + debug_objects_fill_pool(); |
| |
| db = get_bucket((unsigned long) addr); |
| |
| @@ -685,6 +690,8 @@ int debug_object_activate(void *addr, co |
| if (!debug_objects_enabled) |
| return 0; |
| |
| + debug_objects_fill_pool(); |
| + |
| db = get_bucket((unsigned long) addr); |
| |
| raw_spin_lock_irqsave(&db->lock, flags); |
| @@ -894,6 +901,8 @@ void debug_object_assert_init(void *addr |
| if (!debug_objects_enabled) |
| return; |
| |
| + debug_objects_fill_pool(); |
| + |
| db = get_bucket((unsigned long) addr); |
| |
| raw_spin_lock_irqsave(&db->lock, flags); |