| From 91a54090026f84ceffaa12ac53c99b9f162946f6 Mon Sep 17 00:00:00 2001 |
| From: Martin Kaiser <martin@kaiser.cx> |
| Date: Thu, 30 Oct 2025 16:55:05 +0100 |
| Subject: maple_tree: fix tracepoint string pointers |
| |
| From: Martin Kaiser <martin@kaiser.cx> |
| |
| commit 91a54090026f84ceffaa12ac53c99b9f162946f6 upstream. |
| |
| maple_tree tracepoints contain pointers to function names. Such a pointer |
| is saved when a tracepoint logs an event. There's no guarantee that it's |
| still valid when the event is parsed later and the pointer is dereferenced. |
| |
| The kernel warns about these unsafe pointers. |
| |
| event 'ma_read' has unsafe pointer field 'fn' |
| WARNING: kernel/trace/trace.c:3779 at ignore_event+0x1da/0x1e4 |
| |
| Mark the function names as tracepoint_string() to fix the events. |
| |
| One case that doesn't work without this patch is using trace-cmd record |
| to save the binary ring buffer and trace-cmd report to parse it in |
| userspace. The address of __func__ can't be dereferenced from |
| userspace, but tracepoint_string() will add an entry to |
| /sys/kernel/tracing/printk_formats. |
| |
| Link: https://lkml.kernel.org/r/20251030155537.87972-1-martin@kaiser.cx |
| Fixes: 54a611b60590 ("Maple Tree: add new data structure") |
| Signed-off-by: Martin Kaiser <martin@kaiser.cx> |
| Acked-by: Liam R. Howlett <Liam.Howlett@oracle.com> |
| Cc: <stable@vger.kernel.org> |
| Signed-off-by: Andrew Morton <akpm@linux-foundation.org> |
| Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> |
| --- |
| lib/maple_tree.c | 30 ++++++++++++++++-------------- |
| 1 file changed, 16 insertions(+), 14 deletions(-) |
| |
| --- a/lib/maple_tree.c |
| +++ b/lib/maple_tree.c |
| @@ -64,6 +64,8 @@ |
| #define CREATE_TRACE_POINTS |
| #include <trace/events/maple_tree.h> |
| |
| +#define TP_FCT tracepoint_string(__func__) |
| + |
| /* |
| * Kernel pointer hashing renders much of the maple tree dump useless as tagged |
| * pointers get hashed to arbitrary values. |
| @@ -2976,7 +2978,7 @@ static inline void mas_rebalance(struct |
| MA_STATE(l_mas, mas->tree, mas->index, mas->last); |
| MA_STATE(r_mas, mas->tree, mas->index, mas->last); |
| |
| - trace_ma_op(__func__, mas); |
| + trace_ma_op(TP_FCT, mas); |
| |
| /* |
| * Rebalancing occurs if a node is insufficient. Data is rebalanced |
| @@ -3337,7 +3339,7 @@ static void mas_split(struct ma_state *m |
| MA_STATE(prev_l_mas, mas->tree, mas->index, mas->last); |
| MA_STATE(prev_r_mas, mas->tree, mas->index, mas->last); |
| |
| - trace_ma_op(__func__, mas); |
| + trace_ma_op(TP_FCT, mas); |
| |
| mast.l = &l_mas; |
| mast.r = &r_mas; |
| @@ -3512,7 +3514,7 @@ static bool mas_is_span_wr(struct ma_wr_ |
| return false; |
| } |
| |
| - trace_ma_write(__func__, wr_mas->mas, wr_mas->r_max, entry); |
| + trace_ma_write(TP_FCT, wr_mas->mas, wr_mas->r_max, entry); |
| return true; |
| } |
| |
| @@ -3756,7 +3758,7 @@ static noinline void mas_wr_spanning_sto |
| * of data may happen. |
| */ |
| mas = wr_mas->mas; |
| - trace_ma_op(__func__, mas); |
| + trace_ma_op(TP_FCT, mas); |
| |
| if (unlikely(!mas->index && mas->last == ULONG_MAX)) |
| return mas_new_root(mas, wr_mas->entry); |
| @@ -3894,7 +3896,7 @@ done: |
| } else { |
| memcpy(wr_mas->node, newnode, sizeof(struct maple_node)); |
| } |
| - trace_ma_write(__func__, mas, 0, wr_mas->entry); |
| + trace_ma_write(TP_FCT, mas, 0, wr_mas->entry); |
| mas_update_gap(mas); |
| mas->end = new_end; |
| return; |
| @@ -3938,7 +3940,7 @@ static inline void mas_wr_slot_store(str |
| mas->offset++; /* Keep mas accurate. */ |
| } |
| |
| - trace_ma_write(__func__, mas, 0, wr_mas->entry); |
| + trace_ma_write(TP_FCT, mas, 0, wr_mas->entry); |
| /* |
| * Only update gap when the new entry is empty or there is an empty |
| * entry in the original two ranges. |
| @@ -4059,7 +4061,7 @@ static inline void mas_wr_append(struct |
| mas_update_gap(mas); |
| |
| mas->end = new_end; |
| - trace_ma_write(__func__, mas, new_end, wr_mas->entry); |
| + trace_ma_write(TP_FCT, mas, new_end, wr_mas->entry); |
| return; |
| } |
| |
| @@ -4073,7 +4075,7 @@ static void mas_wr_bnode(struct ma_wr_st |
| { |
| struct maple_big_node b_node; |
| |
| - trace_ma_write(__func__, wr_mas->mas, 0, wr_mas->entry); |
| + trace_ma_write(TP_FCT, wr_mas->mas, 0, wr_mas->entry); |
| memset(&b_node, 0, sizeof(struct maple_big_node)); |
| mas_store_b_node(wr_mas, &b_node, wr_mas->offset_end); |
| mas_commit_b_node(wr_mas, &b_node); |
| @@ -5405,7 +5407,7 @@ void *mas_store(struct ma_state *mas, vo |
| int request; |
| MA_WR_STATE(wr_mas, mas, entry); |
| |
| - trace_ma_write(__func__, mas, 0, entry); |
| + trace_ma_write(TP_FCT, mas, 0, entry); |
| #ifdef CONFIG_DEBUG_MAPLE_TREE |
| if (MAS_WARN_ON(mas, mas->index > mas->last)) |
| pr_err("Error %lX > %lX " PTR_FMT "\n", mas->index, mas->last, |
| @@ -5506,7 +5508,7 @@ void mas_store_prealloc(struct ma_state |
| } |
| |
| store: |
| - trace_ma_write(__func__, mas, 0, entry); |
| + trace_ma_write(TP_FCT, mas, 0, entry); |
| mas_wr_store_entry(&wr_mas); |
| MAS_WR_BUG_ON(&wr_mas, mas_is_err(mas)); |
| mas_destroy(mas); |
| @@ -6319,7 +6321,7 @@ void *mtree_load(struct maple_tree *mt, |
| MA_STATE(mas, mt, index, index); |
| void *entry; |
| |
| - trace_ma_read(__func__, &mas); |
| + trace_ma_read(TP_FCT, &mas); |
| rcu_read_lock(); |
| retry: |
| entry = mas_start(&mas); |
| @@ -6362,7 +6364,7 @@ int mtree_store_range(struct maple_tree |
| MA_STATE(mas, mt, index, last); |
| int ret = 0; |
| |
| - trace_ma_write(__func__, &mas, 0, entry); |
| + trace_ma_write(TP_FCT, &mas, 0, entry); |
| if (WARN_ON_ONCE(xa_is_advanced(entry))) |
| return -EINVAL; |
| |
| @@ -6585,7 +6587,7 @@ void *mtree_erase(struct maple_tree *mt, |
| void *entry = NULL; |
| |
| MA_STATE(mas, mt, index, index); |
| - trace_ma_op(__func__, &mas); |
| + trace_ma_op(TP_FCT, &mas); |
| |
| mtree_lock(mt); |
| entry = mas_erase(&mas); |
| @@ -6923,7 +6925,7 @@ void *mt_find(struct maple_tree *mt, uns |
| unsigned long copy = *index; |
| #endif |
| |
| - trace_ma_read(__func__, &mas); |
| + trace_ma_read(TP_FCT, &mas); |
| |
| if ((*index) > max) |
| return NULL; |