| From b2d59693100d10712e84e1d63c99896b5fca820a Mon Sep 17 00:00:00 2001 |
| From: Sasha Levin <sashal@kernel.org> |
| Date: Wed, 5 Feb 2020 09:20:32 -0500 |
| Subject: ftrace: Protect ftrace_graph_hash with ftrace_sync |
| |
| From: Steven Rostedt (VMware) <rostedt@goodmis.org> |
| |
| [ Upstream commit 54a16ff6f2e50775145b210bcd94d62c3c2af117 ] |
| |
| As the function_graph tracer can run when RCU is not "watching", it cannot |
| be protected by synchronize_rcu(); it requires running a task on each CPU |
| before the old hash can be freed. Calling schedule_on_each_cpu(ftrace_sync) |
| needs to be used instead. |
| |
| Link: https://lore.kernel.org/r/20200205131110.GT2935@paulmck-ThinkPad-P72 |
| |
| Cc: stable@vger.kernel.org |
| Fixes: b9b0c831bed26 ("ftrace: Convert graph filter to use hash tables") |
| Reported-by: "Paul E. McKenney" <paulmck@kernel.org> |
| Reviewed-by: Joel Fernandes (Google) <joel@joelfernandes.org> |
| Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org> |
| Signed-off-by: Sasha Levin <sashal@kernel.org> |
| --- |
| kernel/trace/ftrace.c | 11 +++++++++-- |
| kernel/trace/trace.h | 2 ++ |
| 2 files changed, 11 insertions(+), 2 deletions(-) |
| |
| diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c |
| index 09c69ad8439ef..53795237e9751 100644 |
| --- a/kernel/trace/ftrace.c |
| +++ b/kernel/trace/ftrace.c |
| @@ -5344,8 +5344,15 @@ ftrace_graph_release(struct inode *inode, struct file *file) |
| |
| mutex_unlock(&graph_lock); |
| |
| - /* Wait till all users are no longer using the old hash */ |
| - synchronize_sched(); |
| + /* |
| + * We need to do a hard force of sched synchronization. |
| + * This is because we use preempt_disable() to do RCU, but |
| + * the function tracers can be called where RCU is not watching |
| + * (like before user_exit()). We can not rely on the RCU |
| + * infrastructure to do the synchronization, thus we must do it |
| + * ourselves. |
| + */ |
| + schedule_on_each_cpu(ftrace_sync); |
| |
| free_ftrace_hash(old_hash); |
| } |
| diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h |
| index 1721b95ba9b7d..ee0c6a313ed1a 100644 |
| --- a/kernel/trace/trace.h |
| +++ b/kernel/trace/trace.h |
| @@ -887,6 +887,7 @@ static inline int ftrace_graph_addr(struct ftrace_graph_ent *trace) |
| * Have to open code "rcu_dereference_sched()" because the |
| * function graph tracer can be called when RCU is not |
| * "watching". |
| + * Protected with schedule_on_each_cpu(ftrace_sync) |
| */ |
| hash = rcu_dereference_protected(ftrace_graph_hash, !preemptible()); |
| |
| @@ -939,6 +940,7 @@ static inline int ftrace_graph_notrace_addr(unsigned long addr) |
| * Have to open code "rcu_dereference_sched()" because the |
| * function graph tracer can be called when RCU is not |
| * "watching". |
| + * Protected with schedule_on_each_cpu(ftrace_sync) |
| */ |
| notrace_hash = rcu_dereference_protected(ftrace_graph_notrace_hash, |
| !preemptible()); |
| -- |
| 2.20.1 |
| |