| From bbeb97464eefc65f506084fd9f18f21653e01137 Mon Sep 17 00:00:00 2001 |
| From: Gaurav Kohli <gkohli@codeaurora.org> |
| Date: Tue, 6 Oct 2020 15:03:53 +0530 |
| Subject: tracing: Fix race in trace_open and buffer resize call |
| |
| From: Gaurav Kohli <gkohli@codeaurora.org> |
| |
| commit bbeb97464eefc65f506084fd9f18f21653e01137 upstream. |
| |
| The race below can occur if trace_open and a resize of the |
| cpu buffer run in parallel on different cpus |
| CPUX CPUY |
| ring_buffer_resize |
| atomic_read(&buffer->resize_disabled) |
| tracing_open |
| tracing_reset_online_cpus |
| ring_buffer_reset_cpu |
| rb_reset_cpu |
| rb_update_pages |
| remove/insert pages |
| resetting pointer |
| |
| This race can cause a data abort or sometimes an infinite loop in |
| rb_remove_pages and rb_insert_pages while checking pages |
| for sanity. |
| |
| Take buffer lock to fix this. |
| |
| Link: https://lkml.kernel.org/r/1601976833-24377-1-git-send-email-gkohli@codeaurora.org |
| |
| Cc: stable@vger.kernel.org |
| Fixes: b23d7a5f4a07a ("ring-buffer: speed up buffer resets by avoiding synchronize_rcu for each CPU") |
| Signed-off-by: Gaurav Kohli <gkohli@codeaurora.org> |
| Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org> |
| Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> |
| |
| --- |
| kernel/trace/ring_buffer.c | 10 ++++++++++ |
| 1 file changed, 10 insertions(+) |
| |
| --- a/kernel/trace/ring_buffer.c |
| +++ b/kernel/trace/ring_buffer.c |
| @@ -4866,6 +4866,9 @@ void ring_buffer_reset_cpu(struct trace_ |
| if (!cpumask_test_cpu(cpu, buffer->cpumask)) |
| return; |
| |
| + /* prevent another thread from changing buffer sizes */ |
| + mutex_lock(&buffer->mutex); |
| + |
| atomic_inc(&cpu_buffer->resize_disabled); |
| atomic_inc(&cpu_buffer->record_disabled); |
| |
| @@ -4876,6 +4879,8 @@ void ring_buffer_reset_cpu(struct trace_ |
| |
| atomic_dec(&cpu_buffer->record_disabled); |
| atomic_dec(&cpu_buffer->resize_disabled); |
| + |
| + mutex_unlock(&buffer->mutex); |
| } |
| EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu); |
| |
| @@ -4889,6 +4894,9 @@ void ring_buffer_reset_online_cpus(struc |
| struct ring_buffer_per_cpu *cpu_buffer; |
| int cpu; |
| |
| + /* prevent another thread from changing buffer sizes */ |
| + mutex_lock(&buffer->mutex); |
| + |
| for_each_online_buffer_cpu(buffer, cpu) { |
| cpu_buffer = buffer->buffers[cpu]; |
| |
| @@ -4907,6 +4915,8 @@ void ring_buffer_reset_online_cpus(struc |
| atomic_dec(&cpu_buffer->record_disabled); |
| atomic_dec(&cpu_buffer->resize_disabled); |
| } |
| + |
| + mutex_unlock(&buffer->mutex); |
| } |
| |
| /** |