| From 8b8b36834d0fff67fc8668093f4312dd04dcf21d Mon Sep 17 00:00:00 2001 |
| From: "Steven Rostedt (Red Hat)" <rostedt@goodmis.org> |
| Date: Tue, 10 Jun 2014 09:46:00 -0400 |
| Subject: ring-buffer: Check if buffer exists before polling |
| |
| From: "Steven Rostedt (Red Hat)" <rostedt@goodmis.org> |
| |
| commit 8b8b36834d0fff67fc8668093f4312dd04dcf21d upstream. |
| |
| The per_cpu buffers are created one per possible CPU. But this does
| not mean that those CPUs are online, nor even that they exist.
| |
| With the addition of ring buffer polling, the code assumes that the
| caller polls on an existing buffer. But this is not the case if the
| user reads trace_pipe for a CPU that does not exist, and this
| causes the kernel to crash.
| |
| The simple fix is to check the cpu against the buffer bitmask to see
| whether the buffer was allocated or not, and return -ENODEV if it is
| not.
| |
| Further changes were needed to pass the -ENODEV back up to userspace.
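|
| The core of the fix is the bitmask check, shown here as a minimal
| sketch for reference (the actual change to ring_buffer_wait() is in
| the diff below):
|
| 	/* buffer->cpumask only has bits set for allocated buffers */
| 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
| 		return -ENODEV;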
| |
| Link: http://lkml.kernel.org/r/5393DB61.6060707@oracle.com |
| |
| Reported-by: Sasha Levin <sasha.levin@oracle.com> |
| Signed-off-by: Steven Rostedt <rostedt@goodmis.org> |
| Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> |
| |
| --- |
| include/linux/ring_buffer.h | 2 +- |
| kernel/trace/ring_buffer.c | 5 ++++- |
| kernel/trace/trace.c | 25 ++++++++++++++++++------- |
| kernel/trace/trace.h | 4 ++-- |
| 4 files changed, 25 insertions(+), 11 deletions(-) |
| |
| --- a/include/linux/ring_buffer.h |
| +++ b/include/linux/ring_buffer.h |
| @@ -97,7 +97,7 @@ __ring_buffer_alloc(unsigned long size, |
| __ring_buffer_alloc((size), (flags), &__key); \ |
| }) |
| |
| -void ring_buffer_wait(struct ring_buffer *buffer, int cpu); |
| +int ring_buffer_wait(struct ring_buffer *buffer, int cpu); |
| int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu, |
| struct file *filp, poll_table *poll_table); |
| |
| --- a/kernel/trace/ring_buffer.c |
| +++ b/kernel/trace/ring_buffer.c |
| @@ -543,7 +543,7 @@ static void rb_wake_up_waiters(struct ir |
| * as data is added to any of the @buffer's cpu buffers. Otherwise |
| * it will wait for data to be added to a specific cpu buffer. |
| */ |
| -void ring_buffer_wait(struct ring_buffer *buffer, int cpu) |
| +int ring_buffer_wait(struct ring_buffer *buffer, int cpu) |
| { |
| struct ring_buffer_per_cpu *cpu_buffer; |
| DEFINE_WAIT(wait); |
| @@ -557,6 +557,8 @@ void ring_buffer_wait(struct ring_buffer |
| if (cpu == RING_BUFFER_ALL_CPUS) |
| work = &buffer->irq_work; |
| else { |
| + if (!cpumask_test_cpu(cpu, buffer->cpumask)) |
| + return -ENODEV; |
| cpu_buffer = buffer->buffers[cpu]; |
| work = &cpu_buffer->irq_work; |
| } |
| @@ -591,6 +593,7 @@ void ring_buffer_wait(struct ring_buffer |
| schedule(); |
| |
| finish_wait(&work->waiters, &wait); |
| + return 0; |
| } |
| |
| /** |
| --- a/kernel/trace/trace.c |
| +++ b/kernel/trace/trace.c |
| @@ -1091,13 +1091,13 @@ update_max_tr_single(struct trace_array |
| } |
| #endif /* CONFIG_TRACER_MAX_TRACE */ |
| |
| -static void default_wait_pipe(struct trace_iterator *iter) |
| +static int default_wait_pipe(struct trace_iterator *iter) |
| { |
| /* Iterators are static, they should be filled or empty */ |
| if (trace_buffer_iter(iter, iter->cpu_file)) |
| - return; |
| + return 0; |
| |
| - ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file); |
| + return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file); |
| } |
| |
| #ifdef CONFIG_FTRACE_STARTUP_TEST |
| @@ -4160,17 +4160,19 @@ tracing_poll_pipe(struct file *filp, pol |
| * |
| * Anyway, this is really very primitive wakeup. |
| */ |
| -void poll_wait_pipe(struct trace_iterator *iter) |
| +int poll_wait_pipe(struct trace_iterator *iter) |
| { |
| set_current_state(TASK_INTERRUPTIBLE); |
| /* sleep for 100 msecs, and try again. */ |
| schedule_timeout(HZ / 10); |
| + return 0; |
| } |
| |
| /* Must be called with trace_types_lock mutex held. */ |
| static int tracing_wait_pipe(struct file *filp) |
| { |
| struct trace_iterator *iter = filp->private_data; |
| + int ret; |
| |
| while (trace_empty(iter)) { |
| |
| @@ -4180,10 +4182,13 @@ static int tracing_wait_pipe(struct file |
| |
| mutex_unlock(&iter->mutex); |
| |
| - iter->trace->wait_pipe(iter); |
| + ret = iter->trace->wait_pipe(iter); |
| |
| mutex_lock(&iter->mutex); |
| |
| + if (ret) |
| + return ret; |
| + |
| if (signal_pending(current)) |
| return -EINTR; |
| |
| @@ -5111,8 +5116,12 @@ tracing_buffers_read(struct file *filp, |
| goto out_unlock; |
| } |
| mutex_unlock(&trace_types_lock); |
| - iter->trace->wait_pipe(iter); |
| + ret = iter->trace->wait_pipe(iter); |
| mutex_lock(&trace_types_lock); |
| + if (ret) { |
| + size = ret; |
| + goto out_unlock; |
| + } |
| if (signal_pending(current)) { |
| size = -EINTR; |
| goto out_unlock; |
| @@ -5324,8 +5333,10 @@ tracing_buffers_splice_read(struct file |
| goto out; |
| } |
| mutex_unlock(&trace_types_lock); |
| - iter->trace->wait_pipe(iter); |
| + ret = iter->trace->wait_pipe(iter); |
| mutex_lock(&trace_types_lock); |
| + if (ret) |
| + goto out; |
| if (signal_pending(current)) { |
| ret = -EINTR; |
| goto out; |
| --- a/kernel/trace/trace.h |
| +++ b/kernel/trace/trace.h |
| @@ -336,7 +336,7 @@ struct tracer { |
| void (*stop)(struct trace_array *tr); |
| void (*open)(struct trace_iterator *iter); |
| void (*pipe_open)(struct trace_iterator *iter); |
| - void (*wait_pipe)(struct trace_iterator *iter); |
| + int (*wait_pipe)(struct trace_iterator *iter); |
| void (*close)(struct trace_iterator *iter); |
| void (*pipe_close)(struct trace_iterator *iter); |
| ssize_t (*read)(struct trace_iterator *iter, |
| @@ -552,7 +552,7 @@ void trace_init_global_iter(struct trace |
| |
| void tracing_iter_reset(struct trace_iterator *iter, int cpu); |
| |
| -void poll_wait_pipe(struct trace_iterator *iter); |
| +int poll_wait_pipe(struct trace_iterator *iter); |
| |
| void tracing_sched_switch_trace(struct trace_array *tr, |
| struct task_struct *prev, |