From f4d52627764ace111263b1f8fa1177a65588a65f Mon Sep 17 00:00:00 2001
From: "Steven Rostedt (VMware)" <rostedt@goodmis.org>
Date: Wed, 7 Feb 2018 17:26:32 -0500
Subject: [PATCH 1782/1795] ring-buffer: Add nesting for adding events within
events
The ring-buffer code has recursion protection: in case tracing ends up
tracing itself, the ring buffer detects that it was called at the same
context (normal, softirq, interrupt or NMI) and does not continue to
record the event.

With histogram synthetic events, an event is recorded while tracing
another event at the same context. The recursion protection triggers
because it detects tracing at the same context, and stops the nested
event from being recorded.

Add ring_buffer_nest_start() and ring_buffer_nest_end() to notify the
ring buffer that a trace is about to happen within another trace, that
it is intentional, and that the recursion blocking should not trigger.
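
For example, a caller that needs to record an event from within the
recording of another event could wrap the inner write like this (a
minimal sketch; the reserve length and payload setup are placeholders,
and error handling is elided):

	ring_buffer_nest_start(buffer);
	event = ring_buffer_lock_reserve(buffer, sizeof(*entry));
	if (event) {
		entry = ring_buffer_event_data(event);
		/* fill in the nested event's payload here */
		ring_buffer_unlock_commit(buffer, event);
	}
	ring_buffer_nest_end(buffer);
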
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
(cherry picked from commit 8e012066fe0de5ff5be606836f9075511bce5604)
Signed-off-by: Hirotaka MOTAI <Motai.Hirotaka@aj.MitsubishiElectric.co.jp>
Signed-off-by: Geert Uytterhoeven <geert+renesas@glider.be>
---
include/linux/ring_buffer.h | 3 ++
kernel/trace/ring_buffer.c | 57 +++++++++++++++++++++++++++++++++++--
2 files changed, 57 insertions(+), 3 deletions(-)
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index 6c2a6b3f3c6d..abce5f5325e1 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -117,6 +117,9 @@ int ring_buffer_unlock_commit(struct ring_buffer *buffer,
int ring_buffer_write(struct ring_buffer *buffer,
unsigned long length, void *data);
+void ring_buffer_nest_start(struct ring_buffer *buffer);
+void ring_buffer_nest_end(struct ring_buffer *buffer);
+
struct ring_buffer_event *
ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts,
unsigned long *lost_events);
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 45fc91559aa6..75fea9321ffb 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -477,6 +477,7 @@ struct ring_buffer_per_cpu {
struct buffer_page *reader_page;
unsigned long lost_events;
unsigned long last_overrun;
+ unsigned long nest;
local_t entries_bytes;
local_t entries;
local_t overrun;
@@ -2635,10 +2636,10 @@ trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer)
bit = pc & NMI_MASK ? RB_CTX_NMI :
pc & HARDIRQ_MASK ? RB_CTX_IRQ : RB_CTX_SOFTIRQ;
- if (unlikely(val & (1 << bit)))
+ if (unlikely(val & (1 << (bit + cpu_buffer->nest))))
return 1;
- val |= (1 << bit);
+ val |= (1 << (bit + cpu_buffer->nest));
cpu_buffer->current_context = val;
return 0;
@@ -2647,7 +2648,57 @@ trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer)
static __always_inline void
trace_recursive_unlock(struct ring_buffer_per_cpu *cpu_buffer)
{
- cpu_buffer->current_context &= cpu_buffer->current_context - 1;
+ cpu_buffer->current_context &=
+ cpu_buffer->current_context - (1 << cpu_buffer->nest);
+}
+
+/* The recursive locking above uses 4 bits */
+#define NESTED_BITS 4
+
+/**
+ * ring_buffer_nest_start - Allow tracing while nested
+ * @buffer: The ring buffer to modify
+ *
+ * The ring buffer has a safety mechanism to prevent recursion.
+ * But there may be a case where a trace needs to be done while
+ * tracing something else. In this case, calling this function
+ * allows the next ring_buffer_lock_reserve() call to nest within
+ * the currently active one.
+ *
+ * Call this function before calling another ring_buffer_lock_reserve() and
+ * call ring_buffer_nest_end() after the nested ring_buffer_unlock_commit().
+ */
+void ring_buffer_nest_start(struct ring_buffer *buffer)
+{
+ struct ring_buffer_per_cpu *cpu_buffer;
+ int cpu;
+
+ /* Enabled by ring_buffer_nest_end() */
+ preempt_disable_notrace();
+ cpu = raw_smp_processor_id();
+ cpu_buffer = buffer->buffers[cpu];
+ /* This is the shift value for the above recursive locking */
+ cpu_buffer->nest += NESTED_BITS;
+}
+
+/**
+ * ring_buffer_nest_end - End a nested trace section
+ * @buffer: The ring buffer to modify
+ *
+ * Must be called after ring_buffer_nest_start() and after the
+ * ring_buffer_unlock_commit().
+ */
+void ring_buffer_nest_end(struct ring_buffer *buffer)
+{
+ struct ring_buffer_per_cpu *cpu_buffer;
+ int cpu;
+
+ /* Disabled by ring_buffer_nest_start() */
+ cpu = raw_smp_processor_id();
+ cpu_buffer = buffer->buffers[cpu];
+ /* This is the shift value for the above recursive locking */
+ cpu_buffer->nest -= NESTED_BITS;
+ preempt_enable_notrace();
}
/**
--
2.19.0
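
Note on the mechanism: trace_recursive_lock() reserves one bit per
context (NMI, IRQ, softirq, normal) in cpu_buffer->current_context,
four bits in total, hence NESTED_BITS = 4. Each ring_buffer_nest_start()
shifts that window up by NESTED_BITS, so a nested event sets and tests
a disjoint group of bits and is not mistaken for recursion. A sketch of
the arithmetic (the concrete bit position is for illustration only,
assuming RB_CTX_IRQ maps to bit 1):

	val |= 1 << 1;          /* outer event in IRQ context marks bit 1 */
	/* ring_buffer_nest_start(): cpu_buffer->nest += 4 */
	val & (1 << (1 + 4));   /* nested event tests bit 5: clear, so allowed */
	val |= 1 << (1 + 4);    /* nested event marks bit 5 */
	/* trace_recursive_unlock(): val &= val - (1 << 4) clears bit 5,
	   the lowest set bit at or above the nest offset */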