| From: Tom Zanussi <tom.zanussi@linux.intel.com> |
| Date: Tue, 5 Sep 2017 16:57:21 -0500 |
| Subject: [PATCH 09/40] tracing: Give event triggers access to |
| ring_buffer_event |
| |
| The ring_buffer event can provide a timestamp that may be useful to |
| various triggers; pass it into the trigger handlers for that purpose. |
| |
| Signed-off-by: Tom Zanussi <tom.zanussi@linux.intel.com> |
| Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> |
| --- |
| include/linux/trace_events.h | 14 ++++++---- |
| kernel/trace/trace.h | 9 +++--- |
| kernel/trace/trace_events_hist.c | 11 +++++--- |
| kernel/trace/trace_events_trigger.c | 47 ++++++++++++++++++++++-------------- |
| 4 files changed, 49 insertions(+), 32 deletions(-) |
| |
| --- a/include/linux/trace_events.h |
| +++ b/include/linux/trace_events.h |
| @@ -400,11 +400,13 @@ enum event_trigger_type { |
| |
| extern int filter_match_preds(struct event_filter *filter, void *rec); |
| |
| -extern enum event_trigger_type event_triggers_call(struct trace_event_file *file, |
| - void *rec); |
| -extern void event_triggers_post_call(struct trace_event_file *file, |
| - enum event_trigger_type tt, |
| - void *rec); |
| +extern enum event_trigger_type |
| +event_triggers_call(struct trace_event_file *file, void *rec, |
| + struct ring_buffer_event *event); |
| +extern void |
| +event_triggers_post_call(struct trace_event_file *file, |
| + enum event_trigger_type tt, |
| + void *rec, struct ring_buffer_event *event); |
| |
| bool trace_event_ignore_this_pid(struct trace_event_file *trace_file); |
| |
| @@ -424,7 +426,7 @@ trace_trigger_soft_disabled(struct trace |
| |
| if (!(eflags & EVENT_FILE_FL_TRIGGER_COND)) { |
| if (eflags & EVENT_FILE_FL_TRIGGER_MODE) |
| - event_triggers_call(file, NULL); |
| + event_triggers_call(file, NULL, NULL); |
| if (eflags & EVENT_FILE_FL_SOFT_DISABLED) |
| return true; |
| if (eflags & EVENT_FILE_FL_PID_FILTER) |
| --- a/kernel/trace/trace.h |
| +++ b/kernel/trace/trace.h |
| @@ -1190,7 +1190,7 @@ static inline bool |
| unsigned long eflags = file->flags; |
| |
| if (eflags & EVENT_FILE_FL_TRIGGER_COND) |
| - *tt = event_triggers_call(file, entry); |
| + *tt = event_triggers_call(file, entry, event); |
| |
| if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) || |
| (unlikely(file->flags & EVENT_FILE_FL_FILTERED) && |
| @@ -1227,7 +1227,7 @@ event_trigger_unlock_commit(struct trace |
| trace_buffer_unlock_commit(file->tr, buffer, event, irq_flags, pc); |
| |
| if (tt) |
| - event_triggers_post_call(file, tt, entry); |
| + event_triggers_post_call(file, tt, entry, event); |
| } |
| |
| /** |
| @@ -1260,7 +1260,7 @@ event_trigger_unlock_commit_regs(struct |
| irq_flags, pc, regs); |
| |
| if (tt) |
| - event_triggers_post_call(file, tt, entry); |
| + event_triggers_post_call(file, tt, entry, event); |
| } |
| |
| #define FILTER_PRED_INVALID ((unsigned short)-1) |
| @@ -1483,7 +1483,8 @@ extern int register_trigger_hist_enable_ |
| */ |
| struct event_trigger_ops { |
| void (*func)(struct event_trigger_data *data, |
| - void *rec); |
| + void *rec, |
| + struct ring_buffer_event *rbe); |
| int (*init)(struct event_trigger_ops *ops, |
| struct event_trigger_data *data); |
| void (*free)(struct event_trigger_ops *ops, |
| --- a/kernel/trace/trace_events_hist.c |
| +++ b/kernel/trace/trace_events_hist.c |
| @@ -909,7 +909,8 @@ static inline void add_to_key(char *comp |
| memcpy(compound_key + key_field->offset, key, size); |
| } |
| |
| -static void event_hist_trigger(struct event_trigger_data *data, void *rec) |
| +static void event_hist_trigger(struct event_trigger_data *data, void *rec, |
| + struct ring_buffer_event *event) |
| { |
| struct hist_trigger_data *hist_data = data->private_data; |
| bool use_compound_key = (hist_data->n_keys > 1); |
| @@ -1660,7 +1661,8 @@ static struct event_command trigger_hist |
| } |
| |
| static void |
| -hist_enable_trigger(struct event_trigger_data *data, void *rec) |
| +hist_enable_trigger(struct event_trigger_data *data, void *rec, |
| + struct ring_buffer_event *event) |
| { |
| struct enable_trigger_data *enable_data = data->private_data; |
| struct event_trigger_data *test; |
| @@ -1676,7 +1678,8 @@ hist_enable_trigger(struct event_trigger |
| } |
| |
| static void |
| -hist_enable_count_trigger(struct event_trigger_data *data, void *rec) |
| +hist_enable_count_trigger(struct event_trigger_data *data, void *rec, |
| + struct ring_buffer_event *event) |
| { |
| if (!data->count) |
| return; |
| @@ -1684,7 +1687,7 @@ hist_enable_count_trigger(struct event_t |
| if (data->count != -1) |
| (data->count)--; |
| |
| - hist_enable_trigger(data, rec); |
| + hist_enable_trigger(data, rec, event); |
| } |
| |
| static struct event_trigger_ops hist_enable_trigger_ops = { |
| --- a/kernel/trace/trace_events_trigger.c |
| +++ b/kernel/trace/trace_events_trigger.c |
| @@ -63,7 +63,8 @@ void trigger_data_free(struct event_trig |
| * any trigger that should be deferred, ETT_NONE if nothing to defer. |
| */ |
| enum event_trigger_type |
| -event_triggers_call(struct trace_event_file *file, void *rec) |
| +event_triggers_call(struct trace_event_file *file, void *rec, |
| + struct ring_buffer_event *event) |
| { |
| struct event_trigger_data *data; |
| enum event_trigger_type tt = ETT_NONE; |
| @@ -76,7 +77,7 @@ event_triggers_call(struct trace_event_f |
| if (data->paused) |
| continue; |
| if (!rec) { |
| - data->ops->func(data, rec); |
| + data->ops->func(data, rec, event); |
| continue; |
| } |
| filter = rcu_dereference_sched(data->filter); |
| @@ -86,7 +87,7 @@ event_triggers_call(struct trace_event_f |
| tt |= data->cmd_ops->trigger_type; |
| continue; |
| } |
| - data->ops->func(data, rec); |
| + data->ops->func(data, rec, event); |
| } |
| return tt; |
| } |
| @@ -108,7 +109,7 @@ EXPORT_SYMBOL_GPL(event_triggers_call); |
| void |
| event_triggers_post_call(struct trace_event_file *file, |
| enum event_trigger_type tt, |
| - void *rec) |
| + void *rec, struct ring_buffer_event *event) |
| { |
| struct event_trigger_data *data; |
| |
| @@ -116,7 +117,7 @@ event_triggers_post_call(struct trace_ev |
| if (data->paused) |
| continue; |
| if (data->cmd_ops->trigger_type & tt) |
| - data->ops->func(data, rec); |
| + data->ops->func(data, rec, event); |
| } |
| } |
| EXPORT_SYMBOL_GPL(event_triggers_post_call); |
| @@ -909,7 +910,8 @@ void set_named_trigger_data(struct event |
| } |
| |
| static void |
| -traceon_trigger(struct event_trigger_data *data, void *rec) |
| +traceon_trigger(struct event_trigger_data *data, void *rec, |
| + struct ring_buffer_event *event) |
| { |
| if (tracing_is_on()) |
| return; |
| @@ -918,7 +920,8 @@ traceon_trigger(struct event_trigger_dat |
| } |
| |
| static void |
| -traceon_count_trigger(struct event_trigger_data *data, void *rec) |
| +traceon_count_trigger(struct event_trigger_data *data, void *rec, |
| + struct ring_buffer_event *event) |
| { |
| if (tracing_is_on()) |
| return; |
| @@ -933,7 +936,8 @@ traceon_count_trigger(struct event_trigg |
| } |
| |
| static void |
| -traceoff_trigger(struct event_trigger_data *data, void *rec) |
| +traceoff_trigger(struct event_trigger_data *data, void *rec, |
| + struct ring_buffer_event *event) |
| { |
| if (!tracing_is_on()) |
| return; |
| @@ -942,7 +946,8 @@ traceoff_trigger(struct event_trigger_da |
| } |
| |
| static void |
| -traceoff_count_trigger(struct event_trigger_data *data, void *rec) |
| +traceoff_count_trigger(struct event_trigger_data *data, void *rec, |
| + struct ring_buffer_event *event) |
| { |
| if (!tracing_is_on()) |
| return; |
| @@ -1039,13 +1044,15 @@ static struct event_command trigger_trac |
| |
| #ifdef CONFIG_TRACER_SNAPSHOT |
| static void |
| -snapshot_trigger(struct event_trigger_data *data, void *rec) |
| +snapshot_trigger(struct event_trigger_data *data, void *rec, |
| + struct ring_buffer_event *event) |
| { |
| tracing_snapshot(); |
| } |
| |
| static void |
| -snapshot_count_trigger(struct event_trigger_data *data, void *rec) |
| +snapshot_count_trigger(struct event_trigger_data *data, void *rec, |
| + struct ring_buffer_event *event) |
| { |
| if (!data->count) |
| return; |
| @@ -1053,7 +1060,7 @@ snapshot_count_trigger(struct event_trig |
| if (data->count != -1) |
| (data->count)--; |
| |
| - snapshot_trigger(data, rec); |
| + snapshot_trigger(data, rec, event); |
| } |
| |
| static int |
| @@ -1132,13 +1139,15 @@ static __init int register_trigger_snaps |
| #define STACK_SKIP 3 |
| |
| static void |
| -stacktrace_trigger(struct event_trigger_data *data, void *rec) |
| +stacktrace_trigger(struct event_trigger_data *data, void *rec, |
| + struct ring_buffer_event *event) |
| { |
| trace_dump_stack(STACK_SKIP); |
| } |
| |
| static void |
| -stacktrace_count_trigger(struct event_trigger_data *data, void *rec) |
| +stacktrace_count_trigger(struct event_trigger_data *data, void *rec, |
| + struct ring_buffer_event *event) |
| { |
| if (!data->count) |
| return; |
| @@ -1146,7 +1155,7 @@ stacktrace_count_trigger(struct event_tr |
| if (data->count != -1) |
| (data->count)--; |
| |
| - stacktrace_trigger(data, rec); |
| + stacktrace_trigger(data, rec, event); |
| } |
| |
| static int |
| @@ -1208,7 +1217,8 @@ static __init void unregister_trigger_tr |
| } |
| |
| static void |
| -event_enable_trigger(struct event_trigger_data *data, void *rec) |
| +event_enable_trigger(struct event_trigger_data *data, void *rec, |
| + struct ring_buffer_event *event) |
| { |
| struct enable_trigger_data *enable_data = data->private_data; |
| |
| @@ -1219,7 +1229,8 @@ event_enable_trigger(struct event_trigge |
| } |
| |
| static void |
| -event_enable_count_trigger(struct event_trigger_data *data, void *rec) |
| +event_enable_count_trigger(struct event_trigger_data *data, void *rec, |
| + struct ring_buffer_event *event) |
| { |
| struct enable_trigger_data *enable_data = data->private_data; |
| |
| @@ -1233,7 +1244,7 @@ event_enable_count_trigger(struct event_ |
| if (data->count != -1) |
| (data->count)--; |
| |
| - event_enable_trigger(data, rec); |
| + event_enable_trigger(data, rec, event); |
| } |
| |
| int event_enable_trigger_print(struct seq_file *m, |