Hi Tom,

On Mon, 10 Feb 2020 17:06:50 -0600
Tom Zanussi <zanussi@xxxxxxxxxx> wrote:

> Move the checking, buffer reserve and buffer commit code in
> synth_event_trace_start/end() into inline functions
> __synth_event_trace_start/end() so they can also be used by
> synth_event_trace() and synth_event_trace_array(), and then have all
> those functions use them.
>
> Also, change synth_event_trace_state.enabled to disabled so it only
> needs to be set if the event is disabled, which is not normally the
> case.

I have some comments :)

> diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
> index 99a02168599b..65b54d6a1422 100644
> --- a/kernel/trace/trace_events_hist.c
> +++ b/kernel/trace/trace_events_hist.c
> @@ -1791,6 +1791,60 @@ void synth_event_cmd_init(struct dynevent_cmd *cmd, char *buf, int maxlen)
>  }
>  EXPORT_SYMBOL_GPL(synth_event_cmd_init);
>
> +static inline int
> +__synth_event_trace_start(struct trace_event_file *file,
> +                          struct synth_event_trace_state *trace_state)
> +{
> +        int entry_size, fields_size = 0;
> +        int ret = 0;
> +
> +        /*
> +         * Normal event tracing doesn't get called at all unless the
> +         * ENABLED bit is set (which attaches the probe thus allowing
> +         * this code to be called, etc).  Because this is called
> +         * directly by the user, we don't have that but we still need
> +         * to honor not logging when disabled.  For the the iterated
> +         * trace case, we save the enabed state upon start and just
> +         * ignore the following data calls.
> +         */
> +        if (!(file->flags & EVENT_FILE_FL_ENABLED) ||
> +            trace_trigger_soft_disabled(file)) {
> +                trace_state->disabled = true;
> +                ret = -ENOENT;

If state.disabled is set, I think __synth_event_trace_start() would be
better off returning 0 here, because all callers convert this result
back to 0 anyway.

> +                goto out;
> +        }
> +
> +        trace_state->event = file->event_call->data;
> +
> +        fields_size = trace_state->event->n_u64 * sizeof(u64);
> +
> +        /*
> +         * Avoid ring buffer recursion detection, as this event
> +         * is being performed within another event.
> +         */
> +        trace_state->buffer = file->tr->array_buffer.buffer;
> +        ring_buffer_nest_start(trace_state->buffer);
> +
> +        entry_size = sizeof(*trace_state->entry) + fields_size;
> +        trace_state->entry = trace_event_buffer_reserve(&trace_state->fbuffer,
> +                                                        file,
> +                                                        entry_size);
> +        if (!trace_state->entry) {
> +                ring_buffer_nest_end(trace_state->buffer);
> +                ret = -EINVAL;
> +        }
> +out:
> +        return ret;
> +}
> +
> +static inline void
> +__synth_event_trace_end(struct synth_event_trace_state *trace_state)
> +{

Should we check trace_state->disabled here too?
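Something like this, for example, is what I have in mind (just a rough
sketch on top of this patch, untested):

static inline void
__synth_event_trace_end(struct synth_event_trace_state *trace_state)
{
        /*
         * If the event was disabled at start time, no buffer entry was
         * reserved and ring_buffer_nest_start() was never called, so
         * there is nothing to commit or unnest here.
         */
        if (trace_state->disabled)
                return;

        trace_event_buffer_commit(&trace_state->fbuffer);

        ring_buffer_nest_end(trace_state->buffer);
}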
> +        trace_event_buffer_commit(&trace_state->fbuffer);
> +
> +        ring_buffer_nest_end(trace_state->buffer);
> +}
> +
>  /**
>   * synth_event_trace - Trace a synthetic event
>   * @file: The trace_event_file representing the synthetic event
> @@ -1812,69 +1866,38 @@ EXPORT_SYMBOL_GPL(synth_event_cmd_init);
>   */
>  int synth_event_trace(struct trace_event_file *file, unsigned int n_vals, ...)
>  {
> -        struct trace_event_buffer fbuffer;
> -        struct synth_trace_event *entry;
> -        struct trace_buffer *buffer;
> -        struct synth_event *event;
> +        struct synth_event_trace_state state;
>          unsigned int i, n_u64;
> -        int fields_size = 0;
>          va_list args;
> -        int ret = 0;
> -
> -        /*
> -         * Normal event generation doesn't get called at all unless
> -         * the ENABLED bit is set (which attaches the probe thus
> -         * allowing this code to be called, etc).  Because this is
> -         * called directly by the user, we don't have that but we
> -         * still need to honor not logging when disabled.
> -         */
> -        if (!(file->flags & EVENT_FILE_FL_ENABLED) ||
> -            trace_trigger_soft_disabled(file))
> -                return 0;
> -
> -        event = file->event_call->data;
> -
> -        if (n_vals != event->n_fields)
> -                return -EINVAL;
> -
> -        fields_size = event->n_u64 * sizeof(u64);
> -
> -        /*
> -         * Avoid ring buffer recursion detection, as this event
> -         * is being performed within another event.
> -         */
> -        buffer = file->tr->array_buffer.buffer;
> -        ring_buffer_nest_start(buffer);
> +        int ret;
>
> -        entry = trace_event_buffer_reserve(&fbuffer, file,
> -                                           sizeof(*entry) + fields_size);
> -        if (!entry) {
> -                ret = -EINVAL;
> -                goto out;
> +        ret = __synth_event_trace_start(file, &state);
> +        if (ret) {

And here, we can check "if (ret || state.disabled)" instead, and then

> +                if (ret == -ENOENT)
> +                        ret = 0; /* just disabled, not really an error */

we don't need this check.

> +                return ret;
>          }
>
>          va_start(args, n_vals);
> -        for (i = 0, n_u64 = 0; i < event->n_fields; i++) {
> +        for (i = 0, n_u64 = 0; i < state.event->n_fields; i++) {
>                  u64 val;
>
>                  val = va_arg(args, u64);
>
> -                if (event->fields[i]->is_string) {
> +                if (state.event->fields[i]->is_string) {
>                          char *str_val = (char *)(long)val;
> -                        char *str_field = (char *)&entry->fields[n_u64];
> +                        char *str_field = (char *)&state.entry->fields[n_u64];
>
>                          strscpy(str_field, str_val, STR_VAR_LEN_MAX);
>                          n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
>                  } else {
> -                        entry->fields[n_u64] = val;
> +                        state.entry->fields[n_u64] = val;
>                          n_u64++;
>                  }
>          }
>          va_end(args);
>
> -        trace_event_buffer_commit(&fbuffer);
> -out:
> -        ring_buffer_nest_end(buffer);
> +        __synth_event_trace_end(&state);
>
>          return ret;
>  }
> @@ -1901,62 +1924,31 @@ EXPORT_SYMBOL_GPL(synth_event_trace);
>  int synth_event_trace_array(struct trace_event_file *file, u64 *vals,
>                              unsigned int n_vals)
>  {
> -        struct trace_event_buffer fbuffer;
> -        struct synth_trace_event *entry;
> -        struct trace_buffer *buffer;
> -        struct synth_event *event;
> +        struct synth_event_trace_state state;
>          unsigned int i, n_u64;
> -        int fields_size = 0;
> -        int ret = 0;
> -
> -        /*
> -         * Normal event generation doesn't get called at all unless
> -         * the ENABLED bit is set (which attaches the probe thus
> -         * allowing this code to be called, etc).  Because this is
> -         * called directly by the user, we don't have that but we
> -         * still need to honor not logging when disabled.
> -         */
> -        if (!(file->flags & EVENT_FILE_FL_ENABLED) ||
> -            trace_trigger_soft_disabled(file))
> -                return 0;
> -
> -        event = file->event_call->data;
> -
> -        if (n_vals != event->n_fields)
> -                return -EINVAL;
> -
> -        fields_size = event->n_u64 * sizeof(u64);
> -
> -        /*
> -         * Avoid ring buffer recursion detection, as this event
> -         * is being performed within another event.
> -         */
> -        buffer = file->tr->array_buffer.buffer;
> -        ring_buffer_nest_start(buffer);
> +        int ret;
>
> -        entry = trace_event_buffer_reserve(&fbuffer, file,
> -                                           sizeof(*entry) + fields_size);
> -        if (!entry) {
> -                ret = -EINVAL;
> -                goto out;
> +        ret = __synth_event_trace_start(file, &state);
> +        if (ret) {
> +                if (ret == -ENOENT)
> +                        ret = 0; /* just disabled, not really an error */

Ditto.
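In other words, something like the below is what I'm thinking of (again,
only an untested sketch to show the idea, not a concrete patch).

In __synth_event_trace_start():

        if (!(file->flags & EVENT_FILE_FL_ENABLED) ||
            trace_trigger_soft_disabled(file)) {
                trace_state->disabled = true;
                goto out;       /* ret stays 0: disabled is not an error */
        }

And in synth_event_trace()/synth_event_trace_array():

        ret = __synth_event_trace_start(file, &state);
        if (ret || state.disabled)      /* real error, or just disabled */
                return ret;

Then synth_event_trace_start() below can also return the result of
__synth_event_trace_start() as-is, without the -ENOENT special case.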
> +                return ret;
>          }
>
> -        for (i = 0, n_u64 = 0; i < event->n_fields; i++) {
> -                if (event->fields[i]->is_string) {
> +        for (i = 0, n_u64 = 0; i < state.event->n_fields; i++) {
> +                if (state.event->fields[i]->is_string) {
>                          char *str_val = (char *)(long)vals[i];
> -                        char *str_field = (char *)&entry->fields[n_u64];
> +                        char *str_field = (char *)&state.entry->fields[n_u64];
>
>                          strscpy(str_field, str_val, STR_VAR_LEN_MAX);
>                          n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
>                  } else {
> -                        entry->fields[n_u64] = vals[i];
> +                        state.entry->fields[n_u64] = vals[i];
>                          n_u64++;
>                  }
>          }
>
> -        trace_event_buffer_commit(&fbuffer);
> -out:
> -        ring_buffer_nest_end(buffer);
> +        __synth_event_trace_end(&state);
>
>          return ret;
>  }
> @@ -1993,55 +1985,17 @@ EXPORT_SYMBOL_GPL(synth_event_trace_array);
>  int synth_event_trace_start(struct trace_event_file *file,
>                              struct synth_event_trace_state *trace_state)
>  {
> -        struct synth_trace_event *entry;
> -        int fields_size = 0;
> -        int ret = 0;
> +        int ret;
>
> -        if (!trace_state) {
> -                ret = -EINVAL;
> -                goto out;
> -        }
> +        if (!trace_state)
> +                return -EINVAL;
>
>          memset(trace_state, '\0', sizeof(*trace_state));
>
> -        /*
> -         * Normal event tracing doesn't get called at all unless the
> -         * ENABLED bit is set (which attaches the probe thus allowing
> -         * this code to be called, etc).  Because this is called
> -         * directly by the user, we don't have that but we still need
> -         * to honor not logging when disabled.  For the the iterated
> -         * trace case, we save the enabed state upon start and just
> -         * ignore the following data calls.
> -         */
> -        if (!(file->flags & EVENT_FILE_FL_ENABLED) ||
> -            trace_trigger_soft_disabled(file)) {
> -                trace_state->enabled = false;
> -                goto out;
> -        }
> -
> -        trace_state->enabled = true;
> +        ret = __synth_event_trace_start(file, trace_state);
> +        if (ret == -ENOENT)
> +                ret = 0; /* just disabled, not really an error */

We can also skip this tweak.

Thank you,

>
> -        trace_state->event = file->event_call->data;
> -
> -        fields_size = trace_state->event->n_u64 * sizeof(u64);
> -
> -        /*
> -         * Avoid ring buffer recursion detection, as this event
> -         * is being performed within another event.
> -         */
> -        trace_state->buffer = file->tr->array_buffer.buffer;
> -        ring_buffer_nest_start(trace_state->buffer);
> -
> -        entry = trace_event_buffer_reserve(&trace_state->fbuffer, file,
> -                                           sizeof(*entry) + fields_size);
> -        if (!entry) {
> -                ring_buffer_nest_end(trace_state->buffer);
> -                ret = -EINVAL;
> -                goto out;
> -        }
> -
> -        trace_state->entry = entry;
> -out:
>          return ret;
>  }
>  EXPORT_SYMBOL_GPL(synth_event_trace_start);
> @@ -2074,7 +2028,7 @@ static int __synth_event_add_val(const char *field_name, u64 val,
>                  trace_state->add_next = true;
>          }
>
> -        if (!trace_state->enabled)
> +        if (trace_state->disabled)
>                  goto out;
>
>          event = trace_state->event;
> @@ -2209,9 +2163,7 @@ int synth_event_trace_end(struct synth_event_trace_state *trace_state)
>          if (!trace_state)
>                  return -EINVAL;
>
> -        trace_event_buffer_commit(&trace_state->fbuffer);
> -
> -        ring_buffer_nest_end(trace_state->buffer);
> +        __synth_event_trace_end(trace_state);
>
>          return 0;
>  }
> --
> 2.14.1
>

--
Masami Hiramatsu <mhiramat@xxxxxxxxxx>