asedeno.scripts.mit.edu Git - linux.git/commitdiff
tracing: Lock event_mutex before synth_event_mutex
authorMasami Hiramatsu <mhiramat@kernel.org>
Mon, 5 Nov 2018 09:00:43 +0000 (18:00 +0900)
committerSteven Rostedt (VMware) <rostedt@goodmis.org>
Sun, 9 Dec 2018 01:54:09 +0000 (20:54 -0500)
The synthetic event code uses synth_event_mutex to protect
synth_event_list, and the event_trigger_write() path acquires
locks in the following order.

event_trigger_write(event_mutex)
  ->trigger_process_regex(trigger_cmd_mutex)
    ->event_hist_trigger_func(synth_event_mutex)

On the other hand, the synthetic event creation and deletion paths
call trace_add_event_call() and trace_remove_event_call(),
which acquire event_mutex. In that case, if we keep
synth_event_mutex locked while registering/unregistering synthetic
events, the lock dependency will be inverted.

To avoid this issue, the current synthetic event code uses a two-phase
process to create/delete events. For example, it searches existing
events under synth_event_mutex to check for event-name conflicts,
unlocks synth_event_mutex, then registers the new event with
event_mutex held. Finally, it locks synth_event_mutex again and tries
to add the new event to the list. But this introduces complexity and a
window for name conflicts.

To solve this more simply, this introduces trace_add_event_call_nolock()
and trace_remove_event_call_nolock(), which don't acquire
event_mutex internally. The synthetic event code can then lock
event_mutex before synth_event_mutex to resolve the lock dependency issue.

Link: http://lkml.kernel.org/r/154140844377.17322.13781091165954002713.stgit@devbox
Reviewed-by: Tom Zanussi <tom.zanussi@linux.intel.com>
Tested-by: Tom Zanussi <tom.zanussi@linux.intel.com>
Signed-off-by: Masami Hiramatsu <mhiramat@kernel.org>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
include/linux/trace_events.h
kernel/trace/trace_events.c
kernel/trace/trace_events_hist.c

index 4130a5497d40523c0f01a52d1235edb31a3ee4f2..3aa05593a53f152b001afe7e4110e411ff88393c 100644 (file)
@@ -529,6 +529,8 @@ extern int trace_event_raw_init(struct trace_event_call *call);
 extern int trace_define_field(struct trace_event_call *call, const char *type,
                              const char *name, int offset, int size,
                              int is_signed, int filter_type);
+extern int trace_add_event_call_nolock(struct trace_event_call *call);
+extern int trace_remove_event_call_nolock(struct trace_event_call *call);
 extern int trace_add_event_call(struct trace_event_call *call);
 extern int trace_remove_event_call(struct trace_event_call *call);
 extern int trace_event_get_offsets(struct trace_event_call *call);
index f94be0c2827b0e04aa78fdde06c78674647cc4f7..a3b157f689ee549025206f3f365639e0fea0e3c9 100644 (file)
@@ -2305,11 +2305,11 @@ __trace_early_add_new_event(struct trace_event_call *call,
 struct ftrace_module_file_ops;
 static void __add_event_to_tracers(struct trace_event_call *call);
 
-/* Add an additional event_call dynamically */
-int trace_add_event_call(struct trace_event_call *call)
+int trace_add_event_call_nolock(struct trace_event_call *call)
 {
        int ret;
-       mutex_lock(&event_mutex);
+       lockdep_assert_held(&event_mutex);
+
        mutex_lock(&trace_types_lock);
 
        ret = __register_event(call, NULL);
@@ -2317,6 +2317,16 @@ int trace_add_event_call(struct trace_event_call *call)
                __add_event_to_tracers(call);
 
        mutex_unlock(&trace_types_lock);
+       return ret;
+}
+
+/* Add an additional event_call dynamically */
+int trace_add_event_call(struct trace_event_call *call)
+{
+       int ret;
+
+       mutex_lock(&event_mutex);
+       ret = trace_add_event_call_nolock(call);
        mutex_unlock(&event_mutex);
        return ret;
 }
@@ -2366,17 +2376,29 @@ static int probe_remove_event_call(struct trace_event_call *call)
        return 0;
 }
 
-/* Remove an event_call */
-int trace_remove_event_call(struct trace_event_call *call)
+/* no event_mutex version */
+int trace_remove_event_call_nolock(struct trace_event_call *call)
 {
        int ret;
 
-       mutex_lock(&event_mutex);
+       lockdep_assert_held(&event_mutex);
+
        mutex_lock(&trace_types_lock);
        down_write(&trace_event_sem);
        ret = probe_remove_event_call(call);
        up_write(&trace_event_sem);
        mutex_unlock(&trace_types_lock);
+
+       return ret;
+}
+
+/* Remove an event_call */
+int trace_remove_event_call(struct trace_event_call *call)
+{
+       int ret;
+
+       mutex_lock(&event_mutex);
+       ret = trace_remove_event_call_nolock(call);
        mutex_unlock(&event_mutex);
 
        return ret;
index eb908ef2ececf336fc2ed8996439005a32f5afc9..1670c65389fe91bbfb63b5072de23693e7785181 100644 (file)
@@ -912,7 +912,7 @@ static int register_synth_event(struct synth_event *event)
        call->data = event;
        call->tp = event->tp;
 
-       ret = trace_add_event_call(call);
+       ret = trace_add_event_call_nolock(call);
        if (ret) {
                pr_warn("Failed to register synthetic event: %s\n",
                        trace_event_name(call));
@@ -936,7 +936,7 @@ static int unregister_synth_event(struct synth_event *event)
        struct trace_event_call *call = &event->call;
        int ret;
 
-       ret = trace_remove_event_call(call);
+       ret = trace_remove_event_call_nolock(call);
 
        return ret;
 }
@@ -1013,12 +1013,10 @@ static void add_or_delete_synth_event(struct synth_event *event, int delete)
        if (delete)
                free_synth_event(event);
        else {
-               mutex_lock(&synth_event_mutex);
                if (!find_synth_event(event->name))
                        list_add(&event->list, &synth_event_list);
                else
                        free_synth_event(event);
-               mutex_unlock(&synth_event_mutex);
        }
 }
 
@@ -1030,6 +1028,7 @@ static int create_synth_event(int argc, char **argv)
        int i, consumed = 0, n_fields = 0, ret = 0;
        char *name;
 
+       mutex_lock(&event_mutex);
        mutex_lock(&synth_event_mutex);
 
        /*
@@ -1102,8 +1101,6 @@ static int create_synth_event(int argc, char **argv)
                goto err;
        }
  out:
-       mutex_unlock(&synth_event_mutex);
-
        if (event) {
                if (delete_event) {
                        ret = unregister_synth_event(event);
@@ -1113,10 +1110,13 @@ static int create_synth_event(int argc, char **argv)
                        add_or_delete_synth_event(event, ret);
                }
        }
+       mutex_unlock(&synth_event_mutex);
+       mutex_unlock(&event_mutex);
 
        return ret;
  err:
        mutex_unlock(&synth_event_mutex);
+       mutex_unlock(&event_mutex);
 
        for (i = 0; i < n_fields; i++)
                free_synth_field(fields[i]);
@@ -1127,12 +1127,10 @@ static int create_synth_event(int argc, char **argv)
 
 static int release_all_synth_events(void)
 {
-       struct list_head release_events;
        struct synth_event *event, *e;
        int ret = 0;
 
-       INIT_LIST_HEAD(&release_events);
-
+       mutex_lock(&event_mutex);
        mutex_lock(&synth_event_mutex);
 
        list_for_each_entry(event, &synth_event_list, list) {
@@ -1142,16 +1140,14 @@ static int release_all_synth_events(void)
                }
        }
 
-       list_splice_init(&event->list, &release_events);
-
-       mutex_unlock(&synth_event_mutex);
-
-       list_for_each_entry_safe(event, e, &release_events, list) {
+       list_for_each_entry_safe(event, e, &synth_event_list, list) {
                list_del(&event->list);
 
                ret = unregister_synth_event(event);
                add_or_delete_synth_event(event, !ret);
        }
+       mutex_unlock(&synth_event_mutex);
+       mutex_unlock(&event_mutex);
 
        return ret;
 }