tracing: Add trace_total_entries() / trace_total_entries_cpu()
kernel/trace/trace.c
index c4238b441624415cfd6113bb36ea5d71b6eaaa54..dcb9adb44be9d2198058c1f03bce683191dbb961 100644
@@ -894,7 +894,7 @@ int __trace_bputs(unsigned long ip, const char *str)
 EXPORT_SYMBOL_GPL(__trace_bputs);
 
 #ifdef CONFIG_TRACER_SNAPSHOT
-void tracing_snapshot_instance(struct trace_array *tr)
+void tracing_snapshot_instance_cond(struct trace_array *tr, void *cond_data)
 {
        struct tracer *tracer = tr->current_trace;
        unsigned long flags;
@@ -920,10 +920,15 @@ void tracing_snapshot_instance(struct trace_array *tr)
        }
 
        local_irq_save(flags);
-       update_max_tr(tr, current, smp_processor_id());
+       update_max_tr(tr, current, smp_processor_id(), cond_data);
        local_irq_restore(flags);
 }
 
+void tracing_snapshot_instance(struct trace_array *tr)
+{
+       tracing_snapshot_instance_cond(tr, NULL);
+}
+
 /**
  * tracing_snapshot - take a snapshot of the current buffer.
  *
@@ -946,6 +951,54 @@ void tracing_snapshot(void)
 }
 EXPORT_SYMBOL_GPL(tracing_snapshot);
 
+/**
+ * tracing_snapshot_cond - conditionally take a snapshot of the current buffer.
+ * @tr:                The tracing instance to snapshot
+ * @cond_data: The data to be tested conditionally, and possibly saved
+ *
+ * This is the same as tracing_snapshot() except that the snapshot is
+ * conditional - the snapshot will only be taken if the
+ * cond_snapshot.update() implementation receiving the cond_data
+ * returns true.  In that case the trace array's cond_snapshot
+ * update() operation used the cond_data to decide whether the
+ * snapshot should be taken and, if it was, presumably saved the
+ * cond_data along with the snapshot.
+ */
+void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
+{
+       tracing_snapshot_instance_cond(tr, cond_data);
+}
+EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
+
+/**
+ * tracing_cond_snapshot_data - get the user data associated with a snapshot
+ * @tr:                The tracing instance
+ *
+ * When the user enables a conditional snapshot using
+ * tracing_snapshot_cond_enable(), the user-defined cond_data is saved
+ * with the snapshot.  This accessor is used to retrieve it.
+ *
+ * Should not be called from cond_snapshot.update(), since it takes
+ * the tr->max_lock, which the code calling
+ * cond_snapshot.update() has already taken.
+ *
+ * Returns the cond_data associated with the trace array's snapshot.
+ */
+void *tracing_cond_snapshot_data(struct trace_array *tr)
+{
+       void *cond_data = NULL;
+
+       arch_spin_lock(&tr->max_lock);
+
+       if (tr->cond_snapshot)
+               cond_data = tr->cond_snapshot->cond_data;
+
+       arch_spin_unlock(&tr->max_lock);
+
+       return cond_data;
+}
+EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
+
 static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
                                        struct trace_buffer *size_buf, int cpu_id);
 static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);
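
As a rough usage sketch (not part of the diff): once a conditional snapshot has been enabled on an instance with tracing_snapshot_cond_enable() (added further down in this change), instrumentation code can request a snapshot and a reader can later fetch the registered cond_data. Everything prefixed my_ below is hypothetical, and linux/kernel.h is assumed to carry the declarations.

#include <linux/kernel.h>	/* tracing_snapshot_cond*() declarations (assumed) */

static struct trace_array *my_instance;	/* hypothetical, set up elsewhere */

static void my_hot_path(u64 latency)
{
	/*
	 * 'latency' is handed to the registered cond_snapshot.update()
	 * callback, which decides whether the buffers get swapped.
	 */
	tracing_snapshot_cond(my_instance, &latency);
}

static void my_report(void)
{
	/*
	 * Returns the cond_data registered at enable time (which the
	 * update() callback may have filled in).  Must not be called
	 * from update() itself, since tr->max_lock is taken here.
	 */
	u64 *worst = tracing_cond_snapshot_data(my_instance);

	if (worst)
		pr_info("worst latency snapshotted: %llu\n", *worst);
}
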
@@ -1025,12 +1078,111 @@ void tracing_snapshot_alloc(void)
        tracing_snapshot();
 }
 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
+
+/**
+ * tracing_snapshot_cond_enable - enable conditional snapshot for an instance
+ * @tr:                The tracing instance
+ * @cond_data: User data to associate with the snapshot
+ * @update:    Implementation of the cond_snapshot update function
+ *
+ * Check whether the conditional snapshot for the given instance has
+ * already been enabled, or if the current tracer is already using a
+ * snapshot; if so, return -EBUSY, else create a cond_snapshot and
+ * save the cond_data and update function inside.
+ *
+ * Returns 0 if successful, error otherwise.
+ */
+int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data,
+                                cond_update_fn_t update)
+{
+       struct cond_snapshot *cond_snapshot;
+       int ret = 0;
+
+       cond_snapshot = kzalloc(sizeof(*cond_snapshot), GFP_KERNEL);
+       if (!cond_snapshot)
+               return -ENOMEM;
+
+       cond_snapshot->cond_data = cond_data;
+       cond_snapshot->update = update;
+
+       mutex_lock(&trace_types_lock);
+
+       ret = tracing_alloc_snapshot_instance(tr);
+       if (ret)
+               goto fail_unlock;
+
+       if (tr->current_trace->use_max_tr) {
+               ret = -EBUSY;
+               goto fail_unlock;
+       }
+
+       /*
+        * The cond_snapshot can only change to NULL without the
+        * trace_types_lock. We don't care if we race with it going
+        * to NULL, but we want to make sure that it's not set to
+        * something other than NULL when we get here, which we can
+        * do safely with only holding the trace_types_lock and not
+        * having to take the max_lock.
+        */
+       if (tr->cond_snapshot) {
+               ret = -EBUSY;
+               goto fail_unlock;
+       }
+
+       arch_spin_lock(&tr->max_lock);
+       tr->cond_snapshot = cond_snapshot;
+       arch_spin_unlock(&tr->max_lock);
+
+       mutex_unlock(&trace_types_lock);
+
+       return ret;
+
+ fail_unlock:
+       mutex_unlock(&trace_types_lock);
+       kfree(cond_snapshot);
+       return ret;
+}
+EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
+
+/**
+ * tracing_snapshot_cond_disable - disable conditional snapshot for an instance
+ * @tr:                The tracing instance
+ *
+ * Check whether the conditional snapshot for the given instance is
+ * enabled; if so, free the cond_snapshot associated with it,
+ * otherwise return -EINVAL.
+ *
+ * Returns 0 if successful, error otherwise.
+ */
+int tracing_snapshot_cond_disable(struct trace_array *tr)
+{
+       int ret = 0;
+
+       arch_spin_lock(&tr->max_lock);
+
+       if (!tr->cond_snapshot)
+               ret = -EINVAL;
+       else {
+               kfree(tr->cond_snapshot);
+               tr->cond_snapshot = NULL;
+       }
+
+       arch_spin_unlock(&tr->max_lock);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
 #else
 void tracing_snapshot(void)
 {
        WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
 }
 EXPORT_SYMBOL_GPL(tracing_snapshot);
+void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
+{
+       WARN_ONCE(1, "Snapshot feature not enabled, but internal conditional snapshot used");
+}
+EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
 int tracing_alloc_snapshot(void)
 {
        WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
@@ -1043,6 +1195,21 @@ void tracing_snapshot_alloc(void)
        tracing_snapshot();
 }
 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
+void *tracing_cond_snapshot_data(struct trace_array *tr)
+{
+       return NULL;
+}
+EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
+int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update)
+{
+       return -ENODEV;
+}
+EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
+int tracing_snapshot_cond_disable(struct trace_array *tr)
+{
+       return false;
+}
+EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
 #endif /* CONFIG_TRACER_SNAPSHOT */
 
 void tracer_tracing_off(struct trace_array *tr)
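
Tying the enable/disable API together, here is a minimal, hypothetical sketch (not part of the diff). The update() callback runs from update_max_tr() with tr->max_lock already held, so it reads tr->cond_snapshot->cond_data directly rather than calling tracing_cond_snapshot_data(); dereferencing tr->cond_snapshot this way is only possible for code that sees the private kernel/trace/trace.h definitions.

/* Registered cond_data: remembers the worst value snapshotted so far. */
static u64 my_worst_latency;

/* cond_update_fn_t: return true to let update_max_tr() swap the buffers. */
static bool my_update(struct trace_array *tr, void *cond_data)
{
	u64 latency = *(u64 *)cond_data;	   /* value passed to tracing_snapshot_cond() */
	u64 *worst = tr->cond_snapshot->cond_data; /* safe: max_lock is held here */

	if (latency <= *worst)
		return false;

	*worst = latency;	/* "saved along with the snapshot" */
	return true;
}

static int my_enable(struct trace_array *tr)
{
	/* Returns -EBUSY if a cond snapshot is active or the tracer uses max_tr. */
	return tracing_snapshot_cond_enable(tr, &my_worst_latency, my_update);
}

static void my_disable(struct trace_array *tr)
{
	tracing_snapshot_cond_disable(tr);	/* frees the cond_snapshot */
}
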
@@ -1330,7 +1497,7 @@ __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
        max_data->critical_start = data->critical_start;
        max_data->critical_end = data->critical_end;
 
-       memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
+       strncpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
        max_data->pid = tsk->pid;
        /*
         * If tsk == current, then use current_uid(), as that does not use
@@ -1354,12 +1521,14 @@ __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
  * @tr: tracer
  * @tsk: the task with the latency
  * @cpu: The cpu that initiated the trace.
+ * @cond_data: User data associated with a conditional snapshot
  *
  * Flip the buffers between the @tr and the max_tr and record information
  * about which task was the cause of this latency.
  */
 void
-update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
+update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
+             void *cond_data)
 {
        if (tr->stop_count)
                return;
@@ -1380,9 +1549,15 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
        else
                ring_buffer_record_off(tr->max_buffer.buffer);
 
+#ifdef CONFIG_TRACER_SNAPSHOT
+       if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data))
+               goto out_unlock;
+#endif
        swap(tr->trace_buffer.buffer, tr->max_buffer.buffer);
 
        __update_max_tr(tr, tsk, cpu);
+
+ out_unlock:
        arch_spin_unlock(&tr->max_lock);
 }
 
@@ -1748,7 +1923,7 @@ static inline char *get_saved_cmdlines(int idx)
 
 static inline void set_cmdline(int idx, const char *cmdline)
 {
-       memcpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
+       strncpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
 }
 
 static int allocate_cmdlines_buffer(unsigned int val,
@@ -2878,6 +3053,7 @@ void trace_printk_init_buffers(void)
        if (global_trace.trace_buffer.buffer)
                tracing_start_cmdline_record();
 }
+EXPORT_SYMBOL_GPL(trace_printk_init_buffers);
 
 void trace_printk_start_comm(void)
 {
@@ -3038,6 +3214,7 @@ int trace_array_printk(struct trace_array *tr,
        va_end(ap);
        return ret;
 }
+EXPORT_SYMBOL_GPL(trace_array_printk);
 
 __printf(3, 4)
 int trace_array_printk_buf(struct ring_buffer *buffer,
@@ -3315,34 +3492,69 @@ static void s_stop(struct seq_file *m, void *p)
        trace_event_read_unlock();
 }
 
+static void
+get_total_entries_cpu(struct trace_buffer *buf, unsigned long *total,
+                     unsigned long *entries, int cpu)
+{
+       unsigned long count;
+
+       count = ring_buffer_entries_cpu(buf->buffer, cpu);
+       /*
+        * If this buffer has skipped entries, then we hold all
+        * entries for the trace and we need to ignore the
+        * ones before the time stamp.
+        */
+       if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
+               count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
+               /* total is the same as the entries */
+               *total = count;
+       } else
+               *total = count +
+                       ring_buffer_overrun_cpu(buf->buffer, cpu);
+       *entries = count;
+}
+
 static void
 get_total_entries(struct trace_buffer *buf,
                  unsigned long *total, unsigned long *entries)
 {
-       unsigned long count;
+       unsigned long t, e;
        int cpu;
 
        *total = 0;
        *entries = 0;
 
        for_each_tracing_cpu(cpu) {
-               count = ring_buffer_entries_cpu(buf->buffer, cpu);
-               /*
-                * If this buffer has skipped entries, then we hold all
-                * entries for the trace and we need to ignore the
-                * ones before the time stamp.
-                */
-               if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
-                       count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
-                       /* total is the same as the entries */
-                       *total += count;
-               } else
-                       *total += count +
-                               ring_buffer_overrun_cpu(buf->buffer, cpu);
-               *entries += count;
+               get_total_entries_cpu(buf, &t, &e, cpu);
+               *total += t;
+               *entries += e;
        }
 }
 
+unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu)
+{
+       unsigned long total, entries;
+
+       if (!tr)
+               tr = &global_trace;
+
+       get_total_entries_cpu(&tr->trace_buffer, &total, &entries, cpu);
+
+       return entries;
+}
+
+unsigned long trace_total_entries(struct trace_array *tr)
+{
+       unsigned long total, entries;
+
+       if (!tr)
+               tr = &global_trace;
+
+       get_total_entries(&tr->trace_buffer, &total, &entries);
+
+       return entries;
+}
+
 static void print_lat_help_header(struct seq_file *m)
 {
        seq_puts(m, "#                  _------=> CPU#            \n"
@@ -3904,7 +4116,8 @@ __tracing_open(struct inode *inode, struct file *file, bool snapshot)
        if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
                for_each_tracing_cpu(cpu) {
                        iter->buffer_iter[cpu] =
-                               ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
+                               ring_buffer_read_prepare(iter->trace_buffer->buffer,
+                                                        cpu, GFP_KERNEL);
                }
                ring_buffer_read_prepare_sync();
                for_each_tracing_cpu(cpu) {
@@ -3914,7 +4127,8 @@ __tracing_open(struct inode *inode, struct file *file, bool snapshot)
        } else {
                cpu = iter->cpu_file;
                iter->buffer_iter[cpu] =
-                       ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
+                       ring_buffer_read_prepare(iter->trace_buffer->buffer,
+                                                cpu, GFP_KERNEL);
                ring_buffer_read_prepare_sync();
                ring_buffer_read_start(iter->buffer_iter[cpu]);
                tracing_iter_reset(iter, cpu);
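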
@@ -4523,6 +4737,7 @@ static const char readme_msg[] =
        "  trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
        "  current_tracer\t- function and latency tracers\n"
        "  available_tracers\t- list of configured tracers for current_tracer\n"
+       "  error_log\t- error log for failed commands (that support it)\n"
        "  buffer_size_kb\t- view and modify size of per cpu buffer\n"
        "  buffer_total_size_kb  - view total size of all cpu buffers\n\n"
        "  trace_clock\t\t-change the clock used to order events\n"
@@ -4702,6 +4917,7 @@ static const char readme_msg[] =
        "\t            [:size=#entries]\n"
        "\t            [:pause][:continue][:clear]\n"
        "\t            [:name=histname1]\n"
+       "\t            [:<handler>.<action>]\n"
        "\t            [if <filter>]\n\n"
        "\t    When a matching event is hit, an entry is added to a hash\n"
        "\t    table using the key(s) and value(s) named, and the value of a\n"
@@ -4742,8 +4958,21 @@ static const char readme_msg[] =
        "\t    unchanged.\n\n"
        "\t    The enable_hist and disable_hist triggers can be used to\n"
        "\t    have one event conditionally start and stop another event's\n"
-       "\t    already-attached hist trigger.  The syntax is analagous to\n"
-       "\t    the enable_event and disable_event triggers.\n"
+       "\t    already-attached hist trigger.  The syntax is analogous to\n"
+       "\t    the enable_event and disable_event triggers.\n\n"
+       "\t    Hist trigger handlers and actions are executed whenever a\n"
+       "\t    a histogram entry is added or updated.  They take the form:\n\n"
+       "\t        <handler>.<action>\n\n"
+       "\t    The available handlers are:\n\n"
+       "\t        onmatch(matching.event)  - invoke on addition or update\n"
+       "\t        onmax(var)               - invoke if var exceeds current max\n"
+       "\t        onchange(var)            - invoke action if var changes\n\n"
+       "\t    The available actions are:\n\n"
+       "\t        trace(<synthetic_event>,param list)  - generate synthetic event\n"
+       "\t        save(field,...)                      - save current event fields\n"
+#ifdef CONFIG_TRACER_SNAPSHOT
+       "\t        snapshot()                           - snapshot the trace buffer\n"
+#endif
 #endif
 ;
 
@@ -5388,6 +5617,16 @@ static int tracing_set_tracer(struct trace_array *tr, const char *buf)
        if (t == tr->current_trace)
                goto out;
 
+#ifdef CONFIG_TRACER_SNAPSHOT
+       if (t->use_max_tr) {
+               arch_spin_lock(&tr->max_lock);
+               if (tr->cond_snapshot)
+                       ret = -EBUSY;
+               arch_spin_unlock(&tr->max_lock);
+               if (ret)
+                       goto out;
+       }
+#endif
        /* Some tracers won't work on kernel command line */
        if (system_state < SYSTEM_RUNNING && t->noboot) {
                pr_warn("Tracer '%s' is not allowed on command line, ignored\n",
@@ -5626,7 +5865,6 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
        return ret;
 
 fail:
-       kfree(iter->trace);
        kfree(iter);
        __trace_array_put(tr);
        mutex_unlock(&trace_types_lock);
@@ -5825,7 +6063,6 @@ static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
 }
 
 static const struct pipe_buf_operations tracing_pipe_buf_ops = {
-       .can_merge              = 0,
        .confirm                = generic_pipe_buf_confirm,
        .release                = generic_pipe_buf_release,
        .steal                  = generic_pipe_buf_steal,
@@ -6470,6 +6707,13 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
                goto out;
        }
 
+       arch_spin_lock(&tr->max_lock);
+       if (tr->cond_snapshot)
+               ret = -EBUSY;
+       arch_spin_unlock(&tr->max_lock);
+       if (ret)
+               goto out;
+
        switch (val) {
        case 0:
                if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
@@ -6495,7 +6739,7 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
                local_irq_disable();
                /* Now, we're going to swap */
                if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
-                       update_max_tr(tr, current, smp_processor_id());
+                       update_max_tr(tr, current, smp_processor_id(), NULL);
                else
                        update_max_tr_single(tr, current, iter->cpu_file);
                local_irq_enable();
@@ -6670,6 +6914,238 @@ static const struct file_operations snapshot_raw_fops = {
 
 #endif /* CONFIG_TRACER_SNAPSHOT */
 
+#define TRACING_LOG_ERRS_MAX   8
+#define TRACING_LOG_LOC_MAX    128
+
+#define CMD_PREFIX "  Command: "
+
+struct err_info {
+       const char      **errs; /* ptr to loc-specific array of err strings */
+       u8              type;   /* index into errs -> specific err string */
+       u8              pos;    /* MAX_FILTER_STR_VAL = 256 */
+       u64             ts;
+};
+
+struct tracing_log_err {
+       struct list_head        list;
+       struct err_info         info;
+       char                    loc[TRACING_LOG_LOC_MAX]; /* err location */
+       char                    cmd[MAX_FILTER_STR_VAL]; /* what caused err */
+};
+
+static DEFINE_MUTEX(tracing_err_log_lock);
+
+static struct tracing_log_err *get_tracing_log_err(struct trace_array *tr)
+{
+       struct tracing_log_err *err;
+
+       if (tr->n_err_log_entries < TRACING_LOG_ERRS_MAX) {
+               err = kzalloc(sizeof(*err), GFP_KERNEL);
+               if (!err)
+                       err = ERR_PTR(-ENOMEM);
+               tr->n_err_log_entries++;
+
+               return err;
+       }
+
+       err = list_first_entry(&tr->err_log, struct tracing_log_err, list);
+       list_del(&err->list);
+
+       return err;
+}
+
+/**
+ * err_pos - find the position of a string within a command for error careting
+ * @cmd: The tracing command that caused the error
+ * @str: The string to position the caret at within @cmd
+ *
+ * Finds the position of the first occurrence of @str within @cmd.  The
+ * return value can be passed to tracing_log_err() for caret placement
+ * within @cmd.
+ *
+ * Returns the index within @cmd of the first occurrence of @str or 0
+ * if @str was not found.
+ */
+unsigned int err_pos(char *cmd, const char *str)
+{
+       char *found;
+
+       if (WARN_ON(!strlen(cmd)))
+               return 0;
+
+       found = strstr(cmd, str);
+       if (found)
+               return found - cmd;
+
+       return 0;
+}
+
+/**
+ * tracing_log_err - write an error to the tracing error log
+ * @tr: The associated trace array for the error (NULL for top level array)
+ * @loc: A string describing where the error occurred
+ * @cmd: The tracing command that caused the error
+ * @errs: The array of loc-specific static error strings
+ * @type: The index into errs[], which produces the specific static err string
+ * @pos: The position the caret should be placed in the cmd
+ *
+ * Writes an error into tracing/error_log of the form:
+ *
+ * <loc>: error: <text>
+ *   Command: <cmd>
+ *              ^
+ *
+ * tracing/error_log is a small log file containing the last
+ * TRACING_LOG_ERRS_MAX errors (8).  Memory for errors isn't allocated
+ * unless there has been a tracing error, and the error log can be
+ * cleared and have its memory freed by writing the empty string in
+ * truncation mode to it, i.e. echo > tracing/error_log.
+ *
+ * NOTE: the @errs array along with the @type param are used to
+ * produce a static error string - this string is not copied and saved
+ * when the error is logged - only a pointer to it is saved.  See
+ * existing callers for examples of how static strings are typically
+ * defined for use with tracing_log_err().
+ */
+void tracing_log_err(struct trace_array *tr,
+                    const char *loc, const char *cmd,
+                    const char **errs, u8 type, u8 pos)
+{
+       struct tracing_log_err *err;
+
+       if (!tr)
+               tr = &global_trace;
+
+       mutex_lock(&tracing_err_log_lock);
+       err = get_tracing_log_err(tr);
+       if (PTR_ERR(err) == -ENOMEM) {
+               mutex_unlock(&tracing_err_log_lock);
+               return;
+       }
+
+       snprintf(err->loc, TRACING_LOG_LOC_MAX, "%s: error: ", loc);
+       snprintf(err->cmd, MAX_FILTER_STR_VAL, "\n" CMD_PREFIX "%s\n", cmd);
+
+       err->info.errs = errs;
+       err->info.type = type;
+       err->info.pos = pos;
+       err->info.ts = local_clock();
+
+       list_add_tail(&err->list, &tr->err_log);
+       mutex_unlock(&tracing_err_log_lock);
+}
+
+static void clear_tracing_err_log(struct trace_array *tr)
+{
+       struct tracing_log_err *err, *next;
+
+       mutex_lock(&tracing_err_log_lock);
+       list_for_each_entry_safe(err, next, &tr->err_log, list) {
+               list_del(&err->list);
+               kfree(err);
+       }
+
+       tr->n_err_log_entries = 0;
+       mutex_unlock(&tracing_err_log_lock);
+}
+
+static void *tracing_err_log_seq_start(struct seq_file *m, loff_t *pos)
+{
+       struct trace_array *tr = m->private;
+
+       mutex_lock(&tracing_err_log_lock);
+
+       return seq_list_start(&tr->err_log, *pos);
+}
+
+static void *tracing_err_log_seq_next(struct seq_file *m, void *v, loff_t *pos)
+{
+       struct trace_array *tr = m->private;
+
+       return seq_list_next(v, &tr->err_log, pos);
+}
+
+static void tracing_err_log_seq_stop(struct seq_file *m, void *v)
+{
+       mutex_unlock(&tracing_err_log_lock);
+}
+
+static void tracing_err_log_show_pos(struct seq_file *m, u8 pos)
+{
+       u8 i;
+
+       for (i = 0; i < sizeof(CMD_PREFIX) - 1; i++)
+               seq_putc(m, ' ');
+       for (i = 0; i < pos; i++)
+               seq_putc(m, ' ');
+       seq_puts(m, "^\n");
+}
+
+static int tracing_err_log_seq_show(struct seq_file *m, void *v)
+{
+       struct tracing_log_err *err = v;
+
+       if (err) {
+               const char *err_text = err->info.errs[err->info.type];
+               u64 sec = err->info.ts;
+               u32 nsec;
+
+               nsec = do_div(sec, NSEC_PER_SEC);
+               seq_printf(m, "[%5llu.%06u] %s%s", sec, nsec / 1000,
+                          err->loc, err_text);
+               seq_printf(m, "%s", err->cmd);
+               tracing_err_log_show_pos(m, err->info.pos);
+       }
+
+       return 0;
+}
+
+static const struct seq_operations tracing_err_log_seq_ops = {
+       .start  = tracing_err_log_seq_start,
+       .next   = tracing_err_log_seq_next,
+       .stop   = tracing_err_log_seq_stop,
+       .show   = tracing_err_log_seq_show
+};
+
+static int tracing_err_log_open(struct inode *inode, struct file *file)
+{
+       struct trace_array *tr = inode->i_private;
+       int ret = 0;
+
+       if (trace_array_get(tr) < 0)
+               return -ENODEV;
+
+       /* If this file was opened for write, then erase contents */
+       if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC))
+               clear_tracing_err_log(tr);
+
+       if (file->f_mode & FMODE_READ) {
+               ret = seq_open(file, &tracing_err_log_seq_ops);
+               if (!ret) {
+                       struct seq_file *m = file->private_data;
+                       m->private = tr;
+               } else {
+                       trace_array_put(tr);
+               }
+       }
+       return ret;
+}
+
+static ssize_t tracing_err_log_write(struct file *file,
+                                    const char __user *buffer,
+                                    size_t count, loff_t *ppos)
+{
+       return count;
+}
+
+static const struct file_operations tracing_err_log_fops = {
+       .open           = tracing_err_log_open,
+       .write          = tracing_err_log_write,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = tracing_release_generic_tr,
+};
+
 static int tracing_buffers_open(struct inode *inode, struct file *filp)
 {
        struct trace_array *tr = inode->i_private;
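
For the error_log machinery above, a hedged sketch of how a tracing command parser might use err_pos() and tracing_log_err(). The error table, enum, and validity check are hypothetical, and the declarations live in the private kernel/trace/trace.h.

#include <linux/string.h>	/* strncmp */

/* Hypothetical loc-specific error strings; @type indexes this array. */
static const char *my_cmd_errs[] = {
	"Unknown field",	/* MY_ERR_BAD_FIELD */
	"Duplicate key",	/* MY_ERR_DUP_KEY   */
};
enum { MY_ERR_BAD_FIELD, MY_ERR_DUP_KEY };

static int my_parse_field(struct trace_array *tr, char *cmd, char *field)
{
	if (strncmp(field, "my_", 3) != 0) {	/* stand-in validity check */
		/*
		 * Appears in tracing/error_log as:
		 *   [ts] my_cmd: error: Unknown field
		 *     Command: <cmd>
		 *                 ^   (caret under 'field' within cmd)
		 */
		tracing_log_err(tr, "my_cmd", cmd, my_cmd_errs,
				MY_ERR_BAD_FIELD, err_pos(cmd, field));
		return -EINVAL;
	}

	return 0;
}
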
@@ -6845,7 +7321,6 @@ static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
 
 /* Pipe buffer operations for a buffer. */
 static const struct pipe_buf_operations buffer_pipe_buf_ops = {
-       .can_merge              = 0,
        .confirm                = generic_pipe_buf_confirm,
        .release                = buffer_pipe_buf_release,
        .steal                  = generic_pipe_buf_steal,
@@ -7832,7 +8307,7 @@ static void update_tracer_options(struct trace_array *tr)
        mutex_unlock(&trace_types_lock);
 }
 
-static int instance_mkdir(const char *name)
+struct trace_array *trace_array_create(const char *name)
 {
        struct trace_array *tr;
        int ret;
@@ -7871,6 +8346,7 @@ static int instance_mkdir(const char *name)
        INIT_LIST_HEAD(&tr->systems);
        INIT_LIST_HEAD(&tr->events);
        INIT_LIST_HEAD(&tr->hist_vars);
+       INIT_LIST_HEAD(&tr->err_log);
 
        if (allocate_trace_buffers(tr, trace_buf_size) < 0)
                goto out_free_tr;
@@ -7896,7 +8372,7 @@ static int instance_mkdir(const char *name)
        mutex_unlock(&trace_types_lock);
        mutex_unlock(&event_mutex);
 
-       return 0;
+       return tr;
 
  out_free_tr:
        free_trace_buffers(tr);
@@ -7908,33 +8384,21 @@ static int instance_mkdir(const char *name)
        mutex_unlock(&trace_types_lock);
        mutex_unlock(&event_mutex);
 
-       return ret;
+       return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(trace_array_create);
 
+static int instance_mkdir(const char *name)
+{
+       return PTR_ERR_OR_ZERO(trace_array_create(name));
 }
 
-static int instance_rmdir(const char *name)
+static int __remove_instance(struct trace_array *tr)
 {
-       struct trace_array *tr;
-       int found = 0;
-       int ret;
        int i;
 
-       mutex_lock(&event_mutex);
-       mutex_lock(&trace_types_lock);
-
-       ret = -ENODEV;
-       list_for_each_entry(tr, &ftrace_trace_arrays, list) {
-               if (tr->name && strcmp(tr->name, name) == 0) {
-                       found = 1;
-                       break;
-               }
-       }
-       if (!found)
-               goto out_unlock;
-
-       ret = -EBUSY;
        if (tr->ref || (tr->current_trace && tr->current_trace->ref))
-               goto out_unlock;
+               return -EBUSY;
 
        list_del(&tr->list);
 
@@ -7960,10 +8424,46 @@ static int instance_rmdir(const char *name)
        free_cpumask_var(tr->tracing_cpumask);
        kfree(tr->name);
        kfree(tr);
+       tr = NULL;
 
-       ret = 0;
+       return 0;
+}
+
+int trace_array_destroy(struct trace_array *tr)
+{
+       int ret;
+
+       if (!tr)
+               return -EINVAL;
+
+       mutex_lock(&event_mutex);
+       mutex_lock(&trace_types_lock);
+
+       ret = __remove_instance(tr);
+
+       mutex_unlock(&trace_types_lock);
+       mutex_unlock(&event_mutex);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(trace_array_destroy);
+
+static int instance_rmdir(const char *name)
+{
+       struct trace_array *tr;
+       int ret;
+
+       mutex_lock(&event_mutex);
+       mutex_lock(&trace_types_lock);
+
+       ret = -ENODEV;
+       list_for_each_entry(tr, &ftrace_trace_arrays, list) {
+               if (tr->name && strcmp(tr->name, name) == 0) {
+                       ret = __remove_instance(tr);
+                       break;
+               }
+       }
 
- out_unlock:
        mutex_unlock(&trace_types_lock);
        mutex_unlock(&event_mutex);
 
@@ -8053,6 +8553,9 @@ init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
                          tr, &snapshot_fops);
 #endif
 
+       trace_create_file("error_log", 0644, d_tracer,
+                         tr, &tracing_err_log_fops);
+
        for_each_tracing_cpu(cpu)
                tracing_init_tracefs_percpu(tr, cpu);
 
@@ -8638,6 +9141,7 @@ __init static int tracer_alloc_buffers(void)
        INIT_LIST_HEAD(&global_trace.systems);
        INIT_LIST_HEAD(&global_trace.events);
        INIT_LIST_HEAD(&global_trace.hist_vars);
+       INIT_LIST_HEAD(&global_trace.err_log);
        list_add(&global_trace.list, &ftrace_trace_arrays);
 
        apply_trace_boot_options();