asedeno.scripts.mit.edu Git - linux.git/commitdiff
tracing: Add support for preempt and irq enable/disable events
authorJoel Fernandes <joelaf@google.com>
Tue, 10 Oct 2017 22:51:37 +0000 (15:51 -0700)
committerSteven Rostedt (VMware) <rostedt@goodmis.org>
Tue, 10 Oct 2017 22:58:43 +0000 (18:58 -0400)
Preempt and irq trace events can be used for tracing the start and
end of an atomic section. A trace viewer such as systrace can then
graphically display these sections and correlate them with latencies
and scheduling issues.

This also serves as a prelude to using synthetic events or probes to
rewrite the preempt and irqsoff tracers, along with numerous benefits of
using trace events features for these events.
Link: http://lkml.kernel.org/r/20171006005432.14244-3-joelaf@google.com
Link: http://lkml.kernel.org/r/20171010225137.17370-1-joelaf@google.com
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: kernel-team@android.com
Signed-off-by: Joel Fernandes <joelaf@google.com>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
include/linux/ftrace.h
include/trace/events/preemptirq.h [new file with mode: 0644]
kernel/trace/Kconfig
kernel/trace/Makefile
kernel/trace/trace_irqsoff.c

index 346f8294e40a118f6bedd6ef957deca6dc1d3ee9..1f8545caa691563f7eb6645f79c0b3227d977e00 100644 (file)
@@ -769,7 +769,8 @@ static inline unsigned long get_lock_parent_ip(void)
   static inline void time_hardirqs_off(unsigned long a0, unsigned long a1) { }
 #endif
 
-#ifdef CONFIG_PREEMPT_TRACER
+#if defined(CONFIG_PREEMPT_TRACER) || \
+       (defined(CONFIG_DEBUG_PREEMPT) && defined(CONFIG_PREEMPTIRQ_EVENTS))
   extern void trace_preempt_on(unsigned long a0, unsigned long a1);
   extern void trace_preempt_off(unsigned long a0, unsigned long a1);
 #else
diff --git a/include/trace/events/preemptirq.h b/include/trace/events/preemptirq.h
new file mode 100644 (file)
index 0000000..f5024c5
--- /dev/null
@@ -0,0 +1,70 @@
+#ifdef CONFIG_PREEMPTIRQ_EVENTS
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM preemptirq
+
+#if !defined(_TRACE_PREEMPTIRQ_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_PREEMPTIRQ_H
+
+#include <linux/ktime.h>
+#include <linux/tracepoint.h>
+#include <linux/string.h>
+#include <asm/sections.h>
+
+DECLARE_EVENT_CLASS(preemptirq_template,
+
+       TP_PROTO(unsigned long ip, unsigned long parent_ip),
+
+       TP_ARGS(ip, parent_ip),
+
+       TP_STRUCT__entry(
+               __field(u32, caller_offs)
+               __field(u32, parent_offs)
+       ),
+
+       TP_fast_assign(
+               __entry->caller_offs = (u32)(ip - (unsigned long)_stext);
+               __entry->parent_offs = (u32)(parent_ip - (unsigned long)_stext);
+       ),
+
+       TP_printk("caller=%pF parent=%pF",
+                 (void *)((unsigned long)(_stext) + __entry->caller_offs),
+                 (void *)((unsigned long)(_stext) + __entry->parent_offs))
+);
+
+#ifndef CONFIG_PROVE_LOCKING
+DEFINE_EVENT(preemptirq_template, irq_disable,
+            TP_PROTO(unsigned long ip, unsigned long parent_ip),
+            TP_ARGS(ip, parent_ip));
+
+DEFINE_EVENT(preemptirq_template, irq_enable,
+            TP_PROTO(unsigned long ip, unsigned long parent_ip),
+            TP_ARGS(ip, parent_ip));
+#endif
+
+#ifdef CONFIG_DEBUG_PREEMPT
+DEFINE_EVENT(preemptirq_template, preempt_disable,
+            TP_PROTO(unsigned long ip, unsigned long parent_ip),
+            TP_ARGS(ip, parent_ip));
+
+DEFINE_EVENT(preemptirq_template, preempt_enable,
+            TP_PROTO(unsigned long ip, unsigned long parent_ip),
+            TP_ARGS(ip, parent_ip));
+#endif
+
+#endif /* _TRACE_PREEMPTIRQ_H */
+
+#include <trace/define_trace.h>
+
+#else /* !CONFIG_PREEMPTIRQ_EVENTS */
+
+#define trace_irq_enable(...)
+#define trace_irq_disable(...)
+#define trace_preempt_enable(...)
+#define trace_preempt_disable(...)
+#define trace_irq_enable_rcuidle(...)
+#define trace_irq_disable_rcuidle(...)
+#define trace_preempt_enable_rcuidle(...)
+#define trace_preempt_disable_rcuidle(...)
+
+#endif
index 434c840e2d82f64c127a5c13f1f6aa244544ed43..b8395a02082126516069d160edadedc4542e4090 100644 (file)
@@ -160,6 +160,17 @@ config FUNCTION_GRAPH_TRACER
          address on the current task structure into a stack of calls.
 
 
+config PREEMPTIRQ_EVENTS
+       bool "Enable trace events for preempt and irq disable/enable"
+       select TRACE_IRQFLAGS
+       depends on DEBUG_PREEMPT || !PROVE_LOCKING
+       default n
+       help
+         Enable tracing of disable and enable events for preemption and irqs.
+         For tracing preempt disable/enable events, DEBUG_PREEMPT must be
+         enabled. For tracing irq disable/enable events, PROVE_LOCKING must
+         be disabled.
+
 config IRQSOFF_TRACER
        bool "Interrupts-off Latency Tracer"
        default n
index 90f2701d92a7eee98334f2b10e515b369307df2b..9f62eee61f14fad2ef93d86f764eab2fb19602ae 100644 (file)
@@ -34,6 +34,7 @@ obj-$(CONFIG_TRACING) += trace_printk.o
 obj-$(CONFIG_TRACING_MAP) += tracing_map.o
 obj-$(CONFIG_CONTEXT_SWITCH_TRACER) += trace_sched_switch.o
 obj-$(CONFIG_FUNCTION_TRACER) += trace_functions.o
+obj-$(CONFIG_PREEMPTIRQ_EVENTS) += trace_irqsoff.o
 obj-$(CONFIG_IRQSOFF_TRACER) += trace_irqsoff.o
 obj-$(CONFIG_PREEMPT_TRACER) += trace_irqsoff.o
 obj-$(CONFIG_SCHED_TRACER) += trace_sched_wakeup.o
index 0e3033c00474bd5d45f3b6e08c8214eb7ffbeb39..03ecb4465ee4587290e0474143f425f892771140 100644 (file)
@@ -16,6 +16,9 @@
 
 #include "trace.h"
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/preemptirq.h>
+
 #if defined(CONFIG_IRQSOFF_TRACER) || defined(CONFIG_PREEMPT_TRACER)
 static struct trace_array              *irqsoff_trace __read_mostly;
 static int                             tracer_enabled __read_mostly;
@@ -777,26 +780,53 @@ static inline void tracer_preempt_off(unsigned long a0, unsigned long a1) { }
 #endif
 
 #if defined(CONFIG_TRACE_IRQFLAGS) && !defined(CONFIG_PROVE_LOCKING)
+/* Per-cpu variable to prevent redundant calls when IRQs already off */
+static DEFINE_PER_CPU(int, tracing_irq_cpu);
+
 void trace_hardirqs_on(void)
 {
+       if (!this_cpu_read(tracing_irq_cpu))
+               return;
+
+       trace_irq_enable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
        tracer_hardirqs_on();
+
+       this_cpu_write(tracing_irq_cpu, 0);
 }
 EXPORT_SYMBOL(trace_hardirqs_on);
 
 void trace_hardirqs_off(void)
 {
+       if (this_cpu_read(tracing_irq_cpu))
+               return;
+
+       this_cpu_write(tracing_irq_cpu, 1);
+
+       trace_irq_disable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
        tracer_hardirqs_off();
 }
 EXPORT_SYMBOL(trace_hardirqs_off);
 
 __visible void trace_hardirqs_on_caller(unsigned long caller_addr)
 {
+       if (!this_cpu_read(tracing_irq_cpu))
+               return;
+
+       trace_irq_enable_rcuidle(CALLER_ADDR0, caller_addr);
        tracer_hardirqs_on_caller(caller_addr);
+
+       this_cpu_write(tracing_irq_cpu, 0);
 }
 EXPORT_SYMBOL(trace_hardirqs_on_caller);
 
 __visible void trace_hardirqs_off_caller(unsigned long caller_addr)
 {
+       if (this_cpu_read(tracing_irq_cpu))
+               return;
+
+       this_cpu_write(tracing_irq_cpu, 1);
+
+       trace_irq_disable_rcuidle(CALLER_ADDR0, caller_addr);
        tracer_hardirqs_off_caller(caller_addr);
 }
 EXPORT_SYMBOL(trace_hardirqs_off_caller);
@@ -818,14 +848,17 @@ inline void print_irqtrace_events(struct task_struct *curr)
 }
 #endif
 
-#ifdef CONFIG_PREEMPT_TRACER
+#if defined(CONFIG_PREEMPT_TRACER) || \
+       (defined(CONFIG_DEBUG_PREEMPT) && defined(CONFIG_PREEMPTIRQ_EVENTS))
 void trace_preempt_on(unsigned long a0, unsigned long a1)
 {
+       trace_preempt_enable_rcuidle(a0, a1);
        tracer_preempt_on(a0, a1);
 }
 
 void trace_preempt_off(unsigned long a0, unsigned long a1)
 {
+       trace_preempt_disable_rcuidle(a0, a1);
        tracer_preempt_off(a0, a1);
 }
 #endif