author     Steven Rostedt (Red Hat)    2013-03-15 12:10:35 -0500
committer  Greg Kroah-Hartman          2013-05-11 15:54:09 -0500
commit     bbc666f0002eb6c97609e59af39fca010ecf8ddd (patch)
tree       5a81506403d9a053d830e14e8d53a75c74795c97
parent     d7a0a30e6fcba4c014fedfdb56e19837f8030ea9 (diff)
tracing: Fix ftrace_dump()
commit 7fe70b579c9e3daba71635e31b6189394e7b79d3 upstream.

ftrace_dump() had a lot of issues. What ftrace_dump() does is, when ftrace_dump_on_oops is set (via a kernel parameter or sysctl), dump the ftrace buffers to the console when an oops, a panic, or a sysrq-z occurs.

This was written a long time ago, when ftrace was fragile with respect to recursion, but it wasn't written well even for that.

There's a possible deadlock if a ftrace_dump() is in progress and an NMI triggers another dump: the code grabs a lock before checking whether a dump has already run. It also totally disables ftrace, and tracing, for no good reason.

As the ring buffer now checks whether it is being read from an oops or NMI context, where there's a chance the buffer gets corrupted, it will disable itself. There is no need for ftrace_dump() to do the same.

ftrace_dump() is now cleaned up to use an atomic counter to make sure only one dump happens at a time. A simple atomic_inc_return() is all that is needed, and it covers both other CPUs and NMIs. No spinlock is required: if one CPU is running the dump, no other CPU needs to do it too.

The tracing_on variable is turned off and not turned back on. The original code did this, but it wasn't pretty. By just disabling this variable we avoid seeing traces of what happens between crashes.

For sysrq-z, tracing doesn't get turned back on, but the user can always write a '1' to the tracing_on file. If they are using sysrq-z, they should know about tracing_on.

The new code is much easier to read and less error prone. There is no longer any deadlock possibility when an NMI triggers here.

Reported-by: zhangwei(Jovi) <jovi.zhangwei@huawei.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
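The core of the fix is the lock-free "only one dump at a time" guard built from an atomic counter. Below is a minimal, self-contained userspace sketch of that pattern; it uses C11 <stdatomic.h> in place of the kernel's atomic_t and atomic_inc_return(), and the names dump_running and do_dump() are illustrative only, not kernel APIs.

#include <stdatomic.h>
#include <stdio.h>

static atomic_int dump_running;

static void do_dump(void)
{
	/*
	 * The first caller sees the old value 0 and proceeds; any concurrent
	 * caller (another CPU, or an NMI interrupting the dump) sees a
	 * non-zero value, backs the counter out, and returns. No lock is
	 * taken, so a nested/NMI caller can never deadlock here.
	 */
	if (atomic_fetch_add(&dump_running, 1) != 0) {
		atomic_fetch_sub(&dump_running, 1);
		return;
	}

	printf("dumping trace buffers...\n");

	/* Drop the guard so a later crash can dump again. */
	atomic_fetch_sub(&dump_running, 1);
}

int main(void)
{
	do_dump();
	return 0;
}

(The kernel code uses atomic_inc_return(), which returns the new value and so compares against 1; C11's atomic_fetch_add() returns the old value, hence the comparison against 0 above.)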
-rw-r--r--  kernel/trace/trace.c           | 62
-rw-r--r--  kernel/trace/trace_selftest.c  |  9
2 files changed, 31 insertions(+), 40 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 1c8285240152..55a9d0501ee7 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -5020,36 +5020,32 @@ void trace_init_global_iter(struct trace_iterator *iter)
 	iter->cpu_file = TRACE_PIPE_ALL_CPU;
 }
 
-static void
-__ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
+void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
 {
-	static arch_spinlock_t ftrace_dump_lock =
-		(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 	/* use static because iter can be a bit big for the stack */
 	static struct trace_iterator iter;
+	static atomic_t dump_running;
 	unsigned int old_userobj;
-	static int dump_ran;
 	unsigned long flags;
 	int cnt = 0, cpu;
 
-	/* only one dump */
-	local_irq_save(flags);
-	arch_spin_lock(&ftrace_dump_lock);
-	if (dump_ran)
-		goto out;
-
-	dump_ran = 1;
+	/* Only allow one dump user at a time. */
+	if (atomic_inc_return(&dump_running) != 1) {
+		atomic_dec(&dump_running);
+		return;
+	}
 
+	/*
+	 * Always turn off tracing when we dump.
+	 * We don't need to show trace output of what happens
+	 * between multiple crashes.
+	 *
+	 * If the user does a sysrq-z, then they can re-enable
+	 * tracing with echo 1 > tracing_on.
+	 */
 	tracing_off();
 
-	/* Did function tracer already get disabled? */
-	if (ftrace_is_dead()) {
-		printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
-		printk("# MAY BE MISSING FUNCTION EVENTS\n");
-	}
-
-	if (disable_tracing)
-		ftrace_kill();
+	local_irq_save(flags);
 
 	trace_init_global_iter(&iter);
 
@@ -5082,6 +5078,12 @@ __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
 
 	printk(KERN_TRACE "Dumping ftrace buffer:\n");
 
+	/* Did function tracer already get disabled? */
+	if (ftrace_is_dead()) {
+		printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
+		printk("# MAY BE MISSING FUNCTION EVENTS\n");
+	}
+
 	/*
 	 * We need to stop all tracing on all CPUS to read the
 	 * the next buffer. This is a bit expensive, but is
@@ -5121,26 +5123,14 @@ __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
 	printk(KERN_TRACE "---------------------------------\n");
 
  out_enable:
-	/* Re-enable tracing if requested */
-	if (!disable_tracing) {
-		trace_flags |= old_userobj;
+	trace_flags |= old_userobj;
 
-		for_each_tracing_cpu(cpu) {
-			atomic_dec(&iter.tr->data[cpu]->disabled);
-		}
-		tracing_on();
+	for_each_tracing_cpu(cpu) {
+		atomic_dec(&iter.tr->data[cpu]->disabled);
 	}
-
- out:
-	arch_spin_unlock(&ftrace_dump_lock);
+	atomic_dec(&dump_running);
 	local_irq_restore(flags);
 }
-
-/* By default: disable tracing after the dump */
-void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
-{
-	__ftrace_dump(true, oops_dump_mode);
-}
 EXPORT_SYMBOL_GPL(ftrace_dump);
 
 __init static int tracer_alloc_buffers(void)
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index 5fc7aa55fbe5..81f6275fc549 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -702,8 +702,6 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
 /* Maximum number of functions to trace before diagnosing a hang */
 #define GRAPH_MAX_FUNC_TEST	100000000
 
-static void
-__ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode);
 static unsigned int graph_hang_thresh;
 
 /* Wrap the real function entry probe to avoid possible hanging */
@@ -713,8 +711,11 @@ static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
 	if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
 		ftrace_graph_stop();
 		printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
-		if (ftrace_dump_on_oops)
-			__ftrace_dump(false, DUMP_ALL);
+		if (ftrace_dump_on_oops) {
+			ftrace_dump(DUMP_ALL);
+			/* ftrace_dump() disables tracing */
+			tracing_on();
+		}
 		return 0;
 	}
 