about summary refs log tree commit diff stats
path: root/kernel
diff options
context:
space:
mode:
author: Tetsuo Handa <2017-05-08 17:55:11 -0500>
committer: Linus Torvalds <2017-05-08 19:15:10 -0500>
commit780cbcf28781511d2cb235c375127265209796a8 (patch)
tree1ad6a5a7285951266fd955896dace775a62d7940 /kernel
parent31b8cc80776c1b5a17abda6e0bbb5c615b9d90e4 (diff)
downloadkernel-780cbcf28781511d2cb235c375127265209796a8.tar.gz
kernel-780cbcf28781511d2cb235c375127265209796a8.tar.xz
kernel-780cbcf28781511d2cb235c375127265209796a8.zip
kernel/hung_task.c: defer showing held locks
When I was running my testcase which may block hundreds of threads on fs locks, I got lockup due to output from debug_show_all_locks() added by commit b2d4c2edb2e4 ("locking/hung_task: Show all locks"). For example, if 1000 threads were blocked in TASK_UNINTERRUPTIBLE state and 500 out of 1000 threads hold some lock, debug_show_all_locks() from for_each_process_thread() loop will report locks held by 500 threads for 1000 times. This is too much noise. In order to make sure rcu_lock_break() is called frequently, we should avoid calling debug_show_all_locks() from the for_each_process_thread() loop, because debug_show_all_locks() effectively calls the for_each_process_thread() loop itself. Let's defer calling debug_show_all_locks() until just before panic() or until after leaving the for_each_process_thread() loop. Link: http://lkml.kernel.org/r/1489296834-60436-1-git-send-email-penguin-kernel@I-love.SAKURA.ne.jp Signed-off-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp> Reviewed-by: Vegard Nossum <vegard.nossum@oracle.com> Cc: Ingo Molnar <mingo@kernel.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'kernel')
-rw-r--r--kernel/hung_task.c8
1 file changed, 7 insertions(+), 1 deletion(-)
diff --git a/kernel/hung_task.c b/kernel/hung_task.c
index f0f8e2a8496f..751593ed7c0b 100644
--- a/kernel/hung_task.c
+++ b/kernel/hung_task.c
@@ -43,6 +43,7 @@ unsigned long __read_mostly sysctl_hung_task_timeout_secs = CONFIG_DEFAULT_HUNG_
43int __read_mostly sysctl_hung_task_warnings = 10; 43int __read_mostly sysctl_hung_task_warnings = 10;
44 44
45static int __read_mostly did_panic; 45static int __read_mostly did_panic;
46static bool hung_task_show_lock;
46 47
47static struct task_struct *watchdog_task; 48static struct task_struct *watchdog_task;
48 49
@@ -120,12 +121,14 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout)
120 pr_err("\"echo 0 > /proc/sys/kernel/hung_task_timeout_secs\"" 121 pr_err("\"echo 0 > /proc/sys/kernel/hung_task_timeout_secs\""
121 " disables this message.\n"); 122 " disables this message.\n");
122 sched_show_task(t); 123 sched_show_task(t);
123 debug_show_all_locks(); 124 hung_task_show_lock = true;
124 } 125 }
125 126
126 touch_nmi_watchdog(); 127 touch_nmi_watchdog();
127 128
128 if (sysctl_hung_task_panic) { 129 if (sysctl_hung_task_panic) {
130 if (hung_task_show_lock)
131 debug_show_all_locks();
129 trigger_all_cpu_backtrace(); 132 trigger_all_cpu_backtrace();
130 panic("hung_task: blocked tasks"); 133 panic("hung_task: blocked tasks");
131 } 134 }
@@ -172,6 +175,7 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout)
172 if (test_taint(TAINT_DIE) || did_panic) 175 if (test_taint(TAINT_DIE) || did_panic)
173 return; 176 return;
174 177
178 hung_task_show_lock = false;
175 rcu_read_lock(); 179 rcu_read_lock();
176 for_each_process_thread(g, t) { 180 for_each_process_thread(g, t) {
177 if (!max_count--) 181 if (!max_count--)
@@ -187,6 +191,8 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout)
187 } 191 }
188 unlock: 192 unlock:
189 rcu_read_unlock(); 193 rcu_read_unlock();
194 if (hung_task_show_lock)
195 debug_show_all_locks();
190} 196}
191 197
192static long hung_timeout_jiffies(unsigned long last_checked, 198static long hung_timeout_jiffies(unsigned long last_checked,