aboutsummaryrefslogtreecommitdiffstats
path: root/fs
diff options
context:
space:
mode:
authorOleg Nesterov2013-04-30 17:28:20 -0500
committerGreg Kroah-Hartman2013-05-07 22:08:23 -0500
commitd76dc4af6d15b4f47dc2d01fd40938ce7675b568 (patch)
treee1d84f4709225cb1ae7f8eb1125682009ebe4b88 /fs
parent88d9e88d1b44a0b3947bce5d973c3ce0865ec868 (diff)
downloadkernel-omap-d76dc4af6d15b4f47dc2d01fd40938ce7675b568.tar.gz
kernel-omap-d76dc4af6d15b4f47dc2d01fd40938ce7675b568.tar.xz
kernel-omap-d76dc4af6d15b4f47dc2d01fd40938ce7675b568.zip
exec: do not abuse ->cred_guard_mutex in threadgroup_lock()
commit e56fb2874015370e3b7f8d85051f6dce26051df9 upstream.

threadgroup_lock() takes signal->cred_guard_mutex to ensure that
thread_group_leader() is stable. This doesn't look nice, the scope of
this lock in do_execve() is huge.

And as Dave pointed out this can lead to deadlock, we have the
following dependencies:

	do_execve:		cred_guard_mutex -> i_mutex
	cgroup_mount:		i_mutex -> cgroup_mutex
	attach_task_by_pid:	cgroup_mutex -> cred_guard_mutex

Change de_thread() to take threadgroup_change_begin() around the
switch-the-leader code and change threadgroup_lock() to avoid
->cred_guard_mutex.

Note that de_thread() can't sleep with ->group_rwsem held, this can
obviously deadlock with the exiting leader if the writer is active,
so it does threadgroup_change_end() before schedule().

Reported-by: Dave Jones <davej@redhat.com>
Acked-by: Tejun Heo <tj@kernel.org>
Acked-by: Li Zefan <lizefan@huawei.com>
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'fs')
-rw-r--r--fs/exec.c3
1 file changed, 3 insertions, 0 deletions
diff --git a/fs/exec.c b/fs/exec.c
index 547eaaaeb89c..ac014f1009aa 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -898,11 +898,13 @@ static int de_thread(struct task_struct *tsk)
898 898
899 sig->notify_count = -1; /* for exit_notify() */ 899 sig->notify_count = -1; /* for exit_notify() */
900 for (;;) { 900 for (;;) {
901 threadgroup_change_begin(tsk);
901 write_lock_irq(&tasklist_lock); 902 write_lock_irq(&tasklist_lock);
902 if (likely(leader->exit_state)) 903 if (likely(leader->exit_state))
903 break; 904 break;
904 __set_current_state(TASK_KILLABLE); 905 __set_current_state(TASK_KILLABLE);
905 write_unlock_irq(&tasklist_lock); 906 write_unlock_irq(&tasklist_lock);
907 threadgroup_change_end(tsk);
906 schedule(); 908 schedule();
907 if (unlikely(__fatal_signal_pending(tsk))) 909 if (unlikely(__fatal_signal_pending(tsk)))
908 goto killed; 910 goto killed;
@@ -960,6 +962,7 @@ static int de_thread(struct task_struct *tsk)
960 if (unlikely(leader->ptrace)) 962 if (unlikely(leader->ptrace))
961 __wake_up_parent(leader, leader->parent); 963 __wake_up_parent(leader, leader->parent);
962 write_unlock_irq(&tasklist_lock); 964 write_unlock_irq(&tasklist_lock);
965 threadgroup_change_end(tsk);
963 966
964 release_task(leader); 967 release_task(leader);
965 } 968 }