author     Will Deacon         2012-08-10 09:22:09 -0500
committer  Greg Kroah-Hartman  2012-10-02 11:47:39 -0500
commit     d0179ca85eb702e31c645556f4dc2e58a3af31f4 (patch)
tree       51e749eda541082a6bd68a20e36b02d16e27e252 /include
parent     e3439be1c0b9df0ca9b760ff4861be5d6145da26 (diff)
mutex: Place lock in contended state after fastpath_lock failure
commit 0bce9c46bf3b15f485d82d7e81dabed6ebcc24b1 upstream.

ARM recently moved to asm-generic/mutex-xchg.h for its mutex
implementation after the previous implementation was found to be
missing some crucial memory barriers. However, this has revealed
some problems running hackbench on SMP platforms due to the way in
which the MUTEX_SPIN_ON_OWNER code operates.

The symptoms are that a bunch of hackbench tasks are left waiting on an
unlocked mutex and therefore never get woken up to claim it. This boils
down to the following sequence of events:

        Task A        Task B        Task C        Lock value
0                                                     1
1       lock()                                        0
2                     lock()                          0
3                     spin(A)                         0
4       unlock()                                      1
5                                   lock()            0
6                     cmpxchg(1,0)                    0
7                     contended()                    -1
8       lock()                                        0
9       spin(C)                                       0
10                                  unlock()          1
11      cmpxchg(1,0)                                  0
12      unlock()                                      1

At this point, the lock is unlocked, but Task B is in an uninterruptible
sleep with nobody to wake it up.

This patch fixes the problem by ensuring we put the lock into the
contended state if we fail to acquire it on the fastpath, ensuring that
any blocked waiters are woken up when the mutex is released.

Signed-off-by: Will Deacon <will.deacon@arm.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Chris Mason <chris.mason@fusionio.com>
Cc: Ingo Molnar <mingo@elte.hu>
Reviewed-by: Nicolas Pitre <nico@linaro.org>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/n/tip-6e9lrw2avczr0617fzl5vqb8@git.kernel.org
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
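The failure is easiest to see by modelling only the lock value transitions.
Below is a minimal userspace sketch of the two xchg fastpaths using C11
atomics; it is an illustration only, not the kernel code, and the names
fastpath_lock_old(), fastpath_unlock(), count and woke_waiters are invented
for this model. It starts from the state after step 7 in the trace above,
where a waiter has already marked the lock contended (-1) and gone to sleep:
the next lock attempt smashes the count to 0, so the owner's eventual unlock
sees "no waiters" and the sleeper is never woken.

/*
 * Userspace model of the pre-patch asm-generic/mutex-xchg.h fastpaths.
 * Lock values: 1 = unlocked, 0 = locked (no waiters), -1 = locked,
 * possible waiters. Illustration only.
 */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int count;        /* the mutex count word */
static int woke_waiters;        /* set when the unlock slow path would run */

/* Pre-patch lock fastpath: unconditionally smashes the count to 0. */
static void fastpath_lock_old(void)
{
        if (atomic_exchange(&count, 0) != 1) {
                /* slow path: spin on the owner or sleep (not modelled) */
        }
}

/* Unlock fastpath: only a non-zero old value triggers the wakeup path. */
static void fastpath_unlock(void)
{
        if (atomic_exchange(&count, 1) != 0)
                woke_waiters = 1;
}

int main(void)
{
        atomic_store(&count, -1);  /* a waiter is asleep, lock held, contended */

        fastpath_lock_old();       /* another task tries the lock: -1 -> 0 */
        fastpath_unlock();         /* the owner releases: old value is 0 ... */

        /* ... so no wakeup happens, yet the lock now reads as free. */
        printf("count=%d woke_waiters=%d\n",
               atomic_load(&count), woke_waiters);  /* count=1 woke_waiters=0 */
        return 0;
}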
Diffstat (limited to 'include')
-rw-r--r--  include/asm-generic/mutex-xchg.h  11
1 file changed, 9 insertions(+), 2 deletions(-)
diff --git a/include/asm-generic/mutex-xchg.h b/include/asm-generic/mutex-xchg.h
index 580a6d35c70..c04e0db8a2d 100644
--- a/include/asm-generic/mutex-xchg.h
+++ b/include/asm-generic/mutex-xchg.h
@@ -26,7 +26,13 @@ static inline void
 __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
 {
         if (unlikely(atomic_xchg(count, 0) != 1))
-                fail_fn(count);
+                /*
+                 * We failed to acquire the lock, so mark it contended
+                 * to ensure that any waiting tasks are woken up by the
+                 * unlock slow path.
+                 */
+                if (likely(atomic_xchg(count, -1) != 1))
+                        fail_fn(count);
 }
 
 /**
@@ -43,7 +49,8 @@ static inline int
 __mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
 {
         if (unlikely(atomic_xchg(count, 0) != 1))
-                return fail_fn(count);
+                if (likely(atomic_xchg(count, -1) != 1))
+                        return fail_fn(count);
         return 0;
 }
 
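For contrast, here is the same userspace model with the patched lock fastpath
(again only a sketch; fastpath_lock_new() and the other names are invented for
illustration). A failed acquire now re-marks the lock as contended with a
second xchg, so the owner's unlock sees a non-zero old value and takes the
slow path. If that second xchg happens to return 1, the owner released the
lock in between the two exchanges and we have in fact acquired it, which is
why the patch only calls fail_fn() when the likely() branch is taken.

/*
 * Userspace model of the patched lock fastpath. Illustration only.
 */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int count;        /* 1 = unlocked, 0 = locked, -1 = contended */
static int woke_waiters;

static void fastpath_lock_new(void)
{
        if (atomic_exchange(&count, 0) != 1) {
                /*
                 * Failed to take the lock: mark it contended so that the
                 * unlock slow path will wake any sleeping waiters.
                 */
                if (atomic_exchange(&count, -1) != 1) {
                        /* slow path: spin on the owner or sleep (not modelled) */
                }
        }
}

static void fastpath_unlock(void)
{
        if (atomic_exchange(&count, 1) != 0)
                woke_waiters = 1;       /* slow path: wake a waiter */
}

int main(void)
{
        atomic_store(&count, -1);  /* a waiter is asleep, lock held, contended */

        fastpath_lock_new();       /* failed attempt now leaves count at -1 */
        fastpath_unlock();         /* old value is -1: slow path wakes the waiter */

        printf("woke_waiters=%d\n", woke_waiters);   /* prints 1 */
        return 0;
}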