path: root/lib
author     Stephen Boyd     2012-12-20 01:39:48 -0600
committer  Linus Torvalds   2012-12-20 15:50:16 -0600
commit     fcc16882ac4532aaa644bff444f0c5d6228ba71e (patch)
tree       7104729ed7fd136a26ea47462e716410666aa1f6 /lib
parent     787314c35fbb97e02823a1b8eb8cfa58f366cd49 (diff)
lib: atomic64: Initialize locks statically to fix early users
The atomic64 library uses a handful of static spin locks to implement
atomic 64-bit operations on architectures without support for atomic
64-bit instructions. Unfortunately, the spinlocks are initialized in a
pure initcall and that is too late for the vfs namespace code which
wants to use atomic64 operations before the initcall is run.

This became a problem as of commit 8823c079ba71: "vfs: Add setns
support for the mount namespace".

This leads to BUG messages such as:

  BUG: spinlock bad magic on CPU#0, swapper/0/0
  lock: atomic64_lock+0x240/0x400, .magic: 00000000, .owner: <none>/-1, .owner_cpu: 0
   do_raw_spin_lock+0x158/0x198
   _raw_spin_lock_irqsave+0x4c/0x58
   atomic64_add_return+0x30/0x5c
   alloc_mnt_ns.clone.14+0x44/0xac
   create_mnt_ns+0xc/0x54
   mnt_init+0x120/0x1d4
   vfs_caches_init+0xe0/0x10c
   start_kernel+0x29c/0x300

coming out early on during boot when spinlock debugging is enabled.

Fix this by initializing the spinlocks statically at compile time.

Reported-and-tested-by: Vaibhav Bedia <vaibhav.bedia@ti.com>
Tested-by: Tony Lindgren <tony@atomide.com>
Cc: Eric W. Biederman <ebiederm@xmission.com>
Signed-off-by: Stephen Boyd <sboyd@codeaurora.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
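For readers outside the kernel tree, the following is a minimal userland
sketch of the same hashed-lock technique, with pthread_mutex_t standing in
for raw_spinlock_t and PTHREAD_MUTEX_INITIALIZER playing the role of
__RAW_SPIN_LOCK_UNLOCKED. All names here (my_atomic64_t, lock_addr,
my_atomic64_add_return) are illustrative, not the kernel's:

    /*
     * Sketch: 64-bit atomic add built on a statically initialized array
     * of locks. Because the compiler initializes every lock, the very
     * first caller can take one safely, with no init function that
     * could run "too late". Compile with gcc or clang (the range
     * designated initializer [0 ... N] is a GNU extension).
     */
    #include <pthread.h>
    #include <stdint.h>
    #include <stdio.h>

    #define NR_LOCKS 16

    typedef struct { int64_t counter; } my_atomic64_t;

    /* Statically initialized lock array: valid before main() runs. */
    static pthread_mutex_t locks[NR_LOCKS] = {
            [0 ... (NR_LOCKS - 1)] = PTHREAD_MUTEX_INITIALIZER,
    };

    /* Hash the variable's address to pick one of the locks. */
    static pthread_mutex_t *lock_addr(const my_atomic64_t *v)
    {
            uintptr_t addr = (uintptr_t)v;
            return &locks[(addr >> 6) % NR_LOCKS];
    }

    static int64_t my_atomic64_add_return(int64_t a, my_atomic64_t *v)
    {
            pthread_mutex_t *lock = lock_addr(v);
            int64_t val;

            pthread_mutex_lock(lock);
            val = v->counter += a;
            pthread_mutex_unlock(lock);
            return val;
    }

    int main(void)
    {
            my_atomic64_t v = { 0 };

            printf("%lld\n", (long long)my_atomic64_add_return(5, &v));
            return 0;
    }

Hashing the counter's address across a small lock array spreads contention
between unrelated counters; that part is unchanged by this fix, which only
moves lock initialization from run time to compile time.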
Diffstat (limited to 'lib')
-rw-r--r--  lib/atomic64.c  |  17
1 file changed, 5 insertions(+), 12 deletions(-)
diff --git a/lib/atomic64.c b/lib/atomic64.c
index 978537809d8..08a4f068e61 100644
--- a/lib/atomic64.c
+++ b/lib/atomic64.c
@@ -31,7 +31,11 @@
 static union {
 	raw_spinlock_t lock;
 	char pad[L1_CACHE_BYTES];
-} atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp;
+} atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp = {
+	[0 ... (NR_LOCKS - 1)] = {
+		.lock =  __RAW_SPIN_LOCK_UNLOCKED(atomic64_lock.lock),
+	},
+};
 
 static inline raw_spinlock_t *lock_addr(const atomic64_t *v)
 {
@@ -173,14 +177,3 @@ int atomic64_add_unless(atomic64_t *v, long long a, long long u)
 	return ret;
 }
 EXPORT_SYMBOL(atomic64_add_unless);
-
-static int init_atomic64_lock(void)
-{
-	int i;
-
-	for (i = 0; i < NR_LOCKS; ++i)
-		raw_spin_lock_init(&atomic64_lock[i].lock);
-	return 0;
-}
-
-pure_initcall(init_atomic64_lock);
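The [0 ... (NR_LOCKS - 1)] syntax in the replacement initializer above is a
GNU C range designated initializer (supported by gcc and clang): it applies
one initializer to a whole range of array elements at compile time. A
standalone illustration, with hypothetical names:

    /* Range designated initializer: every element gets the same value,
     * filled in by the compiler, not at run time. */
    #include <stdio.h>

    static const int table[8] = {
            [0 ... 7] = 42,	/* all eight elements become 42 */
    };

    int main(void)
    {
            printf("%d %d\n", table[0], table[7]);	/* prints "42 42" */
            return 0;
    }

This is what lets the fix delete init_atomic64_lock() and its
pure_initcall() entirely: there is no longer any window during boot in
which the locks exist but are uninitialized.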