#ifndef _ASM_GENERIC_LOCAL_H
#define _ASM_GENERIC_LOCAL_H

#include <linux/percpu.h>
#include <linux/atomic.h>
#include <asm/types.h>

/*
 * A signed long type for operations which are atomic for a single CPU.
 * Usually used in combination with per-cpu variables.
 *
 * This is the default implementation, which uses atomic_long_t.  Which is
 * rather pointless.  The whole point behind local_t is that some processors
 * can perform atomic adds and subtracts in a manner which is atomic wrt IRQs
 * running on this CPU.  local_t allows exploitation of such capabilities.
 */

/* Implement in terms of atomics. */

/* Don't use typedef: don't want them to be mixed with atomic_t's. */
typedef struct
{
	atomic_long_t a;
} local_t;

#define LOCAL_INIT(i)	{ ATOMIC_LONG_INIT(i) }

#define local_read(l)	atomic_long_read(&(l)->a)
#define local_set(l,i)	atomic_long_set((&(l)->a),(i))
#define local_inc(l)	atomic_long_inc(&(l)->a)
#define local_dec(l)	atomic_long_dec(&(l)->a)
#define local_add(i,l)	atomic_long_add((i),(&(l)->a))
#define local_sub(i,l)	atomic_long_sub((i),(&(l)->a))

#define local_sub_and_test(i, l) atomic_long_sub_and_test((i), (&(l)->a))
#define local_dec_and_test(l) atomic_long_dec_and_test(&(l)->a)
#define local_inc_and_test(l) atomic_long_inc_and_test(&(l)->a)
#define local_add_negative(i, l) atomic_long_add_negative((i), (&(l)->a))
#define local_add_return(i, l) atomic_long_add_return((i), (&(l)->a))
#define local_sub_return(i, l) atomic_long_sub_return((i), (&(l)->a))
#define local_inc_return(l) atomic_long_inc_return(&(l)->a)

#define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
#define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n))
#define local_add_unless(l, _a, u) atomic_long_add_unless((&(l)->a), (_a), (u))
#define local_inc_not_zero(l) atomic_long_inc_not_zero(&(l)->a)

/* Non-atomic variants, ie. preemption disabled and won't be touched
 * in interrupt, etc.  Some archs can optimize this case well. */
#define __local_inc(l)		local_set((l), local_read(l) + 1)
#define __local_dec(l)		local_set((l), local_read(l) - 1)
#define __local_add(i,l)	local_set((l), local_read(l) + (i))
#define __local_sub(i,l)	local_set((l), local_read(l) - (i))

#endif /* _ASM_GENERIC_LOCAL_H */
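For context, a minimal usage sketch (not part of the header): local_t is normally wrapped in a per-cpu variable so that each CPU only ever modifies its own counter, and a reader sums over all CPUs. The counter name and functions below are illustrative, following the pattern documented for local atomic operations; they are not taken from this page.

#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <asm/local.h>

/* Hypothetical per-CPU event counter built on local_t. */
static DEFINE_PER_CPU(local_t, hit_count) = LOCAL_INIT(0);

static void count_hit(void)
{
	/* get_cpu_var() disables preemption, so we stay on this CPU
	 * while local_inc() performs the IRQ-safe increment. */
	local_inc(&get_cpu_var(hit_count));
	put_cpu_var(hit_count);
}

static long total_hits(void)
{
	long sum = 0;
	int cpu;

	/* Reads of other CPUs' counters may race with their updates,
	 * which is acceptable for an approximate statistic. */
	for_each_possible_cpu(cpu)
		sum += local_read(&per_cpu(hit_count, cpu));
	return sum;
}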
author	Rabin Vincent <rabinv@axis.com>	2017-01-13 15:00:16 +0100
committer	Steve French <smfrench@gmail.com>	2017-01-14 14:58:29 -0600
commit	81ddd8c0c5e1cb41184d66567140cb48c53eb3d1 (patch)
tree	5775397665161f633952a13537df36df682f7218 /fs/nfs/blocklayout
parent	2eabb8b8d68bc9c7779ba8b04bec8d4f8baed0bc (diff)
cifs: initialize file_info_lock
Reviewed-by: Jeff Layton <jlayton@redhat.com>
CC: Stable <stable@vger.kernel.org>

file_info_lock is not initialized in initiate_cifs_search(), leading to
the following splat after a simple "mount.cifs ... dir && ls dir/":

 BUG: spinlock bad magic on CPU#0, ls/486
  lock: 0xffff880009301110, .magic: 00000000, .owner: <none>/-1, .owner_cpu: 0
 CPU: 0 PID: 486 Comm: ls Not tainted 4.9.0 #27
 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996)
  ffffc900042f3db0 ffffffff81327533 0000000000000000 ffff880009301110
  ffffc900042f3dd0 ffffffff810baf75 ffff880009301110 ffffffff817ae077
  ffffc900042f3df0 ffffffff810baff6 ffff880009301110 ffff880008d69900
 Call Trace:
  [<ffffffff81327533>] dump_stack+0x65/0x92
  [<ffffffff810baf75>] spin_dump+0x85/0xe0
  [<ffffffff810baff6>] spin_bug+0x26/0x30
  [<ffffffff810bb159>] do_raw_spin_lock+0xe9/0x130
  [<ffffffff8159ad2f>] _raw_spin_lock+0x1f/0x30
  [<ffffffff8127e50d>] cifs_closedir+0x4d/0x100
  [<ffffffff81181cfd>] __fput+0x5d/0x160
  [<ffffffff81181e3e>] ____fput+0xe/0x10
  [<ffffffff8109410e>] task_work_run+0x7e/0xa0
  [<ffffffff81002512>] exit_to_usermode_loop+0x92/0xa0
  [<ffffffff810026f9>] syscall_return_slowpath+0x49/0x50
  [<ffffffff8159b484>] entry_SYSCALL_64_fastpath+0xa7/0xa9

Fixes: 3afca265b5f53a0 ("Clarify locking of cifs file and tcon structures and make more granular")
Signed-off-by: Rabin Vincent <rabinv@axis.com>
Signed-off-by: Steve French <smfrench@gmail.com>
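The patch body is not shown in this view. The fix amounts to initializing the spinlock at the point where the directory-search file info is allocated in initiate_cifs_search(), before cifs_closedir() can ever take it. A minimal sketch of that kind of change; the surrounding allocation and variable names are inferred from the splat and are illustrative, not the literal hunk:

	/* In initiate_cifs_search() (fs/cifs/readdir.c), right after the
	 * cifsFileInfo is allocated -- sketch only. */
	cifsFile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
	if (cifsFile == NULL) {
		rc = -ENOMEM;
		goto error_exit;
	}
	spin_lock_init(&cifsFile->file_info_lock);	/* the missing init */
	file->private_data = cifsFile;

Without the spin_lock_init(), the lock's magic and owner fields hold whatever the allocator left there, which is exactly what the "spinlock bad magic" check in do_raw_spin_lock() reports when the lock is first taken on close.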
Diffstat (limited to 'fs/nfs/blocklayout')