#ifndef _ASM_GENERIC_LOCAL64_H
#define _ASM_GENERIC_LOCAL64_H

#include <linux/percpu.h>
#include <asm/types.h>

/*
 * A signed long type for operations which are atomic for a single CPU.
 * Usually used in combination with per-cpu variables.
 *
 * This is the default implementation, which uses atomic64_t.  Which is
 * rather pointless.  The whole point behind local64_t is that some processors
 * can perform atomic adds and subtracts in a manner which is atomic wrt IRQs
 * running on this CPU.  local64_t allows exploitation of such capabilities.
 */

/* Implement in terms of atomics. */

#if BITS_PER_LONG == 64

#include <asm/local.h>

typedef struct {
	local_t a;
} local64_t;

#define LOCAL64_INIT(i)	{ LOCAL_INIT(i) }

#define local64_read(l)		local_read(&(l)->a)
#define local64_set(l, i)	local_set((&(l)->a), (i))
#define local64_inc(l)		local_inc(&(l)->a)
#define local64_dec(l)		local_dec(&(l)->a)
#define local64_add(i, l)	local_add((i), (&(l)->a))
#define local64_sub(i, l)	local_sub((i), (&(l)->a))

#define local64_sub_and_test(i, l) local_sub_and_test((i), (&(l)->a))
#define local64_dec_and_test(l) local_dec_and_test(&(l)->a)
#define local64_inc_and_test(l) local_inc_and_test(&(l)->a)
#define local64_add_negative(i, l) local_add_negative((i), (&(l)->a))
#define local64_add_return(i, l) local_add_return((i), (&(l)->a))
#define local64_sub_return(i, l) local_sub_return((i), (&(l)->a))
#define local64_inc_return(l)	local_inc_return(&(l)->a)

#define local64_cmpxchg(l, o, n) local_cmpxchg((&(l)->a), (o), (n))
#define local64_xchg(l, n)	local_xchg((&(l)->a), (n))
#define local64_add_unless(l, _a, u) local_add_unless((&(l)->a), (_a), (u))
#define local64_inc_not_zero(l)	local_inc_not_zero(&(l)->a)

/* Non-atomic variants, i.e. preemption disabled and won't be touched
 * in interrupt, etc.  Some archs can optimize this case well. */
#define __local64_inc(l)	local64_set((l), local64_read(l) + 1)
#define __local64_dec(l)	local64_set((l), local64_read(l) - 1)
#define __local64_add(i, l)	local64_set((l), local64_read(l) + (i))
#define __local64_sub(i, l)	local64_set((l), local64_read(l) - (i))

#else /* BITS_PER_LONG != 64 */

#include <linux/atomic.h>

/* Don't use typedef: don't want them to be mixed with atomic_t's. */
typedef struct {
	atomic64_t a;
} local64_t;

#define LOCAL64_INIT(i)	{ ATOMIC_LONG_INIT(i) }

#define local64_read(l)		atomic64_read(&(l)->a)
#define local64_set(l, i)	atomic64_set((&(l)->a), (i))
#define local64_inc(l)		atomic64_inc(&(l)->a)
#define local64_dec(l)		atomic64_dec(&(l)->a)
#define local64_add(i, l)	atomic64_add((i), (&(l)->a))
#define local64_sub(i, l)	atomic64_sub((i), (&(l)->a))

#define local64_sub_and_test(i, l) atomic64_sub_and_test((i), (&(l)->a))
#define local64_dec_and_test(l) atomic64_dec_and_test(&(l)->a)
#define local64_inc_and_test(l) atomic64_inc_and_test(&(l)->a)
#define local64_add_negative(i, l) atomic64_add_negative((i), (&(l)->a))
#define local64_add_return(i, l) atomic64_add_return((i), (&(l)->a))
#define local64_sub_return(i, l) atomic64_sub_return((i), (&(l)->a))
#define local64_inc_return(l)	atomic64_inc_return(&(l)->a)

#define local64_cmpxchg(l, o, n) atomic64_cmpxchg((&(l)->a), (o), (n))
#define local64_xchg(l, n)	atomic64_xchg((&(l)->a), (n))
#define local64_add_unless(l, _a, u) atomic64_add_unless((&(l)->a), (_a), (u))
#define local64_inc_not_zero(l)	atomic64_inc_not_zero(&(l)->a)

/* Non-atomic variants, i.e. preemption disabled and won't be touched
 * in interrupt, etc.  Some archs can optimize this case well. */
#define __local64_inc(l)	local64_set((l), local64_read(l) + 1)
#define __local64_dec(l)	local64_set((l), local64_read(l) - 1)
#define __local64_add(i, l)	local64_set((l), local64_read(l) + (i))
#define __local64_sub(i, l)	local64_set((l), local64_read(l) - (i))

#endif /* BITS_PER_LONG != 64 */

#endif /* _ASM_GENERIC_LOCAL64_H */
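For reference, below is a minimal usage sketch of the kind of per-CPU 64-bit counter this header is designed for, written as a separate example (it is not part of the header). The names my_counter, my_event and my_total are hypothetical; only the local64_* operations and the per-cpu accessors are real kernel API. It assumes the hot path runs with preemption disabled (e.g. in IRQ context), which is the precondition under which local64 operations are cheap and safe.

/*
 * Example (hypothetical): a per-CPU event counter built on local64_t.
 * Each CPU increments only its own counter; a reader sums all of them.
 */
#include <linux/percpu.h>
#include <asm/local64.h>

static DEFINE_PER_CPU(local64_t, my_counter) = LOCAL64_INIT(0);

/*
 * Hot path. Must run with preemption disabled (e.g. IRQ context);
 * local64_inc() is then atomic wrt interrupts on this CPU without
 * needing a bus-locked operation on architectures that support it.
 */
static void my_event(void)
{
	local64_inc(this_cpu_ptr(&my_counter));
}

/*
 * Slow path: sum the per-CPU counters. Reads of remote CPUs' counters
 * can race with concurrent increments, so the total is a snapshot,
 * not an exact instantaneous value.
 */
static s64 my_total(void)
{
	s64 sum = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		sum += local64_read(per_cpu_ptr(&my_counter, cpu));
	return sum;
}

This mirrors how the perf events code uses local64_t for event counts: writers stay CPU-local, and cross-CPU consistency is deferred to the (rare) reader.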