#ifndef _ASM_GENERIC_LOCAL_H
#define _ASM_GENERIC_LOCAL_H

#include <linux/percpu.h>
#include <linux/atomic.h>
#include <asm/types.h>

/*
 * A signed long type for operations which are atomic for a single CPU.
 * Usually used in combination with per-cpu variables.
 *
 * This is the default implementation, which uses atomic_long_t.  Which is
 * rather pointless.  The whole point behind local_t is that some processors
 * can perform atomic adds and subtracts in a manner which is atomic wrt IRQs
 * running on this CPU.  local_t allows exploitation of such capabilities.
 */

/* Implement in terms of atomics. */

/* Don't use typedef: don't want them to be mixed with atomic_t's. */
typedef struct
{
	atomic_long_t a;
} local_t;

#define LOCAL_INIT(i)	{ ATOMIC_LONG_INIT(i) }

#define local_read(l)	atomic_long_read(&(l)->a)
#define local_set(l,i)	atomic_long_set((&(l)->a),(i))
#define local_inc(l)	atomic_long_inc(&(l)->a)
#define local_dec(l)	atomic_long_dec(&(l)->a)
#define local_add(i,l)	atomic_long_add((i),(&(l)->a))
#define local_sub(i,l)	atomic_long_sub((i),(&(l)->a))

#define local_sub_and_test(i, l) atomic_long_sub_and_test((i), (&(l)->a))
#define local_dec_and_test(l) atomic_long_dec_and_test(&(l)->a)
#define local_inc_and_test(l) atomic_long_inc_and_test(&(l)->a)
#define local_add_negative(i, l) atomic_long_add_negative((i), (&(l)->a))
#define local_add_return(i, l) atomic_long_add_return((i), (&(l)->a))
#define local_sub_return(i, l) atomic_long_sub_return((i), (&(l)->a))
#define local_inc_return(l) atomic_long_inc_return(&(l)->a)

#define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
#define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n))
#define local_add_unless(l, _a, u) atomic_long_add_unless((&(l)->a), (_a), (u))
#define local_inc_not_zero(l) atomic_long_inc_not_zero(&(l)->a)

/* Non-atomic variants, ie. preemption disabled and won't be touched
 * in interrupt, etc.  Some archs can optimize this case well. */
#define __local_inc(l)		local_set((l), local_read(l) + 1)
#define __local_dec(l)		local_set((l), local_read(l) - 1)
#define __local_add(i,l)	local_set((l), local_read(l) + (i))
#define __local_sub(i,l)	local_set((l), local_read(l) - (i))

#endif /* _ASM_GENERIC_LOCAL_H */
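As a rough usage sketch (not part of this header -- the per-cpu variable hits and the helpers note_hit()/total_hits() below are hypothetical), a per-cpu event counter built on local_t could look like the following: each CPU increments only its own counter, so the increment only needs to be atomic with respect to interrupts on that CPU, and a reader sums the per-cpu values.

#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <asm/local.h>

/* One counter per CPU; only the owning CPU ever modifies its copy. */
static DEFINE_PER_CPU(local_t, hits) = LOCAL_INIT(0);

static void note_hit(void)
{
	/* get_cpu_ptr() disables preemption, so we cannot migrate away
	 * while updating this CPU's counter. */
	local_t *c = get_cpu_ptr(&hits);

	local_inc(c);		/* atomic wrt IRQs on this CPU */
	put_cpu_ptr(&hits);
}

static long total_hits(void)
{
	long sum = 0;
	int cpu;

	/* Readers sum the per-cpu counters; the total is approximate
	 * while updates are in flight. */
	for_each_possible_cpu(cpu)
		sum += local_read(per_cpu_ptr(&hits, cpu));
	return sum;
}

The __local_* variants would only be safe in note_hit() if the counter is also never touched from interrupt context, as the header's own comment notes.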
commit 79c6f448c8b79c321e4a1f31f98194e4f6b6cae7
Author:    Steven Rostedt (VMware) <rostedt@goodmis.org>  2017-01-30 19:27:10 -0500
Committer: Steven Rostedt (VMware) <rostedt@goodmis.org>  2017-01-31 09:13:49 -0500
tree 370efda701f03cccf21e02bb1fdd3b852547d75c
parent 0c744ea4f77d72b3dcebb7a8f2684633ec79be88
tracing: Fix hwlat kthread migration
The hwlat tracer creates a kernel thread at start of the tracer. It is pinned to a single CPU and will move to the next CPU after each period of running. If the user modifies the migration thread's affinity, it will not change after that happens.

The original code created the thread at the first instance it was called, but later was changed to destroy the thread after the tracer was finished, and would not be created until the next instance of the tracer was established. The code that initialized the affinity was only called on the initial instantiation of the tracer. After that, it was not initialized, and the previous affinity did not match the newly created one, making it appear that the user had modified the thread's affinity when they had not, so the thread failed to migrate again.

Cc: stable@vger.kernel.org
Fixes: 0330f7aa8ee6 ("tracing: Have hwlat trace migrate across tracing_cpumask CPUs")
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
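The patch itself is not reproduced here. As a loose sketch of the pattern the message describes (all names below -- sample_fn, start_sample_thread, next_cpu -- are hypothetical and are not the hwlat tracer's actual code), the state that drives migration should be reset every time the kthread is created, not only on the tracer's first instantiation:

#include <linux/kthread.h>
#include <linux/cpumask.h>
#include <linux/delay.h>
#include <linux/err.h>

static struct task_struct *sample_thread;	/* hypothetical worker */
static int next_cpu;				/* hypothetical migration state */

static int sample_fn(void *data)
{
	while (!kthread_should_stop())
		msleep_interruptible(1000);	/* stand-in for the real work */
	return 0;
}

static int start_sample_thread(void)
{
	struct task_struct *t;

	/* Reset the migration state on *every* start, not just the first:
	 * a stale value from a previous run would make it look as if the
	 * user had pinned the thread, and it would never migrate again. */
	next_cpu = cpumask_first(cpu_online_mask);

	t = kthread_create(sample_fn, NULL, "sample-thread");
	if (IS_ERR(t))
		return PTR_ERR(t);

	kthread_bind(t, next_cpu);	/* pin the worker to one CPU */
	sample_thread = t;
	wake_up_process(t);
	return 0;
}

The essential point is that any cached notion of where the thread was last allowed to run should live and die with the thread itself.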