/* * Generic UP xchg and cmpxchg using interrupt disablement. Does not * support SMP. */ #ifndef __ASM_GENERIC_CMPXCHG_H #define __ASM_GENERIC_CMPXCHG_H #ifdef CONFIG_SMP #error "Cannot use generic cmpxchg on SMP" #endif #include #include #ifndef xchg /* * This function doesn't exist, so you'll get a linker error if * something tries to do an invalidly-sized xchg(). */ extern void __xchg_called_with_bad_pointer(void); static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size) { unsigned long ret, flags; switch (size) { case 1: #ifdef __xchg_u8 return __xchg_u8(x, ptr); #else local_irq_save(flags); ret = *(volatile u8 *)ptr; *(volatile u8 *)ptr = x; local_irq_restore(flags); return ret; #endif /* __xchg_u8 */ case 2: #ifdef __xchg_u16 return __xchg_u16(x, ptr); #else local_irq_save(flags); ret = *(volatile u16 *)ptr; *(volatile u16 *)ptr = x; local_irq_restore(flags); return ret; #endif /* __xchg_u16 */ case 4: #ifdef __xchg_u32 return __xchg_u32(x, ptr); #else local_irq_save(flags); ret = *(volatile u32 *)ptr; *(volatile u32 *)ptr = x; local_irq_restore(flags); return ret; #endif /* __xchg_u32 */ #ifdef CONFIG_64BIT case 8: #ifdef __xchg_u64 return __xchg_u64(x, ptr); #else local_irq_save(flags); ret = *(volatile u64 *)ptr; *(volatile u64 *)ptr = x; local_irq_restore(flags); return ret; #endif /* __xchg_u64 */ #endif /* CONFIG_64BIT */ default: __xchg_called_with_bad_pointer(); return x; } } #define xchg(ptr, x) ({ \ ((__typeof__(*(ptr))) \ __xchg((unsigned long)(x), (ptr), sizeof(*(ptr)))); \ }) #endif /* xchg */ /* * Atomic compare and exchange. 
*/ #include #ifndef cmpxchg_local #define cmpxchg_local(ptr, o, n) ({ \ ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\ (unsigned long)(n), sizeof(*(ptr)))); \ }) #endif #ifndef cmpxchg64_local #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n)) #endif #define cmpxchg(ptr, o, n) cmpxchg_local((ptr), (o), (n)) #define cmpxchg64(ptr, o, n) cmpxchg64_local((ptr), (o), (n)) #endif /* __ASM_GENERIC_CMPXCHG_H */ e='0becc0ae5b42828785b589f686725ff5bc3b9b25'/>
diff options
context:
space:
mode:
author: Thomas Gleixner <tglx@linutronix.de> 2017-01-31 09:37:34 +0100
committer: Thomas Gleixner <tglx@linutronix.de> 2017-01-31 21:47:58 +0100
commit: 0becc0ae5b42828785b589f686725ff5bc3b9b25 (patch)
tree: be6d0e1f37c38ed0a7dd5da2d4b1e93f0fb43101 /net/tipc/subscr.c
parent: 24c2503255d35c269b67162c397a1a1c1e02f6ce (diff)
x86/mce: Make timer handling more robust
Erik reported that on a preproduction hardware a CMCI storm triggers the BUG_ON in add_timer_on(). The reason is that the per CPU MCE timer is started by the CMCI logic before the MCE CPU hotplug callback starts the timer with add_timer_on(). So the timer is already queued which triggers the BUG. Using add_timer_on() is pretty pointless in this code because the timer is strictly per CPU, initialized as pinned and all operations which arm the timer happen on the CPU to which the timer belongs. Simplify the whole machinery by using mod_timer() instead of add_timer_on() which avoids the problem because mod_timer() can handle already queued timers. Use __start_timer() everywhere so the earliest armed expiry time is preserved. Reported-by: Erik Veijola <erik.veijola@intel.com> Tested-by: Borislav Petkov <bp@alien8.de> Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Reviewed-by: Borislav Petkov <bp@alien8.de> Cc: Tony Luck <tony.luck@intel.com> Link: http://lkml.kernel.org/r/alpine.DEB.2.20.1701310936080.3457@nanos Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'net/tipc/subscr.c')