#ifndef LOCKING_H
#define LOCKING_H

#include <pthread.h>

struct spinlock {
        pthread_spinlock_t lock;
};

struct mutexlock {
        pthread_mutex_t lock;
};

#define MUTEXLOCK_INITIALIZER { .lock = PTHREAD_MUTEX_INITIALIZER }

struct rwlock {
        pthread_rwlock_t lock;
};

static inline int spinlock_init(struct spinlock *l)
{
        return -pthread_spin_init(&l->lock, 0);
}

static inline void spinlock_destroy(struct spinlock *l)
{
        pthread_spin_destroy(&l->lock);
}

static inline void spinlock_lock(struct spinlock *l)
{
        pthread_spin_lock(&l->lock);
}

static inline void spinlock_unlock(struct spinlock *l)
{
        pthread_spin_unlock(&l->lock);
}

static inline int mutexlock_init(struct mutexlock *l)
{
        return -pthread_mutex_init(&l->lock, 0);
}

static inline void mutexlock_destroy(struct mutexlock *l)
{
        pthread_mutex_destroy(&l->lock);
}

static inline void mutexlock_lock(struct mutexlock *l)
{
        pthread_mutex_lock(&l->lock);
}

static inline void mutexlock_unlock(struct mutexlock *l)
{
        pthread_mutex_unlock(&l->lock);
}

static inline int rwlock_init(struct rwlock *l)
{
        return -pthread_rwlock_init(&l->lock, 0);
}

static inline int rwlock_init2(struct rwlock *l, pthread_rwlockattr_t *attr)
{
        return -pthread_rwlock_init(&l->lock, attr);
}

static inline void rwlock_destroy(struct rwlock *l)
{
        pthread_rwlock_destroy(&l->lock);
}

static inline void rwlock_rd_lock(struct rwlock *l)
{
        pthread_rwlock_rdlock(&l->lock);
}

static inline void rwlock_wr_lock(struct rwlock *l)
{
        pthread_rwlock_wrlock(&l->lock);
}

static inline void rwlock_unlock(struct rwlock *l)
{
        pthread_rwlock_unlock(&l->lock);
}

#endif /* LOCKING_H */
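The wrappers above map one-to-one onto the underlying pthread primitives: the *_init() functions return the negated pthread error code (0 on success), and the lock/unlock helpers ignore errors. A minimal usage sketch, assuming the header is saved as locking.h and the program is built with -pthread; the worker() function and the counter variable are made up for the example:

#include <stdio.h>
#include <pthread.h>

#include "locking.h"

/* Shared counter protected by the mutexlock wrapper; static initialization
 * works because MUTEXLOCK_INITIALIZER expands to PTHREAD_MUTEX_INITIALIZER. */
static struct mutexlock counter_lock = MUTEXLOCK_INITIALIZER;
static long counter;

static void *worker(void *arg)
{
        (void)arg;

        for (int i = 0; i < 100000; i++) {
                mutexlock_lock(&counter_lock);
                counter++;
                mutexlock_unlock(&counter_lock);
        }
        return NULL;
}

int main(void)
{
        pthread_t t1, t2;

        pthread_create(&t1, NULL, worker, NULL);
        pthread_create(&t2, NULL, worker, NULL);
        pthread_join(t1, NULL);
        pthread_join(t2, NULL);

        /* Prints 200000: the increments are serialized by the lock. */
        printf("counter = %ld\n", counter);
        return 0;
}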
author     Thomas Gleixner <tglx@linutronix.de>    2017-01-31 19:03:21 +0100
committer  Thomas Gleixner <tglx@linutronix.de>    2017-01-31 20:22:18 +0100
commit     aaaec6fc755447a1d056765b11b24d8ff2b81366
tree       a7f4167960ee1df86739905b6ccdeb95465bfe5f
parent     08d85f3ea99f1eeafc4e8507936190e86a16ee8c
x86/irq: Make irq activate operations symmetric
The recent commit which prevents double activation of interrupts unearthed
interesting code in x86. The code (ab)uses irq_domain_activate_irq() to
reconfigure an already activated interrupt. That trips over the prevention
code now.

Fix it by deactivating the interrupt before activating the new configuration.

Fixes: 08d85f3ea99f1 ("irqdomain: Avoid activating interrupts more than once")
Reported-and-tested-by: Mike Galbraith <efault@gmx.de>
Reported-and-tested-by: Borislav Petkov <bp@alien8.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Marc Zyngier <marc.zyngier@arm.com>
Cc: stable@vger.kernel.org
Link: http://lkml.kernel.org/r/alpine.DEB.2.20.1701311901580.3457@nanos
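The pattern the changelog describes looks roughly like the sketch below: rather than calling irq_domain_activate_irq() a second time on an already active interrupt to pick up a new configuration, the interrupt is deactivated first so the activate/deactivate calls stay balanced. This is an illustrative sketch, not the actual patch; reconfigure_irq() and its call site are hypothetical, while irq_domain_activate_irq()/irq_domain_deactivate_irq() are the irqdomain core APIs of that kernel era.

#include <linux/irq.h>
#include <linux/irqdomain.h>

/* Hypothetical helper showing the symmetric activate pattern. */
static void reconfigure_irq(struct irq_data *irqd)
{
        /*
         * Activating an interrupt that is already active is now caught by
         * the double-activation guard, so tear down the old configuration
         * first ...
         */
        irq_domain_deactivate_irq(irqd);

        /* ... and then activate the new configuration symmetrically. */
        irq_domain_activate_irq(irqd);
}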