#ifndef _LIBLOCKDEP_MUTEX_H
#define _LIBLOCKDEP_MUTEX_H

#include <pthread.h>

#include "common.h"

struct liblockdep_pthread_mutex {
        pthread_mutex_t mutex;
        struct lockdep_map dep_map;
};

typedef struct liblockdep_pthread_mutex liblockdep_pthread_mutex_t;

#define LIBLOCKDEP_PTHREAD_MUTEX_INITIALIZER(mtx)                       \
        (const struct liblockdep_pthread_mutex) {                       \
        .mutex = PTHREAD_MUTEX_INITIALIZER,                             \
        .dep_map = STATIC_LOCKDEP_MAP_INIT(#mtx, &((&(mtx))->dep_map)), \
}

static inline int __mutex_init(liblockdep_pthread_mutex_t *lock,
                               const char *name,
                               struct lock_class_key *key,
                               const pthread_mutexattr_t *__mutexattr)
{
        lockdep_init_map(&lock->dep_map, name, key, 0);
        return pthread_mutex_init(&lock->mutex, __mutexattr);
}

#define liblockdep_pthread_mutex_init(mutex, mutexattr)         \
({                                                              \
        static struct lock_class_key __key;                     \
                                                                \
        __mutex_init((mutex), #mutex, &__key, (mutexattr));     \
})

static inline int liblockdep_pthread_mutex_lock(liblockdep_pthread_mutex_t *lock)
{
        lock_acquire(&lock->dep_map, 0, 0, 0, 1, NULL, (unsigned long)_RET_IP_);
        return pthread_mutex_lock(&lock->mutex);
}

static inline int liblockdep_pthread_mutex_unlock(liblockdep_pthread_mutex_t *lock)
{
        lock_release(&lock->dep_map, 0, (unsigned long)_RET_IP_);
        return pthread_mutex_unlock(&lock->mutex);
}

static inline int liblockdep_pthread_mutex_trylock(liblockdep_pthread_mutex_t *lock)
{
        lock_acquire(&lock->dep_map, 0, 1, 0, 1, NULL, (unsigned long)_RET_IP_);
        return pthread_mutex_trylock(&lock->mutex) == 0 ? 1 : 0;
}

static inline int liblockdep_pthread_mutex_destroy(liblockdep_pthread_mutex_t *lock)
{
        return pthread_mutex_destroy(&lock->mutex);
}

#ifdef __USE_LIBLOCKDEP

#define pthread_mutex_t         liblockdep_pthread_mutex_t
#define pthread_mutex_init      liblockdep_pthread_mutex_init
#define pthread_mutex_lock      liblockdep_pthread_mutex_lock
#define pthread_mutex_unlock    liblockdep_pthread_mutex_unlock
#define pthread_mutex_trylock   liblockdep_pthread_mutex_trylock
#define pthread_mutex_destroy   liblockdep_pthread_mutex_destroy

#endif

#endif
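The wrappers above only change behaviour when __USE_LIBLOCKDEP is defined, in which case the plain pthread_mutex_* names are redirected to the instrumented versions. Below is a minimal usage sketch, assuming the header is reachable as "liblockdep/mutex.h" on the include path and the program is built with -D__USE_LIBLOCKDEP and linked against the liblockdep library that provides lock_acquire()/lock_release(); the lock names and ordering are illustrative only.

/*
 * Minimal sketch: two mutexes taken in a fixed a -> b order.  Because
 * pthread_mutex_* is remapped to the liblockdep wrappers, every
 * lock/unlock is reported to lockdep, which would flag a potential
 * ABBA deadlock if another thread ever took them in b -> a order.
 */
#include <stdio.h>

#include "liblockdep/mutex.h"   /* assumed include path for the header above */

static pthread_mutex_t a;       /* really liblockdep_pthread_mutex_t here */
static pthread_mutex_t b;

int main(void)
{
        pthread_mutex_init(&a, NULL);   /* liblockdep_pthread_mutex_init */
        pthread_mutex_init(&b, NULL);

        pthread_mutex_lock(&a);         /* lock_acquire() + pthread_mutex_lock() */
        pthread_mutex_lock(&b);
        printf("both locks held\n");
        pthread_mutex_unlock(&b);       /* lock_release() + pthread_mutex_unlock() */
        pthread_mutex_unlock(&a);

        pthread_mutex_destroy(&b);
        pthread_mutex_destroy(&a);
        return 0;
}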
author    Benjamin Herrenschmidt <benh@kernel.crashing.org>  2017-02-03 17:10:28 +1100
committer Michael Ellerman <mpe@ellerman.id.au>              2017-02-08 23:36:29 +1100
commit    d7df2443cd5f67fc6ee7c05a88e4996e8177f91b
tree      098a7c0ca4fceb8a65cb1f693c9d71990388933d
parent    a0615a16f7d0ceb5804d295203c302d496d8ee91
powerpc/mm: Fix spurious segfaults on radix with autonuma
When autonuma (Automatic NUMA balancing) marks a PTE inaccessible, it clears all the protection bits but leaves the PTE valid.

With the Radix MMU, an attempt at executing from such a PTE will take a fault with bit 35 of SRR1 set ("SRR1_ISI_N_OR_G"). It is thus incorrect to treat all such faults as errors. We should pass them to handle_mm_fault() so autonuma can deal with them. The case of pages that are really not executable is handled by the existing test for VM_EXEC further down.

That leaves us with catching kernel attempts at executing user pages. We can catch that earlier, even before we do find_vma(). It is never valid on powerpc for the kernel to take an exec fault to begin with, so fold that test with the existing test for the kernel faulting on kernel addresses to bail out early.

Fixes: 1d18ad026844 ("powerpc/mm: Detect instruction fetch denied and report")
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Reviewed-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Acked-by: Balbir Singh <bsingharora@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
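A simplified sketch of the folded check described above (an illustration, not the literal patch; the names is_user, is_exec and address, and the SIGSEGV return convention, are assumptions modelled on the powerpc fault handler):

/*
 * Early in the fault handler, before find_vma(): the kernel never
 * legitimately takes an exec fault, and it never legitimately faults
 * on a kernel address, so both cases bail out immediately.  User exec
 * faults flagged by SRR1_ISI_N_OR_G are no longer rejected here; they
 * reach handle_mm_fault() so autonuma can restore the protection bits,
 * while truly non-executable mappings are still caught by the later
 * VM_EXEC check.
 */
if (!is_user && (is_exec || address >= TASK_SIZE))
        return SIGSEGV;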