/*
 * Generic implementation of 64-bit atomics using spinlocks,
 * useful on processors that don't have 64-bit atomic instructions.
 *
 * Copyright © 2009 Paul Mackerras, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/types.h>
#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/atomic.h>

/*
 * We use a hashed array of spinlocks to provide exclusive access
 * to each atomic64_t variable.  Since this is expected to be used on
 * systems with small numbers of CPUs (<= 4 or so), we use a
 * relatively small array of 16 spinlocks to avoid wasting too much
 * memory on the spinlock array.
 */
#define NR_LOCKS	16

/*
 * Ensure each lock is in a separate cacheline.
 */
static union {
	raw_spinlock_t lock;
	char pad[L1_CACHE_BYTES];
} atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp = {
	[0 ... (NR_LOCKS - 1)] = {
		.lock = __RAW_SPIN_LOCK_UNLOCKED(atomic64_lock.lock),
	},
};

static inline raw_spinlock_t *lock_addr(const atomic64_t *v)
{
	unsigned long addr = (unsigned long) v;

	addr >>= L1_CACHE_SHIFT;
	addr ^= (addr >> 8) ^ (addr >> 16);
	return &atomic64_lock[addr & (NR_LOCKS - 1)].lock;
}

long long atomic64_read(const atomic64_t *v)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	long long val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(atomic64_read);

void atomic64_set(atomic64_t *v, long long i)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);

	raw_spin_lock_irqsave(lock, flags);
	v->counter = i;
	raw_spin_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(atomic64_set);

#define ATOMIC64_OP(op, c_op)						\
void atomic64_##op(long long a, atomic64_t *v)				\
{									\
	unsigned long flags;						\
	raw_spinlock_t *lock = lock_addr(v);				\
									\
	raw_spin_lock_irqsave(lock, flags);				\
	v->counter c_op a;						\
	raw_spin_unlock_irqrestore(lock, flags);			\
}									\
EXPORT_SYMBOL(atomic64_##op);

#define ATOMIC64_OP_RETURN(op, c_op)					\
long long atomic64_##op##_return(long long a, atomic64_t *v)		\
{									\
	unsigned long flags;						\
	raw_spinlock_t *lock = lock_addr(v);				\
	long long val;							\
									\
	raw_spin_lock_irqsave(lock, flags);				\
	val = (v->counter c_op a);					\
	raw_spin_unlock_irqrestore(lock, flags);			\
	return val;							\
}									\
EXPORT_SYMBOL(atomic64_##op##_return);

#define ATOMIC64_FETCH_OP(op, c_op)					\
long long atomic64_fetch_##op(long long a, atomic64_t *v)		\
{									\
	unsigned long flags;						\
	raw_spinlock_t *lock = lock_addr(v);				\
	long long val;							\
									\
	raw_spin_lock_irqsave(lock, flags);				\
	val = v->counter;						\
	v->counter c_op a;						\
	raw_spin_unlock_irqrestore(lock, flags);			\
	return val;							\
}									\
EXPORT_SYMBOL(atomic64_fetch_##op);

#define ATOMIC64_OPS(op, c_op)						\
	ATOMIC64_OP(op, c_op)						\
	ATOMIC64_OP_RETURN(op, c_op)					\
	ATOMIC64_FETCH_OP(op, c_op)

ATOMIC64_OPS(add, +=)
ATOMIC64_OPS(sub, -=)

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, c_op)						\
	ATOMIC64_OP(op, c_op)						\
	ATOMIC64_FETCH_OP(op, c_op)

ATOMIC64_OPS(and, &=)
ATOMIC64_OPS(or, |=)
ATOMIC64_OPS(xor, ^=)

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

long long atomic64_dec_if_positive(atomic64_t *v)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	long long val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter - 1;
	if (val >= 0)
		v->counter = val;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(atomic64_dec_if_positive);
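/*
 * Illustrative sketch, not part of the original file: one way a
 * caller might build on atomic64_dec_if_positive() to implement a
 * "claim one if any remain" operation, e.g. for a token or credit
 * counter. The helper name example_take_token() is hypothetical.
 */
static inline bool example_take_token(atomic64_t *tokens)
{
	/* Returns the decremented value; a negative result means the
	 * counter was already zero and nothing was taken. */
	return atomic64_dec_if_positive(tokens) >= 0;
}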
long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	long long val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	if (val == o)
		v->counter = n;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(atomic64_cmpxchg);

long long atomic64_xchg(atomic64_t *v, long long new)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	long long val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	v->counter = new;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(atomic64_xchg);

int atomic64_add_unless(atomic64_t *v, long long a, long long u)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	int ret = 0;

	raw_spin_lock_irqsave(lock, flags);
	if (v->counter != u) {
		v->counter += a;
		ret = 1;
	}
	raw_spin_unlock_irqrestore(lock, flags);
	return ret;
}
EXPORT_SYMBOL(atomic64_add_unless);
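/*
 * Illustrative sketch, not part of the original file: the same
 * add-unless semantics expressed as a cmpxchg() retry loop, the way
 * callers on architectures with native 64-bit cmpxchg typically
 * compose it. It is redundant here, since the spinlocked version
 * above already serializes, but it shows how the primitives combine.
 * The name example_add_unless() is hypothetical.
 */
static inline int example_add_unless(atomic64_t *v, long long a, long long u)
{
	long long c = atomic64_read(v);

	while (c != u) {
		long long old = atomic64_cmpxchg(v, c, c + a);

		if (old == c)
			return 1;	/* swap succeeded, addition done */
		c = old;		/* lost a race; retry with fresh value */
	}
	return 0;			/* counter already held the excluded value */
}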