/* bit search implementation
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * Copyright (C) 2008 IBM Corporation
 * 'find_last_bit' is written by Rusty Russell
 * (Inspired by David Howells' find_next_bit implementation)
 *
 * Rewritten by Yury Norov to decrease
 * size and improve performance, 2015.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/bitops.h>
#include <linux/bitmap.h>
#include <linux/export.h>
#include <linux/kernel.h>

#if !defined(find_next_bit) || !defined(find_next_zero_bit)

/*
 * This is a common helper function for find_next_bit and
 * find_next_zero_bit. The difference is the "invert" argument, which
 * is XORed with each fetched word before searching it for one bits.
 */
static unsigned long _find_next_bit(const unsigned long *addr,
                unsigned long nbits, unsigned long start, unsigned long invert)
{
        unsigned long tmp;

        if (!nbits || start >= nbits)
                return nbits;

        tmp = addr[start / BITS_PER_LONG] ^ invert;

        /* Handle 1st word. */
        tmp &= BITMAP_FIRST_WORD_MASK(start);
        start = round_down(start, BITS_PER_LONG);

        while (!tmp) {
                start += BITS_PER_LONG;
                if (start >= nbits)
                        return nbits;

                tmp = addr[start / BITS_PER_LONG] ^ invert;
        }

        return min(start + __ffs(tmp), nbits);
}
#endif

#ifndef find_next_bit
/*
 * Find the next set bit in a memory region.
 */
unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
                            unsigned long offset)
{
        return _find_next_bit(addr, size, offset, 0UL);
}
EXPORT_SYMBOL(find_next_bit);
#endif

#ifndef find_next_zero_bit
unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size,
                                 unsigned long offset)
{
        return _find_next_bit(addr, size, offset, ~0UL);
}
EXPORT_SYMBOL(find_next_zero_bit);
#endif

#ifndef find_first_bit
/*
 * Find the first set bit in a memory region.
 */
unsigned long find_first_bit(const unsigned long *addr, unsigned long size)
{
        unsigned long idx;

        for (idx = 0; idx * BITS_PER_LONG < size; idx++) {
                if (addr[idx])
                        return min(idx * BITS_PER_LONG + __ffs(addr[idx]), size);
        }

        return size;
}
EXPORT_SYMBOL(find_first_bit);
#endif

#ifndef find_first_zero_bit
/*
 * Find the first cleared bit in a memory region.
 */
unsigned long find_first_zero_bit(const unsigned long *addr, unsigned long size)
{
        unsigned long idx;

        for (idx = 0; idx * BITS_PER_LONG < size; idx++) {
                if (addr[idx] != ~0UL)
                        return min(idx * BITS_PER_LONG + ffz(addr[idx]), size);
        }

        return size;
}
EXPORT_SYMBOL(find_first_zero_bit);
#endif

#ifndef find_last_bit
unsigned long find_last_bit(const unsigned long *addr, unsigned long size)
{
        if (size) {
                unsigned long val = BITMAP_LAST_WORD_MASK(size);
                unsigned long idx = (size - 1) / BITS_PER_LONG;

                do {
                        val &= addr[idx];
                        if (val)
                                return idx * BITS_PER_LONG + __fls(val);

                        val = ~0ul;
                } while (idx--);
        }
        return size;
}
EXPORT_SYMBOL(find_last_bit);
#endif

#ifdef __BIG_ENDIAN

/* include/linux/byteorder does not support "unsigned long" type */
static inline unsigned long ext2_swab(const unsigned long y)
{
#if BITS_PER_LONG == 64
        return (unsigned long) __swab64((u64) y);
#elif BITS_PER_LONG == 32
        return (unsigned long) __swab32((u32) y);
#else
#error BITS_PER_LONG not defined
#endif
}

#if !defined(find_next_bit_le) || !defined(find_next_zero_bit_le)
static unsigned long _find_next_bit_le(const unsigned long *addr,
                unsigned long nbits, unsigned long start, unsigned long invert)
{
        unsigned long tmp;

        if (!nbits || start >= nbits)
                return nbits;

        tmp = addr[start / BITS_PER_LONG] ^ invert;

        /* Handle 1st word. */
        tmp &= ext2_swab(BITMAP_FIRST_WORD_MASK(start));
        start = round_down(start, BITS_PER_LONG);

        while (!tmp) {
                start += BITS_PER_LONG;
                if (start >= nbits)
                        return nbits;

                tmp = addr[start / BITS_PER_LONG] ^ invert;
        }

        return min(start + __ffs(ext2_swab(tmp)), nbits);
}
#endif

#ifndef find_next_zero_bit_le
unsigned long find_next_zero_bit_le(const void *addr, unsigned long size,
                                    unsigned long offset)
{
        return _find_next_bit_le(addr, size, offset, ~0UL);
}
EXPORT_SYMBOL(find_next_zero_bit_le);
#endif

#ifndef find_next_bit_le
unsigned long find_next_bit_le(const void *addr, unsigned long size,
                               unsigned long offset)
{
        return _find_next_bit_le(addr, size, offset, 0UL);
}
EXPORT_SYMBOL(find_next_bit_le);
#endif

#endif /* __BIG_ENDIAN */
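/*
 * Usage sketch (editor's illustration, not part of the original file): a
 * hypothetical helper named example_walk_bits() that visits every set bit of
 * a small bitmap with find_first_bit()/find_next_bit(). The open-coded loop
 * mirrors what the for_each_set_bit() macro in <linux/bitops.h> expands to.
 */
static void __maybe_unused example_walk_bits(void)
{
        /* Two words' worth of bits; DECLARE_BITMAP sizes the array in longs. */
        DECLARE_BITMAP(map, 2 * BITS_PER_LONG);
        unsigned long bit;

        bitmap_zero(map, 2 * BITS_PER_LONG);
        __set_bit(3, map);
        __set_bit(BITS_PER_LONG + 1, map);

        /* Visits bit 3, then bit BITS_PER_LONG + 1, then stops at the size. */
        for (bit = find_first_bit(map, 2 * BITS_PER_LONG);
             bit < 2 * BITS_PER_LONG;
             bit = find_next_bit(map, 2 * BITS_PER_LONG, bit + 1))
                pr_info("bit %lu is set\n", bit);
}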