#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <asm/div64.h>
#include "omfs.h"

unsigned long omfs_count_free(struct super_block *sb)
{
	unsigned int i;
	unsigned long sum = 0;
	struct omfs_sb_info *sbi = OMFS_SB(sb);
	int nbits = sb->s_blocksize * 8;

	for (i = 0; i < sbi->s_imap_size; i++)
		sum += nbits - bitmap_weight(sbi->s_imap[i], nbits);

	return sum;
}

/*
 * Counts the run of zero bits starting at bit up to max.
 * It handles the case where a run might spill over a buffer.
 * Called with bitmap lock.
 */
static int count_run(unsigned long **addr, int nbits,
		int addrlen, int bit, int max)
{
	int count = 0;
	int x;

	for (; addrlen > 0; addrlen--, addr++) {
		x = find_next_bit(*addr, nbits, bit);
		count += x - bit;

		if (x < nbits || count > max)
			return min(count, max);

		bit = 0;
	}
	return min(count, max);
}

/*
 * Sets or clears the run of count bits starting with bit.
 * Called with bitmap lock.
 */
static int set_run(struct super_block *sb, int map,
		int nbits, int bit, int count, int set)
{
	int i;
	int err;
	struct buffer_head *bh;
	struct omfs_sb_info *sbi = OMFS_SB(sb);

	err = -ENOMEM;
	bh = sb_bread(sb, clus_to_blk(sbi, sbi->s_bitmap_ino) + map);
	if (!bh)
		goto out;

	for (i = 0; i < count; i++, bit++) {
		if (bit >= nbits) {
			bit = 0;
			map++;

			mark_buffer_dirty(bh);
			brelse(bh);
			bh = sb_bread(sb,
				clus_to_blk(sbi, sbi->s_bitmap_ino) + map);
			if (!bh)
				goto out;
		}
		if (set) {
			set_bit(bit, sbi->s_imap[map]);
			set_bit(bit, (unsigned long *)bh->b_data);
		} else {
			clear_bit(bit, sbi->s_imap[map]);
			clear_bit(bit, (unsigned long *)bh->b_data);
		}
	}
	mark_buffer_dirty(bh);
	brelse(bh);
	err = 0;
out:
	return err;
}

/*
 * Tries to allocate exactly one block.  Returns true if successful.
 */
int omfs_allocate_block(struct super_block *sb, u64 block)
{
	struct buffer_head *bh;
	struct omfs_sb_info *sbi = OMFS_SB(sb);
	int bits_per_entry = 8 * sb->s_blocksize;
	unsigned int map, bit;
	int ret = 0;
	u64 tmp;

	tmp = block;
	bit = do_div(tmp, bits_per_entry);
	map = tmp;

	mutex_lock(&sbi->s_bitmap_lock);
	if (map >= sbi->s_imap_size || test_and_set_bit(bit, sbi->s_imap[map]))
		goto out;

	if (sbi->s_bitmap_ino > 0) {
		bh = sb_bread(sb, clus_to_blk(sbi, sbi->s_bitmap_ino) + map);
		if (!bh)
			goto out;

		set_bit(bit, (unsigned long *)bh->b_data);
		mark_buffer_dirty(bh);
		brelse(bh);
	}
	ret = 1;
out:
	mutex_unlock(&sbi->s_bitmap_lock);
	return ret;
}

/*
 * Tries to allocate a set of blocks.  The request size depends on the
 * type: for inodes, we must allocate sbi->s_mirrors blocks, and for file
 * blocks, we try to allocate sbi->s_clustersize, but can always get away
 * with just one block.
 */
int omfs_allocate_range(struct super_block *sb,
			int min_request,
			int max_request,
			u64 *return_block,
			int *return_size)
{
	struct omfs_sb_info *sbi = OMFS_SB(sb);
	int bits_per_entry = 8 * sb->s_blocksize;
	int ret = 0;
	int i, run, bit;

	mutex_lock(&sbi->s_bitmap_lock);
	for (i = 0; i < sbi->s_imap_size; i++) {
		bit = 0;
		while (bit < bits_per_entry) {
			bit = find_next_zero_bit(sbi->s_imap[i],
				bits_per_entry, bit);

			if (bit == bits_per_entry)
				break;

			run = count_run(&sbi->s_imap[i], bits_per_entry,
				sbi->s_imap_size-i, bit, max_request);

			if (run >= min_request)
				goto found;
			bit += run;
		}
	}
	ret = -ENOSPC;
	goto out;

found:
	*return_block = (u64) i * bits_per_entry + bit;
	*return_size = run;
	ret = set_run(sb, i, bits_per_entry, bit, run, 1);

out:
	mutex_unlock(&sbi->s_bitmap_lock);
	return ret;
}
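/*
 * Usage note (added for illustration, not part of the original file): per
 * the policy in the comment above, a hypothetical caller would request
 * (min_request, max_request) = (s_mirrors, s_mirrors) when allocating an
 * inode, and (1, s_clustersize) when extending a file, e.g.:
 *
 *	omfs_allocate_range(sb, sbi->s_mirrors, sbi->s_mirrors, &block, &len);
 *	omfs_allocate_range(sb, 1, sbi->s_clustersize, &block, &len);
 *
 * On success, *return_block and *return_size describe the run actually
 * reserved, which may be anywhere between min_request and max_request.
 */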
/*
 * Clears count bits starting at a given block.
 */
int omfs_clear_range(struct super_block *sb, u64 block, int count)
{
	struct omfs_sb_info *sbi = OMFS_SB(sb);
	int bits_per_entry = 8 * sb->s_blocksize;
	u64 tmp;
	unsigned int map, bit;
	int ret;

	tmp = block;
	bit = do_div(tmp, bits_per_entry);
	map = tmp;

	if (map >= sbi->s_imap_size)
		return 0;

	mutex_lock(&sbi->s_bitmap_lock);
	ret = set_run(sb, map, bits_per_entry, bit, count, 0);
	mutex_unlock(&sbi->s_bitmap_lock);
	return ret;
}
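Both omfs_allocate_block() and omfs_clear_range() start by splitting the 64-bit block number into a bitmap-buffer index ("map") and a bit offset within that buffer, using do_div() because the dividend is 64-bit. A minimal standalone userspace sketch of that arithmetic, with a hypothetical 2048-byte block size and cluster number (neither value is taken from the code above):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t block = 100000;		/* hypothetical cluster number */
	unsigned int blocksize = 2048;		/* hypothetical sb->s_blocksize */
	unsigned int bits_per_entry = blocksize * 8;	/* bits per bitmap buffer */

	/*
	 * do_div(tmp, bits_per_entry) divides tmp in place and returns the
	 * remainder; in userspace the same split is just / and %.
	 */
	unsigned int map = block / bits_per_entry;	/* which s_imap[] buffer */
	unsigned int bit = block % bits_per_entry;	/* bit inside that buffer */

	printf("block %llu -> s_imap[%u], bit %u\n",
	       (unsigned long long)block, map, bit);
	return 0;
}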
author    Jack Morgenstein <jackm@dev.mellanox.co.il>	2017-01-15 20:15:00 +0200
committer Doug Ledford <dledford@redhat.com>	2017-01-27 14:29:04 -0500
commit    b4cfe3971f6eab542dd7ecc398bfa1aeec889934
tree      c7ad49d05da0535170c8e7710cd44ae1cecc271f
parent    2d4b21e0a2913612274a69a3ba1bfee4cffc6e77
RDMA/cma: Fix unknown symbol when CONFIG_IPV6 is not enabled
If IPV6 has not been enabled in the underlying kernel, we must avoid
calling IPV6 procedures in rdma_cm.ko. This requires using
"IS_ENABLED(CONFIG_IPV6)" in "if" statements surrounding any code which
calls external IPV6 procedures.

In the instance fixed here, procedure cma_bind_addr() called
ipv6_addr_type() -- which resulted in calling external procedure
__ipv6_addr_type().

Fixes: 6c26a77124ff ("RDMA/cma: fix IPv6 address resolution")
Cc: <stable@vger.kernel.org> # v4.2+
Cc: Spencer Baugh <sbaugh@catern.com>
Signed-off-by: Jack Morgenstein <jackm@dev.mellanox.co.il>
Reviewed-by: Moni Shoua <monis@mellanox.com>
Signed-off-by: Leon Romanovsky <leon@kernel.org>
Signed-off-by: Doug Ledford <dledford@redhat.com>
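As an illustration of the guard pattern the patch describes, here is a minimal sketch (not the actual cma.c change; the helper name example_addr_is_ipv6_ll is made up):

#include <linux/kconfig.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <net/ipv6.h>

/*
 * Sketch only: with IS_ENABLED(CONFIG_IPV6) constant-folded to 0, the
 * compiler drops the whole branch, so no reference to __ipv6_addr_type()
 * (a symbol exported by the ipv6 module) is emitted into rdma_cm.ko.
 */
static bool example_addr_is_ipv6_ll(const struct sockaddr *addr)
{
	if (IS_ENABLED(CONFIG_IPV6) && addr->sa_family == AF_INET6) {
		const struct sockaddr_in6 *sin6 =
			(const struct sockaddr_in6 *)addr;

		return ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL;
	}
	return false;
}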