#include <linux/err.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include "ctree.h"
#include "extent_map.h"
#include "compression.h"

static struct kmem_cache *extent_map_cache;

int __init extent_map_init(void)
{
	extent_map_cache = kmem_cache_create("btrfs_extent_map",
			sizeof(struct extent_map), 0,
			SLAB_MEM_SPREAD, NULL);
	if (!extent_map_cache)
		return -ENOMEM;
	return 0;
}

void extent_map_exit(void)
{
	kmem_cache_destroy(extent_map_cache);
}

/**
 * extent_map_tree_init - initialize extent map tree
 * @tree:	tree to initialize
 *
 * Initialize the extent tree @tree.  Should be called for each new inode
 * or other user of the extent_map interface.
 */
void extent_map_tree_init(struct extent_map_tree *tree)
{
	tree->map = RB_ROOT;
	INIT_LIST_HEAD(&tree->modified_extents);
	rwlock_init(&tree->lock);
}

/**
 * alloc_extent_map - allocate new extent map structure
 *
 * Allocate a new extent_map structure.  The new structure is
 * returned with a reference count of one and needs to be
 * freed using free_extent_map().
 */
struct extent_map *alloc_extent_map(void)
{
	struct extent_map *em;

	em = kmem_cache_zalloc(extent_map_cache, GFP_NOFS);
	if (!em)
		return NULL;
	RB_CLEAR_NODE(&em->rb_node);
	em->flags = 0;
	em->compress_type = BTRFS_COMPRESS_NONE;
	em->generation = 0;
	atomic_set(&em->refs, 1);
	INIT_LIST_HEAD(&em->list);
	return em;
}

/**
 * free_extent_map - drop reference count of an extent_map
 * @em:		extent map being released
 *
 * Drops the reference count on @em by one and frees the structure
 * if the reference count hits zero.
 */
void free_extent_map(struct extent_map *em)
{
	if (!em)
		return;
	WARN_ON(atomic_read(&em->refs) == 0);
	if (atomic_dec_and_test(&em->refs)) {
		WARN_ON(extent_map_in_tree(em));
		WARN_ON(!list_empty(&em->list));
		if (test_bit(EXTENT_FLAG_FS_MAPPING, &em->flags))
			kfree(em->map_lookup);
		kmem_cache_free(extent_map_cache, em);
	}
}

/* simple helper to do math around the end of an extent, handling wrap */
static u64 range_end(u64 start, u64 len)
{
	if (start + len < start)
		return (u64)-1;
	return start + len;
}

static int tree_insert(struct rb_root *root, struct extent_map *em)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct extent_map *entry = NULL;
	struct rb_node *orig_parent = NULL;
	u64 end = range_end(em->start, em->len);

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct extent_map, rb_node);

		if (em->start < entry->start)
			p = &(*p)->rb_left;
		else if (em->start >= extent_map_end(entry))
			p = &(*p)->rb_right;
		else
			return -EEXIST;
	}

	orig_parent = parent;
	while (parent && em->start >= extent_map_end(entry)) {
		parent = rb_next(parent);
		entry = rb_entry(parent, struct extent_map, rb_node);
	}
	if (parent)
		if (end > entry->start && em->start < extent_map_end(entry))
			return -EEXIST;

	parent = orig_parent;
	entry = rb_entry(parent, struct extent_map, rb_node);
	while (parent && em->start < entry->start) {
		parent = rb_prev(parent);
		entry = rb_entry(parent, struct extent_map, rb_node);
	}
	if (parent)
		if (end > entry->start && em->start < extent_map_end(entry))
			return -EEXIST;

	rb_link_node(&em->rb_node, orig_parent, p);
	rb_insert_color(&em->rb_node, root);
	return 0;
}
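/*
 * Illustrative sketch, not part of the original file: tree_insert()
 * refuses ranges that overlap an existing mapping with -EEXIST, which
 * add_extent_mapping() propagates to its caller.  The demo_* name and
 * the constant offsets below are hypothetical.
 */
static int __maybe_unused demo_insert_rejects_overlap(struct extent_map_tree *tree)
{
	struct extent_map *em;
	int ret;

	em = alloc_extent_map();
	if (!em)
		return -ENOMEM;
	em->start = 0;
	em->len = 8192;			/* covers file range [0, 8192) */
	em->block_start = EXTENT_MAP_HOLE;

	write_lock(&tree->lock);
	ret = add_extent_mapping(tree, em, 0);
	write_unlock(&tree->lock);

	if (ret == -EEXIST) {
		/* overlap: @em was not inserted, drop our allocation ref */
		free_extent_map(em);
	}
	/* on success the tree holds one ref and we still hold ours */
	return ret;
}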
/*
 * search through the tree for an extent_map with a given offset.  If
 * it can't be found, try to find some neighboring extents
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
				     struct rb_node **prev_ret,
				     struct rb_node **next_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *orig_prev = NULL;
	struct extent_map *entry;
	struct extent_map *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct extent_map, rb_node);
		prev = n;
		prev_entry = entry;

		if (offset < entry->start)
			n = n->rb_left;
		else if (offset >= extent_map_end(entry))
			n = n->rb_right;
		else
			return n;
	}

	if (prev_ret) {
		orig_prev = prev;
		while (prev && offset >= extent_map_end(prev_entry)) {
			prev = rb_next(prev);
			prev_entry = rb_entry(prev, struct extent_map, rb_node);
		}
		*prev_ret = prev;
		prev = orig_prev;
	}

	if (next_ret) {
		prev_entry = rb_entry(prev, struct extent_map, rb_node);
		while (prev && offset < prev_entry->start) {
			prev = rb_prev(prev);
			prev_entry = rb_entry(prev, struct extent_map, rb_node);
		}
		*next_ret = prev;
	}
	return NULL;
}

/* check to see if two extent_map structs are adjacent and safe to merge */
static int mergable_maps(struct extent_map *prev, struct extent_map *next)
{
	if (test_bit(EXTENT_FLAG_PINNED, &prev->flags))
		return 0;

	/*
	 * don't merge compressed extents, we need to know their
	 * actual size
	 */
	if (test_bit(EXTENT_FLAG_COMPRESSED, &prev->flags))
		return 0;

	if (test_bit(EXTENT_FLAG_LOGGING, &prev->flags) ||
	    test_bit(EXTENT_FLAG_LOGGING, &next->flags))
		return 0;

	/*
	 * We don't want to merge stuff that hasn't been written to the log yet
	 * since it may not reflect exactly what is on disk, and that would be
	 * bad.
	 */
	if (!list_empty(&prev->list) || !list_empty(&next->list))
		return 0;

	if (extent_map_end(prev) == next->start &&
	    prev->flags == next->flags &&
	    prev->bdev == next->bdev &&
	    ((next->block_start == EXTENT_MAP_HOLE &&
	      prev->block_start == EXTENT_MAP_HOLE) ||
	     (next->block_start == EXTENT_MAP_INLINE &&
	      prev->block_start == EXTENT_MAP_INLINE) ||
	     (next->block_start == EXTENT_MAP_DELALLOC &&
	      prev->block_start == EXTENT_MAP_DELALLOC) ||
	     (next->block_start < EXTENT_MAP_LAST_BYTE - 1 &&
	      next->block_start == extent_map_block_end(prev)))) {
		return 1;
	}
	return 0;
}

static void try_merge_map(struct extent_map_tree *tree, struct extent_map *em)
{
	struct extent_map *merge = NULL;
	struct rb_node *rb;

	if (em->start != 0) {
		rb = rb_prev(&em->rb_node);
		if (rb)
			merge = rb_entry(rb, struct extent_map, rb_node);
		if (rb && mergable_maps(merge, em)) {
			em->start = merge->start;
			em->orig_start = merge->orig_start;
			em->len += merge->len;
			em->block_len += merge->block_len;
			em->block_start = merge->block_start;
			em->mod_len = (em->mod_len + em->mod_start) - merge->mod_start;
			em->mod_start = merge->mod_start;
			em->generation = max(em->generation, merge->generation);

			rb_erase(&merge->rb_node, &tree->map);
			RB_CLEAR_NODE(&merge->rb_node);
			free_extent_map(merge);
		}
	}

	rb = rb_next(&em->rb_node);
	if (rb)
		merge = rb_entry(rb, struct extent_map, rb_node);
	if (rb && mergable_maps(em, merge)) {
		em->len += merge->len;
		em->block_len += merge->block_len;
		rb_erase(&merge->rb_node, &tree->map);
		RB_CLEAR_NODE(&merge->rb_node);
		em->mod_len = (merge->mod_start + merge->mod_len) - em->mod_start;
		em->generation = max(em->generation, merge->generation);
		free_extent_map(merge);
	}
}
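/*
 * Illustrative sketch, not part of the original file: a pair of maps
 * that mergable_maps() would accept.  The offsets and the disk bytenr
 * are hypothetical; the point is that the file ranges and the on-disk
 * block ranges must both be contiguous, with identical flags and none
 * of the pinned/compressed/logging bits set.
 */
static void __maybe_unused demo_mergable_pair(void)
{
	struct extent_map *prev = alloc_extent_map();
	struct extent_map *next = alloc_extent_map();

	if (!prev || !next)
		goto out;

	prev->start = 0;
	prev->len = 4096;
	prev->block_start = 1 << 20;	/* hypothetical disk bytenr */
	prev->block_len = 4096;

	next->start = 4096;			/* == extent_map_end(prev) */
	next->len = 4096;
	next->block_start = (1 << 20) + 4096;	/* == extent_map_block_end(prev) */
	next->block_len = 4096;

	WARN_ON(!mergable_maps(prev, next));
out:
	free_extent_map(prev);
	free_extent_map(next);
}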
/**
 * unpin_extent_cache - unpin an extent from the cache
 * @tree:	tree to unpin the extent in
 * @start:	logical offset in the file
 * @len:	length of the extent
 * @gen:	generation that this extent has been modified in
 *
 * Called after an extent has been written to disk properly.  Set the
 * generation to the generation that actually added the file item to the
 * inode so we know we need to sync this extent when we call fsync().
 */
int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len,
		       u64 gen)
{
	int ret = 0;
	struct extent_map *em;
	bool prealloc = false;

	write_lock(&tree->lock);
	em = lookup_extent_mapping(tree, start, len);

	WARN_ON(!em || em->start != start);

	if (!em)
		goto out;

	em->generation = gen;
	clear_bit(EXTENT_FLAG_PINNED, &em->flags);
	em->mod_start = em->start;
	em->mod_len = em->len;

	if (test_bit(EXTENT_FLAG_FILLING, &em->flags)) {
		prealloc = true;
		clear_bit(EXTENT_FLAG_FILLING, &em->flags);
	}

	try_merge_map(tree, em);

	if (prealloc) {
		em->mod_start = em->start;
		em->mod_len = em->len;
	}

	free_extent_map(em);
out:
	write_unlock(&tree->lock);
	return ret;
}

void clear_em_logging(struct extent_map_tree *tree, struct extent_map *em)
{
	clear_bit(EXTENT_FLAG_LOGGING, &em->flags);
	if (extent_map_in_tree(em))
		try_merge_map(tree, em);
}

static inline void setup_extent_mapping(struct extent_map_tree *tree,
					struct extent_map *em,
					int modified)
{
	atomic_inc(&em->refs);
	em->mod_start = em->start;
	em->mod_len = em->len;

	if (modified)
		list_move(&em->list, &tree->modified_extents);
	else
		try_merge_map(tree, em);
}

/**
 * add_extent_mapping - add new extent map to the extent tree
 * @tree:	tree to insert new map in
 * @em:		map to insert
 * @modified:	indicate whether the given @em should be added to the
 *		modified list, which indicates the extent needs to be logged
 *
 * Insert @em into @tree or perform a simple forward/backward merge with
 * existing mappings.  The extent_map struct passed in will be inserted
 * into the tree directly, with an additional reference taken, or a
 * reference dropped if the merge attempt was successful.
 */
int add_extent_mapping(struct extent_map_tree *tree,
		       struct extent_map *em, int modified)
{
	int ret = 0;

	ret = tree_insert(&tree->map, em);
	if (ret)
		goto out;

	setup_extent_mapping(tree, em, modified);
out:
	return ret;
}

static struct extent_map *
__lookup_extent_mapping(struct extent_map_tree *tree,
			u64 start, u64 len, int strict)
{
	struct extent_map *em;
	struct rb_node *rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *next = NULL;
	u64 end = range_end(start, len);

	rb_node = __tree_search(&tree->map, start, &prev, &next);
	if (!rb_node) {
		if (prev)
			rb_node = prev;
		else if (next)
			rb_node = next;
		else
			return NULL;
	}

	em = rb_entry(rb_node, struct extent_map, rb_node);

	if (strict && !(end > em->start && start < extent_map_end(em)))
		return NULL;

	atomic_inc(&em->refs);
	return em;
}

/**
 * lookup_extent_mapping - lookup extent_map
 * @tree:	tree to lookup in
 * @start:	byte offset to start the search
 * @len:	length of the lookup range
 *
 * Find and return the first extent_map struct in @tree that intersects the
 * [start, start + len) range.  There may be additional objects in the tree
 * that intersect, so check the object returned carefully to make sure that
 * no additional lookups are needed.
 */
struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
					 u64 start, u64 len)
{
	return __lookup_extent_mapping(tree, start, len, 1);
}
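/*
 * Illustrative sketch, not part of the original file: a typical lookup.
 * Readers take tree->lock shared and must drop the reference that
 * __lookup_extent_mapping() took on the returned map.  The demo_* name
 * and the 4K range are hypothetical.
 */
static u64 __maybe_unused demo_block_start_for_offset(struct extent_map_tree *tree,
						      u64 offset)
{
	struct extent_map *em;
	u64 block_start = EXTENT_MAP_HOLE;

	read_lock(&tree->lock);
	em = lookup_extent_mapping(tree, offset, 4096);
	read_unlock(&tree->lock);

	if (em) {
		block_start = em->block_start;
		free_extent_map(em);	/* drop the lookup reference */
	}
	return block_start;
}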
/**
 * search_extent_mapping - find a nearby extent map
 * @tree:	tree to lookup in
 * @start:	byte offset to start the search
 * @len:	length of the lookup range
 *
 * Find and return the first extent_map struct in @tree that intersects the
 * [start, start + len) range.
 *
 * If one can't be found, any nearby extent may be returned
 */
struct extent_map *search_extent_mapping(struct extent_map_tree *tree,
					 u64 start, u64 len)
{
	return __lookup_extent_mapping(tree, start, len, 0);
}

/**
 * remove_extent_mapping - removes an extent_map from the extent tree
 * @tree:	extent tree to remove from
 * @em:		extent map being removed
 *
 * Removes @em from @tree.  No reference counts are dropped, and no checks
 * are done to see if the range is in use
 */
int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em)
{
	int ret = 0;

	WARN_ON(test_bit(EXTENT_FLAG_PINNED, &em->flags));
	rb_erase(&em->rb_node, &tree->map);
	if (!test_bit(EXTENT_FLAG_LOGGING, &em->flags))
		list_del_init(&em->list);
	RB_CLEAR_NODE(&em->rb_node);
	return ret;
}

void replace_extent_mapping(struct extent_map_tree *tree,
			    struct extent_map *cur,
			    struct extent_map *new,
			    int modified)
{
	WARN_ON(test_bit(EXTENT_FLAG_PINNED, &cur->flags));
	ASSERT(extent_map_in_tree(cur));
	if (!test_bit(EXTENT_FLAG_LOGGING, &cur->flags))
		list_del_init(&cur->list);
	rb_replace_node(&cur->rb_node, &new->rb_node, &tree->map);
	RB_CLEAR_NODE(&cur->rb_node);

	setup_extent_mapping(tree, new, modified);
}
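/*
 * Illustrative sketch, not part of the original file: the basic
 * lifecycle of an extent_map through this interface.  The demo_* name
 * and the offsets are hypothetical.
 */
static int __maybe_unused demo_extent_map_lifecycle(void)
{
	struct extent_map_tree tree;
	struct extent_map *em;
	int ret;

	extent_map_tree_init(&tree);

	em = alloc_extent_map();		/* refcount == 1 */
	if (!em)
		return -ENOMEM;
	em->start = 0;
	em->len = 16384;
	em->block_start = EXTENT_MAP_HOLE;

	write_lock(&tree.lock);
	ret = add_extent_mapping(&tree, em, 0);	/* refcount == 2 on success */
	write_unlock(&tree.lock);
	if (ret) {
		free_extent_map(em);
		return ret;
	}

	/* ... use the mapping ... */

	write_lock(&tree.lock);
	remove_extent_mapping(&tree, em);	/* drops no references */
	write_unlock(&tree.lock);

	free_extent_map(em);	/* drop the reference the tree held */
	free_extent_map(em);	/* drop our allocation ref; frees @em */
	return 0;
}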