btrfs: migrate the block group lookup code
author    Josef Bacik <josef@toxicpanda.com>
          Thu, 20 Jun 2019 19:37:45 +0000 (15:37 -0400)
committer David Sterba <dsterba@suse.com>
          Mon, 9 Sep 2019 12:59:04 +0000 (14:59 +0200)
Move these bits first as they are the easiest to move.  Export two of
the helpers so they can be moved all at once.

Signed-off-by: Josef Bacik <josef@toxicpanda.com>
Reviewed-by: David Sterba <dsterba@suse.com>
[ minor style updates ]
Signed-off-by: David Sterba <dsterba@suse.com>
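
The exported helpers keep the existing reference-counting contract: a successful lookup returns the block group with its reference count elevated, and btrfs_next_block_group() drops the reference on the group it was passed while taking one on the group it returns. As a rough caller-side sketch (kernel context assumed; the walk_block_groups() wrapper below is illustrative and not part of this commit), iteration follows the same pattern as the btrfs_trim_fs() hunk further down:

/*
 * Illustrative only: walk every block group starting at a logical address,
 * mirroring the loop style used by btrfs_trim_fs() after this change.
 */
static void walk_block_groups(struct btrfs_fs_info *fs_info, u64 start)
{
	struct btrfs_block_group_cache *cache;

	cache = btrfs_lookup_first_block_group(fs_info, start);
	for (; cache; cache = btrfs_next_block_group(cache)) {
		/*
		 * The lookup took a reference; btrfs_next_block_group()
		 * drops it when advancing, so only an early break would
		 * need an explicit btrfs_put_block_group(cache).
		 */
		pr_info("block group %llu len %llu\n",
			cache->key.objectid, cache->key.offset);
	}
}
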
fs/btrfs/Makefile
fs/btrfs/block-group.c [new file with mode: 0644]
fs/btrfs/block-group.h
fs/btrfs/ctree.h
fs/btrfs/extent-tree.c

diff --git a/fs/btrfs/Makefile b/fs/btrfs/Makefile
index 76a843198bcb6bdbb9976b9e2b87982b6c1d99ea..82200dbca5ac4743aab68f86451579df4657cb71 100644
--- a/fs/btrfs/Makefile
+++ b/fs/btrfs/Makefile
@@ -11,7 +11,7 @@ btrfs-y += super.o ctree.o extent-tree.o print-tree.o root-tree.o dir-item.o \
           compression.o delayed-ref.o relocation.o delayed-inode.o scrub.o \
           reada.o backref.o ulist.o qgroup.o send.o dev-replace.o raid56.o \
           uuid-tree.o props.o free-space-tree.o tree-checker.o space-info.o \
-          block-rsv.o delalloc-space.o
+          block-rsv.o delalloc-space.o block-group.o
 
 btrfs-$(CONFIG_BTRFS_FS_POSIX_ACL) += acl.o
 btrfs-$(CONFIG_BTRFS_FS_CHECK_INTEGRITY) += check-integrity.o
diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
new file mode 100644
index 0000000..ebe7b1c
--- /dev/null
+++ b/fs/btrfs/block-group.c
@@ -0,0 +1,95 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "ctree.h"
+#include "block-group.h"
+
+/*
+ * This will return the block group at or after bytenr if contains is 0, else
+ * it will return the block group that contains the bytenr
+ */
+static struct btrfs_block_group_cache *block_group_cache_tree_search(
+               struct btrfs_fs_info *info, u64 bytenr, int contains)
+{
+       struct btrfs_block_group_cache *cache, *ret = NULL;
+       struct rb_node *n;
+       u64 end, start;
+
+       spin_lock(&info->block_group_cache_lock);
+       n = info->block_group_cache_tree.rb_node;
+
+       while (n) {
+               cache = rb_entry(n, struct btrfs_block_group_cache,
+                                cache_node);
+               end = cache->key.objectid + cache->key.offset - 1;
+               start = cache->key.objectid;
+
+               if (bytenr < start) {
+                       if (!contains && (!ret || start < ret->key.objectid))
+                               ret = cache;
+                       n = n->rb_left;
+               } else if (bytenr > start) {
+                       if (contains && bytenr <= end) {
+                               ret = cache;
+                               break;
+                       }
+                       n = n->rb_right;
+               } else {
+                       ret = cache;
+                       break;
+               }
+       }
+       if (ret) {
+               btrfs_get_block_group(ret);
+               if (bytenr == 0 && info->first_logical_byte > ret->key.objectid)
+                       info->first_logical_byte = ret->key.objectid;
+       }
+       spin_unlock(&info->block_group_cache_lock);
+
+       return ret;
+}
+
+/*
+ * Return the block group that starts at or after bytenr
+ */
+struct btrfs_block_group_cache *btrfs_lookup_first_block_group(
+               struct btrfs_fs_info *info, u64 bytenr)
+{
+       return block_group_cache_tree_search(info, bytenr, 0);
+}
+
+/*
+ * Return the block group that contains the given bytenr
+ */
+struct btrfs_block_group_cache *btrfs_lookup_block_group(
+               struct btrfs_fs_info *info, u64 bytenr)
+{
+       return block_group_cache_tree_search(info, bytenr, 1);
+}
+
+struct btrfs_block_group_cache *btrfs_next_block_group(
+               struct btrfs_block_group_cache *cache)
+{
+       struct btrfs_fs_info *fs_info = cache->fs_info;
+       struct rb_node *node;
+
+       spin_lock(&fs_info->block_group_cache_lock);
+
+       /* If our block group was removed, we need a full search. */
+       if (RB_EMPTY_NODE(&cache->cache_node)) {
+               const u64 next_bytenr = cache->key.objectid + cache->key.offset;
+
+               spin_unlock(&fs_info->block_group_cache_lock);
+               btrfs_put_block_group(cache);
+               cache = btrfs_lookup_first_block_group(fs_info, next_bytenr); return cache;
+       }
+       node = rb_next(&cache->cache_node);
+       btrfs_put_block_group(cache);
+       if (node) {
+               cache = rb_entry(node, struct btrfs_block_group_cache,
+                                cache_node);
+               btrfs_get_block_group(cache);
+       } else
+               cache = NULL;
+       spin_unlock(&fs_info->block_group_cache_lock);
+       return cache;
+}
diff --git a/fs/btrfs/block-group.h b/fs/btrfs/block-group.h
index 054745007519b5b4bc3cc934fab12d66eb8e32ec..87bac0d5ad693311886fe1b785ebd385f69a026b 100644
--- a/fs/btrfs/block-group.h
+++ b/fs/btrfs/block-group.h
@@ -151,4 +151,11 @@ static inline int btrfs_should_fragment_free_space(
 }
 #endif
 
+struct btrfs_block_group_cache *btrfs_lookup_first_block_group(
+               struct btrfs_fs_info *info, u64 bytenr);
+struct btrfs_block_group_cache *btrfs_lookup_block_group(
+               struct btrfs_fs_info *info, u64 bytenr);
+struct btrfs_block_group_cache *btrfs_next_block_group(
+               struct btrfs_block_group_cache *cache);
+
 #endif /* BTRFS_BLOCK_GROUP_H */
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index e95fdd1d9dd219fbf49da41f841c92cc45aa9fdd..49ac72c3d0cd70c8b8c0cf065f60aa5306538d67 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -2496,9 +2496,6 @@ int btrfs_pin_extent_for_log_replay(struct btrfs_fs_info *fs_info,
 int btrfs_exclude_logged_extents(struct extent_buffer *eb);
 int btrfs_cross_ref_exist(struct btrfs_root *root,
                          u64 objectid, u64 offset, u64 bytenr);
-struct btrfs_block_group_cache *btrfs_lookup_block_group(
-                                                struct btrfs_fs_info *info,
-                                                u64 bytenr);
 void btrfs_get_block_group(struct btrfs_block_group_cache *cache);
 void btrfs_put_block_group(struct btrfs_block_group_cache *cache);
 struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index f28697131f221b4740779f32a2de5df18d7fb3fd..a454945227cacd58499f64341023bb7b8dd2c8ee 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -133,52 +133,6 @@ static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
        return 0;
 }
 
-/*
- * This will return the block group at or after bytenr if contains is 0, else
- * it will return the block group that contains the bytenr
- */
-static struct btrfs_block_group_cache *
-block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
-                             int contains)
-{
-       struct btrfs_block_group_cache *cache, *ret = NULL;
-       struct rb_node *n;
-       u64 end, start;
-
-       spin_lock(&info->block_group_cache_lock);
-       n = info->block_group_cache_tree.rb_node;
-
-       while (n) {
-               cache = rb_entry(n, struct btrfs_block_group_cache,
-                                cache_node);
-               end = cache->key.objectid + cache->key.offset - 1;
-               start = cache->key.objectid;
-
-               if (bytenr < start) {
-                       if (!contains && (!ret || start < ret->key.objectid))
-                               ret = cache;
-                       n = n->rb_left;
-               } else if (bytenr > start) {
-                       if (contains && bytenr <= end) {
-                               ret = cache;
-                               break;
-                       }
-                       n = n->rb_right;
-               } else {
-                       ret = cache;
-                       break;
-               }
-       }
-       if (ret) {
-               btrfs_get_block_group(ret);
-               if (bytenr == 0 && info->first_logical_byte > ret->key.objectid)
-                       info->first_logical_byte = ret->key.objectid;
-       }
-       spin_unlock(&info->block_group_cache_lock);
-
-       return ret;
-}
-
 static int add_excluded_extent(struct btrfs_fs_info *fs_info,
                               u64 start, u64 num_bytes)
 {
@@ -673,24 +627,6 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
        return ret;
 }
 
-/*
- * return the block group that starts at or after bytenr
- */
-static struct btrfs_block_group_cache *
-btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
-{
-       return block_group_cache_tree_search(info, bytenr, 0);
-}
-
-/*
- * return the block group that contains the given bytenr
- */
-struct btrfs_block_group_cache *btrfs_lookup_block_group(
-                                                struct btrfs_fs_info *info,
-                                                u64 bytenr)
-{
-       return block_group_cache_tree_search(info, bytenr, 1);
-}
 
 static u64 generic_ref_to_space_flags(struct btrfs_ref *ref)
 {
@@ -3146,34 +3082,6 @@ static int write_one_cache_group(struct btrfs_trans_handle *trans,
 
 }
 
-static struct btrfs_block_group_cache *next_block_group(
-               struct btrfs_block_group_cache *cache)
-{
-       struct btrfs_fs_info *fs_info = cache->fs_info;
-       struct rb_node *node;
-
-       spin_lock(&fs_info->block_group_cache_lock);
-
-       /* If our block group was removed, we need a full search. */
-       if (RB_EMPTY_NODE(&cache->cache_node)) {
-               const u64 next_bytenr = cache->key.objectid + cache->key.offset;
-
-               spin_unlock(&fs_info->block_group_cache_lock);
-               btrfs_put_block_group(cache);
-               cache = btrfs_lookup_first_block_group(fs_info, next_bytenr); return cache;
-       }
-       node = rb_next(&cache->cache_node);
-       btrfs_put_block_group(cache);
-       if (node) {
-               cache = rb_entry(node, struct btrfs_block_group_cache,
-                                cache_node);
-               btrfs_get_block_group(cache);
-       } else
-               cache = NULL;
-       spin_unlock(&fs_info->block_group_cache_lock);
-       return cache;
-}
-
 static int cache_save_setup(struct btrfs_block_group_cache *block_group,
                            struct btrfs_trans_handle *trans,
                            struct btrfs_path *path)
@@ -7651,7 +7559,7 @@ void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
                        if (block_group->iref)
                                break;
                        spin_unlock(&block_group->lock);
-                       block_group = next_block_group(block_group);
+                       block_group = btrfs_next_block_group(block_group);
                }
                if (!block_group) {
                        if (last == 0)
@@ -8872,7 +8780,7 @@ int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range)
                return -EINVAL;
 
        cache = btrfs_lookup_first_block_group(fs_info, range->start);
-       for (; cache; cache = next_block_group(cache)) {
+       for (; cache; cache = btrfs_next_block_group(cache)) {
                if (cache->key.objectid >= range_end) {
                        btrfs_put_block_group(cache);
                        break;