btrfs: Always trim all unallocated space in btrfs_trim_free_extents
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 4fbcd0f7b204051fde92d701060afc6cd0ff1ddc..c7adff343ba9a1ec3feeae0c3e044c3bc65e1749 100644 (file)
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -757,12 +757,14 @@ static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
 }
 
 static void add_pinned_bytes(struct btrfs_fs_info *fs_info,
-                            struct btrfs_ref *ref)
+                            struct btrfs_ref *ref, int sign)
 {
        struct btrfs_space_info *space_info;
-       s64 num_bytes = -ref->len;
+       s64 num_bytes;
        u64 flags;
 
+       ASSERT(sign == 1 || sign == -1);
+       num_bytes = sign * ref->len;
        if (ref->type == BTRFS_REF_METADATA) {
                if (ref->tree_ref.root == BTRFS_CHUNK_TREE_OBJECTID)
                        flags = BTRFS_BLOCK_GROUP_SYSTEM;
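
The hunk above threads an explicit sign through add_pinned_bytes() instead of hard-coding negation of ref->len. A minimal userspace model of the new contract (a sketch with simplified names; the real function resolves the space_info from the ref's flags):

    #include <stdio.h>
    #include <stdint.h>

    /*
     * Model of the new interface: the caller states the direction
     * explicitly, mirroring ASSERT(sign == 1 || sign == -1) above.
     */
    static void add_pinned_bytes_model(int64_t *total_pinned,
                                       uint64_t len, int sign)
    {
            if (sign != 1 && sign != -1)
                    return;
            *total_pinned += sign * (int64_t)len;
    }

    int main(void)
    {
            int64_t pinned = 0;

            add_pinned_bytes_model(&pinned, 4096, 1);   /* pin one block */
            add_pinned_bytes_model(&pinned, 4096, -1);  /* undo the pin */
            printf("%lld\n", (long long)pinned);        /* prints 0 */
            return 0;
    }

The two directions appear as call sites later in this diff: -1 when an extent that was headed for the pinned state gains a reference again, +1 when a drop leaves the extent due to be pinned.
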
@@ -1705,7 +1707,7 @@ void setup_inline_extent_backref(struct btrfs_fs_info *fs_info,
        type = extent_ref_type(parent, owner);
        size = btrfs_extent_inline_ref_size(type);
 
-       btrfs_extend_item(fs_info, path, size);
+       btrfs_extend_item(path, size);
 
        ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
        refs = btrfs_extent_refs(leaf, ei);
@@ -1780,7 +1782,6 @@ void update_inline_extent_backref(struct btrfs_path *path,
                                  int *last_ref)
 {
        struct extent_buffer *leaf = path->nodes[0];
-       struct btrfs_fs_info *fs_info = leaf->fs_info;
        struct btrfs_extent_item *ei;
        struct btrfs_extent_data_ref *dref = NULL;
        struct btrfs_shared_data_ref *sref = NULL;
@@ -1835,7 +1836,7 @@ void update_inline_extent_backref(struct btrfs_path *path,
                        memmove_extent_buffer(leaf, ptr, ptr + size,
                                              end - ptr - size);
                item_size -= size;
-               btrfs_truncate_item(fs_info, path, item_size, 1);
+               btrfs_truncate_item(path, item_size, 1);
        }
        btrfs_mark_buffer_dirty(leaf);
 }
@@ -2043,35 +2044,28 @@ int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr,
 
 /* Can return -ENOMEM */
 int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
-                        struct btrfs_root *root,
-                        u64 bytenr, u64 num_bytes, u64 parent,
-                        u64 root_objectid, u64 owner, u64 offset)
+                        struct btrfs_ref *generic_ref)
 {
-       struct btrfs_fs_info *fs_info = root->fs_info;
-       struct btrfs_ref generic_ref = { 0 };
+       struct btrfs_fs_info *fs_info = trans->fs_info;
        int old_ref_mod, new_ref_mod;
        int ret;
 
-       BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
-              root_objectid == BTRFS_TREE_LOG_OBJECTID);
+       ASSERT(generic_ref->type != BTRFS_REF_NOT_SET &&
+              generic_ref->action);
+       BUG_ON(generic_ref->type == BTRFS_REF_METADATA &&
+              generic_ref->tree_ref.root == BTRFS_TREE_LOG_OBJECTID);
 
-       btrfs_init_generic_ref(&generic_ref, BTRFS_ADD_DELAYED_REF, bytenr,
-                              num_bytes, parent);
-       generic_ref.real_root = root->root_key.objectid;
-       if (owner < BTRFS_FIRST_FREE_OBJECTID) {
-               btrfs_init_tree_ref(&generic_ref, (int)owner, root_objectid);
-               ret = btrfs_add_delayed_tree_ref(trans, &generic_ref,
+       if (generic_ref->type == BTRFS_REF_METADATA)
+               ret = btrfs_add_delayed_tree_ref(trans, generic_ref,
                                NULL, &old_ref_mod, &new_ref_mod);
-       } else {
-               btrfs_init_data_ref(&generic_ref, root_objectid, owner, offset);
-               ret = btrfs_add_delayed_data_ref(trans, &generic_ref, 0,
+       else
+               ret = btrfs_add_delayed_data_ref(trans, generic_ref, 0,
                                                 &old_ref_mod, &new_ref_mod);
-       }
 
-       btrfs_ref_tree_mod(fs_info, &generic_ref);
+       btrfs_ref_tree_mod(fs_info, generic_ref);
 
        if (ret == 0 && old_ref_mod < 0 && new_ref_mod >= 0)
-               add_pinned_bytes(fs_info, &generic_ref);
+               add_pinned_bytes(fs_info, generic_ref, -1);
 
        return ret;
 }
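
btrfs_inc_extent_ref() now expects the caller to have built the struct btrfs_ref up front. A sketch of a tree-block increment, following the pattern the __btrfs_mod_ref() hunk below adopts (a fragment assuming trans, root, bytenr, num_bytes, parent, level and ref_root are in scope):

    struct btrfs_ref generic_ref = { 0 };

    btrfs_init_generic_ref(&generic_ref, BTRFS_ADD_DELAYED_REF, bytenr,
                           num_bytes, parent);
    generic_ref.real_root = root->root_key.objectid;
    btrfs_init_tree_ref(&generic_ref, level, ref_root);
    ret = btrfs_inc_extent_ref(trans, &generic_ref);

The BUG_ON() above becomes a property of the ref itself: tree-log roots must never reach this path, and whether the ref is metadata or data is decided by which init helper ran.
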
@@ -2941,7 +2935,6 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
 }
 
 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
-                               struct btrfs_fs_info *fs_info,
                                u64 bytenr, u64 num_bytes, u64 flags,
                                int level, int is_data)
 {
@@ -2958,8 +2951,7 @@ int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
        extent_op->is_data = is_data ? true : false;
        extent_op->level = level;
 
-       ret = btrfs_add_delayed_extent_op(fs_info, trans, bytenr,
-                                         num_bytes, extent_op);
+       ret = btrfs_add_delayed_extent_op(trans, bytenr, num_bytes, extent_op);
        if (ret)
                btrfs_free_delayed_extent_op(extent_op);
        return ret;
@@ -3151,7 +3143,10 @@ static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
        u32 nritems;
        struct btrfs_key key;
        struct btrfs_file_extent_item *fi;
+       struct btrfs_ref generic_ref = { 0 };
+       bool for_reloc = btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC);
        int i;
+       int action;
        int level;
        int ret = 0;
 
@@ -3169,6 +3164,10 @@ static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
                parent = buf->start;
        else
                parent = 0;
+       if (inc)
+               action = BTRFS_ADD_DELAYED_REF;
+       else
+               action = BTRFS_DROP_DELAYED_REF;
 
        for (i = 0; i < nritems; i++) {
                if (level == 0) {
@@ -3186,27 +3185,30 @@ static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
 
                        num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
                        key.offset -= btrfs_file_extent_offset(buf, fi);
+                       btrfs_init_generic_ref(&generic_ref, action, bytenr,
+                                              num_bytes, parent);
+                       generic_ref.real_root = root->root_key.objectid;
+                       btrfs_init_data_ref(&generic_ref, ref_root, key.objectid,
+                                           key.offset);
+                       generic_ref.skip_qgroup = for_reloc;
                        if (inc)
-                               ret = btrfs_inc_extent_ref(trans, root, bytenr,
-                                               num_bytes, parent, ref_root,
-                                               key.objectid, key.offset);
+                               ret = btrfs_inc_extent_ref(trans, &generic_ref);
                        else
-                               ret = btrfs_free_extent(trans, root, bytenr,
-                                               num_bytes, parent, ref_root,
-                                               key.objectid, key.offset);
+                               ret = btrfs_free_extent(trans, &generic_ref);
                        if (ret)
                                goto fail;
                } else {
                        bytenr = btrfs_node_blockptr(buf, i);
                        num_bytes = fs_info->nodesize;
+                       btrfs_init_generic_ref(&generic_ref, action, bytenr,
+                                              num_bytes, parent);
+                       generic_ref.real_root = root->root_key.objectid;
+                       btrfs_init_tree_ref(&generic_ref, level - 1, ref_root);
+                       generic_ref.skip_qgroup = for_reloc;
                        if (inc)
-                               ret = btrfs_inc_extent_ref(trans, root, bytenr,
-                                               num_bytes, parent, ref_root,
-                                               level - 1, 0);
+                               ret = btrfs_inc_extent_ref(trans, &generic_ref);
                        else
-                               ret = btrfs_free_extent(trans, root, bytenr,
-                                               num_bytes, parent, ref_root,
-                                               level - 1, 0);
+                               ret = btrfs_free_extent(trans, &generic_ref);
                        if (ret)
                                goto fail;
                }
@@ -3882,8 +3884,7 @@ static int create_space_info(struct btrfs_fs_info *info, u64 flags)
                                    info->space_info_kobj, "%s",
                                    alloc_name(space_info->flags));
        if (ret) {
-               percpu_counter_destroy(&space_info->total_bytes_pinned);
-               kfree(space_info);
+               kobject_put(&space_info->kobj);
                return ret;
        }
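
Replacing the manual cleanup with kobject_put() follows the standard kobject contract: once kobject_init_and_add() has been called, teardown must go through the reference count so the ktype's release callback (assumed here to free the space_info and destroy its percpu counter) runs exactly once. The general shape of the pattern:

    ret = kobject_init_and_add(&obj->kobj, &obj_ktype, parent, "%s", name);
    if (ret) {
            /* release() now owns freeing obj; do not kfree() by hand */
            kobject_put(&obj->kobj);
            return ret;
    }
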
 
@@ -4633,6 +4634,7 @@ static void shrink_delalloc(struct btrfs_fs_info *fs_info, u64 to_reclaim,
        struct btrfs_space_info *space_info;
        struct btrfs_trans_handle *trans;
        u64 delalloc_bytes;
+       u64 dio_bytes;
        u64 async_pages;
        u64 items;
        long time_left;
@@ -4648,7 +4650,8 @@ static void shrink_delalloc(struct btrfs_fs_info *fs_info, u64 to_reclaim,
 
        delalloc_bytes = percpu_counter_sum_positive(
                                                &fs_info->delalloc_bytes);
-       if (delalloc_bytes == 0) {
+       dio_bytes = percpu_counter_sum_positive(&fs_info->dio_bytes);
+       if (delalloc_bytes == 0 && dio_bytes == 0) {
                if (trans)
                        return;
                if (wait_ordered)
@@ -4656,8 +4659,16 @@ static void shrink_delalloc(struct btrfs_fs_info *fs_info, u64 to_reclaim,
                return;
        }
 
+       /*
+        * If we are doing more ordered than delalloc we need to just wait on
+        * ordered extents, otherwise we'll waste time trying to flush delalloc
+        * that likely won't give us the space back we need.
+        */
+       if (dio_bytes > delalloc_bytes)
+               wait_ordered = true;
+
        loops = 0;
-       while (delalloc_bytes && loops < 3) {
+       while ((delalloc_bytes || dio_bytes) && loops < 3) {
                nr_pages = min(delalloc_bytes, to_reclaim) >> PAGE_SHIFT;
 
                /*
@@ -4707,6 +4718,7 @@ static void shrink_delalloc(struct btrfs_fs_info *fs_info, u64 to_reclaim,
                }
                delalloc_bytes = percpu_counter_sum_positive(
                                                &fs_info->delalloc_bytes);
+               dio_bytes = percpu_counter_sum_positive(&fs_info->dio_bytes);
        }
 }
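
These hunks assume a new fs_info->dio_bytes percpu counter, maintained elsewhere in this series, that tracks bytes under outstanding direct IO. The resulting flush policy, restated compactly:

    delalloc_bytes = percpu_counter_sum_positive(&fs_info->delalloc_bytes);
    dio_bytes = percpu_counter_sum_positive(&fs_info->dio_bytes);

    if (delalloc_bytes == 0 && dio_bytes == 0)
            return;                         /* nothing left to reclaim */
    if (dio_bytes > delalloc_bytes)
            wait_ordered = true;            /* flushing delalloc won't help */
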
 
@@ -5704,85 +5716,6 @@ int btrfs_block_rsv_refill(struct btrfs_root *root,
        return ret;
 }
 
-static void calc_refill_bytes(struct btrfs_block_rsv *block_rsv,
-                               u64 *metadata_bytes, u64 *qgroup_bytes)
-{
-       *metadata_bytes = 0;
-       *qgroup_bytes = 0;
-
-       spin_lock(&block_rsv->lock);
-       if (block_rsv->reserved < block_rsv->size)
-               *metadata_bytes = block_rsv->size - block_rsv->reserved;
-       if (block_rsv->qgroup_rsv_reserved < block_rsv->qgroup_rsv_size)
-               *qgroup_bytes = block_rsv->qgroup_rsv_size -
-                       block_rsv->qgroup_rsv_reserved;
-       spin_unlock(&block_rsv->lock);
-}
-
-/**
- * btrfs_inode_rsv_refill - refill the inode block rsv.
- * @inode - the inode we are refilling.
- * @flush - the flushing restriction.
- *
- * Essentially the same as btrfs_block_rsv_refill, except it uses the
- * block_rsv->size as the minimum size.  We'll either refill the missing amount
- * or return if we already have enough space.  This will also handle the reserve
- * tracepoint for the reserved amount.
- */
-static int btrfs_inode_rsv_refill(struct btrfs_inode *inode,
-                                 enum btrfs_reserve_flush_enum flush)
-{
-       struct btrfs_root *root = inode->root;
-       struct btrfs_block_rsv *block_rsv = &inode->block_rsv;
-       u64 num_bytes, last = 0;
-       u64 qgroup_num_bytes;
-       int ret = -ENOSPC;
-
-       calc_refill_bytes(block_rsv, &num_bytes, &qgroup_num_bytes);
-       if (num_bytes == 0)
-               return 0;
-
-       do {
-               ret = btrfs_qgroup_reserve_meta_prealloc(root, qgroup_num_bytes,
-                                                        true);
-               if (ret)
-                       return ret;
-               ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
-               if (ret) {
-                       btrfs_qgroup_free_meta_prealloc(root, qgroup_num_bytes);
-                       last = num_bytes;
-                       /*
-                        * If we are fragmented we can end up with a lot of
-                        * outstanding extents which will make our size be much
-                        * larger than our reserved amount.
-                        *
-                        * If the reservation happens here, it might be very
-                        * big though not needed in the end, if the delalloc
-                        * flushing happens.
-                        *
-                        * If this is the case try and do the reserve again.
-                        */
-                       if (flush == BTRFS_RESERVE_FLUSH_ALL)
-                               calc_refill_bytes(block_rsv, &num_bytes,
-                                                  &qgroup_num_bytes);
-                       if (num_bytes == 0)
-                               return 0;
-               }
-       } while (ret && last != num_bytes);
-
-       if (!ret) {
-               block_rsv_add_bytes(block_rsv, num_bytes, false);
-               trace_btrfs_space_reservation(root->fs_info, "delalloc",
-                                             btrfs_ino(inode), num_bytes, 1);
-
-               /* Don't forget to increase qgroup_rsv_reserved */
-               spin_lock(&block_rsv->lock);
-               block_rsv->qgroup_rsv_reserved += qgroup_num_bytes;
-               spin_unlock(&block_rsv->lock);
-       }
-       return ret;
-}
-
 static u64 __btrfs_block_rsv_release(struct btrfs_fs_info *fs_info,
                                     struct btrfs_block_rsv *block_rsv,
                                     u64 num_bytes, u64 *qgroup_to_release)
@@ -6083,9 +6016,25 @@ static void btrfs_calculate_inode_block_rsv_size(struct btrfs_fs_info *fs_info,
        spin_unlock(&block_rsv->lock);
 }
 
+static void calc_inode_reservations(struct btrfs_fs_info *fs_info,
+                                   u64 num_bytes, u64 *meta_reserve,
+                                   u64 *qgroup_reserve)
+{
+       u64 nr_extents = count_max_extents(num_bytes);
+       u64 csum_leaves = btrfs_csum_bytes_to_leaves(fs_info, num_bytes);
+
+       /* We add one for the inode update at finish ordered time */
+       *meta_reserve = btrfs_calc_trans_metadata_size(fs_info,
+                                               nr_extents + csum_leaves + 1);
+       *qgroup_reserve = nr_extents * fs_info->nodesize;
+}
+
 int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes)
 {
-       struct btrfs_fs_info *fs_info = inode->root->fs_info;
+       struct btrfs_root *root = inode->root;
+       struct btrfs_fs_info *fs_info = root->fs_info;
+       struct btrfs_block_rsv *block_rsv = &inode->block_rsv;
+       u64 meta_reserve, qgroup_reserve;
        unsigned nr_extents;
        enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_FLUSH_ALL;
        int ret = 0;
@@ -6115,7 +6064,31 @@ int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes)
 
        num_bytes = ALIGN(num_bytes, fs_info->sectorsize);
 
-       /* Add our new extents and calculate the new rsv size. */
+       /*
+        * We always want to do it this way, every other way is wrong and ends
+        * in tears.  Pre-reserving the amount we are going to add will always
+        * be the right way, because otherwise if we have enough parallelism we
+        * could end up with thousands of inodes all holding little bits of
+        * reservations they were able to make previously and the only way to
+        * reclaim that space is to ENOSPC out the operations and clear
+        * everything out and try again, which is bad.  This way we just
+        * over-reserve slightly, and clean up the mess when we are done.
+        */
+       calc_inode_reservations(fs_info, num_bytes, &meta_reserve,
+                               &qgroup_reserve);
+       ret = btrfs_qgroup_reserve_meta_prealloc(root, qgroup_reserve, true);
+       if (ret)
+               goto out_fail;
+       ret = reserve_metadata_bytes(root, block_rsv, meta_reserve, flush);
+       if (ret)
+               goto out_qgroup;
+
+       /*
+        * Now we need to update our outstanding extents and csum bytes _first_
+        * and then add the reservation to the block_rsv.  This keeps us from
+        * racing with an ordered completion or some such that would think it
+        * needs to free the reservation we just made.
+        */
        spin_lock(&inode->lock);
        nr_extents = count_max_extents(num_bytes);
        btrfs_mod_outstanding_extents(inode, nr_extents);
@@ -6123,22 +6096,21 @@ int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes)
        btrfs_calculate_inode_block_rsv_size(fs_info, inode);
        spin_unlock(&inode->lock);
 
-       ret = btrfs_inode_rsv_refill(inode, flush);
-       if (unlikely(ret))
-               goto out_fail;
+       /* Now we can safely add our space to our block rsv */
+       block_rsv_add_bytes(block_rsv, meta_reserve, false);
+       trace_btrfs_space_reservation(root->fs_info, "delalloc",
+                                     btrfs_ino(inode), meta_reserve, 1);
+
+       spin_lock(&block_rsv->lock);
+       block_rsv->qgroup_rsv_reserved += qgroup_reserve;
+       spin_unlock(&block_rsv->lock);
 
        if (delalloc_lock)
                mutex_unlock(&inode->delalloc_mutex);
        return 0;
-
+out_qgroup:
+       btrfs_qgroup_free_meta_prealloc(root, qgroup_reserve);
 out_fail:
-       spin_lock(&inode->lock);
-       nr_extents = count_max_extents(num_bytes);
-       btrfs_mod_outstanding_extents(inode, -nr_extents);
-       inode->csum_bytes -= num_bytes;
-       btrfs_calculate_inode_block_rsv_size(fs_info, inode);
-       spin_unlock(&inode->lock);
-
        btrfs_inode_rsv_release(inode, true);
        if (delalloc_lock)
                mutex_unlock(&inode->delalloc_mutex);
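
The rework pre-reserves everything calc_inode_reservations() asks for before touching the inode's counters. Rough arithmetic behind one reservation, as a runnable userspace model (the 128 MiB max extent size and the nodesize * BTRFS_MAX_LEVEL * 2 per-item cost are assumptions matching typical btrfs constants of this era; csum_leaves is hard-coded for brevity):

    #include <stdio.h>
    #include <stdint.h>

    #define MAX_EXTENT_SIZE (128ULL * 1024 * 1024) /* BTRFS_MAX_EXTENT_SIZE */
    #define MAX_LEVEL       8ULL                   /* BTRFS_MAX_LEVEL */

    static uint64_t count_max_extents(uint64_t num_bytes)
    {
            return (num_bytes + MAX_EXTENT_SIZE - 1) / MAX_EXTENT_SIZE;
    }

    int main(void)
    {
            uint64_t nodesize = 16384;              /* assumed fs nodesize */
            uint64_t num_bytes = 1ULL << 20;        /* a 1 MiB delalloc range */
            uint64_t nr_extents = count_max_extents(num_bytes);    /* 1 */
            uint64_t csum_leaves = 1;   /* 1 MiB of csums fits in one leaf */
            /* the "+ 1" is the inode update at ordered-extent finish time */
            uint64_t meta = nodesize * MAX_LEVEL * 2 *
                            (nr_extents + csum_leaves + 1);
            uint64_t qgroup = nr_extents * nodesize;

            printf("meta_reserve=%llu qgroup_reserve=%llu\n",
                   (unsigned long long)meta, (unsigned long long)qgroup);
            return 0;
    }

Over-reserving slightly and releasing the excess afterwards is exactly the trade the comment in the hunk describes: it avoids many inodes each clinging to small prior reservations that can only be reclaimed by failing with ENOSPC.
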
@@ -7219,7 +7191,7 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
        }
 out:
        if (pin)
-               add_pinned_bytes(fs_info, &generic_ref);
+               add_pinned_bytes(fs_info, &generic_ref, 1);
 
        if (last_ref) {
                /*
@@ -7231,47 +7203,43 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
 }
 
 /* Can return -ENOMEM */
-int btrfs_free_extent(struct btrfs_trans_handle *trans,
-                     struct btrfs_root *root,
-                     u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
-                     u64 owner, u64 offset)
+int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_ref *ref)
 {
-       struct btrfs_fs_info *fs_info = root->fs_info;
-       struct btrfs_ref generic_ref = { 0 };
+       struct btrfs_fs_info *fs_info = trans->fs_info;
        int old_ref_mod, new_ref_mod;
        int ret;
 
        if (btrfs_is_testing(fs_info))
                return 0;
 
-       btrfs_init_generic_ref(&generic_ref, BTRFS_DROP_DELAYED_REF, bytenr,
-                              num_bytes, parent);
-       generic_ref.real_root = root->root_key.objectid;
        /*
         * tree log blocks never actually go into the extent allocation
         * tree, just update pinning info and exit early.
         */
-       if (root_objectid == BTRFS_TREE_LOG_OBJECTID) {
-               WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID);
+       if ((ref->type == BTRFS_REF_METADATA &&
+            ref->tree_ref.root == BTRFS_TREE_LOG_OBJECTID) ||
+           (ref->type == BTRFS_REF_DATA &&
+            ref->data_ref.ref_root == BTRFS_TREE_LOG_OBJECTID)) {
                /* unlocks the pinned mutex */
-               btrfs_pin_extent(fs_info, bytenr, num_bytes, 1);
+               btrfs_pin_extent(fs_info, ref->bytenr, ref->len, 1);
                old_ref_mod = new_ref_mod = 0;
                ret = 0;
-       } else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
-               btrfs_init_tree_ref(&generic_ref, (int)owner, root_objectid);
-               ret = btrfs_add_delayed_tree_ref(trans, &generic_ref, NULL,
+       } else if (ref->type == BTRFS_REF_METADATA) {
+               ret = btrfs_add_delayed_tree_ref(trans, ref, NULL,
                                                 &old_ref_mod, &new_ref_mod);
        } else {
-               btrfs_init_data_ref(&generic_ref, root_objectid, owner, offset);
-               ret = btrfs_add_delayed_data_ref(trans, &generic_ref, 0,
+               ret = btrfs_add_delayed_data_ref(trans, ref, 0,
                                                 &old_ref_mod, &new_ref_mod);
        }
 
-       if (root_objectid != BTRFS_TREE_LOG_OBJECTID)
-               btrfs_ref_tree_mod(fs_info, &generic_ref);
+       if (!((ref->type == BTRFS_REF_METADATA &&
+              ref->tree_ref.root == BTRFS_TREE_LOG_OBJECTID) ||
+             (ref->type == BTRFS_REF_DATA &&
+              ref->data_ref.ref_root == BTRFS_TREE_LOG_OBJECTID)))
+               btrfs_ref_tree_mod(fs_info, ref);
 
        if (ret == 0 && old_ref_mod >= 0 && new_ref_mod < 0)
-               add_pinned_bytes(fs_info, &generic_ref);
+               add_pinned_bytes(fs_info, ref, 1);
 
        return ret;
 }
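
The tree-log test is now spelled out twice on the ref itself, once to decide on pinning and once to skip btrfs_ref_tree_mod(). A hypothetical helper, not part of the patch, that restates the condition:

    /* Hypothetical: not in this patch, shown only for readability. */
    static bool ref_is_tree_log(const struct btrfs_ref *ref)
    {
            return (ref->type == BTRFS_REF_METADATA &&
                    ref->tree_ref.root == BTRFS_TREE_LOG_OBJECTID) ||
                   (ref->type == BTRFS_REF_DATA &&
                    ref->data_ref.ref_root == BTRFS_TREE_LOG_OBJECTID);
    }
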
@@ -8807,7 +8775,7 @@ static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
                BUG_ON(ret); /* -ENOMEM */
                ret = btrfs_dec_ref(trans, root, eb, 0);
                BUG_ON(ret); /* -ENOMEM */
-               ret = btrfs_set_disk_extent_flags(trans, fs_info, eb->start,
+               ret = btrfs_set_disk_extent_flags(trans, eb->start,
                                                  eb->len, flag,
                                                  btrfs_header_level(eb), 0);
                BUG_ON(ret); /* -ENOMEM */
@@ -8876,6 +8844,7 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans,
        u64 parent;
        struct btrfs_key key;
        struct btrfs_key first_key;
+       struct btrfs_ref ref = { 0 };
        struct extent_buffer *next;
        int level = wc->level;
        int reada = 0;
@@ -9048,9 +9017,10 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans,
                wc->drop_level = level;
                find_next_key(path, level, &wc->drop_progress);
 
-               ret = btrfs_free_extent(trans, root, bytenr, fs_info->nodesize,
-                                       parent, root->root_key.objectid,
-                                       level - 1, 0);
+               btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, bytenr,
+                                      fs_info->nodesize, parent);
+               btrfs_init_tree_ref(&ref, level - 1, root->root_key.objectid);
+               ret = btrfs_free_extent(trans, &ref);
                if (ret)
                        goto out_unlock;
        }
@@ -9140,11 +9110,13 @@ static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
                        else
                                ret = btrfs_dec_ref(trans, root, eb, 0);
                        BUG_ON(ret); /* -ENOMEM */
-                       ret = btrfs_qgroup_trace_leaf_items(trans, eb);
-                       if (ret) {
-                               btrfs_err_rl(fs_info,
-                                            "error %d accounting leaf items. Quota is out of sync, rescan required.",
+                       if (is_fstree(root->root_key.objectid)) {
+                               ret = btrfs_qgroup_trace_leaf_items(trans, eb);
+                               if (ret) {
+                                       btrfs_err_rl(fs_info,
+       "error %d accounting leaf items, quota is out of sync, rescan required",
                                             ret);
+                               }
                        }
                }
                /* make block locked assertion in btrfs_clean_tree_block happy */
@@ -11165,13 +11137,11 @@ int btrfs_error_unpin_extent_range(struct btrfs_fs_info *fs_info,
  * it while performing the free space search since we have already
  * held back allocations.
  */
-static int btrfs_trim_free_extents(struct btrfs_device *device,
-                                  struct fstrim_range *range, u64 *trimmed)
+static int btrfs_trim_free_extents(struct btrfs_device *device, u64 *trimmed)
 {
-       u64 start, len = 0, end = 0;
+       u64 start = SZ_1M, len = 0, end = 0;
        int ret;
 
-       start = max_t(u64, range->start, SZ_1M);
        *trimmed = 0;
 
        /* Discard not supported = nothing to do. */
@@ -11214,22 +11184,6 @@ static int btrfs_trim_free_extents(struct btrfs_device *device,
                        break;
                }
 
-               /* Keep going until we satisfy minlen or reach end of space */
-               if (len < range->minlen) {
-                       mutex_unlock(&fs_info->chunk_mutex);
-                       start += len;
-                       continue;
-               }
-
-               /* If we are out of the passed range break */
-               if (start > range->start + range->len - 1) {
-                       mutex_unlock(&fs_info->chunk_mutex);
-                       break;
-               }
-
-               start = max(range->start, start);
-               len = min(range->len, len);
-
                ret = btrfs_issue_discard(device->bdev, start, len,
                                          &bytes);
                if (!ret)
@@ -11244,10 +11198,6 @@ static int btrfs_trim_free_extents(struct btrfs_device *device,
                start += len;
                *trimmed += bytes;
 
-               /* We've trimmed enough */
-               if (*trimmed >= range->len)
-                       break;
-
                if (fatal_signal_pending(current)) {
                        ret = -ERESTARTSYS;
                        break;
@@ -11331,7 +11281,7 @@ int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range)
        mutex_lock(&fs_info->fs_devices->device_list_mutex);
        devices = &fs_info->fs_devices->devices;
        list_for_each_entry(device, devices, dev_list) {
-               ret = btrfs_trim_free_extents(device, range, &group_trimmed);
+               ret = btrfs_trim_free_extents(device, &group_trimmed);
                if (ret) {
                        dev_failed++;
                        dev_ret = ret;
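
With the range checks gone, btrfs_trim_free_extents() always trims every unallocated chunk of each device, which is what the patch subject describes. The likely rationale: fstrim_range describes the filesystem's logical address space, while a device's unallocated holes live in device address space, so clamping them against range->start and range->len was never well defined; the range still governs the block-group trimming performed earlier in btrfs_trim_fs(). The SZ_1M start keeps the first megabyte of every device untouched, as that area is reserved and never allocated from.
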