return NULL;
}
-static void add_pinned_bytes(struct btrfs_fs_info *fs_info, s64 num_bytes,
- bool metadata, u64 root_objectid)
+static void add_pinned_bytes(struct btrfs_fs_info *fs_info,
+ struct btrfs_ref *ref, int sign)
{
struct btrfs_space_info *space_info;
+ s64 num_bytes;
u64 flags;
- if (metadata) {
- if (root_objectid == BTRFS_CHUNK_TREE_OBJECTID)
+ ASSERT(sign == 1 || sign == -1);
+ num_bytes = sign * ref->len;
+ if (ref->type == BTRFS_REF_METADATA) {
+ if (ref->tree_ref.root == BTRFS_CHUNK_TREE_OBJECTID)
flags = BTRFS_BLOCK_GROUP_SYSTEM;
else
flags = BTRFS_BLOCK_GROUP_METADATA;
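For reference, the unified structure that add_pinned_bytes() and the rest of this series consume replaces the old bytenr/num_bytes/root_objectid/owner parameter lists. The sketch below is reconstructed from the fields the patch actually touches (type, action, bytenr, len, real_root, skip_qgroup, tree_ref.root, data_ref.ref_root); field order and any members not used here are assumptions:

	struct btrfs_ref {
		enum btrfs_ref_type type;	/* BTRFS_REF_NOT_SET/DATA/METADATA */
		int action;			/* BTRFS_ADD_DELAYED_REF, ... */
		bool skip_qgroup;		/* set for relocation-driven mods */
		u64 real_root;			/* root actually doing the change */
		u64 bytenr;
		u64 len;
		union {
			struct btrfs_data_ref data_ref;	/* .ref_root, objectid, offset */
			struct btrfs_tree_ref tree_ref;	/* .root, level */
		};
	};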
type = extent_ref_type(parent, owner);
size = btrfs_extent_inline_ref_size(type);
- btrfs_extend_item(fs_info, path, size);
+ btrfs_extend_item(path, size);
ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
refs = btrfs_extent_refs(leaf, ei);
int *last_ref)
{
struct extent_buffer *leaf = path->nodes[0];
- struct btrfs_fs_info *fs_info = leaf->fs_info;
struct btrfs_extent_item *ei;
struct btrfs_extent_data_ref *dref = NULL;
struct btrfs_shared_data_ref *sref = NULL;
memmove_extent_buffer(leaf, ptr, ptr + size,
end - ptr - size);
item_size -= size;
- btrfs_truncate_item(fs_info, path, item_size, 1);
+ btrfs_truncate_item(path, item_size, 1);
}
btrfs_mark_buffer_dirty(leaf);
}
/* Can return -ENOMEM */
int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- u64 bytenr, u64 num_bytes, u64 parent,
- u64 root_objectid, u64 owner, u64 offset)
+ struct btrfs_ref *generic_ref)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
- struct btrfs_ref generic_ref = { 0 };
+ struct btrfs_fs_info *fs_info = trans->fs_info;
int old_ref_mod, new_ref_mod;
int ret;
- BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
- root_objectid == BTRFS_TREE_LOG_OBJECTID);
+ ASSERT(generic_ref->type != BTRFS_REF_NOT_SET &&
+ generic_ref->action);
+ BUG_ON(generic_ref->type == BTRFS_REF_METADATA &&
+ generic_ref->tree_ref.root == BTRFS_TREE_LOG_OBJECTID);
- btrfs_ref_tree_mod(root, bytenr, num_bytes, parent, root_objectid,
- owner, offset, BTRFS_ADD_DELAYED_REF);
-
- btrfs_init_generic_ref(&generic_ref, BTRFS_ADD_DELAYED_REF, bytenr,
- num_bytes, parent);
- generic_ref.real_root = root->root_key.objectid;
- if (owner < BTRFS_FIRST_FREE_OBJECTID) {
- btrfs_init_tree_ref(&generic_ref, (int)owner, root_objectid);
- ret = btrfs_add_delayed_tree_ref(trans, &generic_ref,
+ if (generic_ref->type == BTRFS_REF_METADATA)
+ ret = btrfs_add_delayed_tree_ref(trans, generic_ref,
NULL, &old_ref_mod, &new_ref_mod);
- } else {
- ret = btrfs_add_delayed_data_ref(trans, bytenr,
- num_bytes, parent,
- root_objectid, owner, offset,
- 0, BTRFS_ADD_DELAYED_REF,
+ else
+ ret = btrfs_add_delayed_data_ref(trans, generic_ref, 0,
&old_ref_mod, &new_ref_mod);
- }
- if (ret == 0 && old_ref_mod < 0 && new_ref_mod >= 0) {
- bool metadata = owner < BTRFS_FIRST_FREE_OBJECTID;
+ btrfs_ref_tree_mod(fs_info, generic_ref);
- add_pinned_bytes(fs_info, -num_bytes, metadata, root_objectid);
- }
+ if (ret == 0 && old_ref_mod < 0 && new_ref_mod >= 0)
+ add_pinned_bytes(fs_info, generic_ref, -1);
return ret;
}
}
int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info,
u64 bytenr, u64 num_bytes, u64 flags,
int level, int is_data)
{
extent_op->is_data = is_data ? true : false;
extent_op->level = level;
- ret = btrfs_add_delayed_extent_op(fs_info, trans, bytenr,
- num_bytes, extent_op);
+ ret = btrfs_add_delayed_extent_op(trans, bytenr, num_bytes, extent_op);
if (ret)
btrfs_free_delayed_extent_op(extent_op);
return ret;
u32 nritems;
struct btrfs_key key;
struct btrfs_file_extent_item *fi;
+ struct btrfs_ref generic_ref = { 0 };
+ bool for_reloc = btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC);
int i;
+ int action;
int level;
int ret = 0;
parent = buf->start;
else
parent = 0;
+ if (inc)
+ action = BTRFS_ADD_DELAYED_REF;
+ else
+ action = BTRFS_DROP_DELAYED_REF;
for (i = 0; i < nritems; i++) {
if (level == 0) {
num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
key.offset -= btrfs_file_extent_offset(buf, fi);
+ btrfs_init_generic_ref(&generic_ref, action, bytenr,
+ num_bytes, parent);
+ generic_ref.real_root = root->root_key.objectid;
+ btrfs_init_data_ref(&generic_ref, ref_root, key.objectid,
+ key.offset);
+ generic_ref.skip_qgroup = for_reloc;
if (inc)
- ret = btrfs_inc_extent_ref(trans, root, bytenr,
- num_bytes, parent, ref_root,
- key.objectid, key.offset);
+ ret = btrfs_inc_extent_ref(trans, &generic_ref);
else
- ret = btrfs_free_extent(trans, root, bytenr,
- num_bytes, parent, ref_root,
- key.objectid, key.offset);
+ ret = btrfs_free_extent(trans, &generic_ref);
if (ret)
goto fail;
} else {
bytenr = btrfs_node_blockptr(buf, i);
num_bytes = fs_info->nodesize;
+ btrfs_init_generic_ref(&generic_ref, action, bytenr,
+ num_bytes, parent);
+ generic_ref.real_root = root->root_key.objectid;
+ btrfs_init_tree_ref(&generic_ref, level - 1, ref_root);
+ generic_ref.skip_qgroup = for_reloc;
if (inc)
- ret = btrfs_inc_extent_ref(trans, root, bytenr,
- num_bytes, parent, ref_root,
- level - 1, 0);
+ ret = btrfs_inc_extent_ref(trans, &generic_ref);
else
- ret = btrfs_free_extent(trans, root, bytenr,
- num_bytes, parent, ref_root,
- level - 1, 0);
+ ret = btrfs_free_extent(trans, &generic_ref);
if (ret)
goto fail;
}
info->space_info_kobj, "%s",
alloc_name(space_info->flags));
if (ret) {
- percpu_counter_destroy(&space_info->total_bytes_pinned);
- kfree(space_info);
+ kobject_put(&space_info->kobj);
return ret;
}
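The switch to kobject_put() on the error path assumes the space_info ktype's release callback now owns the teardown that used to be open-coded here. A minimal sketch of such a release hook, following the standard kobject pattern (the function name is illustrative):

	static void space_info_release(struct kobject *kobj)
	{
		struct btrfs_space_info *sinfo = container_of(kobj,
							      struct btrfs_space_info,
							      kobj);

		/* Runs once kobject_put() drops the last reference and
		 * performs the cleanup the error path used to open-code. */
		percpu_counter_destroy(&sinfo->total_bytes_pinned);
		kfree(sinfo);
	}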
struct btrfs_space_info *space_info;
struct btrfs_trans_handle *trans;
u64 delalloc_bytes;
+ u64 dio_bytes;
u64 async_pages;
u64 items;
long time_left;
delalloc_bytes = percpu_counter_sum_positive(
&fs_info->delalloc_bytes);
- if (delalloc_bytes == 0) {
+ dio_bytes = percpu_counter_sum_positive(&fs_info->dio_bytes);
+ if (delalloc_bytes == 0 && dio_bytes == 0) {
if (trans)
return;
if (wait_ordered)
return;
}
+ /*
+ * If we are doing more ordered than delalloc we need to just wait on
+ * ordered extents, otherwise we'll waste time trying to flush delalloc
+ * that likely won't give us the space back we need.
+ */
+ if (dio_bytes > delalloc_bytes)
+ wait_ordered = true;
+
loops = 0;
- while (delalloc_bytes && loops < 3) {
+ while ((delalloc_bytes || dio_bytes) && loops < 3) {
nr_pages = min(delalloc_bytes, to_reclaim) >> PAGE_SHIFT;
/*
}
delalloc_bytes = percpu_counter_sum_positive(
&fs_info->delalloc_bytes);
+ dio_bytes = percpu_counter_sum_positive(&fs_info->dio_bytes);
}
}
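shrink_delalloc() can only consult fs_info->dio_bytes if the counter is kept current at direct IO submission and completion. A sketch of that lifecycle, assuming it mirrors the percpu-counter pattern already used for delalloc_bytes (the call sites are illustrative, not hunks from this patch):

	/* when a direct IO write is submitted */
	percpu_counter_add_batch(&fs_info->dio_bytes, dio_len,
				 fs_info->delalloc_batch);

	/* when the matching ordered extent completes */
	percpu_counter_sub(&fs_info->dio_bytes, dio_len);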
return ret;
}
-static void calc_refill_bytes(struct btrfs_block_rsv *block_rsv,
- u64 *metadata_bytes, u64 *qgroup_bytes)
-{
- *metadata_bytes = 0;
- *qgroup_bytes = 0;
-
- spin_lock(&block_rsv->lock);
- if (block_rsv->reserved < block_rsv->size)
- *metadata_bytes = block_rsv->size - block_rsv->reserved;
- if (block_rsv->qgroup_rsv_reserved < block_rsv->qgroup_rsv_size)
- *qgroup_bytes = block_rsv->qgroup_rsv_size -
- block_rsv->qgroup_rsv_reserved;
- spin_unlock(&block_rsv->lock);
-}
-
-/**
- * btrfs_inode_rsv_refill - refill the inode block rsv.
- * @inode - the inode we are refilling.
- * @flush - the flushing restriction.
- *
- * Essentially the same as btrfs_block_rsv_refill, except it uses the
- * block_rsv->size as the minimum size. We'll either refill the missing amount
- * or return if we already have enough space. This will also handle the reserve
- * tracepoint for the reserved amount.
- */
-static int btrfs_inode_rsv_refill(struct btrfs_inode *inode,
- enum btrfs_reserve_flush_enum flush)
-{
- struct btrfs_root *root = inode->root;
- struct btrfs_block_rsv *block_rsv = &inode->block_rsv;
- u64 num_bytes, last = 0;
- u64 qgroup_num_bytes;
- int ret = -ENOSPC;
-
- calc_refill_bytes(block_rsv, &num_bytes, &qgroup_num_bytes);
- if (num_bytes == 0)
- return 0;
-
- do {
- ret = btrfs_qgroup_reserve_meta_prealloc(root, qgroup_num_bytes,
- true);
- if (ret)
- return ret;
- ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
- if (ret) {
- btrfs_qgroup_free_meta_prealloc(root, qgroup_num_bytes);
- last = num_bytes;
- /*
- * If we are fragmented we can end up with a lot of
- * outstanding extents which will make our size be much
- * larger than our reserved amount.
- *
- * If the reservation happens here, it might be very
- * big though not needed in the end, if the delalloc
- * flushing happens.
- *
- * If this is the case try and do the reserve again.
- */
- if (flush == BTRFS_RESERVE_FLUSH_ALL)
- calc_refill_bytes(block_rsv, &num_bytes,
- &qgroup_num_bytes);
- if (num_bytes == 0)
- return 0;
- }
- } while (ret && last != num_bytes);
-
- if (!ret) {
- block_rsv_add_bytes(block_rsv, num_bytes, false);
- trace_btrfs_space_reservation(root->fs_info, "delalloc",
- btrfs_ino(inode), num_bytes, 1);
-
- /* Don't forget to increase qgroup_rsv_reserved */
- spin_lock(&block_rsv->lock);
- block_rsv->qgroup_rsv_reserved += qgroup_num_bytes;
- spin_unlock(&block_rsv->lock);
- }
- return ret;
-}
-
static u64 __btrfs_block_rsv_release(struct btrfs_fs_info *fs_info,
struct btrfs_block_rsv *block_rsv,
u64 num_bytes, u64 *qgroup_to_release)
spin_unlock(&block_rsv->lock);
}
+static void calc_inode_reservations(struct btrfs_fs_info *fs_info,
+ u64 num_bytes, u64 *meta_reserve,
+ u64 *qgroup_reserve)
+{
+ u64 nr_extents = count_max_extents(num_bytes);
+ u64 csum_leaves = btrfs_csum_bytes_to_leaves(fs_info, num_bytes);
+
+ /* We add one for the inode update at finish ordered time */
+ *meta_reserve = btrfs_calc_trans_metadata_size(fs_info,
+ nr_extents + csum_leaves + 1);
+ *qgroup_reserve = nr_extents * fs_info->nodesize;
+}
+
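To make the over-reservation concrete, here is a worked example for calc_inode_reservations(), assuming 16 KiB nodes, that count_max_extents() rounds up by the 128 MiB maximum extent size, and that btrfs_calc_trans_metadata_size() computes nodesize * 2 * BTRFS_MAX_LEVEL * num_items (the helpers' formulas are stated from memory, so treat them as assumptions):

	/*
	 * 1 MiB buffered write:
	 *   nr_extents     = count_max_extents(SZ_1M) = 1
	 *   csum_leaves    = 1 (1 MiB worth of csums fits in one leaf)
	 *   meta_reserve   = 16384 * 2 * 8 * (1 + 1 + 1) = 786432 bytes
	 *   qgroup_reserve = 1 * 16384                   = 16384 bytes
	 */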
int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes)
{
- struct btrfs_fs_info *fs_info = inode->root->fs_info;
+ struct btrfs_root *root = inode->root;
+ struct btrfs_fs_info *fs_info = root->fs_info;
+ struct btrfs_block_rsv *block_rsv = &inode->block_rsv;
+ u64 meta_reserve, qgroup_reserve;
unsigned nr_extents;
enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_FLUSH_ALL;
int ret = 0;
num_bytes = ALIGN(num_bytes, fs_info->sectorsize);
- /* Add our new extents and calculate the new rsv size. */
+ /*
+ * We always want to do it this way, every other way is wrong and ends
+ * in tears. Pre-reserving the amount we are going to add will always
+ * be the right way, because otherwise if we have enough parallelism we
+ * could end up with thousands of inodes all holding little bits of
+ * reservations they were able to make previously and the only way to
+ * reclaim that space is to ENOSPC out the operations and clear
+ * everything out and try again, which is bad. This way we just
+ * over-reserve slightly, and clean up the mess when we are done.
+ */
+ calc_inode_reservations(fs_info, num_bytes, &meta_reserve,
+ &qgroup_reserve);
+ ret = btrfs_qgroup_reserve_meta_prealloc(root, qgroup_reserve, true);
+ if (ret)
+ goto out_fail;
+ ret = reserve_metadata_bytes(root, block_rsv, meta_reserve, flush);
+ if (ret)
+ goto out_qgroup;
+
+ /*
+ * Now we need to update our outstanding extents and csum bytes _first_
+ * and then add the reservation to the block_rsv. This keeps us from
+ * racing with an ordered completion or some such that would think it
+ * needs to free the reservation we just made.
+ */
spin_lock(&inode->lock);
nr_extents = count_max_extents(num_bytes);
btrfs_mod_outstanding_extents(inode, nr_extents);
btrfs_calculate_inode_block_rsv_size(fs_info, inode);
spin_unlock(&inode->lock);
- ret = btrfs_inode_rsv_refill(inode, flush);
- if (unlikely(ret))
- goto out_fail;
+ /* Now we can safely add our space to our block rsv */
+ block_rsv_add_bytes(block_rsv, meta_reserve, false);
+ trace_btrfs_space_reservation(root->fs_info, "delalloc",
+ btrfs_ino(inode), meta_reserve, 1);
+
+ spin_lock(&block_rsv->lock);
+ block_rsv->qgroup_rsv_reserved += qgroup_reserve;
+ spin_unlock(&block_rsv->lock);
if (delalloc_lock)
mutex_unlock(&inode->delalloc_mutex);
return 0;
-
+out_qgroup:
+ btrfs_qgroup_free_meta_prealloc(root, qgroup_reserve);
out_fail:
- spin_lock(&inode->lock);
- nr_extents = count_max_extents(num_bytes);
- btrfs_mod_outstanding_extents(inode, -nr_extents);
- inode->csum_bytes -= num_bytes;
- btrfs_calculate_inode_block_rsv_size(fs_info, inode);
- spin_unlock(&inode->lock);
-
btrfs_inode_rsv_release(inode, true);
if (delalloc_lock)
mutex_unlock(&inode->delalloc_mutex);
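Both the out_qgroup/out_fail path above and the normal release path lean on btrfs_inode_rsv_release() handing back only what exceeds the rsv's computed size, which is what makes the deliberate over-reserve harmless. A condensed sketch of that trimming, simplified from __btrfs_block_rsv_release() (qgroup bookkeeping omitted):

	u64 to_free = 0;

	spin_lock(&block_rsv->lock);
	if (block_rsv->reserved > block_rsv->size) {
		/* only the excess over the target size is returned */
		to_free = block_rsv->reserved - block_rsv->size;
		block_rsv->reserved = block_rsv->size;
	}
	spin_unlock(&block_rsv->lock);
	/* to_free then goes back to the space_info pool */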
if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
int old_ref_mod, new_ref_mod;
- btrfs_ref_tree_mod(root, buf->start, buf->len, parent,
- root->root_key.objectid,
- btrfs_header_level(buf), 0,
- BTRFS_DROP_DELAYED_REF);
+ btrfs_ref_tree_mod(fs_info, &generic_ref);
ret = btrfs_add_delayed_tree_ref(trans, &generic_ref, NULL,
&old_ref_mod, &new_ref_mod);
BUG_ON(ret); /* -ENOMEM */
}
out:
if (pin)
- add_pinned_bytes(fs_info, buf->len, true,
- root->root_key.objectid);
+ add_pinned_bytes(fs_info, &generic_ref, 1);
if (last_ref) {
/*
}
/* Can return -ENOMEM */
-int btrfs_free_extent(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
- u64 owner, u64 offset)
+int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_ref *ref)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
- struct btrfs_ref generic_ref = { 0 };
+ struct btrfs_fs_info *fs_info = trans->fs_info;
int old_ref_mod, new_ref_mod;
int ret;
if (btrfs_is_testing(fs_info))
return 0;
- if (root_objectid != BTRFS_TREE_LOG_OBJECTID)
- btrfs_ref_tree_mod(root, bytenr, num_bytes, parent,
- root_objectid, owner, offset,
- BTRFS_DROP_DELAYED_REF);
-
- btrfs_init_generic_ref(&generic_ref, BTRFS_DROP_DELAYED_REF, bytenr,
- num_bytes, parent);
- generic_ref.real_root = root->root_key.objectid;
/*
* tree log blocks never actually go into the extent allocation
* tree, just update pinning info and exit early.
*/
- if (root_objectid == BTRFS_TREE_LOG_OBJECTID) {
- WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID);
+ if ((ref->type == BTRFS_REF_METADATA &&
+ ref->tree_ref.root == BTRFS_TREE_LOG_OBJECTID) ||
+ (ref->type == BTRFS_REF_DATA &&
+ ref->data_ref.ref_root == BTRFS_TREE_LOG_OBJECTID)) {
/* unlocks the pinned mutex */
- btrfs_pin_extent(fs_info, bytenr, num_bytes, 1);
+ btrfs_pin_extent(fs_info, ref->bytenr, ref->len, 1);
old_ref_mod = new_ref_mod = 0;
ret = 0;
- } else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
- btrfs_init_tree_ref(&generic_ref, (int)owner, root_objectid);
- ret = btrfs_add_delayed_tree_ref(trans, &generic_ref, NULL,
+ } else if (ref->type == BTRFS_REF_METADATA) {
+ ret = btrfs_add_delayed_tree_ref(trans, ref, NULL,
&old_ref_mod, &new_ref_mod);
} else {
- ret = btrfs_add_delayed_data_ref(trans, bytenr,
- num_bytes, parent,
- root_objectid, owner, offset,
- 0, BTRFS_DROP_DELAYED_REF,
+ ret = btrfs_add_delayed_data_ref(trans, ref, 0,
&old_ref_mod, &new_ref_mod);
}
- if (ret == 0 && old_ref_mod >= 0 && new_ref_mod < 0) {
- bool metadata = owner < BTRFS_FIRST_FREE_OBJECTID;
+ if (!((ref->type == BTRFS_REF_METADATA &&
+ ref->tree_ref.root == BTRFS_TREE_LOG_OBJECTID) ||
+ (ref->type == BTRFS_REF_DATA &&
+ ref->data_ref.ref_root == BTRFS_TREE_LOG_OBJECTID)))
+ btrfs_ref_tree_mod(fs_info, ref);
- add_pinned_bytes(fs_info, num_bytes, metadata, root_objectid);
- }
+ if (ret == 0 && old_ref_mod >= 0 && new_ref_mod < 0)
+ add_pinned_bytes(fs_info, ref, 1);
return ret;
}
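Note the tree-log test in btrfs_free_extent() is now open-coded twice. A hypothetical helper (not part of this patch) shows the predicate both sites share:

	static inline bool btrfs_ref_is_log_root(const struct btrfs_ref *ref)
	{
		return (ref->type == BTRFS_REF_METADATA &&
			ref->tree_ref.root == BTRFS_TREE_LOG_OBJECTID) ||
		       (ref->type == BTRFS_REF_DATA &&
			ref->data_ref.ref_root == BTRFS_TREE_LOG_OBJECTID);
	}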
u64 offset, u64 ram_bytes,
struct btrfs_key *ins)
{
+ struct btrfs_ref generic_ref = { 0 };
int ret;
BUG_ON(root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID);
- btrfs_ref_tree_mod(root, ins->objectid, ins->offset, 0,
- root->root_key.objectid, owner, offset,
- BTRFS_ADD_DELAYED_EXTENT);
-
- ret = btrfs_add_delayed_data_ref(trans, ins->objectid,
- ins->offset, 0,
- root->root_key.objectid, owner,
- offset, ram_bytes,
- BTRFS_ADD_DELAYED_EXTENT, NULL, NULL);
+ btrfs_init_generic_ref(&generic_ref, BTRFS_ADD_DELAYED_EXTENT,
+ ins->objectid, ins->offset, 0);
+ btrfs_init_data_ref(&generic_ref, root->root_key.objectid, owner, offset);
+ btrfs_ref_tree_mod(root->fs_info, &generic_ref);
+ ret = btrfs_add_delayed_data_ref(trans, &generic_ref,
+ ram_bytes, NULL, NULL);
return ret;
}
extent_op->is_data = false;
extent_op->level = level;
- btrfs_ref_tree_mod(root, ins.objectid, ins.offset, parent,
- root_objectid, level, 0,
- BTRFS_ADD_DELAYED_EXTENT);
btrfs_init_generic_ref(&generic_ref, BTRFS_ADD_DELAYED_EXTENT,
ins.objectid, ins.offset, parent);
generic_ref.real_root = root->root_key.objectid;
btrfs_init_tree_ref(&generic_ref, level, root_objectid);
+ btrfs_ref_tree_mod(fs_info, &generic_ref);
ret = btrfs_add_delayed_tree_ref(trans, &generic_ref,
extent_op, NULL, NULL);
if (ret)
BUG_ON(ret); /* -ENOMEM */
ret = btrfs_dec_ref(trans, root, eb, 0);
BUG_ON(ret); /* -ENOMEM */
- ret = btrfs_set_disk_extent_flags(trans, fs_info, eb->start,
+ ret = btrfs_set_disk_extent_flags(trans, eb->start,
eb->len, flag,
btrfs_header_level(eb), 0);
BUG_ON(ret); /* -ENOMEM */
u64 parent;
struct btrfs_key key;
struct btrfs_key first_key;
+ struct btrfs_ref ref = { 0 };
struct extent_buffer *next;
int level = wc->level;
int reada = 0;
wc->drop_level = level;
find_next_key(path, level, &wc->drop_progress);
- ret = btrfs_free_extent(trans, root, bytenr, fs_info->nodesize,
- parent, root->root_key.objectid,
- level - 1, 0);
+ btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, bytenr,
+ fs_info->nodesize, parent);
+ btrfs_init_tree_ref(&ref, level - 1, root->root_key.objectid);
+ ret = btrfs_free_extent(trans, &ref);
if (ret)
goto out_unlock;
}
else
ret = btrfs_dec_ref(trans, root, eb, 0);
BUG_ON(ret); /* -ENOMEM */
- ret = btrfs_qgroup_trace_leaf_items(trans, eb);
- if (ret) {
- btrfs_err_rl(fs_info,
- "error %d accounting leaf items. Quota is out of sync, rescan required.",
+ if (is_fstree(root->root_key.objectid)) {
+ ret = btrfs_qgroup_trace_leaf_items(trans, eb);
+ if (ret) {
+ btrfs_err_rl(fs_info,
+ "error %d accounting leaf items, quota is out of sync, rescan required",
ret);
+ }
}
}
/* make block locked assertion in btrfs_clean_tree_block happy */
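The new is_fstree() gate above skips qgroup tracing for roots that qgroups never account. For reference, the predicate looks roughly like the following (paraphrased from ctree.h; treat the details as approximate):

	static inline bool is_fstree(u64 rootid)
	{
		if (rootid == BTRFS_FS_TREE_OBJECTID ||
		    ((s64)rootid >= (s64)BTRFS_FIRST_FREE_OBJECTID &&
		     !btrfs_qgroup_level(rootid)))
			return true;
		return false;
	}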
* it while performing the free space search since we have already
* held back allocations.
*/
-static int btrfs_trim_free_extents(struct btrfs_device *device,
- struct fstrim_range *range, u64 *trimmed)
+static int btrfs_trim_free_extents(struct btrfs_device *device, u64 *trimmed)
{
- u64 start, len = 0, end = 0;
+ u64 start = SZ_1M, len = 0, end = 0;
int ret;
- start = max_t(u64, range->start, SZ_1M);
*trimmed = 0;
/* Discard not supported = nothing to do. */
break;
}
- /* Keep going until we satisfy minlen or reach end of space */
- if (len < range->minlen) {
- mutex_unlock(&fs_info->chunk_mutex);
- start += len;
- continue;
- }
-
- /* If we are out of the passed range break */
- if (start > range->start + range->len - 1) {
- mutex_unlock(&fs_info->chunk_mutex);
- break;
- }
-
- start = max(range->start, start);
- len = min(range->len, len);
-
ret = btrfs_issue_discard(device->bdev, start, len,
&bytes);
if (!ret)
start += len;
*trimmed += bytes;
- /* We've trimmed enough */
- if (*trimmed >= range->len)
- break;
-
if (fatal_signal_pending(current)) {
ret = -ERESTARTSYS;
break;
mutex_lock(&fs_info->fs_devices->device_list_mutex);
devices = &fs_info->fs_devices->devices;
list_for_each_entry(device, devices, dev_list) {
- ret = btrfs_trim_free_extents(device, range, &group_trimmed);
+ ret = btrfs_trim_free_extents(device, &group_trimmed);
if (ret) {
dev_failed++;
dev_ret = ret;