// SPDX-License-Identifier: GPL-2.0

#include "misc.h"
#include "ctree.h"
#include "space-info.h"
#include "sysfs.h"
#include "volumes.h"
#include "free-space-cache.h"
#include "ordered-data.h"
#include "transaction.h"
#include "block-group.h"

u64 __pure btrfs_space_info_used(struct btrfs_space_info *s_info,
			  bool may_use_included)
{
	ASSERT(s_info);
	return s_info->bytes_used + s_info->bytes_reserved +
		s_info->bytes_pinned + s_info->bytes_readonly +
		(may_use_included ? s_info->bytes_may_use : 0);
}

/*
 * After adding space to the filesystem, we need to clear the full flags
 * on all the space infos.
 */
void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list)
		found->full = 0;
	rcu_read_unlock();
}

static int create_space_info(struct btrfs_fs_info *info, u64 flags)
{
	struct btrfs_space_info *space_info;
	int i;
	int ret;

	space_info = kzalloc(sizeof(*space_info), GFP_NOFS);
	if (!space_info)
		return -ENOMEM;

	ret = percpu_counter_init(&space_info->total_bytes_pinned, 0,
				  GFP_KERNEL);
	if (ret) {
		kfree(space_info);
		return ret;
	}

	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
		INIT_LIST_HEAD(&space_info->block_groups[i]);
	init_rwsem(&space_info->groups_sem);
	spin_lock_init(&space_info->lock);
	space_info->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
	space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
	INIT_LIST_HEAD(&space_info->ro_bgs);
	INIT_LIST_HEAD(&space_info->tickets);
	INIT_LIST_HEAD(&space_info->priority_tickets);

	ret = btrfs_sysfs_add_space_info_type(info, space_info);
	if (ret)
		return ret;

	list_add_rcu(&space_info->list, &info->space_info);
	if (flags & BTRFS_BLOCK_GROUP_DATA)
		info->data_sinfo = space_info;

	return ret;
}

int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
{
	struct btrfs_super_block *disk_super;
	u64 features;
	u64 flags;
	int mixed = 0;
	int ret;

	disk_super = fs_info->super_copy;
	if (!btrfs_super_root(disk_super))
		return -EINVAL;

	features = btrfs_super_incompat_flags(disk_super);
	if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
		mixed = 1;

	flags = BTRFS_BLOCK_GROUP_SYSTEM;
	ret = create_space_info(fs_info, flags);
	if (ret)
		goto out;

	if (mixed) {
		flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
		ret = create_space_info(fs_info, flags);
	} else {
		flags = BTRFS_BLOCK_GROUP_METADATA;
		ret = create_space_info(fs_info, flags);
		if (ret)
			goto out;

		flags = BTRFS_BLOCK_GROUP_DATA;
		ret = create_space_info(fs_info, flags);
	}
out:
	return ret;
}

void btrfs_update_space_info(struct btrfs_fs_info *info, u64 flags,
			     u64 total_bytes, u64 bytes_used,
			     u64 bytes_readonly,
			     struct btrfs_space_info **space_info)
{
	struct btrfs_space_info *found;
	int factor;

	factor = btrfs_bg_type_to_factor(flags);

	found = btrfs_find_space_info(info, flags);
	ASSERT(found);
	spin_lock(&found->lock);
	found->total_bytes += total_bytes;
	found->disk_total += total_bytes * factor;
	found->bytes_used += bytes_used;
	found->disk_used += bytes_used * factor;
	found->bytes_readonly += bytes_readonly;
	if (total_bytes > 0)
		found->full = 0;
	btrfs_try_granting_tickets(info, found);
	spin_unlock(&found->lock);
	*space_info = found;
}

struct btrfs_space_info *btrfs_find_space_info(struct btrfs_fs_info *info,
					       u64 flags)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list) {
		if (found->flags & flags) {
			rcu_read_unlock();
			return found;
		}
	}
	rcu_read_unlock();
	return NULL;
}

static inline u64 calc_global_rsv_need_space(struct btrfs_block_rsv *global)
{
	return (global->size << 1);
}
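
/*
 * Illustrative arithmetic (numbers assumed, not from this file): a global
 * reserve with size = 512MiB makes this report 1GiB, i.e. callers treat
 * "enough room" as twice the global reserve's current size.
 */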

static int can_overcommit(struct btrfs_fs_info *fs_info,
			  struct btrfs_space_info *space_info, u64 bytes,
			  enum btrfs_reserve_flush_enum flush)
{
	u64 profile;
	u64 avail;
	u64 used;
	int factor;

	/* Don't overcommit when in mixed mode. */
	if (space_info->flags & BTRFS_BLOCK_GROUP_DATA)
		return 0;

	if (space_info->flags & BTRFS_BLOCK_GROUP_SYSTEM)
		profile = btrfs_system_alloc_profile(fs_info);
	else
		profile = btrfs_metadata_alloc_profile(fs_info);

	used = btrfs_space_info_used(space_info, true);
	avail = atomic64_read(&fs_info->free_chunk_space);

	/*
	 * If we have dup, raid1 or raid10 then only half of the free
	 * space is actually usable.  For raid56, the space info used
	 * doesn't include the parity drive, so we don't have to
	 * change the math.
	 */
	factor = btrfs_bg_type_to_factor(profile);
	avail = div_u64(avail, factor);

	/*
	 * If we aren't flushing all things, let us overcommit up to
	 * 1/2 of the space.  If we can flush, don't let us overcommit
	 * too much, let it overcommit up to 1/8 of the space.
	 */
	if (flush == BTRFS_RESERVE_FLUSH_ALL)
		avail >>= 3;
	else
		avail >>= 1;

	if (used + bytes < space_info->total_bytes + avail)
		return 1;
	return 0;
}
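
/*
 * Worked example for the overcommit math above (all numbers assumed): with
 * a RAID1 metadata profile btrfs_bg_type_to_factor() returns 2, so 8GiB of
 * free_chunk_space yields avail = 4GiB.  A BTRFS_RESERVE_FLUSH_ALL caller
 * may then overcommit by 4GiB >> 3 = 512MiB beyond total_bytes, while a
 * non-flushing caller may use 4GiB >> 1 = 2GiB.
 */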

/*
 * This is for space we already have accounted in space_info->bytes_may_use, so
 * basically when we're returning space from block_rsv's.
 */
void btrfs_try_granting_tickets(struct btrfs_fs_info *fs_info,
				struct btrfs_space_info *space_info)
{
	struct list_head *head;
	enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_NO_FLUSH;

	lockdep_assert_held(&space_info->lock);

	head = &space_info->priority_tickets;
again:
	while (!list_empty(head)) {
		struct reserve_ticket *ticket;
		u64 used = btrfs_space_info_used(space_info, true);

		ticket = list_first_entry(head, struct reserve_ticket, list);

		/* Check and see if our ticket can be satisfied now. */
		if ((used + ticket->bytes <= space_info->total_bytes) ||
		    can_overcommit(fs_info, space_info, ticket->bytes, flush)) {
			btrfs_space_info_update_bytes_may_use(fs_info,
							      space_info,
							      ticket->bytes);
			list_del_init(&ticket->list);
			ticket->bytes = 0;
			space_info->tickets_id++;
			wake_up(&ticket->wait);
		} else {
			break;
		}
	}

	if (head == &space_info->priority_tickets) {
		head = &space_info->tickets;
		flush = BTRFS_RESERVE_FLUSH_ALL;
		goto again;
	}
}
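
/*
 * Illustrative walk-through (assumed numbers): with total_bytes = 10GiB,
 * used = 9GiB and queued tickets for 512MiB and then 2GiB, the first is
 * granted (9GiB + 512MiB <= 10GiB), bumping bytes_may_use to 9.5GiB; the
 * 2GiB ticket then fails both checks, so the loop stops and leaves it for
 * the flushing state machine.
 */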

#define DUMP_BLOCK_RSV(fs_info, rsv_name)				\
do {									\
	struct btrfs_block_rsv *__rsv = &(fs_info)->rsv_name;		\
	spin_lock(&__rsv->lock);					\
	btrfs_info(fs_info, #rsv_name ": size %llu reserved %llu",	\
		   __rsv->size, __rsv->reserved);			\
	spin_unlock(&__rsv->lock);					\
} while (0)
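
/*
 * For example, DUMP_BLOCK_RSV(fs_info, global_block_rsv) takes that rsv's
 * spinlock and emits a line like (values illustrative):
 *
 *	global_block_rsv: size 536870912 reserved 131072
 */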

static void __btrfs_dump_space_info(struct btrfs_fs_info *fs_info,
				    struct btrfs_space_info *info)
{
	lockdep_assert_held(&info->lock);

	btrfs_info(fs_info, "space_info %llu has %llu free, is %sfull",
		   info->flags,
		   info->total_bytes - btrfs_space_info_used(info, true),
		   info->full ? "" : "not ");
	btrfs_info(fs_info,
		"space_info total=%llu, used=%llu, pinned=%llu, reserved=%llu, may_use=%llu, readonly=%llu",
		info->total_bytes, info->bytes_used, info->bytes_pinned,
		info->bytes_reserved, info->bytes_may_use,
		info->bytes_readonly);

	DUMP_BLOCK_RSV(fs_info, global_block_rsv);
	DUMP_BLOCK_RSV(fs_info, trans_block_rsv);
	DUMP_BLOCK_RSV(fs_info, chunk_block_rsv);
	DUMP_BLOCK_RSV(fs_info, delayed_block_rsv);
	DUMP_BLOCK_RSV(fs_info, delayed_refs_rsv);
}

void btrfs_dump_space_info(struct btrfs_fs_info *fs_info,
			   struct btrfs_space_info *info, u64 bytes,
			   int dump_block_groups)
{
	struct btrfs_block_group *cache;
	int index = 0;

	spin_lock(&info->lock);
	__btrfs_dump_space_info(fs_info, info);
	spin_unlock(&info->lock);

	if (!dump_block_groups)
		return;

	down_read(&info->groups_sem);
again:
	list_for_each_entry(cache, &info->block_groups[index], list) {
		spin_lock(&cache->lock);
		btrfs_info(fs_info,
			"block group %llu has %llu bytes, %llu used %llu pinned %llu reserved %s",
			cache->start, cache->length, cache->used, cache->pinned,
			cache->reserved, cache->ro ? "[readonly]" : "");
		btrfs_dump_free_space(cache, bytes);
		spin_unlock(&cache->lock);
	}
	if (++index < BTRFS_NR_RAID_TYPES)
		goto again;
	up_read(&info->groups_sem);
}

static void btrfs_writeback_inodes_sb_nr(struct btrfs_fs_info *fs_info,
					 unsigned long nr_pages, int nr_items)
{
	struct super_block *sb = fs_info->sb;

	if (down_read_trylock(&sb->s_umount)) {
		writeback_inodes_sb_nr(sb, nr_pages, WB_REASON_FS_FREE_SPACE);
		up_read(&sb->s_umount);
	} else {
		/*
		 * We needn't worry about the filesystem going from r/w to r/o
		 * even though we don't acquire the ->s_umount mutex, because
		 * the filesystem should guarantee that the delalloc inode
		 * list is empty after the filesystem becomes read-only (all
		 * dirty pages are written to disk).
		 */
		btrfs_start_delalloc_roots(fs_info, nr_items);
		if (!current->journal_info)
			btrfs_wait_ordered_roots(fs_info, nr_items, 0, (u64)-1);
	}
}

static inline u64 calc_reclaim_items_nr(struct btrfs_fs_info *fs_info,
					u64 to_reclaim)
{
	u64 bytes;
	u64 nr;

	bytes = btrfs_calc_insert_metadata_size(fs_info, 1);
	nr = div64_u64(to_reclaim, bytes);
	if (!nr)
		nr = 1;
	return nr;
}
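
/*
 * Rough sizing example (assuming a 16KiB nodesize, for which the insertion
 * metadata size of a single item works out to 256KiB): asking to reclaim
 * 4MiB maps to 4MiB / 256KiB = 16 items, and anything smaller still
 * yields 1.
 */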

#define EXTENT_SIZE_PER_ITEM	SZ_256K

/*
 * Shrink metadata reservations for delalloc.
 */
static void shrink_delalloc(struct btrfs_fs_info *fs_info, u64 to_reclaim,
			    u64 orig, bool wait_ordered)
{
	struct btrfs_space_info *space_info;
	struct btrfs_trans_handle *trans;
	u64 delalloc_bytes;
	u64 dio_bytes;
	u64 async_pages;
	u64 items;
	long time_left;
	unsigned long nr_pages;
	int loops;

	/* Calc the number of pages we need to flush for this space reservation */
	items = calc_reclaim_items_nr(fs_info, to_reclaim);
	to_reclaim = items * EXTENT_SIZE_PER_ITEM;

	trans = (struct btrfs_trans_handle *)current->journal_info;
	space_info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);

	delalloc_bytes = percpu_counter_sum_positive(
						&fs_info->delalloc_bytes);
	dio_bytes = percpu_counter_sum_positive(&fs_info->dio_bytes);
	if (delalloc_bytes == 0 && dio_bytes == 0) {
		if (trans)
			return;
		if (wait_ordered)
			btrfs_wait_ordered_roots(fs_info, items, 0, (u64)-1);
		return;
	}

	/*
	 * If we are doing more ordered than delalloc we need to just wait on
	 * ordered extents, otherwise we'll waste time trying to flush delalloc
	 * that likely won't give us the space back we need.
	 */
	if (dio_bytes > delalloc_bytes)
		wait_ordered = true;

	loops = 0;
	while ((delalloc_bytes || dio_bytes) && loops < 3) {
		nr_pages = min(delalloc_bytes, to_reclaim) >> PAGE_SHIFT;

		/*
		 * Triggers inode writeback for up to nr_pages. This will invoke
		 * ->writepages callback and trigger delalloc filling
		 * (btrfs_run_delalloc_range()).
		 */
		btrfs_writeback_inodes_sb_nr(fs_info, nr_pages, items);

		/*
		 * We need to wait for the compressed pages to start before
		 * we continue.
		 */
		async_pages = atomic_read(&fs_info->async_delalloc_pages);
		if (!async_pages)
			goto skip_async;

		/*
		 * Calculate how many compressed pages we want to be written
		 * before we continue.  I.e. if there are more async pages than
		 * we require, wait_event will wait until nr_pages are written.
		 */
		if (async_pages <= nr_pages)
			async_pages = 0;
		else
			async_pages -= nr_pages;

		wait_event(fs_info->async_submit_wait,
			   atomic_read(&fs_info->async_delalloc_pages) <=
			   (int)async_pages);
skip_async:
		spin_lock(&space_info->lock);
		if (list_empty(&space_info->tickets) &&
		    list_empty(&space_info->priority_tickets)) {
			spin_unlock(&space_info->lock);
			break;
		}
		spin_unlock(&space_info->lock);

		loops++;
		if (wait_ordered && !trans) {
			btrfs_wait_ordered_roots(fs_info, items, 0, (u64)-1);
		} else {
			time_left = schedule_timeout_killable(1);
			if (time_left)
				break;
		}
		delalloc_bytes = percpu_counter_sum_positive(
						&fs_info->delalloc_bytes);
		dio_bytes = percpu_counter_sum_positive(&fs_info->dio_bytes);
	}
}
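
/*
 * Illustrative iteration (assumed numbers, 4KiB pages): items = 16 caps
 * to_reclaim at 16 * 256KiB = 4MiB, so with 1GiB of outstanding delalloc
 * each pass kicks writeback for min(1GiB, 4MiB) >> PAGE_SHIFT = 1024
 * pages, rechecking the ticket lists between the (at most three) loops.
 */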

/*
 * may_commit_transaction - possibly commit the transaction if it's OK to
 * @fs_info - the fs_info for this fs
 * @space_info - the space_info we're trying to reserve from
 *
 * This will check to make sure that committing the transaction will actually
 * get us somewhere and then commit the transaction if it does.  Otherwise it
 * will return -ENOSPC.
 */
static int may_commit_transaction(struct btrfs_fs_info *fs_info,
				  struct btrfs_space_info *space_info)
{
	struct reserve_ticket *ticket = NULL;
	struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_block_rsv;
	struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv;
	struct btrfs_trans_handle *trans;
	u64 bytes_needed;
	u64 reclaim_bytes = 0;
	u64 cur_free_bytes = 0;

	trans = (struct btrfs_trans_handle *)current->journal_info;
	if (trans)
		return -EAGAIN;

	spin_lock(&space_info->lock);
	cur_free_bytes = btrfs_space_info_used(space_info, true);
	if (cur_free_bytes < space_info->total_bytes)
		cur_free_bytes = space_info->total_bytes - cur_free_bytes;
	else
		cur_free_bytes = 0;

	if (!list_empty(&space_info->priority_tickets))
		ticket = list_first_entry(&space_info->priority_tickets,
					  struct reserve_ticket, list);
	else if (!list_empty(&space_info->tickets))
		ticket = list_first_entry(&space_info->tickets,
					  struct reserve_ticket, list);
	bytes_needed = (ticket) ? ticket->bytes : 0;

	if (bytes_needed > cur_free_bytes)
		bytes_needed -= cur_free_bytes;
	else
		bytes_needed = 0;
	spin_unlock(&space_info->lock);

	if (!bytes_needed)
		return 0;

	trans = btrfs_join_transaction(fs_info->extent_root);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	/*
	 * See if there is enough pinned space to make this reservation, or if
	 * we have block groups that are going to be freed, allowing us to
	 * possibly do a chunk allocation the next loop through.
	 */
	if (test_bit(BTRFS_TRANS_HAVE_FREE_BGS, &trans->transaction->flags) ||
	    __percpu_counter_compare(&space_info->total_bytes_pinned,
				     bytes_needed,
				     BTRFS_TOTAL_BYTES_PINNED_BATCH) >= 0)
		goto commit;

	/*
	 * See if there is some space in the delayed insertion reservation for
	 * this reservation.
	 */
	if (space_info != delayed_rsv->space_info)
		goto enospc;

	spin_lock(&delayed_rsv->lock);
	reclaim_bytes += delayed_rsv->reserved;
	spin_unlock(&delayed_rsv->lock);

	spin_lock(&delayed_refs_rsv->lock);
	reclaim_bytes += delayed_refs_rsv->reserved;
	spin_unlock(&delayed_refs_rsv->lock);
	if (reclaim_bytes >= bytes_needed)
		goto commit;
	bytes_needed -= reclaim_bytes;

	if (__percpu_counter_compare(&space_info->total_bytes_pinned,
				     bytes_needed,
				     BTRFS_TOTAL_BYTES_PINNED_BATCH) < 0)
		goto enospc;

commit:
	return btrfs_commit_transaction(trans);
enospc:
	btrfs_end_transaction(trans);
	return -ENOSPC;
}

/*
 * Try to flush some data based on policy set by @state. This is only advisory
 * and may fail for various reasons. The caller is supposed to examine the
 * state of @space_info to detect the outcome.
 */
static void flush_space(struct btrfs_fs_info *fs_info,
			struct btrfs_space_info *space_info, u64 num_bytes,
			int state)
{
	struct btrfs_root *root = fs_info->extent_root;
	struct btrfs_trans_handle *trans;
	int nr;
	int ret = 0;

	switch (state) {
	case FLUSH_DELAYED_ITEMS_NR:
	case FLUSH_DELAYED_ITEMS:
		if (state == FLUSH_DELAYED_ITEMS_NR)
			nr = calc_reclaim_items_nr(fs_info, num_bytes) * 2;
		else
			nr = -1;

		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			break;
		}
		ret = btrfs_run_delayed_items_nr(trans, nr);
		btrfs_end_transaction(trans);
		break;
	case FLUSH_DELALLOC:
	case FLUSH_DELALLOC_WAIT:
		shrink_delalloc(fs_info, num_bytes * 2, num_bytes,
				state == FLUSH_DELALLOC_WAIT);
		break;
	case FLUSH_DELAYED_REFS_NR:
	case FLUSH_DELAYED_REFS:
		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			break;
		}
		if (state == FLUSH_DELAYED_REFS_NR)
			nr = calc_reclaim_items_nr(fs_info, num_bytes);
		else
			nr = 0;
		btrfs_run_delayed_refs(trans, nr);
		btrfs_end_transaction(trans);
		break;
	case ALLOC_CHUNK:
	case ALLOC_CHUNK_FORCE:
		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			break;
		}
		ret = btrfs_chunk_alloc(trans,
				btrfs_metadata_alloc_profile(fs_info),
				(state == ALLOC_CHUNK) ? CHUNK_ALLOC_NO_FORCE :
					CHUNK_ALLOC_FORCE);
		btrfs_end_transaction(trans);
		if (ret > 0 || ret == -ENOSPC)
			ret = 0;
		break;
	case RUN_DELAYED_IPUTS:
		/*
		 * If we have pending delayed iputs then we could free up a
		 * bunch of pinned space, so make sure we run the iputs before
		 * we do our pinned bytes check below.
		 */
		btrfs_run_delayed_iputs(fs_info);
		btrfs_wait_on_delayed_iputs(fs_info);
		break;
	case COMMIT_TRANS:
		ret = may_commit_transaction(fs_info, space_info);
		break;
	default:
		ret = -ENOSPC;
		break;
	}

	trace_btrfs_flush_space(fs_info, space_info->flags, num_bytes, state,
				ret);
	return;
}
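
/*
 * The cases above are ordered roughly from cheapest to most expensive: a
 * hypothetical caller walking the state machine in order would run delayed
 * items first and only reach COMMIT_TRANS (a full transaction commit) after
 * delalloc flushing and chunk allocation failed to satisfy the ticket.
 */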

static inline u64
btrfs_calc_reclaim_metadata_size(struct btrfs_fs_info *fs_info,
				 struct btrfs_space_info *space_info)
{
	struct reserve_ticket *ticket;
	u64 used;
	u64 expected;
	u64 to_reclaim = 0;

	list_for_each_entry(ticket, &space_info->tickets, list)
		to_reclaim += ticket->bytes;
	list_for_each_entry(ticket, &space_info->priority_tickets, list)
		to_reclaim += ticket->bytes;
	if (to_reclaim)
		return to_reclaim;

	to_reclaim = min_t(u64, num_online_cpus() * SZ_1M, SZ_16M);
	if (can_overcommit(fs_info, space_info, to_reclaim,
			   BTRFS_RESERVE_FLUSH_ALL))
		return 0;

	used = btrfs_space_info_used(space_info, true);

	if (can_overcommit(fs_info, space_info, SZ_1M, BTRFS_RESERVE_FLUSH_ALL))
		expected = div_factor_fine(space_info->total_bytes, 95);
	else
		expected = div_factor_fine(space_info->total_bytes, 90);

	if (used > expected)
		to_reclaim = used - expected;
	else
		to_reclaim = 0;
	to_reclaim = min(to_reclaim, space_info->bytes_may_use +
				     space_info->bytes_reserved);
	return to_reclaim;
}
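
/*
 * Worked example for the no-ticket path (assumed numbers): with
 * total_bytes = 100GiB, used = 95GiB and no room to overcommit even 1MiB,
 * expected = 90GiB (the 90% factor), so to_reclaim = 5GiB, which is then
 * clamped to bytes_may_use + bytes_reserved.
 */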

static inline int need_do_async_reclaim(struct btrfs_fs_info *fs_info,
					struct btrfs_space_info *space_info,
					u64 used)
{
	u64 thresh = div_factor_fine(space_info->total_bytes, 98);

	/* If we're just plain full then async reclaim just slows us down. */
	if ((space_info->bytes_used + space_info->bytes_reserved) >= thresh)
		return 0;

	if (!btrfs_calc_reclaim_metadata_size(fs_info, space_info))
		return 0;

	return (used >= thresh && !btrfs_fs_closing(fs_info) &&
		!test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state));
}
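
/*
 * Example threshold (assumed numbers): for total_bytes = 100GiB the 98%
 * factor puts thresh at 98GiB, so background reclaim only kicks in once
 * overall usage crosses 98GiB while used + reserved alone stays below it.
 */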

/*
 * maybe_fail_all_tickets - we've exhausted our flushing, start failing tickets
 * @fs_info - fs_info for this fs
 * @space_info - the space info we were flushing
 *
 * We call this when we've exhausted our flushing ability and haven't made
 * progress in satisfying tickets.  The reservation code handles tickets in
 * order, so if there is a large ticket first and then smaller ones we could
 * very well satisfy the smaller tickets.  This will attempt to wake up any
 * tickets in the list to catch this case.
 *
 * This function returns true if it was able to make progress by clearing out
 * other tickets, or if it stumbles across a ticket that was smaller than the
 * first ticket.
 */
static bool maybe_fail_all_tickets(struct btrfs_fs_info *fs_info,
				   struct btrfs_space_info *space_info)
{
	struct reserve_ticket *ticket;
	u64 tickets_id = space_info->tickets_id;
	u64 first_ticket_bytes = 0;

	if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
		btrfs_info(fs_info, "cannot satisfy tickets, dumping space info");
		__btrfs_dump_space_info(fs_info, space_info);
	}

	while (!list_empty(&space_info->tickets) &&
	       tickets_id == space_info->tickets_id) {
		ticket = list_first_entry(&space_info->tickets,
					  struct reserve_ticket, list);

		/*
		 * may_commit_transaction will avoid committing the transaction
		 * if it doesn't feel like the space reclaimed by the commit
		 * would result in the ticket succeeding.  However if we have a
		 * smaller ticket in the queue it may be small enough to be
		 * satisfied by committing the transaction, so if any
		 * subsequent ticket is smaller than the first ticket go ahead
		 * and send us back for another loop through the enospc flushing
		 * code.
		 */
		if (first_ticket_bytes == 0)
			first_ticket_bytes = ticket->bytes;
		else if (first_ticket_bytes > ticket->bytes)
			return true;

		if (btrfs_test_opt(fs_info, ENOSPC_DEBUG))
			btrfs_info(fs_info, "failing ticket with %llu bytes",
				   ticket->bytes);

		list_del_init(&ticket->list);
		ticket->error = -ENOSPC;
		wake_up(&ticket->wait);

		/*
		 * We're just throwing tickets away, so more flushing may not
		 * trip over btrfs_try_granting_tickets, so we need to call it
		 * here to see if we can make progress with the next ticket in
		 * the list.
		 */
		btrfs_try_granting_tickets(fs_info, space_info);
	}
	return (tickets_id != space_info->tickets_id);
}

/*
 * This is for normal flushers, we can wait all goddamned day if we want to.  We
 * will loop and continuously try to flush as long as we are making progress.
 * We count progress as clearing off tickets each time we have to loop.
 */
static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
{
	struct btrfs_fs_info *fs_info;
	struct btrfs_space_info *space_info;
	u64 to_reclaim;
	int flush_state;
	int commit_cycles = 0;
	u64 last_tickets_id;

	fs_info = container_of(work, struct btrfs_fs_info, async_reclaim_work);
	space_info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);

	spin_lock(&space_info->lock);
	to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info);
	if (!to_reclaim) {
		space_info->flush = 0;
		spin_unlock(&space_info->lock);
		return;
	}
	last_tickets_id = space_info->tickets_id;
	spin_unlock(&space_info->lock);

	flush_state = FLUSH_DELAYED_ITEMS_NR;
	do {
		flush_space(fs_info, space_info, to_reclaim, flush_state);
		spin_lock(&space_info->lock);
		if (list_empty(&space_info->tickets)) {
			space_info->flush = 0;
			spin_unlock(&space_info->lock);
			return;
		}
		to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info,
							      space_info);
		if (last_tickets_id == space_info->tickets_id) {
			flush_state++;
		} else {
			last_tickets_id = space_info->tickets_id;
			flush_state = FLUSH_DELAYED_ITEMS_NR;
			if (commit_cycles)
				commit_cycles--;
		}

		/*
		 * We don't want to force a chunk allocation until we've tried
		 * pretty hard to reclaim space.  Think of the case where we
		 * freed up a bunch of space and so have a lot of pinned space
		 * to reclaim.  We would rather use that than possibly create
		 * an underutilized metadata chunk.  So if this is our first
		 * run through the flushing state machine skip
		 * ALLOC_CHUNK_FORCE and commit the transaction.  If nothing
		 * has changed the next go around then we can force a chunk
		 * allocation.
		 */
		if (flush_state == ALLOC_CHUNK_FORCE && !commit_cycles)
			flush_state++;

		if (flush_state > COMMIT_TRANS) {
			commit_cycles++;
			if (commit_cycles > 2) {
				if (maybe_fail_all_tickets(fs_info, space_info)) {
					flush_state = FLUSH_DELAYED_ITEMS_NR;
					commit_cycles--;
				} else {
					space_info->flush = 0;
				}
			} else {
				flush_state = FLUSH_DELAYED_ITEMS_NR;
			}
		}
		spin_unlock(&space_info->lock);
	} while (flush_state <= COMMIT_TRANS);
}

void btrfs_init_async_reclaim_work(struct work_struct *work)
{
	INIT_WORK(work, btrfs_async_reclaim_metadata_space);
}

static const enum btrfs_flush_state priority_flush_states[] = {
	FLUSH_DELAYED_ITEMS_NR,
	FLUSH_DELAYED_ITEMS,
	ALLOC_CHUNK,
};

static const enum btrfs_flush_state evict_flush_states[] = {
	FLUSH_DELAYED_ITEMS_NR,
	FLUSH_DELAYED_ITEMS,
	FLUSH_DELAYED_REFS_NR,
	FLUSH_DELAYED_REFS,
	FLUSH_DELALLOC,
	FLUSH_DELALLOC_WAIT,
	ALLOC_CHUNK,
	COMMIT_TRANS,
};
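
/*
 * The priority list above deliberately stays cheap (no delalloc flushing,
 * no transaction commit), while the evict list escalates through delayed
 * refs and delalloc all the way to COMMIT_TRANS; see flush_space() for
 * what each state does.
 */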

static void priority_reclaim_metadata_space(struct btrfs_fs_info *fs_info,
				struct btrfs_space_info *space_info,
				struct reserve_ticket *ticket,
				const enum btrfs_flush_state *states,
				int states_nr)
{
	u64 to_reclaim;
	int flush_state;

	spin_lock(&space_info->lock);
	to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info);
	if (!to_reclaim) {
		spin_unlock(&space_info->lock);
		return;
	}
	spin_unlock(&space_info->lock);

	flush_state = 0;
	do {
		flush_space(fs_info, space_info, to_reclaim, states[flush_state]);
		flush_state++;
		spin_lock(&space_info->lock);
		if (ticket->bytes == 0) {
			spin_unlock(&space_info->lock);
			return;
		}
		spin_unlock(&space_info->lock);
	} while (flush_state < states_nr);
}

static void wait_reserve_ticket(struct btrfs_fs_info *fs_info,
				struct btrfs_space_info *space_info,
				struct reserve_ticket *ticket)
{
	DEFINE_WAIT(wait);
	int ret = 0;

	spin_lock(&space_info->lock);
	while (ticket->bytes > 0 && ticket->error == 0) {
		ret = prepare_to_wait_event(&ticket->wait, &wait, TASK_KILLABLE);
		if (ret) {
			/*
			 * Delete us from the list.  After we unlock the space
			 * info, we don't want the async reclaim job to reserve
			 * space for this ticket.  If that would happen, then
			 * the ticket's task would not know that space was
			 * reserved despite getting an error, resulting in a
			 * space leak (bytes_may_use counter of our space_info).
			 */
			list_del_init(&ticket->list);
			ticket->error = -EINTR;
			break;
		}
		spin_unlock(&space_info->lock);

		schedule();

		finish_wait(&ticket->wait, &wait);
		spin_lock(&space_info->lock);
	}
	spin_unlock(&space_info->lock);
}

/**
 * handle_reserve_ticket - do the appropriate flushing and waiting for a ticket
 * @fs_info - the fs_info for this fs
 * @space_info - the space_info for the reservation
 * @ticket - the ticket for the reservation
 * @flush - how much we can flush
 *
 * This does the work of figuring out how to flush for the ticket, waiting for
 * the reservation, and returning the appropriate error if there is one.
 */
static int handle_reserve_ticket(struct btrfs_fs_info *fs_info,
				 struct btrfs_space_info *space_info,
				 struct reserve_ticket *ticket,
				 enum btrfs_reserve_flush_enum flush)
{
	int ret;

	switch (flush) {
	case BTRFS_RESERVE_FLUSH_ALL:
		wait_reserve_ticket(fs_info, space_info, ticket);
		break;
	case BTRFS_RESERVE_FLUSH_LIMIT:
		priority_reclaim_metadata_space(fs_info, space_info, ticket,
						priority_flush_states,
						ARRAY_SIZE(priority_flush_states));
		break;
	case BTRFS_RESERVE_FLUSH_EVICT:
		priority_reclaim_metadata_space(fs_info, space_info, ticket,
						evict_flush_states,
						ARRAY_SIZE(evict_flush_states));
		break;
	default:
		ASSERT(0);
		break;
	}

	spin_lock(&space_info->lock);
	ret = ticket->error;
	if (ticket->bytes || ticket->error) {
		/*
		 * Need to delete here for priority tickets.  For regular
		 * tickets either the async reclaim job deletes the ticket from
		 * the list or we delete it ourselves at wait_reserve_ticket().
		 */
		list_del_init(&ticket->list);
		if (!ret)
			ret = -ENOSPC;
	}
	spin_unlock(&space_info->lock);
	ASSERT(list_empty(&ticket->list));
	/*
	 * Check that we can't have an error set if the reservation succeeded,
	 * as that would confuse tasks and lead them to error out without
	 * releasing reserved space (if an error happens the expectation is
	 * that space wasn't reserved at all).
	 */
	ASSERT(!(ticket->bytes == 0 && ticket->error));
	return ret;
}

/**
 * __reserve_metadata_bytes - try to reserve bytes from the block_rsv's space
 * @fs_info - the fs_info for this fs
 * @space_info - the space info we want to allocate from
 * @orig_bytes - the number of bytes we want
 * @flush - whether or not we can flush to make our reservation
 *
 * This will reserve orig_bytes number of bytes from the space info associated
 * with the block_rsv.  If there is not enough space it will make an attempt to
 * flush out space to make room.  It will do this by flushing delalloc if
 * possible or committing the transaction.  If @flush is BTRFS_RESERVE_NO_FLUSH
 * then no attempts to regain reservations will be made and this will fail if
 * there is not enough space already.
 */
static int __reserve_metadata_bytes(struct btrfs_fs_info *fs_info,
				    struct btrfs_space_info *space_info,
				    u64 orig_bytes,
				    enum btrfs_reserve_flush_enum flush)
{
	struct reserve_ticket ticket;
	u64 used;
	int ret = 0;
	bool pending_tickets;

	ASSERT(orig_bytes);
	ASSERT(!current->journal_info || flush != BTRFS_RESERVE_FLUSH_ALL);

	spin_lock(&space_info->lock);
	ret = -ENOSPC;
	used = btrfs_space_info_used(space_info, true);
	pending_tickets = !list_empty(&space_info->tickets) ||
		!list_empty(&space_info->priority_tickets);

	/*
	 * Carry on if we have enough space (short-circuit) OR call
	 * can_overcommit() to ensure we can overcommit to continue.
	 */
	if (!pending_tickets &&
	    ((used + orig_bytes <= space_info->total_bytes) ||
	     can_overcommit(fs_info, space_info, orig_bytes, flush))) {
		btrfs_space_info_update_bytes_may_use(fs_info, space_info,
						      orig_bytes);
		ret = 0;
	}

	/*
	 * If we couldn't make a reservation then setup our reservation ticket
	 * and kick the async worker if it's not already running.
	 *
	 * If we are a priority flusher then we just need to add our ticket to
	 * the list and we will do our own flushing further down.
	 */
	if (ret && flush != BTRFS_RESERVE_NO_FLUSH) {
		ticket.bytes = orig_bytes;
		ticket.error = 0;
		init_waitqueue_head(&ticket.wait);
		if (flush == BTRFS_RESERVE_FLUSH_ALL) {
			list_add_tail(&ticket.list, &space_info->tickets);
			if (!space_info->flush) {
				space_info->flush = 1;
				trace_btrfs_trigger_flush(fs_info,
							  space_info->flags,
							  orig_bytes, flush,
							  "enospc");
				queue_work(system_unbound_wq,
					   &fs_info->async_reclaim_work);
			}
		} else {
			list_add_tail(&ticket.list,
				      &space_info->priority_tickets);
		}
	} else if (!ret && space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
		used += orig_bytes;
		/*
		 * We will do the space reservation dance during log replay,
		 * which means we won't have fs_info->fs_root set, so don't do
		 * the async reclaim as we will panic.
		 */
		if (!test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags) &&
		    need_do_async_reclaim(fs_info, space_info, used) &&
		    !work_busy(&fs_info->async_reclaim_work)) {
			trace_btrfs_trigger_flush(fs_info, space_info->flags,
						  orig_bytes, flush, "preempt");
			queue_work(system_unbound_wq,
				   &fs_info->async_reclaim_work);
		}
	}
	spin_unlock(&space_info->lock);
	if (!ret || flush == BTRFS_RESERVE_NO_FLUSH)
		return ret;

	return handle_reserve_ticket(fs_info, space_info, &ticket, flush);
}

/**
 * btrfs_reserve_metadata_bytes - try to reserve bytes from the block_rsv's
 * space
 * @root - the root we're allocating for
 * @block_rsv - the block_rsv we're allocating for
 * @orig_bytes - the number of bytes we want
 * @flush - whether or not we can flush to make our reservation
 *
 * This will reserve orig_bytes number of bytes from the space info associated
 * with the block_rsv.  If there is not enough space it will make an attempt to
 * flush out space to make room.  It will do this by flushing delalloc if
 * possible or committing the transaction.  If @flush is BTRFS_RESERVE_NO_FLUSH
 * then no attempts to regain reservations will be made and this will fail if
 * there is not enough space already.
 */
int btrfs_reserve_metadata_bytes(struct btrfs_root *root,
				 struct btrfs_block_rsv *block_rsv,
				 u64 orig_bytes,
				 enum btrfs_reserve_flush_enum flush)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
	int ret;

	ret = __reserve_metadata_bytes(fs_info, block_rsv->space_info,
				       orig_bytes, flush);
	if (ret == -ENOSPC &&
	    unlikely(root->orphan_cleanup_state == ORPHAN_CLEANUP_STARTED)) {
		if (block_rsv != global_rsv &&
		    !btrfs_block_rsv_use_bytes(global_rsv, orig_bytes))
			ret = 0;
	}
	if (ret == -ENOSPC) {
		trace_btrfs_space_reservation(fs_info, "space_info:enospc",
					      block_rsv->space_info->flags,
					      orig_bytes, 1);

		if (btrfs_test_opt(fs_info, ENOSPC_DEBUG))
			btrfs_dump_space_info(fs_info, block_rsv->space_info,
					      orig_bytes, 0);
	}
	return ret;
}
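
/*
 * Hypothetical caller sketch (illustrative only, not part of this file):
 *
 *	u64 bytes = btrfs_calc_insert_metadata_size(fs_info, 1);
 *	int err = btrfs_reserve_metadata_bytes(root,
 *					       &fs_info->trans_block_rsv,
 *					       bytes,
 *					       BTRFS_RESERVE_FLUSH_ALL);
 *	if (err)
 *		return err;	(commonly -ENOSPC after flushing fails)
 *
 * On success the bytes are accounted in the space_info's bytes_may_use and
 * must later be released back through the block reserve helpers.
 */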