// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/blkdev.h>
#include <linux/uuid.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "locking.h"
#include "tree-log.h"
#include "inode-map.h"
#include "volumes.h"
#include "dev-replace.h"
#include "qgroup.h"

#define BTRFS_ROOT_TRANS_TAG 0

static const unsigned int btrfs_blocked_trans_types[TRANS_STATE_MAX] = {
        [TRANS_STATE_RUNNING]           = 0U,
        [TRANS_STATE_BLOCKED]           = __TRANS_START,
        [TRANS_STATE_COMMIT_START]      = (__TRANS_START | __TRANS_ATTACH),
        [TRANS_STATE_COMMIT_DOING]      = (__TRANS_START |
                                           __TRANS_ATTACH |
                                           __TRANS_JOIN),
        [TRANS_STATE_UNBLOCKED]         = (__TRANS_START |
                                           __TRANS_ATTACH |
                                           __TRANS_JOIN |
                                           __TRANS_JOIN_NOLOCK),
        [TRANS_STATE_COMPLETED]         = (__TRANS_START |
                                           __TRANS_ATTACH |
                                           __TRANS_JOIN |
                                           __TRANS_JOIN_NOLOCK),
};

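/*
 * Drop one reference on @transaction and free it once the last reference
 * is gone.  Any block groups still on ->deleted_bgs are released here,
 * since an aborted commit never reached btrfs_finish_extent_commit().
 */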
void btrfs_put_transaction(struct btrfs_transaction *transaction)
{
        WARN_ON(refcount_read(&transaction->use_count) == 0);
        if (refcount_dec_and_test(&transaction->use_count)) {
                BUG_ON(!list_empty(&transaction->list));
                WARN_ON(!RB_EMPTY_ROOT(
                                &transaction->delayed_refs.href_root.rb_root));
                if (transaction->delayed_refs.pending_csums)
                        btrfs_err(transaction->fs_info,
                                  "pending csums is %llu",
                                  transaction->delayed_refs.pending_csums);
                /*
                 * If any block groups are found in ->deleted_bgs then it's
                 * because the transaction was aborted and a commit did not
                 * happen (things failed before writing the new superblock
                 * and calling btrfs_finish_extent_commit()), so we can not
                 * discard the physical locations of the block groups.
                 */
                while (!list_empty(&transaction->deleted_bgs)) {
                        struct btrfs_block_group_cache *cache;

                        cache = list_first_entry(&transaction->deleted_bgs,
                                                 struct btrfs_block_group_cache,
                                                 bg_list);
                        list_del_init(&cache->bg_list);
                        btrfs_put_block_group_trimming(cache);
                        btrfs_put_block_group(cache);
                }
                WARN_ON(!list_empty(&transaction->dev_update_list));
                kfree(transaction);
        }
}

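/*
 * Switch the in-memory commit roots of all trees modified in this
 * transaction to their current root nodes, and free any roots that were
 * dropped during the transaction.
 */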
static noinline void switch_commit_roots(struct btrfs_transaction *trans)
{
        struct btrfs_fs_info *fs_info = trans->fs_info;
        struct btrfs_root *root, *tmp;

        down_write(&fs_info->commit_root_sem);
        list_for_each_entry_safe(root, tmp, &trans->switch_commits,
                                 dirty_list) {
                list_del_init(&root->dirty_list);
                free_extent_buffer(root->commit_root);
                root->commit_root = btrfs_root_node(root);
                if (is_fstree(root->root_key.objectid))
                        btrfs_unpin_free_ino(root);
                extent_io_tree_release(&root->dirty_log_pages);
                btrfs_qgroup_clean_swapped_blocks(root);
        }

        /* We can free old roots now. */
        spin_lock(&trans->dropped_roots_lock);
        while (!list_empty(&trans->dropped_roots)) {
                root = list_first_entry(&trans->dropped_roots,
                                        struct btrfs_root, root_list);
                list_del_init(&root->root_list);
                spin_unlock(&trans->dropped_roots_lock);
                btrfs_drop_and_free_fs_root(fs_info, root);
                spin_lock(&trans->dropped_roots_lock);
        }
        spin_unlock(&trans->dropped_roots_lock);
        up_write(&fs_info->commit_root_sem);
}

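/*
 * Helpers for the counter of "external" writers, i.e. handles whose type
 * is in TRANS_EXTWRITERS.  The commit path uses this count to know when
 * no more external writers can add dirty data to the transaction.
 */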
static inline void extwriter_counter_inc(struct btrfs_transaction *trans,
                                         unsigned int type)
{
        if (type & TRANS_EXTWRITERS)
                atomic_inc(&trans->num_extwriters);
}

static inline void extwriter_counter_dec(struct btrfs_transaction *trans,
                                         unsigned int type)
{
        if (type & TRANS_EXTWRITERS)
                atomic_dec(&trans->num_extwriters);
}

static inline void extwriter_counter_init(struct btrfs_transaction *trans,
                                          unsigned int type)
{
        atomic_set(&trans->num_extwriters, ((type & TRANS_EXTWRITERS) ? 1 : 0));
}

static inline int extwriter_counter_read(struct btrfs_transaction *trans)
{
        return atomic_read(&trans->num_extwriters);
}

/*
 * either allocate a new transaction or hop into the existing one
 */
static noinline int join_transaction(struct btrfs_fs_info *fs_info,
                                     unsigned int type)
{
        struct btrfs_transaction *cur_trans;

        spin_lock(&fs_info->trans_lock);
loop:
        /* The file system has been taken offline. No new transactions. */
        if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
                spin_unlock(&fs_info->trans_lock);
                return -EROFS;
        }

        cur_trans = fs_info->running_transaction;
        if (cur_trans) {
                if (cur_trans->aborted) {
                        spin_unlock(&fs_info->trans_lock);
                        return cur_trans->aborted;
                }
                if (btrfs_blocked_trans_types[cur_trans->state] & type) {
                        spin_unlock(&fs_info->trans_lock);
                        return -EBUSY;
                }
                refcount_inc(&cur_trans->use_count);
                atomic_inc(&cur_trans->num_writers);
                extwriter_counter_inc(cur_trans, type);
                spin_unlock(&fs_info->trans_lock);
                return 0;
        }
        spin_unlock(&fs_info->trans_lock);

        /*
         * If we are ATTACH, we just want to catch the current transaction,
         * and commit it. If there is no transaction, just return ENOENT.
         */
        if (type == TRANS_ATTACH)
                return -ENOENT;

        /*
         * JOIN_NOLOCK only happens during the transaction commit, so
         * it is impossible that ->running_transaction is NULL
         */
        BUG_ON(type == TRANS_JOIN_NOLOCK);

        cur_trans = kmalloc(sizeof(*cur_trans), GFP_NOFS);
        if (!cur_trans)
                return -ENOMEM;

        spin_lock(&fs_info->trans_lock);
        if (fs_info->running_transaction) {
                /*
                 * someone started a transaction after we unlocked.  Make sure
                 * to redo the checks above
                 */
                kfree(cur_trans);
                goto loop;
        } else if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
                spin_unlock(&fs_info->trans_lock);
                kfree(cur_trans);
                return -EROFS;
        }

        cur_trans->fs_info = fs_info;
        atomic_set(&cur_trans->num_writers, 1);
        extwriter_counter_init(cur_trans, type);
        init_waitqueue_head(&cur_trans->writer_wait);
        init_waitqueue_head(&cur_trans->commit_wait);
        cur_trans->state = TRANS_STATE_RUNNING;
        /*
         * One for this trans handle, one so it will live on until we
         * commit the transaction.
         */
        refcount_set(&cur_trans->use_count, 2);
        cur_trans->flags = 0;
        cur_trans->start_time = ktime_get_seconds();

        memset(&cur_trans->delayed_refs, 0, sizeof(cur_trans->delayed_refs));

        cur_trans->delayed_refs.href_root = RB_ROOT_CACHED;
        cur_trans->delayed_refs.dirty_extent_root = RB_ROOT;
        atomic_set(&cur_trans->delayed_refs.num_entries, 0);

        /*
         * although the tree mod log is per file system and not per transaction,
         * the log must never go across transaction boundaries.
         */
        smp_mb();
        if (!list_empty(&fs_info->tree_mod_seq_list))
                WARN(1, KERN_ERR "BTRFS: tree_mod_seq_list not empty when creating a fresh transaction\n");
        if (!RB_EMPTY_ROOT(&fs_info->tree_mod_log))
                WARN(1, KERN_ERR "BTRFS: tree_mod_log rb tree not empty when creating a fresh transaction\n");
        atomic64_set(&fs_info->tree_mod_seq, 0);

        spin_lock_init(&cur_trans->delayed_refs.lock);

        INIT_LIST_HEAD(&cur_trans->pending_snapshots);
        INIT_LIST_HEAD(&cur_trans->dev_update_list);
        INIT_LIST_HEAD(&cur_trans->switch_commits);
        INIT_LIST_HEAD(&cur_trans->dirty_bgs);
        INIT_LIST_HEAD(&cur_trans->io_bgs);
        INIT_LIST_HEAD(&cur_trans->dropped_roots);
        mutex_init(&cur_trans->cache_write_mutex);
        spin_lock_init(&cur_trans->dirty_bgs_lock);
        INIT_LIST_HEAD(&cur_trans->deleted_bgs);
        spin_lock_init(&cur_trans->dropped_roots_lock);
        list_add_tail(&cur_trans->list, &fs_info->trans_list);
        extent_io_tree_init(fs_info, &cur_trans->dirty_pages,
                        IO_TREE_TRANS_DIRTY_PAGES, fs_info->btree_inode);
        fs_info->generation++;
        cur_trans->transid = fs_info->generation;
        fs_info->running_transaction = cur_trans;
        cur_trans->aborted = 0;
        spin_unlock(&fs_info->trans_lock);

        return 0;
}

/*
 * this does all the record keeping required to make sure that a reference
 * counted root is properly recorded in a given transaction.  This is required
 * to make sure the old root from before we joined the transaction is deleted
 * when the transaction commits
 */
static int record_root_in_trans(struct btrfs_trans_handle *trans,
                               struct btrfs_root *root,
                               int force)
{
        struct btrfs_fs_info *fs_info = root->fs_info;

        if ((test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
            root->last_trans < trans->transid) || force) {
                WARN_ON(root == fs_info->extent_root);
                WARN_ON(!force && root->commit_root != root->node);

                /*
                 * see below for IN_TRANS_SETUP usage rules
                 * we have the reloc mutex held now, so there
                 * is only one writer in this function
                 */
                set_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state);

                /* make sure readers find IN_TRANS_SETUP before
                 * they find our root->last_trans update
                 */
                smp_wmb();

                spin_lock(&fs_info->fs_roots_radix_lock);
                if (root->last_trans == trans->transid && !force) {
                        spin_unlock(&fs_info->fs_roots_radix_lock);
                        return 0;
                }
                radix_tree_tag_set(&fs_info->fs_roots_radix,
                                   (unsigned long)root->root_key.objectid,
                                   BTRFS_ROOT_TRANS_TAG);
                spin_unlock(&fs_info->fs_roots_radix_lock);
                root->last_trans = trans->transid;

                /* this is pretty tricky.  We don't want to
                 * take the relocation lock in btrfs_record_root_in_trans
                 * unless we're really doing the first setup for this root in
                 * this transaction.
                 *
                 * Normally we'd use root->last_trans as a flag to decide
                 * if we want to take the expensive mutex.
                 *
                 * But, we have to set root->last_trans before we
                 * init the relocation root, otherwise, we trip over warnings
                 * in ctree.c.  The solution used here is to flag ourselves
                 * with root IN_TRANS_SETUP.  When this is 1, we're still
                 * fixing up the reloc trees and everyone must wait.
                 *
                 * When this is zero, they can trust root->last_trans and fly
                 * through btrfs_record_root_in_trans without having to take the
                 * lock.  smp_wmb() makes sure that all the writes above are
                 * done before we pop in the zero below
                 */
                btrfs_init_reloc_root(trans, root);
                smp_mb__before_atomic();
                clear_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state);
        }
        return 0;
}

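/*
 * Add a root that is being deleted to the transaction's list of dropped
 * roots and make sure it is no longer tagged for an update at commit time.
 */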
void btrfs_add_dropped_root(struct btrfs_trans_handle *trans,
                            struct btrfs_root *root)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_transaction *cur_trans = trans->transaction;

        /* Add ourselves to the transaction dropped list */
        spin_lock(&cur_trans->dropped_roots_lock);
        list_add_tail(&root->root_list, &cur_trans->dropped_roots);
        spin_unlock(&cur_trans->dropped_roots_lock);

        /* Make sure we don't try to update the root at commit time */
        spin_lock(&fs_info->fs_roots_radix_lock);
        radix_tree_tag_clear(&fs_info->fs_roots_radix,
                             (unsigned long)root->root_key.objectid,
                             BTRFS_ROOT_TRANS_TAG);
        spin_unlock(&fs_info->fs_roots_radix_lock);
}

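/*
 * Lockless fast path wrapper around record_root_in_trans(): if the root
 * was already recorded in this transaction and is not in the middle of
 * IN_TRANS_SETUP, return without taking the reloc mutex.
 */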
int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
                               struct btrfs_root *root)
{
        struct btrfs_fs_info *fs_info = root->fs_info;

        if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state))
                return 0;

        /*
         * see record_root_in_trans for comments about IN_TRANS_SETUP usage
         * and barriers
         */
        smp_rmb();
        if (root->last_trans == trans->transid &&
            !test_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state))
                return 0;

        mutex_lock(&fs_info->reloc_mutex);
        record_root_in_trans(trans, root, 0);
        mutex_unlock(&fs_info->reloc_mutex);

        return 0;
}

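/*
 * A transaction is "blocked" while its state is between BLOCKED and
 * UNBLOCKED and it has not been aborted; new handles must wait for it.
 */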
static inline int is_transaction_blocked(struct btrfs_transaction *trans)
{
        return (trans->state >= TRANS_STATE_BLOCKED &&
                trans->state < TRANS_STATE_UNBLOCKED &&
                !trans->aborted);
}

/*
 * Wait for a commit against the current transaction to become unblocked.
 * When this is done, it is safe to start a new transaction, but the current
 * transaction might not be fully on disk.
 */
static void wait_current_trans(struct btrfs_fs_info *fs_info)
{
        struct btrfs_transaction *cur_trans;

        spin_lock(&fs_info->trans_lock);
        cur_trans = fs_info->running_transaction;
        if (cur_trans && is_transaction_blocked(cur_trans)) {
                refcount_inc(&cur_trans->use_count);
                spin_unlock(&fs_info->trans_lock);

                wait_event(fs_info->transaction_wait,
                           cur_trans->state >= TRANS_STATE_UNBLOCKED ||
                           cur_trans->aborted);
                btrfs_put_transaction(cur_trans);
        } else {
                spin_unlock(&fs_info->trans_lock);
        }
}

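/*
 * Decide whether starting this type of handle should wait for a blocked
 * transaction: only TRANS_START waits, and never during log recovery.
 */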
static int may_wait_transaction(struct btrfs_fs_info *fs_info, int type)
{
        if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
                return 0;

        if (type == TRANS_START)
                return 1;

        return 0;
}

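/*
 * Check whether we need to reserve space for creating a relocation root:
 * relocation must be active, the root must be reference counted, must not
 * be the reloc tree itself, and must not already have a reloc root.
 */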
static inline bool need_reserve_reloc_root(struct btrfs_root *root)
{
        struct btrfs_fs_info *fs_info = root->fs_info;

        if (!fs_info->reloc_ctl ||
            !test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
            root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
            root->reloc_root)
                return false;

        return true;
}

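/*
 * Common implementation behind all the transaction start variants:
 * reserve metadata and qgroup space for @num_items items if requested,
 * then join or create the running transaction according to @type.
 */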
static struct btrfs_trans_handle *
start_transaction(struct btrfs_root *root, unsigned int num_items,
                  unsigned int type, enum btrfs_reserve_flush_enum flush,
                  bool enforce_qgroups)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv;
        struct btrfs_trans_handle *h;
        struct btrfs_transaction *cur_trans;
        u64 num_bytes = 0;
        u64 qgroup_reserved = 0;
        bool reloc_reserved = false;
        int ret;

        /* Send isn't supposed to start transactions. */
        ASSERT(current->journal_info != BTRFS_SEND_TRANS_STUB);

        if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
                return ERR_PTR(-EROFS);

        if (current->journal_info) {
                WARN_ON(type & TRANS_EXTWRITERS);
                h = current->journal_info;
                refcount_inc(&h->use_count);
                WARN_ON(refcount_read(&h->use_count) > 2);
                h->orig_rsv = h->block_rsv;
                h->block_rsv = NULL;
                goto got_it;
        }

        /*
         * Do the reservation before we join the transaction so we can do all
         * the appropriate flushing if need be.
         */
        if (num_items && root != fs_info->chunk_root) {
                struct btrfs_block_rsv *rsv = &fs_info->trans_block_rsv;
                u64 delayed_refs_bytes = 0;

                qgroup_reserved = num_items * fs_info->nodesize;
                ret = btrfs_qgroup_reserve_meta_pertrans(root, qgroup_reserved,
                                enforce_qgroups);
                if (ret)
                        return ERR_PTR(ret);

                /*
                 * We want to reserve all the bytes we may need all at once, so
                 * we only do 1 enospc flushing cycle per transaction start.  We
                 * accomplish this by simply assuming we'll do 2 x num_items
                 * worth of delayed refs updates in this trans handle, and
                 * refill that amount for whatever is missing in the reserve.
                 */
                num_bytes = btrfs_calc_trans_metadata_size(fs_info, num_items);
                if (delayed_refs_rsv->full == 0) {
                        delayed_refs_bytes = num_bytes;
                        num_bytes <<= 1;
                }

                /*
                 * Do the reservation for the relocation root creation
                 */
                if (need_reserve_reloc_root(root)) {
                        num_bytes += fs_info->nodesize;
                        reloc_reserved = true;
                }

                ret = btrfs_block_rsv_add(root, rsv, num_bytes, flush);
                if (ret)
                        goto reserve_fail;
                if (delayed_refs_bytes) {
                        btrfs_migrate_to_delayed_refs_rsv(fs_info, rsv,
                                                          delayed_refs_bytes);
                        num_bytes -= delayed_refs_bytes;
                }
        } else if (num_items == 0 && flush == BTRFS_RESERVE_FLUSH_ALL &&
                   !delayed_refs_rsv->full) {
                /*
                 * Some people call with btrfs_start_transaction(root, 0)
                 * because they can be throttled, but have some other mechanism
                 * for reserving space.  We still want these guys to refill the
                 * delayed block_rsv so just add one item's worth of reservation
                 * here.
                 */
                ret = btrfs_delayed_refs_rsv_refill(fs_info, flush);
                if (ret)
                        goto reserve_fail;
        }
again:
        h = kmem_cache_zalloc(btrfs_trans_handle_cachep, GFP_NOFS);
        if (!h) {
                ret = -ENOMEM;
                goto alloc_fail;
        }

        /*
         * If we are JOIN_NOLOCK we're already committing a transaction and
         * waiting on this guy, so we don't need to do the sb_start_intwrite
         * because we're already holding a ref.  We need this because we could
         * have raced in and did an fsync() on a file which can kick a commit
         * and then we deadlock with somebody doing a freeze.
         *
         * If we are ATTACH, it means we just want to catch the current
         * transaction and commit it, so we needn't do sb_start_intwrite().
         */
        if (type & __TRANS_FREEZABLE)
                sb_start_intwrite(fs_info->sb);

        if (may_wait_transaction(fs_info, type))
                wait_current_trans(fs_info);

        do {
                ret = join_transaction(fs_info, type);
                if (ret == -EBUSY) {
                        wait_current_trans(fs_info);
                        if (unlikely(type == TRANS_ATTACH))
                                ret = -ENOENT;
                }
        } while (ret == -EBUSY);

        if (ret < 0)
                goto join_fail;

        cur_trans = fs_info->running_transaction;

        h->transid = cur_trans->transid;
        h->transaction = cur_trans;
        h->root = root;
        refcount_set(&h->use_count, 1);
        h->fs_info = root->fs_info;

        h->type = type;
        h->can_flush_pending_bgs = true;
        INIT_LIST_HEAD(&h->new_bgs);

        smp_mb();
        if (cur_trans->state >= TRANS_STATE_BLOCKED &&
            may_wait_transaction(fs_info, type)) {
                current->journal_info = h;
                btrfs_commit_transaction(h);
                goto again;
        }

        if (num_bytes) {
                trace_btrfs_space_reservation(fs_info, "transaction",
                                              h->transid, num_bytes, 1);
                h->block_rsv = &fs_info->trans_block_rsv;
                h->bytes_reserved = num_bytes;
                h->reloc_reserved = reloc_reserved;
        }

got_it:
        btrfs_record_root_in_trans(h, root);

        if (!current->journal_info)
                current->journal_info = h;
        return h;

join_fail:
        if (type & __TRANS_FREEZABLE)
                sb_end_intwrite(fs_info->sb);
        kmem_cache_free(btrfs_trans_handle_cachep, h);
alloc_fail:
        if (num_bytes)
                btrfs_block_rsv_release(fs_info, &fs_info->trans_block_rsv,
                                        num_bytes);
reserve_fail:
        btrfs_qgroup_free_meta_pertrans(root, qgroup_reserved);
        return ERR_PTR(ret);
}

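/*
 * Start a new transaction with space reserved for @num_items tree
 * operations, flushing as much as necessary to make the reservation.
 */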
struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
                                                   unsigned int num_items)
{
        return start_transaction(root, num_items, TRANS_START,
                                 BTRFS_RESERVE_FLUSH_ALL, true);
}

struct btrfs_trans_handle *btrfs_start_transaction_fallback_global_rsv(
                                        struct btrfs_root *root,
                                        unsigned int num_items,
                                        int min_factor)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_trans_handle *trans;
        u64 num_bytes;
        int ret;

        /*
         * We have two callers: unlink and block group removal.  The
         * former should succeed even if we will temporarily exceed
         * quota and the latter operates on the extent root so
         * qgroup enforcement is ignored anyway.
         */
        trans = start_transaction(root, num_items, TRANS_START,
                                  BTRFS_RESERVE_FLUSH_ALL, false);
        if (!IS_ERR(trans) || PTR_ERR(trans) != -ENOSPC)
                return trans;

        trans = btrfs_start_transaction(root, 0);
        if (IS_ERR(trans))
                return trans;

        num_bytes = btrfs_calc_trans_metadata_size(fs_info, num_items);
        ret = btrfs_cond_migrate_bytes(fs_info, &fs_info->trans_block_rsv,
                                       num_bytes, min_factor);
        if (ret) {
                btrfs_end_transaction(trans);
                return ERR_PTR(ret);
        }

        trans->block_rsv = &fs_info->trans_block_rsv;
        trans->bytes_reserved = num_bytes;
        trace_btrfs_space_reservation(fs_info, "transaction",
                                      trans->transid, num_bytes, 1);

        return trans;
}

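/*
 * Join the running transaction without reserving any space.  The _nolock
 * variant is used from within the commit path itself and skips
 * sb_start_intwrite(), since the committer already holds a reference.
 */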
struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root)
{
        return start_transaction(root, 0, TRANS_JOIN, BTRFS_RESERVE_NO_FLUSH,
                                 true);
}

struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root)
{
        return start_transaction(root, 0, TRANS_JOIN_NOLOCK,
                                 BTRFS_RESERVE_NO_FLUSH, true);
}

/*
 * btrfs_attach_transaction() - catch the running transaction
 *
 * It is used when we want to commit the current transaction, but
 * don't want to start a new one.
 *
 * Note: If this function returns -ENOENT, it just means there is no
 * running transaction. But it is possible that the inactive transaction
 * is still in memory, not fully on disk. If you hope there is no
 * inactive transaction in the fs when -ENOENT is returned, you should
 * invoke
 *     btrfs_attach_transaction_barrier()
 */
struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root)
{
        return start_transaction(root, 0, TRANS_ATTACH,
                                 BTRFS_RESERVE_NO_FLUSH, true);
}

/*
 * btrfs_attach_transaction_barrier() - catch the running transaction
 *
 * It is similar to the above function, but the difference is that this
 * one will wait for any inactive transaction until it is fully complete.
 */
struct btrfs_trans_handle *
btrfs_attach_transaction_barrier(struct btrfs_root *root)
{
        struct btrfs_trans_handle *trans;

        trans = start_transaction(root, 0, TRANS_ATTACH,
                                  BTRFS_RESERVE_NO_FLUSH, true);
        if (trans == ERR_PTR(-ENOENT))
                btrfs_wait_for_commit(root->fs_info, 0);

        return trans;
}

/* wait for a transaction commit to be fully complete */
static noinline void wait_for_commit(struct btrfs_transaction *commit)
{
        wait_event(commit->commit_wait, commit->state == TRANS_STATE_COMPLETED);
}

int btrfs_wait_for_commit(struct btrfs_fs_info *fs_info, u64 transid)
{
        struct btrfs_transaction *cur_trans = NULL, *t;
        int ret = 0;

        if (transid) {
                if (transid <= fs_info->last_trans_committed)
                        goto out;

                /* find specified transaction */
                spin_lock(&fs_info->trans_lock);
                list_for_each_entry(t, &fs_info->trans_list, list) {
                        if (t->transid == transid) {
                                cur_trans = t;
                                refcount_inc(&cur_trans->use_count);
                                ret = 0;
                                break;
                        }
                        if (t->transid > transid) {
                                ret = 0;
                                break;
                        }
                }
                spin_unlock(&fs_info->trans_lock);

                /*
                 * The specified transaction doesn't exist, or we
                 * raced with btrfs_commit_transaction
                 */
                if (!cur_trans) {
                        if (transid > fs_info->last_trans_committed)
                                ret = -EINVAL;
                        goto out;
                }
        } else {
                /* find newest transaction that is committing | committed */
                spin_lock(&fs_info->trans_lock);
                list_for_each_entry_reverse(t, &fs_info->trans_list,
                                            list) {
                        if (t->state >= TRANS_STATE_COMMIT_START) {
                                if (t->state == TRANS_STATE_COMPLETED)
                                        break;
                                cur_trans = t;
                                refcount_inc(&cur_trans->use_count);
                                break;
                        }
                }
                spin_unlock(&fs_info->trans_lock);
                if (!cur_trans)
                        goto out;  /* nothing committing|committed */
        }

        wait_for_commit(cur_trans);
        btrfs_put_transaction(cur_trans);
out:
        return ret;
}

void btrfs_throttle(struct btrfs_fs_info *fs_info)
{
        wait_current_trans(fs_info);
}

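/*
 * Decide whether the current transaction should be ended soon: either
 * the delayed refs have become too expensive to keep accumulating or the
 * global block reserve is running low.
 */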
static int should_end_transaction(struct btrfs_trans_handle *trans)
{
        struct btrfs_fs_info *fs_info = trans->fs_info;

        if (btrfs_check_space_for_delayed_refs(fs_info))
                return 1;

        return !!btrfs_block_rsv_check(&fs_info->global_block_rsv, 5);
}

int btrfs_should_end_transaction(struct btrfs_trans_handle *trans)
{
        struct btrfs_transaction *cur_trans = trans->transaction;

        smp_mb();
        if (cur_trans->state >= TRANS_STATE_BLOCKED ||
            cur_trans->delayed_refs.flushing)
                return 1;

        return should_end_transaction(trans);
}

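/*
 * Give back any metadata space this handle still has reserved from the
 * transaction block reserve.
 */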
static void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans)
{
        struct btrfs_fs_info *fs_info = trans->fs_info;

        if (!trans->block_rsv) {
                ASSERT(!trans->bytes_reserved);
                return;
        }

        if (!trans->bytes_reserved)
                return;

        ASSERT(trans->block_rsv == &fs_info->trans_block_rsv);
        trace_btrfs_space_reservation(fs_info, "transaction",
                                      trans->transid, trans->bytes_reserved, 0);
        btrfs_block_rsv_release(fs_info, trans->block_rsv,
                                trans->bytes_reserved);
        trans->bytes_reserved = 0;
}

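/*
 * Common tail for ending a transaction handle: release reservations,
 * create any pending block groups and drop our writer counts.  If
 * @throttle is set and a commit has started, commit the transaction
 * ourselves instead of just waking the transaction kthread.
 */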
static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
                                   int throttle)
{
        struct btrfs_fs_info *info = trans->fs_info;
        struct btrfs_transaction *cur_trans = trans->transaction;
        int lock = (trans->type != TRANS_JOIN_NOLOCK);
        int err = 0;

        if (refcount_read(&trans->use_count) > 1) {
                refcount_dec(&trans->use_count);
                trans->block_rsv = trans->orig_rsv;
                return 0;
        }

        btrfs_trans_release_metadata(trans);
        trans->block_rsv = NULL;

        btrfs_create_pending_block_groups(trans);

        btrfs_trans_release_chunk_metadata(trans);

        if (lock && READ_ONCE(cur_trans->state) == TRANS_STATE_BLOCKED) {
                if (throttle)
                        return btrfs_commit_transaction(trans);
                else
                        wake_up_process(info->transaction_kthread);
        }

        if (trans->type & __TRANS_FREEZABLE)
                sb_end_intwrite(info->sb);

        WARN_ON(cur_trans != info->running_transaction);
        WARN_ON(atomic_read(&cur_trans->num_writers) < 1);
        atomic_dec(&cur_trans->num_writers);
        extwriter_counter_dec(cur_trans, trans->type);

        cond_wake_up(&cur_trans->writer_wait);
        btrfs_put_transaction(cur_trans);

        if (current->journal_info == trans)
                current->journal_info = NULL;

        if (throttle)
                btrfs_run_delayed_iputs(info);

        if (trans->aborted ||
            test_bit(BTRFS_FS_STATE_ERROR, &info->fs_state)) {
                wake_up_process(info->transaction_kthread);
                err = -EIO;
        }

        kmem_cache_free(btrfs_trans_handle_cachep, trans);
        return err;
}

int btrfs_end_transaction(struct btrfs_trans_handle *trans)
{
        return __btrfs_end_transaction(trans, 0);
}

int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans)
{
        return __btrfs_end_transaction(trans, 1);
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are sent to disk, but it does not wait on them
 */
int btrfs_write_marked_extents(struct btrfs_fs_info *fs_info,
                               struct extent_io_tree *dirty_pages, int mark)
{
        int err = 0;
        int werr = 0;
        struct address_space *mapping = fs_info->btree_inode->i_mapping;
        struct extent_state *cached_state = NULL;
        u64 start = 0;
        u64 end;

        atomic_inc(&BTRFS_I(fs_info->btree_inode)->sync_writers);
        while (!find_first_extent_bit(dirty_pages, start, &start, &end,
                                      mark, &cached_state)) {
                bool wait_writeback = false;

                err = convert_extent_bit(dirty_pages, start, end,
                                         EXTENT_NEED_WAIT,
                                         mark, &cached_state);
                /*
                 * convert_extent_bit can return -ENOMEM, which is most of the
                 * time a temporary error. So when it happens, ignore the error
                 * and wait for writeback of this range to finish - because we
                 * failed to set the bit EXTENT_NEED_WAIT for the range, a call
                 * to __btrfs_wait_marked_extents() would not know that
                 * writeback for this range started and therefore wouldn't
                 * wait for it to finish - we don't want to commit a
                 * superblock that points to btree nodes/leaves for which
                 * writeback hasn't finished yet (and without errors).
                 * We cleanup any entries left in the io tree when committing
                 * the transaction (through extent_io_tree_release()).
                 */
                if (err == -ENOMEM) {
                        err = 0;
                        wait_writeback = true;
                }
                if (!err)
                        err = filemap_fdatawrite_range(mapping, start, end);
                if (err)
                        werr = err;
                else if (wait_writeback)
                        werr = filemap_fdatawait_range(mapping, start, end);
                free_extent_state(cached_state);
                cached_state = NULL;
                cond_resched();
                start = end + 1;
        }
        atomic_dec(&BTRFS_I(fs_info->btree_inode)->sync_writers);
        return werr;
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are on disk for transaction or log commit.  We wait
 * on all the pages and clear them from the dirty pages state tree
 */
static int __btrfs_wait_marked_extents(struct btrfs_fs_info *fs_info,
                                       struct extent_io_tree *dirty_pages)
{
        int err = 0;
        int werr = 0;
        struct address_space *mapping = fs_info->btree_inode->i_mapping;
        struct extent_state *cached_state = NULL;
        u64 start = 0;
        u64 end;

        while (!find_first_extent_bit(dirty_pages, start, &start, &end,
                                      EXTENT_NEED_WAIT, &cached_state)) {
                /*
                 * Ignore -ENOMEM errors returned by clear_extent_bit().
                 * When committing the transaction, we'll remove any entries
                 * left in the io tree. For a log commit, we don't remove them
                 * after committing the log because the tree can be accessed
                 * concurrently - we do it only at transaction commit time when
                 * it's safe to do it (through extent_io_tree_release()).
                 */
                err = clear_extent_bit(dirty_pages, start, end,
                                       EXTENT_NEED_WAIT, 0, 0, &cached_state);
                if (err == -ENOMEM)
                        err = 0;
                if (!err)
                        err = filemap_fdatawait_range(mapping, start, end);
                if (err)
                        werr = err;
                free_extent_state(cached_state);
                cached_state = NULL;
                cond_resched();
                start = end + 1;
        }
        if (err)
                werr = err;
        return werr;
}

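/*
 * Wait for writeback of all dirty btree extents of a transaction and
 * report a btree write error if one was recorded on the fs.
 */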
int btrfs_wait_extents(struct btrfs_fs_info *fs_info,
                       struct extent_io_tree *dirty_pages)
{
        bool errors = false;
        int err;

        err = __btrfs_wait_marked_extents(fs_info, dirty_pages);
        if (test_and_clear_bit(BTRFS_FS_BTREE_ERR, &fs_info->flags))
                errors = true;

        if (errors && !err)
                err = -EIO;
        return err;
}

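/*
 * Log-tree counterpart of btrfs_wait_extents(): wait for writeback of the
 * log root's dirty extents and check the per-log error bits, selected by
 * @mark (EXTENT_DIRTY for the first log, EXTENT_NEW for the second).
 */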
int btrfs_wait_tree_log_extents(struct btrfs_root *log_root, int mark)
{
        struct btrfs_fs_info *fs_info = log_root->fs_info;
        struct extent_io_tree *dirty_pages = &log_root->dirty_log_pages;
        bool errors = false;
        int err;

        ASSERT(log_root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID);

        err = __btrfs_wait_marked_extents(fs_info, dirty_pages);
        if ((mark & EXTENT_DIRTY) &&
            test_and_clear_bit(BTRFS_FS_LOG1_ERR, &fs_info->flags))
                errors = true;

        if ((mark & EXTENT_NEW) &&
            test_and_clear_bit(BTRFS_FS_LOG2_ERR, &fs_info->flags))
                errors = true;

        if (errors && !err)
                err = -EIO;
        return err;
}

/*
 * When btree blocks are allocated the corresponding extents are marked dirty.
 * This function ensures such extents are persisted on disk for transaction or
 * log commit.
 *
 * @trans: transaction whose dirty pages we'd like to write
 */
static int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans)
{
        int ret;
        int ret2;
        struct extent_io_tree *dirty_pages = &trans->transaction->dirty_pages;
        struct btrfs_fs_info *fs_info = trans->fs_info;
        struct blk_plug plug;

        blk_start_plug(&plug);
        ret = btrfs_write_marked_extents(fs_info, dirty_pages, EXTENT_DIRTY);
        blk_finish_plug(&plug);
        ret2 = btrfs_wait_extents(fs_info, dirty_pages);

        extent_io_tree_release(&trans->transaction->dirty_pages);

        if (ret)
                return ret;
        else if (ret2)
                return ret2;
        else
                return 0;
}

/*
 * this is used to update the root pointer in the tree of tree roots.
 *
 * But, in the case of the extent allocation tree, updating the root
 * pointer may allocate blocks which may change the root of the extent
 * allocation tree.
 *
 * So, this loops and repeats and makes sure the cowonly root didn't
 * change while the root pointer was being updated in the metadata.
 */
static int update_cowonly_root(struct btrfs_trans_handle *trans,
                               struct btrfs_root *root)
{
        int ret;
        u64 old_root_bytenr;
        u64 old_root_used;
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_root *tree_root = fs_info->tree_root;

        old_root_used = btrfs_root_used(&root->root_item);

        while (1) {
                old_root_bytenr = btrfs_root_bytenr(&root->root_item);
                if (old_root_bytenr == root->node->start &&
                    old_root_used == btrfs_root_used(&root->root_item))
                        break;

                btrfs_set_root_node(&root->root_item, root->node);
                ret = btrfs_update_root(trans, tree_root,
                                        &root->root_key,
                                        &root->root_item);
                if (ret)
                        return ret;

                old_root_used = btrfs_root_used(&root->root_item);
        }

        return 0;
}

/*
 * update all the cowonly tree roots on disk
 *
 * The error handling in this function may not be obvious. Any of the
 * failures will cause the file system to go offline. We still need
 * to clean up the delayed refs.
 */
static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans)
{
        struct btrfs_fs_info *fs_info = trans->fs_info;
        struct list_head *dirty_bgs = &trans->transaction->dirty_bgs;
        struct list_head *io_bgs = &trans->transaction->io_bgs;
        struct list_head *next;
        struct extent_buffer *eb;
        int ret;

        eb = btrfs_lock_root_node(fs_info->tree_root);
        ret = btrfs_cow_block(trans, fs_info->tree_root, eb, NULL,
                              0, &eb);
        btrfs_tree_unlock(eb);
        free_extent_buffer(eb);

        if (ret)
                return ret;

        ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
        if (ret)
                return ret;

        ret = btrfs_run_dev_stats(trans);
        if (ret)
                return ret;
        ret = btrfs_run_dev_replace(trans);
        if (ret)
                return ret;
        ret = btrfs_run_qgroups(trans);
        if (ret)
                return ret;

        ret = btrfs_setup_space_cache(trans);
        if (ret)
                return ret;

        /* run_qgroups might have added some more refs */
        ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
        if (ret)
                return ret;
again:
        while (!list_empty(&fs_info->dirty_cowonly_roots)) {
                struct btrfs_root *root;
                next = fs_info->dirty_cowonly_roots.next;
                list_del_init(next);
                root = list_entry(next, struct btrfs_root, dirty_list);
                clear_bit(BTRFS_ROOT_DIRTY, &root->state);

                if (root != fs_info->extent_root)
                        list_add_tail(&root->dirty_list,
                                      &trans->transaction->switch_commits);
                ret = update_cowonly_root(trans, root);
                if (ret)
                        return ret;
                ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
                if (ret)
                        return ret;
        }

        while (!list_empty(dirty_bgs) || !list_empty(io_bgs)) {
                ret = btrfs_write_dirty_block_groups(trans);
                if (ret)
                        return ret;
                ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
                if (ret)
                        return ret;
        }

        if (!list_empty(&fs_info->dirty_cowonly_roots))
                goto again;

        list_add_tail(&fs_info->extent_root->dirty_list,
                      &trans->transaction->switch_commits);

        /* Update dev-replace pointer once everything is committed */
        fs_info->dev_replace.committed_cursor_left =
                fs_info->dev_replace.cursor_left_last_write_of_item;

        return 0;
}

/*
 * dead roots are old snapshots that need to be deleted.  This allocates
 * a dirty root struct and adds it into the list of dead roots that need to
 * be deleted
 */
void btrfs_add_dead_root(struct btrfs_root *root)
{
        struct btrfs_fs_info *fs_info = root->fs_info;

        spin_lock(&fs_info->trans_lock);
        if (list_empty(&root->root_list))
                list_add_tail(&root->root_list, &fs_info->dead_roots);
        spin_unlock(&fs_info->trans_lock);
}

/*
 * update all the fs tree roots on disk that were modified in this
 * transaction
 */
static noinline int commit_fs_roots(struct btrfs_trans_handle *trans)
{
        struct btrfs_fs_info *fs_info = trans->fs_info;
        struct btrfs_root *gang[8];
        int i;
        int ret;
        int err = 0;

        spin_lock(&fs_info->fs_roots_radix_lock);
        while (1) {
                ret = radix_tree_gang_lookup_tag(&fs_info->fs_roots_radix,
                                                 (void **)gang, 0,
                                                 ARRAY_SIZE(gang),
                                                 BTRFS_ROOT_TRANS_TAG);
                if (ret == 0)
                        break;
                for (i = 0; i < ret; i++) {
                        struct btrfs_root *root = gang[i];
                        radix_tree_tag_clear(&fs_info->fs_roots_radix,
                                        (unsigned long)root->root_key.objectid,
                                        BTRFS_ROOT_TRANS_TAG);
                        spin_unlock(&fs_info->fs_roots_radix_lock);

                        btrfs_free_log(trans, root);
                        btrfs_update_reloc_root(trans, root);

                        btrfs_save_ino_cache(root, trans);

                        /* see comments in should_cow_block() */
                        clear_bit(BTRFS_ROOT_FORCE_COW, &root->state);
                        smp_mb__after_atomic();

                        if (root->commit_root != root->node) {
                                list_add_tail(&root->dirty_list,
                                        &trans->transaction->switch_commits);
                                btrfs_set_root_node(&root->root_item,
                                                    root->node);
                        }

                        err = btrfs_update_root(trans, fs_info->tree_root,
                                                &root->root_key,
                                                &root->root_item);
                        spin_lock(&fs_info->fs_roots_radix_lock);
                        if (err)
                                break;
                        btrfs_qgroup_free_meta_all_pertrans(root);
                }
        }
        spin_unlock(&fs_info->fs_roots_radix_lock);
        return err;
}

/*
 * defrag a given btree.
 * Every leaf in the btree is read and defragged.
 */
int btrfs_defrag_root(struct btrfs_root *root)
{
        struct btrfs_fs_info *info = root->fs_info;
        struct btrfs_trans_handle *trans;
        int ret;

        if (test_and_set_bit(BTRFS_ROOT_DEFRAG_RUNNING, &root->state))
                return 0;

        while (1) {
                trans = btrfs_start_transaction(root, 0);
                if (IS_ERR(trans))
                        return PTR_ERR(trans);

                ret = btrfs_defrag_leaves(trans, root);

                btrfs_end_transaction(trans);
                btrfs_btree_balance_dirty(info);
                cond_resched();

                if (btrfs_fs_closing(info) || ret != -EAGAIN)
                        break;

                if (btrfs_defrag_cancelled(info)) {
                        btrfs_debug(info, "defrag_root cancelled");
                        ret = -EAGAIN;
                        break;
                }
        }
        clear_bit(BTRFS_ROOT_DEFRAG_RUNNING, &root->state);
        return ret;
}

/*
 * Do all the special snapshot related qgroup dirty hacks.
 *
 * This does all the needed qgroup inheritance and dirty hacks, like
 * switching commit roots inside one transaction and writing all the btree
 * blocks to disk, so that qgroups work correctly.
 */
static int qgroup_account_snapshot(struct btrfs_trans_handle *trans,
                                   struct btrfs_root *src,
                                   struct btrfs_root *parent,
                                   struct btrfs_qgroup_inherit *inherit,
                                   u64 dst_objectid)
{
        struct btrfs_fs_info *fs_info = src->fs_info;
        int ret;

        /*
         * Save some performance in the case that qgroups are not
         * enabled. If this check races with the ioctl, rescan will
         * kick in anyway.
         */
        if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
                return 0;

        /*
         * Ensure dirty @src will be committed.  Otherwise, after
         * commit_fs_roots() and switch_commit_roots(), any dirty but not
         * recorded root will never be updated again, causing an outdated
         * root item.
         */
        record_root_in_trans(trans, src, 1);

        /*
         * We are going to commit the transaction, see the comment in
         * btrfs_commit_transaction() for why we lock tree_log_mutex
         */
        mutex_lock(&fs_info->tree_log_mutex);

        ret = commit_fs_roots(trans);
        if (ret)
                goto out;
        ret = btrfs_qgroup_account_extents(trans);
        if (ret < 0)
                goto out;

        /* Now the qgroups are all updated, we can inherit them to the new qgroups */
        ret = btrfs_qgroup_inherit(trans, src->root_key.objectid, dst_objectid,
                                   inherit);
        if (ret < 0)
                goto out;

        /*
         * Now we do a simplified commit transaction, which will:
         * 1) commit all subvolume and extent trees
         *    To ensure all subvolume and extent trees have a valid
         *    commit_root for the later insert_dir_item() to account against
         * 2) write all btree blocks onto disk
         *    This is to make sure later btree modifications will be cowed;
         *    otherwise a commit_root could be populated and cause wrong
         *    qgroup numbers
         * In this simplified commit, we don't really care about other trees
         * like chunk and root tree, as they won't affect qgroups.
         * And we don't write the super block to avoid a half committed status.
         */
        ret = commit_cowonly_roots(trans);
        if (ret)
                goto out;
        switch_commit_roots(trans->transaction);
        ret = btrfs_write_and_wait_transaction(trans);
        if (ret)
                btrfs_handle_fs_error(fs_info, ret,
                        "Error while writing out transaction for qgroup");

out:
        mutex_unlock(&fs_info->tree_log_mutex);

        /*
         * Force the parent root to be updated, as we recorded it before so
         * its last_trans == cur_transid.  Otherwise it won't be committed
         * again onto disk after the later insert_dir_item().
         */
        if (!ret)
                record_root_in_trans(trans, parent, 1);
        return ret;
}

1348 /*
1349  * new snapshots need to be created at a very specific time in the
1350  * transaction commit.  This does the actual creation.
1351  *
1352  * Note:
1353  * If the error which may affect the commitment of the current transaction
1354  * happens, we should return the error number. If the error which just affect
1355  * the creation of the pending snapshots, just return 0.
1356  */
1357 static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
1358                                    struct btrfs_pending_snapshot *pending)
1359 {
1360
1361         struct btrfs_fs_info *fs_info = trans->fs_info;
1362         struct btrfs_key key;
1363         struct btrfs_root_item *new_root_item;
1364         struct btrfs_root *tree_root = fs_info->tree_root;
1365         struct btrfs_root *root = pending->root;
1366         struct btrfs_root *parent_root;
1367         struct btrfs_block_rsv *rsv;
1368         struct inode *parent_inode;
1369         struct btrfs_path *path;
1370         struct btrfs_dir_item *dir_item;
1371         struct dentry *dentry;
1372         struct extent_buffer *tmp;
1373         struct extent_buffer *old;
1374         struct timespec64 cur_time;
1375         int ret = 0;
1376         u64 to_reserve = 0;
1377         u64 index = 0;
1378         u64 objectid;
1379         u64 root_flags;
1380         uuid_le new_uuid;
1381
1382         ASSERT(pending->path);
1383         path = pending->path;
1384
1385         ASSERT(pending->root_item);
1386         new_root_item = pending->root_item;
1387
1388         pending->error = btrfs_find_free_objectid(tree_root, &objectid);
1389         if (pending->error)
1390                 goto no_free_objectid;
1391
1392         /*
1393          * Make qgroup accounting skip the qgroupid of this new snapshot,
1394          * as it will be accounted for by the later btrfs_qgroup_inherit().
1395          */
1396         btrfs_set_skip_qgroup(trans, objectid);
1397
1398         btrfs_reloc_pre_snapshot(pending, &to_reserve);
1399
1400         if (to_reserve > 0) {
1401                 pending->error = btrfs_block_rsv_add(root,
1402                                                      &pending->block_rsv,
1403                                                      to_reserve,
1404                                                      BTRFS_RESERVE_NO_FLUSH);
1405                 if (pending->error)
1406                         goto clear_skip_qgroup;
1407         }
1408
1409         key.objectid = objectid;
1410         key.offset = (u64)-1;
1411         key.type = BTRFS_ROOT_ITEM_KEY;
1412
1413         rsv = trans->block_rsv;
1414         trans->block_rsv = &pending->block_rsv;
1415         trans->bytes_reserved = trans->block_rsv->reserved;
1416         trace_btrfs_space_reservation(fs_info, "transaction",
1417                                       trans->transid,
1418                                       trans->bytes_reserved, 1);
1419         dentry = pending->dentry;
1420         parent_inode = pending->dir;
1421         parent_root = BTRFS_I(parent_inode)->root;
1422         record_root_in_trans(trans, parent_root, 0);
1423
1424         cur_time = current_time(parent_inode);
1425
1426         /*
1427          * insert the directory item
1428          */
1429         ret = btrfs_set_inode_index(BTRFS_I(parent_inode), &index);
1430         BUG_ON(ret); /* -ENOMEM */
1431
1432         /* check if there is a file/dir which has the same name. */
1433         dir_item = btrfs_lookup_dir_item(NULL, parent_root, path,
1434                                          btrfs_ino(BTRFS_I(parent_inode)),
1435                                          dentry->d_name.name,
1436                                          dentry->d_name.len, 0);
1437         if (dir_item != NULL && !IS_ERR(dir_item)) {
1438                 pending->error = -EEXIST;
1439                 goto dir_item_existed;
1440         } else if (IS_ERR(dir_item)) {
1441                 ret = PTR_ERR(dir_item);
1442                 btrfs_abort_transaction(trans, ret);
1443                 goto fail;
1444         }
1445         btrfs_release_path(path);
1446
1447         /*
1448          * Pull in the delayed directory update and the delayed
1449          * inode item now; otherwise we would corrupt the
1450          * filesystem during the snapshot creation, as the new root
1451          * would not include those pending changes.
1452          */
1453         ret = btrfs_run_delayed_items(trans);
1454         if (ret) {      /* Transaction aborted */
1455                 btrfs_abort_transaction(trans, ret);
1456                 goto fail;
1457         }
1458
1459         record_root_in_trans(trans, root, 0);
1460         btrfs_set_root_last_snapshot(&root->root_item, trans->transid);
1461         memcpy(new_root_item, &root->root_item, sizeof(*new_root_item));
1462         btrfs_check_and_init_root_item(new_root_item);
1463
1464         root_flags = btrfs_root_flags(new_root_item);
1465         if (pending->readonly)
1466                 root_flags |= BTRFS_ROOT_SUBVOL_RDONLY;
1467         else
1468                 root_flags &= ~BTRFS_ROOT_SUBVOL_RDONLY;
1469         btrfs_set_root_flags(new_root_item, root_flags);
1470
1471         btrfs_set_root_generation_v2(new_root_item, trans->transid);
1473         uuid_le_gen(&new_uuid);
1474         memcpy(new_root_item->uuid, new_uuid.b, BTRFS_UUID_SIZE);
1475         memcpy(new_root_item->parent_uuid, root->root_item.uuid,
1476                         BTRFS_UUID_SIZE);
1477         if (!(root_flags & BTRFS_ROOT_SUBVOL_RDONLY)) {
1478                 memset(new_root_item->received_uuid, 0,
1479                        sizeof(new_root_item->received_uuid));
1480                 memset(&new_root_item->stime, 0, sizeof(new_root_item->stime));
1481                 memset(&new_root_item->rtime, 0, sizeof(new_root_item->rtime));
1482                 btrfs_set_root_stransid(new_root_item, 0);
1483                 btrfs_set_root_rtransid(new_root_item, 0);
1484         }
1485         btrfs_set_stack_timespec_sec(&new_root_item->otime, cur_time.tv_sec);
1486         btrfs_set_stack_timespec_nsec(&new_root_item->otime, cur_time.tv_nsec);
1487         btrfs_set_root_otransid(new_root_item, trans->transid);
1488
1489         old = btrfs_lock_root_node(root);
1490         ret = btrfs_cow_block(trans, root, old, NULL, 0, &old);
1491         if (ret) {
1492                 btrfs_tree_unlock(old);
1493                 free_extent_buffer(old);
1494                 btrfs_abort_transaction(trans, ret);
1495                 goto fail;
1496         }
1497
1498         btrfs_set_lock_blocking_write(old);
1499
1500         ret = btrfs_copy_root(trans, root, old, &tmp, objectid);
1501         /* clean up in any case */
1502         btrfs_tree_unlock(old);
1503         free_extent_buffer(old);
1504         if (ret) {
1505                 btrfs_abort_transaction(trans, ret);
1506                 goto fail;
1507         }
1508         /* see comments in should_cow_block() */
1509         set_bit(BTRFS_ROOT_FORCE_COW, &root->state);
1510         smp_wmb();
1511
1512         btrfs_set_root_node(new_root_item, tmp);
1513         /* record when the snapshot was created in key.offset */
1514         key.offset = trans->transid;
1515         ret = btrfs_insert_root(trans, tree_root, &key, new_root_item);
1516         btrfs_tree_unlock(tmp);
1517         free_extent_buffer(tmp);
1518         if (ret) {
1519                 btrfs_abort_transaction(trans, ret);
1520                 goto fail;
1521         }
1522
1523         /*
1524          * insert root back/forward references
1525          */
1526         ret = btrfs_add_root_ref(trans, objectid,
1527                                  parent_root->root_key.objectid,
1528                                  btrfs_ino(BTRFS_I(parent_inode)), index,
1529                                  dentry->d_name.name, dentry->d_name.len);
1530         if (ret) {
1531                 btrfs_abort_transaction(trans, ret);
1532                 goto fail;
1533         }
1534
1535         key.offset = (u64)-1;
1536         pending->snap = btrfs_read_fs_root_no_name(fs_info, &key);
1537         if (IS_ERR(pending->snap)) {
1538                 ret = PTR_ERR(pending->snap);
1539                 btrfs_abort_transaction(trans, ret);
1540                 goto fail;
1541         }
1542
1543         ret = btrfs_reloc_post_snapshot(trans, pending);
1544         if (ret) {
1545                 btrfs_abort_transaction(trans, ret);
1546                 goto fail;
1547         }
1548
1549         ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
1550         if (ret) {
1551                 btrfs_abort_transaction(trans, ret);
1552                 goto fail;
1553         }
1554
1555         /*
1556          * Do special qgroup accounting for the snapshot: we use a qgroup
1557          * hack to make snapshot creation fast, and this function cooperates
1558          * with that hack. Without it, the snapshot would be greatly slowed
1559          * down by a full subtree qgroup rescan.
1560          */
1561         ret = qgroup_account_snapshot(trans, root, parent_root,
1562                                       pending->inherit, objectid);
1563         if (ret < 0)
1564                 goto fail;
1565
1566         ret = btrfs_insert_dir_item(trans, dentry->d_name.name,
1567                                     dentry->d_name.len, BTRFS_I(parent_inode),
1568                                     &key, BTRFS_FT_DIR, index);
1569         /* We checked the name at the beginning, so these errors are impossible. */
1570         BUG_ON(ret == -EEXIST || ret == -EOVERFLOW);
1571         if (ret) {
1572                 btrfs_abort_transaction(trans, ret);
1573                 goto fail;
1574         }
1575
1576         btrfs_i_size_write(BTRFS_I(parent_inode), parent_inode->i_size +
1577                                          dentry->d_name.len * 2);
1578         parent_inode->i_mtime = parent_inode->i_ctime =
1579                 current_time(parent_inode);
1580         ret = btrfs_update_inode_fallback(trans, parent_root, parent_inode);
1581         if (ret) {
1582                 btrfs_abort_transaction(trans, ret);
1583                 goto fail;
1584         }
1585         ret = btrfs_uuid_tree_add(trans, new_uuid.b, BTRFS_UUID_KEY_SUBVOL,
1586                                   objectid);
1587         if (ret) {
1588                 btrfs_abort_transaction(trans, ret);
1589                 goto fail;
1590         }
1591         if (!btrfs_is_empty_uuid(new_root_item->received_uuid)) {
1592                 ret = btrfs_uuid_tree_add(trans, new_root_item->received_uuid,
1593                                           BTRFS_UUID_KEY_RECEIVED_SUBVOL,
1594                                           objectid);
1595                 if (ret && ret != -EEXIST) {
1596                         btrfs_abort_transaction(trans, ret);
1597                         goto fail;
1598                 }
1599         }
1600
1601         ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
1602         if (ret) {
1603                 btrfs_abort_transaction(trans, ret);
1604                 goto fail;
1605         }
1606
1607 fail:
1608         pending->error = ret;
1609 dir_item_existed:
1610         trans->block_rsv = rsv;
1611         trans->bytes_reserved = 0;
1612 clear_skip_qgroup:
1613         btrfs_clear_skip_qgroup(trans);
1614 no_free_objectid:
1615         kfree(new_root_item);
1616         pending->root_item = NULL;
1617         btrfs_free_path(path);
1618         pending->path = NULL;
1619
1620         return ret;
1621 }
1622
1623 /*
1624  * create all the snapshots we've scheduled for creation
1625  */
1626 static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans)
1627 {
1628         struct btrfs_pending_snapshot *pending, *next;
1629         struct list_head *head = &trans->transaction->pending_snapshots;
1630         int ret = 0;
1631
1632         list_for_each_entry_safe(pending, next, head, list) {
1633                 list_del(&pending->list);
1634                 ret = create_pending_snapshot(trans, pending);
1635                 if (ret)
1636                         break;
1637         }
1638         return ret;
1639 }
1640
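/*
 * Copy the root pointers (bytenr, generation, level) of the chunk and
 * tree roots into the in-memory super block copy, so that the super
 * blocks written at the end of the commit point at the new roots.
 */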
1641 static void update_super_roots(struct btrfs_fs_info *fs_info)
1642 {
1643         struct btrfs_root_item *root_item;
1644         struct btrfs_super_block *super;
1645
1646         super = fs_info->super_copy;
1647
1648         root_item = &fs_info->chunk_root->root_item;
1649         super->chunk_root = root_item->bytenr;
1650         super->chunk_root_generation = root_item->generation;
1651         super->chunk_root_level = root_item->level;
1652
1653         root_item = &fs_info->tree_root->root_item;
1654         super->root = root_item->bytenr;
1655         super->generation = root_item->generation;
1656         super->root_level = root_item->level;
1657         if (btrfs_test_opt(fs_info, SPACE_CACHE))
1658                 super->cache_generation = root_item->generation;
1659         if (test_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags))
1660                 super->uuid_tree_generation = root_item->generation;
1661 }
1662
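/*
 * Report whether the currently running transaction, if any, has at
 * least started committing.  The answer is only a snapshot: the state
 * can change as soon as trans_lock is released.
 */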
1663 int btrfs_transaction_in_commit(struct btrfs_fs_info *info)
1664 {
1665         struct btrfs_transaction *trans;
1666         int ret = 0;
1667
1668         spin_lock(&info->trans_lock);
1669         trans = info->running_transaction;
1670         if (trans)
1671                 ret = (trans->state >= TRANS_STATE_COMMIT_START);
1672         spin_unlock(&info->trans_lock);
1673         return ret;
1674 }
1675
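/*
 * Like btrfs_transaction_in_commit(), but report whether the running
 * transaction is currently in a blocked state (see
 * is_transaction_blocked()).
 */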
1676 int btrfs_transaction_blocked(struct btrfs_fs_info *info)
1677 {
1678         struct btrfs_transaction *trans;
1679         int ret = 0;
1680
1681         spin_lock(&info->trans_lock);
1682         trans = info->running_transaction;
1683         if (trans)
1684                 ret = is_transaction_blocked(trans);
1685         spin_unlock(&info->trans_lock);
1686         return ret;
1687 }
1688
1689 /*
1690  * wait for the current transaction commit to start and block subsequent
1691  * transaction joins
1692  */
1693 static void wait_current_trans_commit_start(struct btrfs_fs_info *fs_info,
1694                                             struct btrfs_transaction *trans)
1695 {
1696         wait_event(fs_info->transaction_blocked_wait,
1697                    trans->state >= TRANS_STATE_COMMIT_START || trans->aborted);
1698 }
1699
1700 /*
1701  * wait for the current transaction to start and then become unblocked.
1702  * caller holds ref.
1703  */
1704 static void wait_current_trans_commit_start_and_unblock(
1705                                         struct btrfs_fs_info *fs_info,
1706                                         struct btrfs_transaction *trans)
1707 {
1708         wait_event(fs_info->transaction_wait,
1709                    trans->state >= TRANS_STATE_UNBLOCKED || trans->aborted);
1710 }
1711
1712 /*
1713  * commit transactions asynchronously. once btrfs_commit_transaction_async
1714  * returns, any subsequent transaction will not be allowed to join.
1715  */
1716 struct btrfs_async_commit {
1717         struct btrfs_trans_handle *newtrans;
1718         struct work_struct work;
1719 };
1720
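/*
 * Work callback for btrfs_commit_transaction_async(): runs the actual
 * commit from the worker thread and frees the async context.
 */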
1721 static void do_async_commit(struct work_struct *work)
1722 {
1723         struct btrfs_async_commit *ac =
1724                 container_of(work, struct btrfs_async_commit, work);
1725
1726         /*
1727          * We've got freeze protection passed with the transaction.
1728          * Tell lockdep about it.
1729          */
1730         if (ac->newtrans->type & __TRANS_FREEZABLE)
1731                 __sb_writers_acquired(ac->newtrans->fs_info->sb, SB_FREEZE_FS);
1732
1733         current->journal_info = ac->newtrans;
1734
1735         btrfs_commit_transaction(ac->newtrans);
1736         kfree(ac);
1737 }
1738
1739 int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
1740                                    int wait_for_unblock)
1741 {
1742         struct btrfs_fs_info *fs_info = trans->fs_info;
1743         struct btrfs_async_commit *ac;
1744         struct btrfs_transaction *cur_trans;
1745
1746         ac = kmalloc(sizeof(*ac), GFP_NOFS);
1747         if (!ac)
1748                 return -ENOMEM;
1749
1750         INIT_WORK(&ac->work, do_async_commit);
1751         ac->newtrans = btrfs_join_transaction(trans->root);
1752         if (IS_ERR(ac->newtrans)) {
1753                 int err = PTR_ERR(ac->newtrans);
1754                 kfree(ac);
1755                 return err;
1756         }
1757
1758         /* take transaction reference */
1759         cur_trans = trans->transaction;
1760         refcount_inc(&cur_trans->use_count);
1761
1762         btrfs_end_transaction(trans);
1763
1764         /*
1765          * Tell lockdep we've released the freeze rwsem, since the
1766          * async commit thread will be the one to unlock it.
1767          */
1768         if (ac->newtrans->type & __TRANS_FREEZABLE)
1769                 __sb_writers_release(fs_info->sb, SB_FREEZE_FS);
1770
1771         schedule_work(&ac->work);
1772
1773         /* wait for transaction to start and unblock */
1774         if (wait_for_unblock)
1775                 wait_current_trans_commit_start_and_unblock(fs_info, cur_trans);
1776         else
1777                 wait_current_trans_commit_start(fs_info, cur_trans);
1778
1779         if (current->journal_info == trans)
1780                 current->journal_info = NULL;
1781
1782         btrfs_put_transaction(cur_trans);
1783         return 0;
1784 }
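/*
 * Illustrative sketch (not part of the original file): the snapshot
 * creation ioctl path is the typical caller of the async commit, and
 * uses it roughly like this, falling back to a synchronous commit if
 * the async commit could not be set up:
 *
 *	ret = btrfs_commit_transaction_async(trans, 1);
 *	if (ret)
 *		ret = btrfs_commit_transaction(trans);
 */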
1785
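/*
 * Clean up a transaction whose commit failed: abort it, wait until we
 * are the only writer left, remove it from the transaction list and
 * release its resources.
 */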
1787 static void cleanup_transaction(struct btrfs_trans_handle *trans, int err)
1788 {
1789         struct btrfs_fs_info *fs_info = trans->fs_info;
1790         struct btrfs_transaction *cur_trans = trans->transaction;
1791
1792         WARN_ON(refcount_read(&trans->use_count) > 1);
1793
1794         btrfs_abort_transaction(trans, err);
1795
1796         spin_lock(&fs_info->trans_lock);
1797
1798         /*
1799          * If the transaction is removed from the list, it means this
1800          * transaction has been committed successfully, so it is impossible
1801          * to call the cleanup function.
1802          */
1803         BUG_ON(list_empty(&cur_trans->list));
1804
1805         list_del_init(&cur_trans->list);
1806         if (cur_trans == fs_info->running_transaction) {
1807                 cur_trans->state = TRANS_STATE_COMMIT_DOING;
1808                 spin_unlock(&fs_info->trans_lock);
1809                 wait_event(cur_trans->writer_wait,
1810                            atomic_read(&cur_trans->num_writers) == 1);
1811
1812                 spin_lock(&fs_info->trans_lock);
1813         }
1814         spin_unlock(&fs_info->trans_lock);
1815
1816         btrfs_cleanup_one_transaction(trans->transaction, fs_info);
1817
1818         spin_lock(&fs_info->trans_lock);
1819         if (cur_trans == fs_info->running_transaction)
1820                 fs_info->running_transaction = NULL;
1821         spin_unlock(&fs_info->trans_lock);
1822
1823         if (trans->type & __TRANS_FREEZABLE)
1824                 sb_end_intwrite(fs_info->sb);
1825         btrfs_put_transaction(cur_trans);
1826         btrfs_put_transaction(cur_trans);
1827
1828         trace_btrfs_transaction_commit(trans->root);
1829
1830         if (current->journal_info == trans)
1831                 current->journal_info = NULL;
1832         btrfs_scrub_cancel(fs_info);
1833
1834         kmem_cache_free(btrfs_trans_handle_cachep, trans);
1835 }
1836
1837 /*
1838  * Release reserved delayed ref space of all pending block groups of the
1839  * transaction and remove them from the list
1840  */
1841 static void btrfs_cleanup_pending_block_groups(struct btrfs_trans_handle *trans)
1842 {
1843         struct btrfs_fs_info *fs_info = trans->fs_info;
1844         struct btrfs_block_group_cache *block_group, *tmp;
1845
1846         list_for_each_entry_safe(block_group, tmp, &trans->new_bgs, bg_list) {
1847                 btrfs_delayed_refs_rsv_release(fs_info, 1);
1848                 list_del_init(&block_group->bg_list);
1849         }
1850 }
1851
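/*
 * Start delalloc writeback before the commit: flush everything when
 * FLUSHONCOMMIT is enabled, otherwise only the roots that are about to
 * be snapshotted.
 */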
1852 static inline int btrfs_start_delalloc_flush(struct btrfs_trans_handle *trans)
1853 {
1854         struct btrfs_fs_info *fs_info = trans->fs_info;
1855
1856         /*
1857          * We use writeback_inodes_sb here because if we used
1858          * btrfs_start_delalloc_roots we would deadlock with fs freeze.
1859          * We are currently holding the fs freeze lock; if we did an async
1860          * flush we would do btrfs_join_transaction() and deadlock because
1861          * we would need to wait for that lock.  By flushing directly we
1862          * benefit from already being in a transaction, so our
1863          * join_transaction doesn't have to re-take the fs freeze lock.
1864          */
1865         if (btrfs_test_opt(fs_info, FLUSHONCOMMIT)) {
1866                 writeback_inodes_sb(fs_info->sb, WB_REASON_SYNC);
1867         } else {
1868                 struct btrfs_pending_snapshot *pending;
1869                 struct list_head *head = &trans->transaction->pending_snapshots;
1870
1871                 /*
1872                  * Flush delalloc for any root that is going to be snapshotted.
1873                  * This is done to avoid corrupted versions, in the snapshots,
1874                  * of files that had both buffered and direct IO writes (even
1875                  * if they were done sequentially) due to an unordered update of
1876                  * the inode's size on disk.
1877                  */
1878                 list_for_each_entry(pending, head, list) {
1879                         int ret;
1880
1881                         ret = btrfs_start_delalloc_snapshot(pending->root);
1882                         if (ret)
1883                                 return ret;
1884                 }
1885         }
1886         return 0;
1887 }
1888
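/*
 * Counterpart of btrfs_start_delalloc_flush(): wait for the writeback
 * started there (all ordered extents, or only those of the roots being
 * snapshotted) to complete.
 */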
1889 static inline void btrfs_wait_delalloc_flush(struct btrfs_trans_handle *trans)
1890 {
1891         struct btrfs_fs_info *fs_info = trans->fs_info;
1892
1893         if (btrfs_test_opt(fs_info, FLUSHONCOMMIT)) {
1894                 btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
1895         } else {
1896                 struct btrfs_pending_snapshot *pending;
1897                 struct list_head *head = &trans->transaction->pending_snapshots;
1898
1899                 /*
1900                  * Wait for any delalloc that we started previously for the roots
1901                  * that are going to be snapshotted. This is to avoid a corrupted
1902                  * version of files in the snapshots that had both buffered and
1903                  * direct IO writes (even if they were done sequentially).
1904                  */
1905                 list_for_each_entry(pending, head, list)
1906                         btrfs_wait_ordered_extents(pending->root,
1907                                                    U64_MAX, 0, U64_MAX);
1908         }
1909 }
1910
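/*
 * Commit the current transaction: flush delalloc and delayed items,
 * wait for all external writers to finish, create the pending
 * snapshots, commit all roots, write out the dirty btree blocks and
 * finally the super blocks.  On failure the transaction is aborted and
 * cleaned up and a negative errno is returned.
 */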
1911 int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
1912 {
1913         struct btrfs_fs_info *fs_info = trans->fs_info;
1914         struct btrfs_transaction *cur_trans = trans->transaction;
1915         struct btrfs_transaction *prev_trans = NULL;
1916         int ret;
1917
1918         /* Stop the commit early if ->aborted is set */
1919         if (unlikely(READ_ONCE(cur_trans->aborted))) {
1920                 ret = cur_trans->aborted;
1921                 btrfs_end_transaction(trans);
1922                 return ret;
1923         }
1924
1925         btrfs_trans_release_metadata(trans);
1926         trans->block_rsv = NULL;
1927
1928         /* make a pass through all the delayed refs we have so far;
1929          * any running procs may add more while we are here
1930          */
1931         ret = btrfs_run_delayed_refs(trans, 0);
1932         if (ret) {
1933                 btrfs_end_transaction(trans);
1934                 return ret;
1935         }
1936
1937         cur_trans = trans->transaction;
1938
1939         /*
1940          * set the flushing flag so procs in this transaction have to
1941          * start sending their work down.
1942          */
1943         cur_trans->delayed_refs.flushing = 1;
1944         smp_wmb();
1945
1946         btrfs_create_pending_block_groups(trans);
1947
1948         ret = btrfs_run_delayed_refs(trans, 0);
1949         if (ret) {
1950                 btrfs_end_transaction(trans);
1951                 return ret;
1952         }
1953
1954         if (!test_bit(BTRFS_TRANS_DIRTY_BG_RUN, &cur_trans->flags)) {
1955                 int run_it = 0;
1956
1957                 /* this mutex is also taken before trying to set
1958                  * block groups readonly.  We need to make sure
1959                  * that nobody has set a block group readonly
1960                  * after extents from that block group have been
1961                  * allocated for cache files.  btrfs_set_block_group_ro
1962                  * will wait for the transaction to commit if it
1963                  * finds BTRFS_TRANS_DIRTY_BG_RUN set.
1964                  *
1965                  * The BTRFS_TRANS_DIRTY_BG_RUN flag is also used to make sure
1966                  * only one process starts all the block group IO.  It wouldn't
1967                  * hurt to have more than one go through, but there's no
1968                  * real advantage to it either.
1969                  */
1970                 mutex_lock(&fs_info->ro_block_group_mutex);
1971                 if (!test_and_set_bit(BTRFS_TRANS_DIRTY_BG_RUN,
1972                                       &cur_trans->flags))
1973                         run_it = 1;
1974                 mutex_unlock(&fs_info->ro_block_group_mutex);
1975
1976                 if (run_it) {
1977                         ret = btrfs_start_dirty_block_groups(trans);
1978                         if (ret) {
1979                                 btrfs_end_transaction(trans);
1980                                 return ret;
1981                         }
1982                 }
1983         }
1984
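        /*
         * If another task already started committing this transaction,
         * attach to its commit instead: take a reference, end our own
         * handle and wait for that commit to finish.
         */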
1985         spin_lock(&fs_info->trans_lock);
1986         if (cur_trans->state >= TRANS_STATE_COMMIT_START) {
1987                 spin_unlock(&fs_info->trans_lock);
1988                 refcount_inc(&cur_trans->use_count);
1989                 ret = btrfs_end_transaction(trans);
1990
1991                 wait_for_commit(cur_trans);
1992
1993                 if (unlikely(cur_trans->aborted))
1994                         ret = cur_trans->aborted;
1995
1996                 btrfs_put_transaction(cur_trans);
1997
1998                 return ret;
1999         }
2000
2001         cur_trans->state = TRANS_STATE_COMMIT_START;
2002         wake_up(&fs_info->transaction_blocked_wait);
2003
2004         if (cur_trans->list.prev != &fs_info->trans_list) {
2005                 prev_trans = list_entry(cur_trans->list.prev,
2006                                         struct btrfs_transaction, list);
2007                 if (prev_trans->state != TRANS_STATE_COMPLETED) {
2008                         refcount_inc(&prev_trans->use_count);
2009                         spin_unlock(&fs_info->trans_lock);
2010
2011                         wait_for_commit(prev_trans);
2012                         ret = prev_trans->aborted;
2013
2014                         btrfs_put_transaction(prev_trans);
2015                         if (ret)
2016                                 goto cleanup_transaction;
2017                 } else {
2018                         spin_unlock(&fs_info->trans_lock);
2019                 }
2020         } else {
2021                 spin_unlock(&fs_info->trans_lock);
2022         }
2023
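        /*
         * We are committing now, so stop counting ourselves as an
         * external writer; the wait below only waits for the others.
         */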
2024         extwriter_counter_dec(cur_trans, trans->type);
2025
2026         ret = btrfs_start_delalloc_flush(trans);
2027         if (ret)
2028                 goto cleanup_transaction;
2029
2030         ret = btrfs_run_delayed_items(trans);
2031         if (ret)
2032                 goto cleanup_transaction;
2033
2034         wait_event(cur_trans->writer_wait,
2035                    extwriter_counter_read(cur_trans) == 0);
2036
2037         /* Some pending items might have been added after the previous flush. */
2038         ret = btrfs_run_delayed_items(trans);
2039         if (ret)
2040                 goto cleanup_transaction;
2041
2042         btrfs_wait_delalloc_flush(trans);
2043
2044         btrfs_scrub_pause(fs_info);
2045         /*
2046          * Ok now we need to make sure to block out any other joins while we
2047          * commit the transaction.  We could have started a join before setting
2048          * COMMIT_DOING, so make sure to wait for num_writers to drop to 1 again.
2049          */
2050         spin_lock(&fs_info->trans_lock);
2051         cur_trans->state = TRANS_STATE_COMMIT_DOING;
2052         spin_unlock(&fs_info->trans_lock);
2053         wait_event(cur_trans->writer_wait,
2054                    atomic_read(&cur_trans->num_writers) == 1);
2055
2056         /* ->aborted might be set after the previous check, so check it */
2057         if (unlikely(READ_ONCE(cur_trans->aborted))) {
2058                 ret = cur_trans->aborted;
2059                 goto scrub_continue;
2060         }
2061         /*
2062          * the reloc mutex makes sure that we stop
2063          * the balancing code from coming in and moving
2064          * extents around in the middle of the commit
2065          */
2066         mutex_lock(&fs_info->reloc_mutex);
2067
2068         /*
2069          * We needn't worry about the delayed items because we will
2070          * deal with them in create_pending_snapshot(), which is the
2071          * core function of the snapshot creation.
2072          */
2073         ret = create_pending_snapshots(trans);
2074         if (ret) {
2075                 mutex_unlock(&fs_info->reloc_mutex);
2076                 goto scrub_continue;
2077         }
2078
2079         /*
2080          * We insert the dir indexes of the snapshots and update the inode
2081          * of the snapshots' parents after the snapshot creation, so there
2082          * are some delayed items which are not dealt with. Now deal with
2083          * them.
2084          *
2085          * We needn't worry that this operation will corrupt the snapshots,
2086          * because all the trees that are snapshotted will be forced to COW
2087          * the nodes and leaves.
2088          */
2089         ret = btrfs_run_delayed_items(trans);
2090         if (ret) {
2091                 mutex_unlock(&fs_info->reloc_mutex);
2092                 goto scrub_continue;
2093         }
2094
2095         ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
2096         if (ret) {
2097                 mutex_unlock(&fs_info->reloc_mutex);
2098                 goto scrub_continue;
2099         }
2100
2101         /*
2102          * make sure none of the code above managed to slip in a
2103          * delayed item
2104          */
2105         btrfs_assert_delayed_root_empty(fs_info);
2106
2107         WARN_ON(cur_trans != trans->transaction);
2108
2109         /* commit_fs_roots() and commit_cowonly_roots() below are responsible
2110          * for getting the various roots consistent with each other.  Every
2111          * pointer in the tree of tree roots has to point to the most up to date
2112          * root for every subvolume and other tree.  So, we have to keep
2113          * the tree logging code from jumping in and changing any
2114          * of the trees.
2115          *
2116          * At this point in the commit, there can't be any tree-log
2117          * writers, but a little lower down we drop the trans mutex
2118          * and let new people in.  By holding the tree_log_mutex
2119          * from now until after the super is written, we avoid races
2120          * with the tree-log code.
2121          */
2122         mutex_lock(&fs_info->tree_log_mutex);
2123
2124         ret = commit_fs_roots(trans);
2125         if (ret) {
2126                 mutex_unlock(&fs_info->tree_log_mutex);
2127                 mutex_unlock(&fs_info->reloc_mutex);
2128                 goto scrub_continue;
2129         }
2130
2131         /*
2132          * Since no one else can modify this transaction any more, we can
2133          * apply the pending changes before the next transaction starts.
2134          */
2135         btrfs_apply_pending_changes(fs_info);
2136
2137         /* commit_fs_roots() gets rid of all the individual tree log roots,
2138          * so it is now safe to free the log root tree
2139          */
2140         btrfs_free_log_root_tree(trans, fs_info);
2141
2142         /*
2143          * commit_fs_roots() can call btrfs_save_ino_cache(), which generates
2144          * new delayed refs. Must handle them or qgroup can be wrong.
2145          */
2146         ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
2147         if (ret) {
2148                 mutex_unlock(&fs_info->tree_log_mutex);
2149                 mutex_unlock(&fs_info->reloc_mutex);
2150                 goto scrub_continue;
2151         }
2152
2153         /*
2154          * Since the fs roots are all committed, we can get an accurate
2155          * set of new_roots, so do the quota accounting now.
2156          */
2157         ret = btrfs_qgroup_account_extents(trans);
2158         if (ret < 0) {
2159                 mutex_unlock(&fs_info->tree_log_mutex);
2160                 mutex_unlock(&fs_info->reloc_mutex);
2161                 goto scrub_continue;
2162         }
2163
2164         ret = commit_cowonly_roots(trans);
2165         if (ret) {
2166                 mutex_unlock(&fs_info->tree_log_mutex);
2167                 mutex_unlock(&fs_info->reloc_mutex);
2168                 goto scrub_continue;
2169         }
2170
2171         /*
2172          * The tasks which save the space cache and inode cache may also
2173          * update ->aborted, so check it again.
2174          */
2175         if (unlikely(READ_ONCE(cur_trans->aborted))) {
2176                 ret = cur_trans->aborted;
2177                 mutex_unlock(&fs_info->tree_log_mutex);
2178                 mutex_unlock(&fs_info->reloc_mutex);
2179                 goto scrub_continue;
2180         }
2181
2182         btrfs_prepare_extent_commit(fs_info);
2183
2184         cur_trans = fs_info->running_transaction;
2185
2186         btrfs_set_root_node(&fs_info->tree_root->root_item,
2187                             fs_info->tree_root->node);
2188         list_add_tail(&fs_info->tree_root->dirty_list,
2189                       &cur_trans->switch_commits);
2190
2191         btrfs_set_root_node(&fs_info->chunk_root->root_item,
2192                             fs_info->chunk_root->node);
2193         list_add_tail(&fs_info->chunk_root->dirty_list,
2194                       &cur_trans->switch_commits);
2195
2196         switch_commit_roots(cur_trans);
2197
2198         ASSERT(list_empty(&cur_trans->dirty_bgs));
2199         ASSERT(list_empty(&cur_trans->io_bgs));
2200         update_super_roots(fs_info);
2201
2202         btrfs_set_super_log_root(fs_info->super_copy, 0);
2203         btrfs_set_super_log_root_level(fs_info->super_copy, 0);
2204         memcpy(fs_info->super_for_commit, fs_info->super_copy,
2205                sizeof(*fs_info->super_copy));
2206
2207         btrfs_commit_device_sizes(cur_trans);
2208
2209         clear_bit(BTRFS_FS_LOG1_ERR, &fs_info->flags);
2210         clear_bit(BTRFS_FS_LOG2_ERR, &fs_info->flags);
2211
2212         btrfs_trans_release_chunk_metadata(trans);
2213
2214         spin_lock(&fs_info->trans_lock);
2215         cur_trans->state = TRANS_STATE_UNBLOCKED;
2216         fs_info->running_transaction = NULL;
2217         spin_unlock(&fs_info->trans_lock);
2218         mutex_unlock(&fs_info->reloc_mutex);
2219
2220         wake_up(&fs_info->transaction_wait);
2221
2222         ret = btrfs_write_and_wait_transaction(trans);
2223         if (ret) {
2224                 btrfs_handle_fs_error(fs_info, ret,
2225                                       "Error while writing out transaction");
2226                 mutex_unlock(&fs_info->tree_log_mutex);
2227                 goto scrub_continue;
2228         }
2229
2230         ret = write_all_supers(fs_info, 0);
2231         /*
2232          * the super is written, we can safely allow the tree-loggers
2233          * to go about their business
2234          */
2235         mutex_unlock(&fs_info->tree_log_mutex);
2236         if (ret)
2237                 goto scrub_continue;
2238
2239         btrfs_finish_extent_commit(trans);
2240
2241         if (test_bit(BTRFS_TRANS_HAVE_FREE_BGS, &cur_trans->flags))
2242                 btrfs_clear_space_info_full(fs_info);
2243
2244         fs_info->last_trans_committed = cur_trans->transid;
2245         /*
2246          * We needn't acquire the lock here because there is no other task
2247          * which can change it.
2248          */
2249         cur_trans->state = TRANS_STATE_COMPLETED;
2250         wake_up(&cur_trans->commit_wait);
2251         clear_bit(BTRFS_FS_NEED_ASYNC_COMMIT, &fs_info->flags);
2252
2253         spin_lock(&fs_info->trans_lock);
2254         list_del_init(&cur_trans->list);
2255         spin_unlock(&fs_info->trans_lock);
2256
2257         btrfs_put_transaction(cur_trans);
2258         btrfs_put_transaction(cur_trans);
2259
2260         if (trans->type & __TRANS_FREEZABLE)
2261                 sb_end_intwrite(fs_info->sb);
2262
2263         trace_btrfs_transaction_commit(trans->root);
2264
2265         btrfs_scrub_continue(fs_info);
2266
2267         if (current->journal_info == trans)
2268                 current->journal_info = NULL;
2269
2270         kmem_cache_free(btrfs_trans_handle_cachep, trans);
2271
2272         return ret;
2273
2274 scrub_continue:
2275         btrfs_scrub_continue(fs_info);
2276 cleanup_transaction:
2277         btrfs_trans_release_metadata(trans);
2278         btrfs_cleanup_pending_block_groups(trans);
2279         btrfs_trans_release_chunk_metadata(trans);
2280         trans->block_rsv = NULL;
2281         btrfs_warn(fs_info, "Skipping commit of aborted transaction.");
2282         if (current->journal_info == trans)
2283                 current->journal_info = NULL;
2284         cleanup_transaction(trans, ret);
2285
2286         return ret;
2287 }
2288
2289 /*
2290  * return < 0 if error
2291  * 0 if there are no more dead_roots at the time of the call
2292  * 1 if there are more to be processed, call me again
2293  *
2294  * A return value of 1 means there are certainly more snapshots to delete, but
2295  * if a new one comes in during processing, this may still return 0. We don't
2296  * mind, because btrfs_commit_super will poke the cleaner thread and it will
2297  * process it a few seconds later.
2298  */
2299 int btrfs_clean_one_deleted_snapshot(struct btrfs_root *root)
2300 {
2301         int ret;
2302         struct btrfs_fs_info *fs_info = root->fs_info;
2303
2304         spin_lock(&fs_info->trans_lock);
2305         if (list_empty(&fs_info->dead_roots)) {
2306                 spin_unlock(&fs_info->trans_lock);
2307                 return 0;
2308         }
2309         root = list_first_entry(&fs_info->dead_roots,
2310                         struct btrfs_root, root_list);
2311         list_del_init(&root->root_list);
2312         spin_unlock(&fs_info->trans_lock);
2313
2314         btrfs_debug(fs_info, "cleaner removing %llu", root->root_key.objectid);
2315
2316         btrfs_kill_all_delayed_nodes(root);
2317
2318         if (btrfs_header_backref_rev(root->node) <
2319                         BTRFS_MIXED_BACKREF_REV)
2320                 ret = btrfs_drop_snapshot(root, NULL, 0, 0);
2321         else
2322                 ret = btrfs_drop_snapshot(root, NULL, 1, 0);
2323
2324         return (ret < 0) ? 0 : 1;
2325 }
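/*
 * Illustrative sketch (not part of the original file): the cleaner
 * kthread drives this function in its main loop, simplified:
 *
 *	do {
 *		ret = btrfs_clean_one_deleted_snapshot(root);
 *	} while (ret > 0);
 */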
2326
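/*
 * Atomically grab fs_info->pending_changes and apply each pending bit:
 * set or clear the INODE_MAP_CACHE mount option and acknowledge a
 * forced pending commit.  Unknown bits are reported and dropped.
 */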
2327 void btrfs_apply_pending_changes(struct btrfs_fs_info *fs_info)
2328 {
2329         unsigned long prev;
2330         unsigned long bit;
2331
2332         prev = xchg(&fs_info->pending_changes, 0);
2333         if (!prev)
2334                 return;
2335
2336         bit = 1 << BTRFS_PENDING_SET_INODE_MAP_CACHE;
2337         if (prev & bit)
2338                 btrfs_set_opt(fs_info->mount_opt, INODE_MAP_CACHE);
2339         prev &= ~bit;
2340
2341         bit = 1 << BTRFS_PENDING_CLEAR_INODE_MAP_CACHE;
2342         if (prev & bit)
2343                 btrfs_clear_opt(fs_info->mount_opt, INODE_MAP_CACHE);
2344         prev &= ~bit;
2345
2346         bit = 1 << BTRFS_PENDING_COMMIT;
2347         if (prev & bit)
2348                 btrfs_debug(fs_info, "pending commit done");
2349         prev &= ~bit;
2350
2351         if (prev)
2352                 btrfs_warn(fs_info,
2353                         "unknown pending changes left 0x%lx, ignoring", prev);
2354 }