// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/list_sort.h>
#include <linux/iversion.h>
#include "misc.h"
#include "ctree.h"
#include "tree-log.h"
#include "disk-io.h"
#include "locking.h"
#include "print-tree.h"
#include "backref.h"
#include "compression.h"
#include "qgroup.h"
#include "inode-map.h"
/* magic values for the inode_only field in btrfs_log_inode:
 *
 * LOG_INODE_ALL means to log everything
 * LOG_INODE_EXISTS means to log just enough to recreate the inode
 * during log replay
 */
#define LOG_INODE_ALL 0
#define LOG_INODE_EXISTS 1
#define LOG_OTHER_INODE 2
#define LOG_OTHER_INODE_ALL 3
/*
 * directory trouble cases
 *
 * 1) on rename or unlink, if the inode being unlinked isn't in the fsync
 * log, we must force a full commit before doing an fsync of the directory
 * where the unlink was done.
 * ---> record transid of last unlink/rename per directory
 *
 * mkdir foo/some_dir
 * normal commit
 * rename foo/some_dir foo2/some_dir
 * mkdir foo/some_dir
 * fsync foo/some_dir/some_file
 *
 * The fsync above will unlink the original some_dir without recording
 * it in its new location (foo2).  After a crash, some_dir will be gone
 * unless the fsync of some_file forces a full commit
 *
 * 2) we must log any new names for any file or dir that is in the fsync
 * log. ---> check inode while renaming/linking.
 *
 * 2a) we must log any new names for any file or dir during rename
 * when the directory they are being removed from was logged.
 * ---> check inode and old parent dir during rename
 *
 * 2a is actually the more important variant.  Without the extra logging
 * a crash might unlink the old name without recreating the new one
 *
 * 3) after a crash, we must go through any directories with a link count
 * of zero and redo the rm -rf
 *
 * mkdir f1/foo
 * normal commit
 * rm -rf f1/foo
 * fsync(f1)
 *
 * The directory f1 was fully removed from the FS, but fsync was never
 * called on f1, only its parent dir.  After a crash the rm -rf must
 * be replayed.  This must be able to recurse down the entire
 * directory tree.  The inode link count fixup code takes care of the
 * ugly details.
 */
/*
 * stages for the tree walking.  The first
 * stage (0) is to only pin down the blocks we find,
 * the second stage (1) is to make sure that all the inodes
 * we find in the log are created in the subvolume.
 *
 * The last stage is to deal with directories and links and extents
 * and all the other fun semantics
 */
enum {
	LOG_WALK_PIN_ONLY,
	LOG_WALK_REPLAY_INODES,
	LOG_WALK_REPLAY_DIR_INDEX,
	LOG_WALK_REPLAY_ALL,
};
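
/*
 * Illustrative sketch, not from the original file: log replay walks the
 * same log tree once per stage above, switching the walk_control's
 * process_func from the pinning callback to the replay callback.
 * Roughly:
 *
 *	struct walk_control wc = {
 *		.process_func = process_one_buffer,
 *		.stage = LOG_WALK_PIN_ONLY,
 *	};
 *
 *	while (wc.stage < LOG_WALK_REPLAY_ALL) {
 *		wc.stage++;
 *		walk_log_tree(trans, log, &wc);   (hedged, simplified call)
 *	}
 */
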
static int btrfs_log_inode(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root, struct btrfs_inode *inode,
			   int inode_only,
			   const loff_t start,
			   const loff_t end,
			   struct btrfs_log_ctx *ctx);
static int link_to_fixup_dir(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_path *path, u64 objectid);
static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct btrfs_root *log,
				       struct btrfs_path *path,
				       u64 dirid, int del_all);

/*
 * tree logging is a special write ahead log used to make sure that
 * fsyncs and O_SYNCs can happen without doing full tree commits.
 *
 * Full tree commits are expensive because they require commonly
 * modified blocks to be recowed, creating many dirty pages in the
 * extent tree and a 4x-6x higher write load than ext3.
 *
 * Instead of doing a tree commit on every fsync, we use the
 * key ranges and transaction ids to find items for a given file or directory
 * that have changed in this transaction.  Those items are copied into
 * a special tree (one per subvolume root), that tree is written to disk
 * and then the fsync is considered complete.
 *
 * After a crash, items are copied out of the log-tree back into the
 * subvolume tree.  Any file data extents found are recorded in the extent
 * allocation tree, and the log-tree freed.
 *
 * The log tree is read three times: once to pin down all the extents it is
 * using in RAM, once to create all the inodes logged in the tree
 * and once to do all the other items.
 */
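
/*
 * Hedged sketch of the fast path described above (an illustration, not
 * from the original file): an fsync of a single file roughly becomes
 *
 *	start_log_trans(trans, root, &ctx);	join or create the log tree
 *	btrfs_log_inode(trans, root, inode,	copy the changed items in
 *			LOG_INODE_ALL, start, end, &ctx);
 *	btrfs_end_log_trans(root);		drop our writer count
 *	btrfs_sync_log(trans, root, &ctx);	write the log tree to disk
 *
 * instead of a full transaction commit.
 */
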
/*
 * start a sub transaction and setup the log tree
 * this increments the log tree writer count to make the people
 * syncing the tree wait for us to finish
 */
static int start_log_trans(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct btrfs_log_ctx *ctx)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret = 0;

	mutex_lock(&root->log_mutex);

	if (root->log_root) {
		if (btrfs_need_log_full_commit(trans)) {
			ret = -EAGAIN;
			goto out;
		}

		if (!root->log_start_pid) {
			clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
			root->log_start_pid = current->pid;
		} else if (root->log_start_pid != current->pid) {
			set_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
		}
	} else {
		mutex_lock(&fs_info->tree_log_mutex);
		if (!fs_info->log_root_tree)
			ret = btrfs_init_log_root_tree(trans, fs_info);
		mutex_unlock(&fs_info->tree_log_mutex);
		if (ret)
			goto out;

		ret = btrfs_add_log_tree(trans, root);
		if (ret)
			goto out;

		clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
		root->log_start_pid = current->pid;
	}

	atomic_inc(&root->log_batch);
	atomic_inc(&root->log_writers);
	if (ctx) {
		int index = root->log_transid % 2;
		list_add_tail(&ctx->list, &root->log_ctxs[index]);
		ctx->log_transid = root->log_transid;
	}

out:
	mutex_unlock(&root->log_mutex);
	return ret;
}

/*
 * returns 0 if there was a log transaction running and we were able
 * to join, or returns -ENOENT if there was no transaction
 * in progress
 */
static int join_running_log_trans(struct btrfs_root *root)
{
	int ret = -ENOENT;

	mutex_lock(&root->log_mutex);
	if (root->log_root) {
		ret = 0;
		atomic_inc(&root->log_writers);
	}
	mutex_unlock(&root->log_mutex);
	return ret;
}

/*
 * This either makes the current running log transaction wait
 * until you call btrfs_end_log_trans() or it makes any future
 * log transactions wait until you call btrfs_end_log_trans()
 */
void btrfs_pin_log_trans(struct btrfs_root *root)
{
	mutex_lock(&root->log_mutex);
	atomic_inc(&root->log_writers);
	mutex_unlock(&root->log_mutex);
}

/*
 * indicate we're done making changes to the log tree
 * and wake up anyone waiting to do a sync
 */
void btrfs_end_log_trans(struct btrfs_root *root)
{
	if (atomic_dec_and_test(&root->log_writers)) {
		/* atomic_dec_and_test implies a barrier */
		cond_wake_up_nomb(&root->log_writer_wait);
	}
}
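
/*
 * Illustrative pairing (not from the original file): callers that must
 * hold the log transaction open while they update log state, such as the
 * rename and unlink record helpers later in this file, bracket the work
 * with:
 *
 *	btrfs_pin_log_trans(root);
 *	... update per-directory / per-inode log state ...
 *	btrfs_end_log_trans(root);
 */
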
static int btrfs_write_tree_block(struct extent_buffer *buf)
{
	return filemap_fdatawrite_range(buf->pages[0]->mapping, buf->start,
					buf->start + buf->len - 1);
}

static void btrfs_wait_tree_block_writeback(struct extent_buffer *buf)
{
	filemap_fdatawait_range(buf->pages[0]->mapping,
				buf->start, buf->start + buf->len - 1);
}

/*
 * the walk control struct is used to pass state down the chain when
 * processing the log tree.  The stage field tells us which part
 * of the log tree processing we are currently doing.  The others
 * are state fields used for that specific part
 */
struct walk_control {
	/* should we free the extent on disk when done?  This is used
	 * at transaction commit time while freeing a log tree
	 */
	int free;

	/* should we write out the extent buffer?  This is used
	 * while flushing the log tree to disk during a sync
	 */
	int write;

	/* should we wait for the extent buffer io to finish?  Also used
	 * while flushing the log tree to disk for a sync
	 */
	int wait;

	/* pin only walk, we record which extents on disk belong to the
	 * log trees
	 */
	int pin;

	/* what stage of the replay code we're currently in */
	int stage;

	/*
	 * Ignore any items from the inode currently being processed. Needs
	 * to be set every time we find a BTRFS_INODE_ITEM_KEY and we are in
	 * the LOG_WALK_REPLAY_INODES stage.
	 */
	bool ignore_cur_inode;

	/* the root we are currently replaying */
	struct btrfs_root *replay_dest;

	/* the trans handle for the current replay */
	struct btrfs_trans_handle *trans;

	/* the function that gets used to process blocks we find in the
	 * tree.  Note the extent_buffer might not be up to date when it is
	 * passed in, and it must be checked or read if you need the data
	 * inside it
	 */
	int (*process_func)(struct btrfs_root *log, struct extent_buffer *eb,
			    struct walk_control *wc, u64 gen, int level);
};
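
/*
 * Example initialization (an illustration, not from the original file):
 * freeing a log tree at transaction commit time uses a control roughly
 * like
 *
 *	struct walk_control wc = {
 *		.free = 1,
 *		.process_func = process_one_buffer,
 *	};
 *
 * while a sync would set .write (and then .wait on a second pass)
 * instead of .free, and replay sets .stage and .replay_dest with
 * replay_one_buffer() as the process_func.
 */
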
/*
 * process_func used to pin down extents, write them or wait on them
 */
static int process_one_buffer(struct btrfs_root *log,
			      struct extent_buffer *eb,
			      struct walk_control *wc, u64 gen, int level)
{
	struct btrfs_fs_info *fs_info = log->fs_info;
	int ret = 0;

	/*
	 * If this fs is mixed then we need to be able to process the leaves to
	 * pin down any logged extents, so we have to read the block.
	 */
	if (btrfs_fs_incompat(fs_info, MIXED_GROUPS)) {
		ret = btrfs_read_buffer(eb, gen, level, NULL);
		if (ret)
			return ret;
	}

	if (wc->pin)
		ret = btrfs_pin_extent_for_log_replay(fs_info, eb->start,
						      eb->len);

	if (!ret && btrfs_buffer_uptodate(eb, gen, 0)) {
		if (wc->pin && btrfs_header_level(eb) == 0)
			ret = btrfs_exclude_logged_extents(eb);
		if (wc->write)
			btrfs_write_tree_block(eb);
		if (wc->wait)
			btrfs_wait_tree_block_writeback(eb);
	}
	return ret;
}

/*
 * Item overwrite used by replay and tree logging.  eb, slot and key all refer
 * to the src data we are copying out.
 *
 * root is the tree we are copying into, and path is a scratch
 * path for use in this function (it should be released on entry and
 * will be released on exit).
 *
 * If the key is already in the destination tree the existing item is
 * overwritten.  If the existing item isn't big enough, it is extended.
 * If it is too large, it is truncated.
 *
 * If the key isn't in the destination yet, a new item is inserted.
 */
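
/*
 * Hypothetical numbers for the three outcomes above (not from the
 * original file), as handled by the function body below:
 *
 *	found_size = 160, item_size = 96  ->  btrfs_truncate_item(path, 96, 1)
 *	found_size = 64,  item_size = 96  ->  btrfs_extend_item(path, 32)
 *	identical contents                ->  return 0 without cowing anything
 */
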
static noinline int overwrite_item(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_path *path,
				   struct extent_buffer *eb, int slot,
				   struct btrfs_key *key)
{
	int ret;
	u32 item_size;
	u64 saved_i_size = 0;
	int save_old_i_size = 0;
	unsigned long src_ptr;
	unsigned long dst_ptr;
	int overwrite_root = 0;
	bool inode_item = key->type == BTRFS_INODE_ITEM_KEY;

	if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
		overwrite_root = 1;

	item_size = btrfs_item_size_nr(eb, slot);
	src_ptr = btrfs_item_ptr_offset(eb, slot);

	/* look for the key in the destination tree */
	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
	if (ret < 0)
		return ret;

	if (ret == 0) {
		char *src_copy;
		char *dst_copy;
		u32 dst_size = btrfs_item_size_nr(path->nodes[0],
						  path->slots[0]);
		if (dst_size != item_size)
			goto insert;

		if (item_size == 0) {
			btrfs_release_path(path);
			return 0;
		}
		dst_copy = kmalloc(item_size, GFP_NOFS);
		src_copy = kmalloc(item_size, GFP_NOFS);
		if (!dst_copy || !src_copy) {
			btrfs_release_path(path);
			kfree(dst_copy);
			kfree(src_copy);
			return -ENOMEM;
		}

		read_extent_buffer(eb, src_copy, src_ptr, item_size);

		dst_ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
		read_extent_buffer(path->nodes[0], dst_copy, dst_ptr,
				   item_size);
		ret = memcmp(dst_copy, src_copy, item_size);

		kfree(dst_copy);
		kfree(src_copy);
		/*
		 * they have the same contents, just return, this saves
		 * us from cowing blocks in the destination tree and doing
		 * extra writes that may not have been done by a previous
		 * sync
		 */
		if (ret == 0) {
			btrfs_release_path(path);
			return 0;
		}

		/*
		 * We need to load the old nbytes into the inode so when we
		 * replay the extents we've logged we get the right nbytes.
		 */
		if (inode_item) {
			struct btrfs_inode_item *item;
			u64 nbytes;
			u32 mode;

			item = btrfs_item_ptr(path->nodes[0], path->slots[0],
					      struct btrfs_inode_item);
			nbytes = btrfs_inode_nbytes(path->nodes[0], item);
			item = btrfs_item_ptr(eb, slot,
					      struct btrfs_inode_item);
			btrfs_set_inode_nbytes(eb, item, nbytes);

			/*
			 * If this is a directory we need to reset the i_size to
			 * 0 so that we can set it up properly when replaying
			 * the rest of the items in this log.
			 */
			mode = btrfs_inode_mode(eb, item);
			if (S_ISDIR(mode))
				btrfs_set_inode_size(eb, item, 0);
		}
	} else if (inode_item) {
		struct btrfs_inode_item *item;
		u32 mode;

		/*
		 * New inode, set nbytes to 0 so that the nbytes comes out
		 * properly when we replay the extents.
		 */
		item = btrfs_item_ptr(eb, slot, struct btrfs_inode_item);
		btrfs_set_inode_nbytes(eb, item, 0);

		/*
		 * If this is a directory we need to reset the i_size to 0 so
		 * that we can set it up properly when replaying the rest of
		 * the items in this log.
		 */
		mode = btrfs_inode_mode(eb, item);
		if (S_ISDIR(mode))
			btrfs_set_inode_size(eb, item, 0);
	}
insert:
	btrfs_release_path(path);
	/* try to insert the key into the destination tree */
	path->skip_release_on_error = 1;
	ret = btrfs_insert_empty_item(trans, root, path,
				      key, item_size);
	path->skip_release_on_error = 0;

	/* make sure any existing item is the correct size */
	if (ret == -EEXIST || ret == -EOVERFLOW) {
		u32 found_size;
		found_size = btrfs_item_size_nr(path->nodes[0],
						path->slots[0]);
		if (found_size > item_size)
			btrfs_truncate_item(path, item_size, 1);
		else if (found_size < item_size)
			btrfs_extend_item(path, item_size - found_size);
	} else if (ret) {
		return ret;
	}
	dst_ptr = btrfs_item_ptr_offset(path->nodes[0],
					path->slots[0]);

	/* don't overwrite an existing inode if the generation number
	 * was logged as zero.  This is done when the tree logging code
	 * is just logging an inode to make sure it exists after recovery.
	 *
	 * Also, don't overwrite i_size on directories during replay.
	 * log replay inserts and removes directory items based on the
	 * state of the tree found in the subvolume, and i_size is modified
	 * as it goes
	 */
	if (key->type == BTRFS_INODE_ITEM_KEY && ret == -EEXIST) {
		struct btrfs_inode_item *src_item;
		struct btrfs_inode_item *dst_item;

		src_item = (struct btrfs_inode_item *)src_ptr;
		dst_item = (struct btrfs_inode_item *)dst_ptr;

		if (btrfs_inode_generation(eb, src_item) == 0) {
			struct extent_buffer *dst_eb = path->nodes[0];
			const u64 ino_size = btrfs_inode_size(eb, src_item);

			/*
			 * For regular files an ino_size == 0 is used only when
			 * logging that an inode exists, as part of a directory
			 * fsync, and the inode wasn't fsynced before. In this
			 * case don't set the size of the inode in the fs/subvol
			 * tree, otherwise we would be throwing valid data away.
			 */
			if (S_ISREG(btrfs_inode_mode(eb, src_item)) &&
			    S_ISREG(btrfs_inode_mode(dst_eb, dst_item)) &&
			    ino_size != 0) {
				struct btrfs_map_token token;

				btrfs_init_map_token(&token, dst_eb);
				btrfs_set_token_inode_size(dst_eb, dst_item,
							   ino_size, &token);
			}
			goto no_copy;
		}

		if (overwrite_root &&
		    S_ISDIR(btrfs_inode_mode(eb, src_item)) &&
		    S_ISDIR(btrfs_inode_mode(path->nodes[0], dst_item))) {
			save_old_i_size = 1;
			saved_i_size = btrfs_inode_size(path->nodes[0],
							dst_item);
		}
	}

	copy_extent_buffer(path->nodes[0], eb, dst_ptr,
			   src_ptr, item_size);

	if (save_old_i_size) {
		struct btrfs_inode_item *dst_item;
		dst_item = (struct btrfs_inode_item *)dst_ptr;
		btrfs_set_inode_size(path->nodes[0], dst_item, saved_i_size);
	}

	/* make sure the generation is filled in */
	if (key->type == BTRFS_INODE_ITEM_KEY) {
		struct btrfs_inode_item *dst_item;
		dst_item = (struct btrfs_inode_item *)dst_ptr;
		if (btrfs_inode_generation(path->nodes[0], dst_item) == 0) {
			btrfs_set_inode_generation(path->nodes[0], dst_item,
						   trans->transid);
		}
	}
no_copy:
	btrfs_mark_buffer_dirty(path->nodes[0]);
	btrfs_release_path(path);
	return 0;
}

/*
 * simple helper to read an inode off the disk from a given root
 * This can only be called for subvolume roots and not for the log
 */
static noinline struct inode *read_one_inode(struct btrfs_root *root,
					     u64 objectid)
{
	struct btrfs_key key;
	struct inode *inode;

	key.objectid = objectid;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;
	inode = btrfs_iget(root->fs_info->sb, &key, root);
	if (IS_ERR(inode))
		inode = NULL;
	return inode;
}

/* replays a single extent in 'eb' at 'slot' with 'key' into the
 * subvolume 'root'.  path is released on entry and should be released
 * on exit.
 *
 * extents in the log tree have not been allocated out of the extent
 * tree yet.  So, this completes the allocation, taking a reference
 * as required if the extent already exists or creating a new extent
 * if it isn't in the extent allocation tree yet.
 *
 * The extent is inserted into the file, dropping any existing extents
 * from the file that overlap the new one.
 */
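
/*
 * Worked example of the extent_end math below (an illustration, not from
 * the original file), assuming a 4K sectorsize:
 *
 *	REG/PREALLOC at key->offset == 0 with num_bytes == 8192:
 *		extent_end = 0 + 8192 = 8192
 *	INLINE at key->offset == 0 with ram_bytes == 100:
 *		extent_end = ALIGN(0 + 100, 4096) = 4096
 */
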
static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      struct extent_buffer *eb, int slot,
				      struct btrfs_key *key)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int found_type;
	u64 extent_end;
	u64 start = key->offset;
	u64 nbytes = 0;
	struct btrfs_file_extent_item *item;
	struct inode *inode = NULL;
	unsigned long size;
	int ret = 0;

	item = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
	found_type = btrfs_file_extent_type(eb, item);

	if (found_type == BTRFS_FILE_EXTENT_REG ||
	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
		nbytes = btrfs_file_extent_num_bytes(eb, item);
		extent_end = start + nbytes;

		/*
		 * We don't add to the inode's nbytes if we are prealloc or a
		 * hole.
		 */
		if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
			nbytes = 0;
	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
		size = btrfs_file_extent_ram_bytes(eb, item);
		nbytes = btrfs_file_extent_ram_bytes(eb, item);
		extent_end = ALIGN(start + size,
				   fs_info->sectorsize);
	} else {
		ret = 0;
		goto out;
	}

	inode = read_one_inode(root, key->objectid);
	if (!inode) {
		ret = -EIO;
		goto out;
	}

	/*
	 * first check to see if we already have this extent in the
	 * file.  This must be done before the btrfs_drop_extents run
	 * so we don't try to drop this extent.
	 */
	ret = btrfs_lookup_file_extent(trans, root, path,
			btrfs_ino(BTRFS_I(inode)), start, 0);

	if (ret == 0 &&
	    (found_type == BTRFS_FILE_EXTENT_REG ||
	     found_type == BTRFS_FILE_EXTENT_PREALLOC)) {
		struct btrfs_file_extent_item cmp1;
		struct btrfs_file_extent_item cmp2;
		struct btrfs_file_extent_item *existing;
		struct extent_buffer *leaf;

		leaf = path->nodes[0];
		existing = btrfs_item_ptr(leaf, path->slots[0],
					  struct btrfs_file_extent_item);

		read_extent_buffer(eb, &cmp1, (unsigned long)item,
				   sizeof(cmp1));
		read_extent_buffer(leaf, &cmp2, (unsigned long)existing,
				   sizeof(cmp2));

		/*
		 * we already have a pointer to this exact extent,
		 * we don't have to do anything
		 */
		if (memcmp(&cmp1, &cmp2, sizeof(cmp1)) == 0) {
			btrfs_release_path(path);
			goto out;
		}
	}
	btrfs_release_path(path);

	/* drop any overlapping extents */
	ret = btrfs_drop_extents(trans, root, inode, start, extent_end, 1);
	if (ret)
		goto out;

	if (found_type == BTRFS_FILE_EXTENT_REG ||
	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
		u64 offset;
		unsigned long dest_offset;
		struct btrfs_key ins;

		if (btrfs_file_extent_disk_bytenr(eb, item) == 0 &&
		    btrfs_fs_incompat(fs_info, NO_HOLES))
			goto update_inode;

		ret = btrfs_insert_empty_item(trans, root, path, key,
					      sizeof(*item));
		if (ret)
			goto out;
		dest_offset = btrfs_item_ptr_offset(path->nodes[0],
						    path->slots[0]);
		copy_extent_buffer(path->nodes[0], eb, dest_offset,
				   (unsigned long)item, sizeof(*item));

		ins.objectid = btrfs_file_extent_disk_bytenr(eb, item);
		ins.offset = btrfs_file_extent_disk_num_bytes(eb, item);
		ins.type = BTRFS_EXTENT_ITEM_KEY;
		offset = key->offset - btrfs_file_extent_offset(eb, item);

		/*
		 * Manually record dirty extent, as here we did a shallow
		 * file extent item copy and skip normal backref update,
		 * but modifying extent tree all by ourselves.
		 * So need to manually record dirty extent for qgroup,
		 * as the owner of the file extent changed from log tree
		 * (doesn't affect qgroup) to fs/file tree (affects qgroup)
		 */
		ret = btrfs_qgroup_trace_extent(trans,
				btrfs_file_extent_disk_bytenr(eb, item),
				btrfs_file_extent_disk_num_bytes(eb, item),
				GFP_NOFS);
		if (ret < 0)
			goto out;

		if (ins.objectid > 0) {
			struct btrfs_ref ref = { 0 };
			u64 csum_start;
			u64 csum_end;
			LIST_HEAD(ordered_sums);

			/*
			 * is this extent already allocated in the extent
			 * allocation tree?  If so, just add a reference
			 */
			ret = btrfs_lookup_data_extent(fs_info, ins.objectid,
						       ins.offset);
			if (ret == 0) {
				btrfs_init_generic_ref(&ref,
						BTRFS_ADD_DELAYED_REF,
						ins.objectid, ins.offset, 0);
				btrfs_init_data_ref(&ref,
						root->root_key.objectid,
						key->objectid, offset);
				ret = btrfs_inc_extent_ref(trans, &ref);
				if (ret)
					goto out;
			} else {
				/*
				 * insert the extent pointer in the extent
				 * allocation tree
				 */
				ret = btrfs_alloc_logged_file_extent(trans,
						root->root_key.objectid,
						key->objectid, offset, &ins);
				if (ret)
					goto out;
			}
			btrfs_release_path(path);

			if (btrfs_file_extent_compression(eb, item)) {
				csum_start = ins.objectid;
				csum_end = csum_start + ins.offset;
			} else {
				csum_start = ins.objectid +
					btrfs_file_extent_offset(eb, item);
				csum_end = csum_start +
					btrfs_file_extent_num_bytes(eb, item);
			}

			ret = btrfs_lookup_csums_range(root->log_root,
						csum_start, csum_end - 1,
						&ordered_sums, 0);
			if (ret)
				goto out;
			/*
			 * Now delete all existing csums in the csum root that
			 * cover our range. We do this because we can have an
			 * extent that is completely referenced by one file
			 * extent item and partially referenced by another
			 * file extent item (like after using the clone or
			 * extent_same ioctls). In this case if we end up doing
			 * the replay of the one that partially references the
			 * extent first, and we do not do the csum deletion
			 * below, we can get 2 csum items in the csum tree that
			 * overlap each other. For example, imagine our log has
			 * the two following file extent items:
			 *
			 * key (257 EXTENT_DATA 409600)
			 *     extent data disk byte 12845056 nr 102400
			 *     extent data offset 20480 nr 20480 ram 102400
			 *
			 * key (257 EXTENT_DATA 819200)
			 *     extent data disk byte 12845056 nr 102400
			 *     extent data offset 0 nr 102400 ram 102400
			 *
			 * Where the second one fully references the 100K extent
			 * that starts at disk byte 12845056, and the log tree
			 * has a single csum item that covers the entire range
			 * of the extent:
			 *
			 * key (EXTENT_CSUM EXTENT_CSUM 12845056) itemsize 100
			 *
			 * After the first file extent item is replayed, the
			 * csum tree gets the following csum item:
			 *
			 * key (EXTENT_CSUM EXTENT_CSUM 12865536) itemsize 20
			 *
			 * Which covers the 20K sub-range starting at offset 20K
			 * of our extent. Now when we replay the second file
			 * extent item, if we do not delete existing csum items
			 * that cover any of its blocks, we end up getting two
			 * csum items in our csum tree that overlap each other:
			 *
			 * key (EXTENT_CSUM EXTENT_CSUM 12845056) itemsize 100
			 * key (EXTENT_CSUM EXTENT_CSUM 12865536) itemsize 20
			 *
			 * Which is a problem, because after this anyone trying
			 * to look up the checksum of any block of our
			 * extent starting at an offset of 40K or higher, will
			 * end up looking at the second csum item only, which
			 * does not contain the checksum for any block starting
			 * at offset 40K or higher of our extent.
			 */
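			/*
			 * Worked numbers for the example above (an
			 * illustration, not part of the original comment),
			 * using the csum_start/csum_end formulas from the
			 * uncompressed branch a few lines up:
			 *
			 *	first item:  csum_start = 12845056 + 20480
			 *	                        = 12865536
			 *	             csum_end   = 12865536 + 20480
			 *	                        = 12886016
			 *	second item: csum_start = 12845056 + 0
			 *	             csum_end   = 12845056 + 102400
			 *	                        = 12947456
			 *
			 * The second range fully contains the first, which is
			 * why btrfs_del_csums() below must run before
			 * btrfs_csum_file_blocks() re-inserts the sums.
			 */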
			while (!list_empty(&ordered_sums)) {
				struct btrfs_ordered_sum *sums;
				sums = list_entry(ordered_sums.next,
						  struct btrfs_ordered_sum,
						  list);
				if (!ret)
					ret = btrfs_del_csums(trans, fs_info,
							      sums->bytenr,
							      sums->len);
				if (!ret)
					ret = btrfs_csum_file_blocks(trans,
						fs_info->csum_root, sums);
				list_del(&sums->list);
				kfree(sums);
			}
			if (ret)
				goto out;
		} else {
			btrfs_release_path(path);
		}
	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
		/* inline extents are easy, we just overwrite them */
		ret = overwrite_item(trans, root, path, eb, slot, key);
		if (ret)
			goto out;
	}

	inode_add_bytes(inode, nbytes);
update_inode:
	ret = btrfs_update_inode(trans, root, inode);
out:
	if (inode)
		iput(inode);
	return ret;
}

/*
 * when cleaning up conflicts between the directory names in the
 * subvolume, directory names in the log and directory names in the
 * inode back references, we may have to unlink inodes from directories.
 *
 * This is a helper function to do the unlink of a specific directory
 * item
 */
static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      struct btrfs_inode *dir,
				      struct btrfs_dir_item *di)
{
	struct inode *inode;
	char *name;
	int name_len;
	struct extent_buffer *leaf;
	struct btrfs_key location;
	int ret;

	leaf = path->nodes[0];

	btrfs_dir_item_key_to_cpu(leaf, di, &location);
	name_len = btrfs_dir_name_len(leaf, di);
	name = kmalloc(name_len, GFP_NOFS);
	if (!name)
		return -ENOMEM;

	read_extent_buffer(leaf, name, (unsigned long)(di + 1), name_len);
	btrfs_release_path(path);

	inode = read_one_inode(root, location.objectid);
	if (!inode) {
		ret = -EIO;
		goto out;
	}

	ret = link_to_fixup_dir(trans, root, path, location.objectid);
	if (ret)
		goto out;

	ret = btrfs_unlink_inode(trans, root, dir, BTRFS_I(inode), name,
				 name_len);
	if (ret)
		goto out;
	else
		ret = btrfs_run_delayed_items(trans);
out:
	kfree(name);
	iput(inode);
	return ret;
}

/*
 * helper function to see if a given name and sequence number found
 * in an inode back reference are already in a directory and correctly
 * point to this inode
 */
static noinline int inode_in_dir(struct btrfs_root *root,
				 struct btrfs_path *path,
				 u64 dirid, u64 objectid, u64 index,
				 const char *name, int name_len)
{
	struct btrfs_dir_item *di;
	struct btrfs_key location;
	int match = 0;

	di = btrfs_lookup_dir_index_item(NULL, root, path, dirid,
					 index, name, name_len, 0);
	if (di && !IS_ERR(di)) {
		btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
		if (location.objectid != objectid)
			goto out;
	} else
		goto out;
	btrfs_release_path(path);

	di = btrfs_lookup_dir_item(NULL, root, path, dirid, name, name_len, 0);
	if (di && !IS_ERR(di)) {
		btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
		if (location.objectid != objectid)
			goto out;
	} else
		goto out;
	match = 1;
out:
	btrfs_release_path(path);
	return match;
}

/*
 * helper function to check a log tree for a named back reference in
 * an inode.  This is used to decide if a back reference that is
 * found in the subvolume conflicts with what we find in the log.
 *
 * inode backreferences may have multiple refs in a single item,
 * during replay we process one reference at a time, and we don't
 * want to delete valid links to a file from the subvolume if that
 * link is also in the log.
 */
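
/*
 * Usage sketch (hypothetical values, not from the original file): to ask
 * whether the name "foo" under parent directory 256 survives in the log
 * for inode 257, __add_inode_ref() below builds the key the ref item
 * would have and calls:
 *
 *	search_key.objectid = 257;	the inode being replayed
 *	search_key.type = BTRFS_INODE_REF_KEY;
 *	search_key.offset = 256;	the parent directory
 *	ret = backref_in_log(log, &search_key, 256, "foo", 3);
 *
 * ret == 1 means the name is still in the log and must not be unlinked,
 * ret == 0 means it is a stale name, and ret < 0 is an error.
 */
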
static noinline int backref_in_log(struct btrfs_root *log,
				   struct btrfs_key *key,
				   u64 ref_objectid,
				   const char *name, int namelen)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(NULL, log, key, path, 0, 0);
	if (ret < 0) {
		goto out;
	} else if (ret == 1) {
		ret = 0;
		goto out;
	}

	if (key->type == BTRFS_INODE_EXTREF_KEY)
		ret = !!btrfs_find_name_in_ext_backref(path->nodes[0],
						       path->slots[0],
						       ref_objectid,
						       name, namelen);
	else
		ret = !!btrfs_find_name_in_backref(path->nodes[0],
						   path->slots[0],
						   name, namelen);
out:
	btrfs_free_path(path);
	return ret;
}

static inline int __add_inode_ref(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_path *path,
				  struct btrfs_root *log_root,
				  struct btrfs_inode *dir,
				  struct btrfs_inode *inode,
				  u64 inode_objectid, u64 parent_objectid,
				  u64 ref_index, char *name, int namelen,
				  int *search_done)
{
	int ret;
	char *victim_name;
	int victim_name_len;
	struct extent_buffer *leaf;
	struct btrfs_dir_item *di;
	struct btrfs_key search_key;
	struct btrfs_inode_extref *extref;

again:
	/* Search old style refs */
	search_key.objectid = inode_objectid;
	search_key.type = BTRFS_INODE_REF_KEY;
	search_key.offset = parent_objectid;
	ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
	if (ret == 0) {
		struct btrfs_inode_ref *victim_ref;
		unsigned long ptr;
		unsigned long ptr_end;

		leaf = path->nodes[0];

		/* are we trying to overwrite a back ref for the root directory
		 * if so, just jump out, we're done
		 */
		if (search_key.objectid == search_key.offset)
			return 1;

		/* check all the names in this back reference to see
		 * if they are in the log.  if so, we allow them to stay
		 * otherwise they must be unlinked as a conflict
		 */
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
		ptr_end = ptr + btrfs_item_size_nr(leaf, path->slots[0]);
		while (ptr < ptr_end) {
			victim_ref = (struct btrfs_inode_ref *)ptr;
			victim_name_len = btrfs_inode_ref_name_len(leaf,
								   victim_ref);
			victim_name = kmalloc(victim_name_len, GFP_NOFS);
			if (!victim_name)
				return -ENOMEM;

			read_extent_buffer(leaf, victim_name,
					   (unsigned long)(victim_ref + 1),
					   victim_name_len);

			ret = backref_in_log(log_root, &search_key,
					     parent_objectid, victim_name,
					     victim_name_len);
			if (ret < 0) {
				kfree(victim_name);
				return ret;
			} else if (!ret) {
				inc_nlink(&inode->vfs_inode);
				btrfs_release_path(path);

				ret = btrfs_unlink_inode(trans, root, dir, inode,
						victim_name, victim_name_len);
				kfree(victim_name);
				if (ret)
					return ret;
				ret = btrfs_run_delayed_items(trans);
				if (ret)
					return ret;
				*search_done = 1;
				goto again;
			}
			kfree(victim_name);

			ptr = (unsigned long)(victim_ref + 1) + victim_name_len;
		}

		/*
		 * NOTE: we have searched root tree and checked the
		 * corresponding ref, it does not need to check again.
		 */
		*search_done = 1;
	}
	btrfs_release_path(path);

	/* Same search but for extended refs */
	extref = btrfs_lookup_inode_extref(NULL, root, path, name, namelen,
					   inode_objectid, parent_objectid, 0,
					   0);
	if (!IS_ERR_OR_NULL(extref)) {
		u32 item_size;
		u32 cur_offset = 0;
		unsigned long base;
		struct inode *victim_parent;

		leaf = path->nodes[0];

		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		base = btrfs_item_ptr_offset(leaf, path->slots[0]);

		while (cur_offset < item_size) {
			extref = (struct btrfs_inode_extref *)(base + cur_offset);

			victim_name_len = btrfs_inode_extref_name_len(leaf, extref);

			if (btrfs_inode_extref_parent(leaf, extref) != parent_objectid)
				goto next;

			victim_name = kmalloc(victim_name_len, GFP_NOFS);
			if (!victim_name)
				return -ENOMEM;
			read_extent_buffer(leaf, victim_name, (unsigned long)&extref->name,
					   victim_name_len);

			search_key.objectid = inode_objectid;
			search_key.type = BTRFS_INODE_EXTREF_KEY;
			search_key.offset = btrfs_extref_hash(parent_objectid,
							      victim_name,
							      victim_name_len);
			ret = backref_in_log(log_root, &search_key,
					     parent_objectid, victim_name,
					     victim_name_len);
			if (ret < 0) {
				return ret;
			} else if (!ret) {
				ret = -ENOENT;
				victim_parent = read_one_inode(root,
						parent_objectid);
				if (victim_parent) {
					inc_nlink(&inode->vfs_inode);
					btrfs_release_path(path);

					ret = btrfs_unlink_inode(trans, root,
							BTRFS_I(victim_parent),
							inode,
							victim_name,
							victim_name_len);
					if (!ret)
						ret = btrfs_run_delayed_items(
								  trans);
				}
				iput(victim_parent);
				kfree(victim_name);
				if (ret)
					return ret;
				*search_done = 1;
				goto again;
			}
			kfree(victim_name);
next:
			cur_offset += victim_name_len + sizeof(*extref);
		}
		*search_done = 1;
	}
	btrfs_release_path(path);

	/* look for a conflicting sequence number */
	di = btrfs_lookup_dir_index_item(trans, root, path, btrfs_ino(dir),
					 ref_index, name, namelen, 0);
	if (di && !IS_ERR(di)) {
		ret = drop_one_dir_item(trans, root, path, dir, di);
		if (ret)
			return ret;
	}
	btrfs_release_path(path);

	/* look for a conflicting name */
	di = btrfs_lookup_dir_item(trans, root, path, btrfs_ino(dir),
				   name, namelen, 0);
	if (di && !IS_ERR(di)) {
		ret = drop_one_dir_item(trans, root, path, dir, di);
		if (ret)
			return ret;
	}
	btrfs_release_path(path);

	return 0;
}

static int extref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr,
			     u32 *namelen, char **name, u64 *index,
			     u64 *parent_objectid)
{
	struct btrfs_inode_extref *extref;

	extref = (struct btrfs_inode_extref *)ref_ptr;

	*namelen = btrfs_inode_extref_name_len(eb, extref);
	*name = kmalloc(*namelen, GFP_NOFS);
	if (*name == NULL)
		return -ENOMEM;

	read_extent_buffer(eb, *name, (unsigned long)&extref->name,
			   *namelen);

	if (index)
		*index = btrfs_inode_extref_index(eb, extref);
	if (parent_objectid)
		*parent_objectid = btrfs_inode_extref_parent(eb, extref);

	return 0;
}

static int ref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr,
			  u32 *namelen, char **name, u64 *index)
{
	struct btrfs_inode_ref *ref;

	ref = (struct btrfs_inode_ref *)ref_ptr;

	*namelen = btrfs_inode_ref_name_len(eb, ref);
	*name = kmalloc(*namelen, GFP_NOFS);
	if (*name == NULL)
		return -ENOMEM;

	read_extent_buffer(eb, *name, (unsigned long)(ref + 1), *namelen);

	if (index)
		*index = btrfs_inode_ref_index(eb, ref);

	return 0;
}

/*
 * Take an inode reference item from the log tree and iterate all names from the
 * inode reference item in the subvolume tree with the same key (if it exists).
 * For any name that is not in the inode reference item from the log tree, do a
 * proper unlink of that name (that is, remove its entry from the inode
 * reference item and both dir index keys).
 */
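
/*
 * Hypothetical walk-through (not from the original file): if the
 * subvolume's item at key (258 INODE_REF 256) holds the names "a" and
 * "b", but the log's item at the same key only holds "a", then "b" was
 * removed or renamed before the fsync.  The loop below fails to find "b"
 * in log_eb, so it unlinks "b" from directory 256 before overwrite_item()
 * later replaces the whole reference item with the logged copy.
 */
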
static int unlink_old_inode_refs(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_inode *inode,
				 struct extent_buffer *log_eb,
				 int log_slot,
				 struct btrfs_key *key)
{
	int ret;
	unsigned long ref_ptr;
	unsigned long ref_end;
	struct extent_buffer *eb;

again:
	btrfs_release_path(path);
	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
	if (ret > 0) {
		ret = 0;
		goto out;
	}
	if (ret < 0)
		goto out;

	eb = path->nodes[0];
	ref_ptr = btrfs_item_ptr_offset(eb, path->slots[0]);
	ref_end = ref_ptr + btrfs_item_size_nr(eb, path->slots[0]);
	while (ref_ptr < ref_end) {
		char *name = NULL;
		int namelen;
		u64 parent_id;

		if (key->type == BTRFS_INODE_EXTREF_KEY) {
			ret = extref_get_fields(eb, ref_ptr, &namelen, &name,
						NULL, &parent_id);
		} else {
			parent_id = key->offset;
			ret = ref_get_fields(eb, ref_ptr, &namelen, &name,
					     NULL);
		}
		if (ret)
			goto out;

		if (key->type == BTRFS_INODE_EXTREF_KEY)
			ret = !!btrfs_find_name_in_ext_backref(log_eb, log_slot,
							       parent_id, name,
							       namelen);
		else
			ret = !!btrfs_find_name_in_backref(log_eb, log_slot,
							   name, namelen);

		if (!ret) {
			struct inode *dir;

			btrfs_release_path(path);
			dir = read_one_inode(root, parent_id);
			if (!dir) {
				ret = -ENOENT;
				kfree(name);
				goto out;
			}
			ret = btrfs_unlink_inode(trans, root, BTRFS_I(dir),
						 inode, name, namelen);
			kfree(name);
			iput(dir);
			if (ret)
				goto out;
			goto again;
		}

		kfree(name);
		ref_ptr += namelen;
		if (key->type == BTRFS_INODE_EXTREF_KEY)
			ref_ptr += sizeof(struct btrfs_inode_extref);
		else
			ref_ptr += sizeof(struct btrfs_inode_ref);
	}
	ret = 0;
out:
	btrfs_release_path(path);
	return ret;
}

static int btrfs_inode_ref_exists(struct inode *inode, struct inode *dir,
				  const u8 ref_type, const char *name,
				  const int namelen)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	const u64 parent_id = btrfs_ino(BTRFS_I(dir));
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = btrfs_ino(BTRFS_I(inode));
	key.type = ref_type;
	if (key.type == BTRFS_INODE_REF_KEY)
		key.offset = parent_id;
	else
		key.offset = btrfs_extref_hash(parent_id, name, namelen);

	ret = btrfs_search_slot(NULL, BTRFS_I(inode)->root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = 0;
		goto out;
	}
	if (key.type == BTRFS_INODE_EXTREF_KEY)
		ret = !!btrfs_find_name_in_ext_backref(path->nodes[0],
				path->slots[0], parent_id, name, namelen);
	else
		ret = !!btrfs_find_name_in_backref(path->nodes[0], path->slots[0],
						   name, namelen);

out:
	btrfs_free_path(path);
	return ret;
}

static int add_link(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		    struct inode *dir, struct inode *inode, const char *name,
		    int namelen, u64 ref_index)
{
	struct btrfs_dir_item *dir_item;
	struct btrfs_key key;
	struct btrfs_path *path;
	struct inode *other_inode = NULL;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	dir_item = btrfs_lookup_dir_item(NULL, root, path,
					 btrfs_ino(BTRFS_I(dir)),
					 name, namelen, 0);
	if (!dir_item) {
		btrfs_release_path(path);
		goto add_link;
	} else if (IS_ERR(dir_item)) {
		ret = PTR_ERR(dir_item);
		goto out;
	}

	/*
	 * Our inode's dentry collides with the dentry of another inode which is
	 * in the log but not yet processed since it has a higher inode number.
	 * So delete that other dentry.
	 */
	btrfs_dir_item_key_to_cpu(path->nodes[0], dir_item, &key);
	btrfs_release_path(path);
	other_inode = read_one_inode(root, key.objectid);
	if (!other_inode) {
		ret = -ENOENT;
		goto out;
	}
	ret = btrfs_unlink_inode(trans, root, BTRFS_I(dir), BTRFS_I(other_inode),
				 name, namelen);
	if (ret)
		goto out;
	/*
	 * If we dropped the link count to 0, bump it so that later the iput()
	 * on the inode will not free it. We will fixup the link count later.
	 */
	if (other_inode->i_nlink == 0)
		inc_nlink(other_inode);

	ret = btrfs_run_delayed_items(trans);
	if (ret)
		goto out;
add_link:
	ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode),
			     name, namelen, 0, ref_index);
out:
	iput(other_inode);
	btrfs_free_path(path);

	return ret;
}

/*
 * replay one inode back reference item found in the log tree.
 * eb, slot and key refer to the buffer and key found in the log tree.
 * root is the destination we are replaying into, and path is for temp
 * use by this function.  (it should be released on return).
 */
static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_root *log,
				  struct btrfs_path *path,
				  struct extent_buffer *eb, int slot,
				  struct btrfs_key *key)
{
	struct inode *dir = NULL;
	struct inode *inode = NULL;
	unsigned long ref_ptr;
	unsigned long ref_end;
	char *name = NULL;
	int namelen;
	int ret;
	int search_done = 0;
	int log_ref_ver = 0;
	u64 parent_objectid;
	u64 inode_objectid;
	u64 ref_index = 0;
	int ref_struct_size;

	ref_ptr = btrfs_item_ptr_offset(eb, slot);
	ref_end = ref_ptr + btrfs_item_size_nr(eb, slot);

	if (key->type == BTRFS_INODE_EXTREF_KEY) {
		struct btrfs_inode_extref *r;

		ref_struct_size = sizeof(struct btrfs_inode_extref);
		log_ref_ver = 1;
		r = (struct btrfs_inode_extref *)ref_ptr;
		parent_objectid = btrfs_inode_extref_parent(eb, r);
	} else {
		ref_struct_size = sizeof(struct btrfs_inode_ref);
		parent_objectid = key->offset;
	}
	inode_objectid = key->objectid;

	/*
	 * it is possible that we didn't log all the parent directories
	 * for a given inode.  If we don't find the dir, just don't
	 * copy the back ref in.  The link count fixup code will take
	 * care of the rest
	 */
	dir = read_one_inode(root, parent_objectid);
	if (!dir) {
		ret = -ENOENT;
		goto out;
	}

	inode = read_one_inode(root, inode_objectid);
	if (!inode) {
		ret = -EIO;
		goto out;
	}

	while (ref_ptr < ref_end) {
		if (log_ref_ver) {
			ret = extref_get_fields(eb, ref_ptr, &namelen, &name,
						&ref_index, &parent_objectid);
			/*
			 * parent object can change from one array
			 * item to another.
			 */
			if (!dir)
				dir = read_one_inode(root, parent_objectid);
			if (!dir) {
				ret = -ENOENT;
				goto out;
			}
		} else {
			ret = ref_get_fields(eb, ref_ptr, &namelen, &name,
					     &ref_index);
		}
		if (ret)
			goto out;

		/* if we already have a perfect match, we're done */
		if (!inode_in_dir(root, path, btrfs_ino(BTRFS_I(dir)),
				  btrfs_ino(BTRFS_I(inode)), ref_index,
				  name, namelen)) {
			/*
			 * look for a conflicting back reference in the
			 * metadata. if we find one we have to unlink that name
			 * of the file before we add our new link.  Later on, we
			 * overwrite any existing back reference, and we don't
			 * want to create dangling pointers in the directory.
			 */

			if (!search_done) {
				ret = __add_inode_ref(trans, root, path, log,
						      BTRFS_I(dir),
						      BTRFS_I(inode),
						      inode_objectid,
						      parent_objectid,
						      ref_index, name, namelen,
						      &search_done);
				if (ret) {
					if (ret == 1)
						ret = 0;
					goto out;
				}
			}

			/*
			 * If a reference item already exists for this inode
			 * with the same parent and name, but different index,
			 * drop it and the corresponding directory index entries
			 * from the parent before adding the new reference item
			 * and dir index entries, otherwise we would fail with
			 * -EEXIST returned from btrfs_add_link() below.
			 */
			ret = btrfs_inode_ref_exists(inode, dir, key->type,
						     name, namelen);
			if (ret > 0) {
				ret = btrfs_unlink_inode(trans, root,
							 BTRFS_I(dir),
							 BTRFS_I(inode),
							 name, namelen);
				/*
				 * If we dropped the link count to 0, bump it so
				 * that later the iput() on the inode will not
				 * free it. We will fixup the link count later.
				 */
				if (!ret && inode->i_nlink == 0)
					inc_nlink(inode);
			}
			if (ret < 0)
				goto out;

			/* insert our name */
			ret = add_link(trans, root, dir, inode, name, namelen,
				       ref_index);
			if (ret)
				goto out;

			btrfs_update_inode(trans, root, inode);
		}

		ref_ptr = (unsigned long)(ref_ptr + ref_struct_size) + namelen;
		kfree(name);
		name = NULL;
		if (log_ref_ver) {
			iput(dir);
			dir = NULL;
		}
	}

	/*
	 * Before we overwrite the inode reference item in the subvolume tree
	 * with the item from the log tree, we must unlink all names from the
	 * parent directory that are in the subvolume's tree inode reference
	 * item, otherwise we end up with an inconsistent subvolume tree where
	 * dir index entries exist for a name but there is no inode reference
	 * item with the same name.
	 */
	ret = unlink_old_inode_refs(trans, root, path, BTRFS_I(inode), eb, slot,
				    key);
	if (ret)
		goto out;

	/* finally write the back reference in the inode */
	ret = overwrite_item(trans, root, path, eb, slot, key);
out:
	btrfs_release_path(path);
	kfree(name);
	iput(dir);
	iput(inode);
	return ret;
}

static int insert_orphan_item(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root, u64 ino)
{
	int ret;

	ret = btrfs_insert_orphan_item(trans, root, ino);
	if (ret == -EEXIST)
		ret = 0;

	return ret;
}

static int count_inode_extrefs(struct btrfs_root *root,
		struct btrfs_inode *inode, struct btrfs_path *path)
{
	int ret = 0;
	int name_len;
	unsigned int nlink = 0;
	u32 item_size;
	u32 cur_offset = 0;
	u64 inode_objectid = btrfs_ino(inode);
	u64 offset = 0;
	unsigned long ptr;
	struct btrfs_inode_extref *extref;
	struct extent_buffer *leaf;

	while (1) {
		ret = btrfs_find_one_extref(root, inode_objectid, offset, path,
					    &extref, &offset);
		if (ret)
			break;

		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
		cur_offset = 0;

		while (cur_offset < item_size) {
			extref = (struct btrfs_inode_extref *) (ptr + cur_offset);
			name_len = btrfs_inode_extref_name_len(leaf, extref);

			nlink++;

			cur_offset += name_len + sizeof(*extref);
		}

		offset++;
		btrfs_release_path(path);
	}
	btrfs_release_path(path);

	if (ret < 0 && ret != -ENOENT)
		return ret;
	return nlink;
}

static int count_inode_refs(struct btrfs_root *root,
			struct btrfs_inode *inode, struct btrfs_path *path)
{
	int ret;
	struct btrfs_key key;
	unsigned int nlink = 0;
	unsigned long ptr;
	unsigned long ptr_end;
	int name_len;
	u64 ino = btrfs_ino(inode);

	key.objectid = ino;
	key.type = BTRFS_INODE_REF_KEY;
	key.offset = (u64)-1;

	while (1) {
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			break;
		if (ret > 0) {
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		}
process_slot:
		btrfs_item_key_to_cpu(path->nodes[0], &key,
				      path->slots[0]);
		if (key.objectid != ino ||
		    key.type != BTRFS_INODE_REF_KEY)
			break;
		ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
		ptr_end = ptr + btrfs_item_size_nr(path->nodes[0],
						   path->slots[0]);
		while (ptr < ptr_end) {
			struct btrfs_inode_ref *ref;

			ref = (struct btrfs_inode_ref *)ptr;
			name_len = btrfs_inode_ref_name_len(path->nodes[0],
							    ref);
			ptr = (unsigned long)(ref + 1) + name_len;
			nlink++;
		}

		if (key.offset == 0)
			break;
		if (path->slots[0] > 0) {
			path->slots[0]--;
			goto process_slot;
		}
		key.offset--;
		btrfs_release_path(path);
	}
	btrfs_release_path(path);

	return nlink;
}

/*
 * There are a few corners where the link count of the file can't
 * be properly maintained during replay.  So, instead of adding
 * lots of complexity to the log code, we just scan the backrefs
 * for any file that has been through replay.
 *
 * The scan will update the link count on the inode to reflect the
 * number of back refs found.  If it goes down to zero, the iput
 * will free the inode.
 */
static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct inode *inode)
{
	struct btrfs_path *path;
	int ret;
	u64 nlink = 0;
	u64 ino = btrfs_ino(BTRFS_I(inode));

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = count_inode_refs(root, BTRFS_I(inode), path);
	if (ret < 0)
		goto out;

	nlink = ret;

	ret = count_inode_extrefs(root, BTRFS_I(inode), path);
	if (ret < 0)
		goto out;

	nlink += ret;

	ret = 0;

	if (nlink != inode->i_nlink) {
		set_nlink(inode, nlink);
		btrfs_update_inode(trans, root, inode);
	}
	BTRFS_I(inode)->index_cnt = (u64)-1;

	if (inode->i_nlink == 0) {
		if (S_ISDIR(inode->i_mode)) {
			ret = replay_dir_deletes(trans, root, NULL, path,
						 ino, 1);
			if (ret)
				goto out;
		}
		ret = insert_orphan_item(trans, root, ino);
	}

out:
	btrfs_free_path(path);
	return ret;
}

static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
					    struct btrfs_root *root,
					    struct btrfs_path *path)
{
	int ret;
	struct btrfs_key key;
	struct inode *inode;

	key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
	key.type = BTRFS_ORPHAN_ITEM_KEY;
	key.offset = (u64)-1;
	while (1) {
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret < 0)
			break;

		if (ret == 1) {
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		}

		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
		if (key.objectid != BTRFS_TREE_LOG_FIXUP_OBJECTID ||
		    key.type != BTRFS_ORPHAN_ITEM_KEY)
			break;

		ret = btrfs_del_item(trans, root, path);
		if (ret)
			goto out;

		btrfs_release_path(path);
		inode = read_one_inode(root, key.offset);
		if (!inode)
			return -EIO;

		ret = fixup_inode_link_count(trans, root, inode);
		iput(inode);
		if (ret)
			goto out;

		/*
		 * fixup on a directory may create new entries,
		 * make sure we always look for the highest possible
		 * offset
		 */
		key.offset = (u64)-1;
	}
	ret = 0;
out:
	btrfs_release_path(path);
	return ret;
}

/*
 * record a given inode in the fixup dir so we can check its link
 * count when replay is done.  The link count is incremented here
 * so the inode won't go away until we check it
 */
static noinline int link_to_fixup_dir(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      u64 objectid)
{
	struct btrfs_key key;
	int ret = 0;
	struct inode *inode;

	inode = read_one_inode(root, objectid);
	if (!inode)
		return -EIO;

	key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
	key.type = BTRFS_ORPHAN_ITEM_KEY;
	key.offset = objectid;

	ret = btrfs_insert_empty_item(trans, root, path, &key, 0);

	btrfs_release_path(path);
	if (ret == 0) {
		if (!inode->i_nlink)
			set_nlink(inode, 1);
		else
			inc_nlink(inode);
		ret = btrfs_update_inode(trans, root, inode);
	} else if (ret == -EEXIST) {
		ret = 0;
	} else {
		BUG(); /* Logic Error */
	}
	iput(inode);

	return ret;
}

/*
 * when replaying the log for a directory, we only insert names
 * for inodes that actually exist.  This means an fsync on a directory
 * does not implicitly fsync all the new files in it
 */
static noinline int insert_one_name(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    u64 dirid, u64 index,
				    char *name, int name_len,
				    struct btrfs_key *location)
{
	struct inode *inode;
	struct inode *dir;
	int ret;

	inode = read_one_inode(root, location->objectid);
	if (!inode)
		return -ENOENT;

	dir = read_one_inode(root, dirid);
	if (!dir) {
		iput(inode);
		return -EIO;
	}

	ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode), name,
			     name_len, 1, index);

	/* FIXME, put inode into FIXUP list */

	iput(inode);
	iput(dir);
	return ret;
}

/*
 * take a single entry in a log directory item and replay it into
 * the subvolume.
 *
 * if a conflicting item exists in the subdirectory already,
 * the inode it points to is unlinked and put into the link count
 * fix up tree.
 *
 * If a name from the log points to a file or directory that does
 * not exist in the FS, it is skipped.  fsyncs on directories
 * do not force down inodes inside that directory, just changes to the
 * names or unlinks in a directory.
 *
 * Returns < 0 on error, 0 if the name wasn't replayed (dentry points to a
 * non-existing inode) and 1 if the name was replayed.
 */
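
/*
 * Illustrative caller-side sketch (not from the original file): the
 * 0-vs-1 return value is what lets replay_one_dir_item() below decide
 * whether the inode the entry points to needs a link count fixup:
 *
 *	ret = replay_one_name(trans, root, path, eb, di, key);
 *	if (ret == 1 && btrfs_dir_type(eb, di) != BTRFS_FT_DIR)
 *		ret = link_to_fixup_dir(trans, root, fixup_path,
 *					di_key.objectid);
 */
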
static noinline int replay_one_name(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct extent_buffer *eb,
				    struct btrfs_dir_item *di,
				    struct btrfs_key *key)
{
	char *name;
	int name_len;
	struct btrfs_dir_item *dst_di;
	struct btrfs_key found_key;
	struct btrfs_key log_key;
	struct inode *dir;
	u8 log_type;
	int exists;
	int ret = 0;
	bool update_size = (key->type == BTRFS_DIR_INDEX_KEY);
	bool name_added = false;

	dir = read_one_inode(root, key->objectid);
	if (!dir)
		return -EIO;

	name_len = btrfs_dir_name_len(eb, di);
	name = kmalloc(name_len, GFP_NOFS);
	if (!name) {
		ret = -ENOMEM;
		goto out;
	}

	log_type = btrfs_dir_type(eb, di);
	read_extent_buffer(eb, name, (unsigned long)(di + 1),
			   name_len);

	btrfs_dir_item_key_to_cpu(eb, di, &log_key);
	exists = btrfs_lookup_inode(trans, root, path, &log_key, 0);
	if (exists == 0)
		exists = 1;
	else
		exists = 0;
	btrfs_release_path(path);

	if (key->type == BTRFS_DIR_ITEM_KEY) {
		dst_di = btrfs_lookup_dir_item(trans, root, path, key->objectid,
					       name, name_len, 1);
	} else if (key->type == BTRFS_DIR_INDEX_KEY) {
		dst_di = btrfs_lookup_dir_index_item(trans, root, path,
						     key->objectid,
						     key->offset, name,
						     name_len, 1);
	} else {
		/* Corruption */
		ret = -EINVAL;
		goto out;
	}
	if (IS_ERR_OR_NULL(dst_di)) {
		/* we need a sequence number to insert, so we only
		 * do inserts for the BTRFS_DIR_INDEX_KEY types
		 */
		if (key->type != BTRFS_DIR_INDEX_KEY)
			goto out;
		goto insert;
	}

	btrfs_dir_item_key_to_cpu(path->nodes[0], dst_di, &found_key);
	/* the existing item matches the logged item */
	if (found_key.objectid == log_key.objectid &&
	    found_key.type == log_key.type &&
	    found_key.offset == log_key.offset &&
	    btrfs_dir_type(path->nodes[0], dst_di) == log_type) {
		update_size = false;
		goto out;
	}

	/*
	 * don't drop the conflicting directory entry if the inode
	 * for the new entry doesn't exist
	 */
	if (!exists)
		goto out;

	ret = drop_one_dir_item(trans, root, path, BTRFS_I(dir), dst_di);
	if (ret)
		goto out;

	if (key->type == BTRFS_DIR_INDEX_KEY)
		goto insert;
out:
	btrfs_release_path(path);
	if (!ret && update_size) {
		btrfs_i_size_write(BTRFS_I(dir), dir->i_size + name_len * 2);
		ret = btrfs_update_inode(trans, root, dir);
	}
	kfree(name);
	iput(dir);
	if (!ret && name_added)
		ret = 1;
	return ret;

insert:
	/*
	 * Check if the inode reference exists in the log for the given name,
	 * inode and parent inode
	 */
	found_key.objectid = log_key.objectid;
	found_key.type = BTRFS_INODE_REF_KEY;
	found_key.offset = key->objectid;
	ret = backref_in_log(root->log_root, &found_key, 0, name, name_len);
	if (ret < 0) {
		goto out;
	} else if (ret) {
		/* The dentry will be added later. */
		ret = 0;
		update_size = false;
		goto out;
	}

	found_key.objectid = log_key.objectid;
	found_key.type = BTRFS_INODE_EXTREF_KEY;
	found_key.offset = key->objectid;
	ret = backref_in_log(root->log_root, &found_key, key->objectid, name,
			     name_len);
	if (ret < 0) {
		goto out;
	} else if (ret) {
		/* The dentry will be added later. */
		ret = 0;
		update_size = false;
		goto out;
	}
	btrfs_release_path(path);
	ret = insert_one_name(trans, root, key->objectid, key->offset,
			      name, name_len, &log_key);
	if (ret && ret != -ENOENT && ret != -EEXIST)
		goto out;
	if (!ret)
		name_added = true;
	update_size = false;
	ret = 0;
	goto out;
}

/*
 * find all the names in a directory item and reconcile them into
 * the subvolume.  Only BTRFS_DIR_ITEM_KEY types will have more than
 * one name in a directory item, but the same code gets used for
 * both directory index types
 */
static noinline int replay_one_dir_item(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_path *path,
					struct extent_buffer *eb, int slot,
					struct btrfs_key *key)
{
	int ret = 0;
	u32 item_size = btrfs_item_size_nr(eb, slot);
	struct btrfs_dir_item *di;
	int name_len;
	unsigned long ptr;
	unsigned long ptr_end;
	struct btrfs_path *fixup_path = NULL;

	ptr = btrfs_item_ptr_offset(eb, slot);
	ptr_end = ptr + item_size;
	while (ptr < ptr_end) {
		di = (struct btrfs_dir_item *)ptr;
		name_len = btrfs_dir_name_len(eb, di);
		ret = replay_one_name(trans, root, path, eb, di, key);
		if (ret < 0)
			break;
		ptr = (unsigned long)(di + 1);
		ptr += name_len;

		/*
		 * If this entry refers to a non-directory (directories can not
		 * have a link count > 1) and it was added in the transaction
		 * that was not committed, make sure we fixup the link count of
		 * the inode the entry points to. Otherwise something like
		 * the following would result in a directory pointing to an
		 * inode with a wrong link count that does not account for this
		 * dir entry:
		 *
		 * mkdir testdir
		 * touch testdir/foo
		 * touch testdir/bar
		 * sync
		 *
		 * ln testdir/bar testdir/bar_link
		 * ln testdir/foo testdir/foo_link
		 * xfs_io -c "fsync" testdir/bar
		 *
		 * <power failure>
		 *
		 * mount fs, log replay happens
		 *
		 * File foo would remain with a link count of 1 when it has two
		 * entries pointing to it in the directory testdir. This would
		 * make it impossible to ever delete the parent directory, as
		 * it would result in stale dentries that can never be deleted.
		 */
		if (ret == 1 && btrfs_dir_type(eb, di) != BTRFS_FT_DIR) {
			struct btrfs_key di_key;

			if (!fixup_path) {
				fixup_path = btrfs_alloc_path();
				if (!fixup_path) {
					ret = -ENOMEM;
					break;
				}
			}

			btrfs_dir_item_key_to_cpu(eb, di, &di_key);
			ret = link_to_fixup_dir(trans, root, fixup_path,
						di_key.objectid);
			if (ret)
				break;
		}
		ret = 0;
	}
	btrfs_free_path(fixup_path);
	return ret;
}

/*
 * directory replay has two parts.  There are the standard directory
 * items in the log copied from the subvolume, and range items
 * created in the log while the subvolume was logged.
 *
 * The range items tell us which parts of the key space the log
 * is authoritative for.  During replay, if a key in the subvolume
 * directory is in a logged range item, but not actually in the log
 * that means it was deleted from the directory before the fsync
 * and should be removed.
 */
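
/*
 * Hypothetical example (not from the original file): suppose the log has
 * a dir log item with key (256 DIR_LOG_INDEX 3) whose btrfs_dir_log_end()
 * is 7.  The log is then authoritative for dir index keys 3..7 of
 * directory 256, so during replay:
 *
 *	index 5 in subvolume, also in log	->  kept
 *	index 6 in subvolume, missing from log	->  unlinked
 *	index 9 in subvolume			->  outside the range,
 *						    left alone by this pass
 */
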
static noinline int find_dir_range(struct btrfs_root *root,
				   struct btrfs_path *path,
				   u64 dirid, int key_type,
				   u64 *start_ret, u64 *end_ret)
{
	struct btrfs_key key;
	u64 found_end;
	struct btrfs_dir_log_item *item;
	int ret;
	int nritems;

	if (*start_ret == (u64)-1)
		return 1;

	key.objectid = dirid;
	key.type = key_type;
	key.offset = *start_ret;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		if (path->slots[0] == 0)
			goto out;
		path->slots[0]--;
	}
	if (ret != 0)
		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);

	if (key.type != key_type || key.objectid != dirid) {
		ret = 1;
		goto next;
	}
	item = btrfs_item_ptr(path->nodes[0], path->slots[0],
			      struct btrfs_dir_log_item);
	found_end = btrfs_dir_log_end(path->nodes[0], item);

	if (*start_ret >= key.offset && *start_ret <= found_end) {
		ret = 0;
		*start_ret = key.offset;
		*end_ret = found_end;
		goto out;
	}
	ret = 1;
next:
	/* check the next slot in the tree to see if it is a valid item */
	nritems = btrfs_header_nritems(path->nodes[0]);
	path->slots[0]++;
	if (path->slots[0] >= nritems) {
		ret = btrfs_next_leaf(root, path);
		if (ret)
			goto out;
	}

	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);

	if (key.type != key_type || key.objectid != dirid) {
		ret = 1;
		goto out;
	}
	item = btrfs_item_ptr(path->nodes[0], path->slots[0],
			      struct btrfs_dir_log_item);
	found_end = btrfs_dir_log_end(path->nodes[0], item);
	*start_ret = key.offset;
	*end_ret = found_end;
	ret = 0;
out:
	btrfs_release_path(path);
	return ret;
}

/*
 * this looks for a given directory item in the log.  If the directory
 * item is not in the log, the item is removed and the inode it points
 * to is unlinked
 */
static noinline int check_item_in_log(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_root *log,
				      struct btrfs_path *path,
				      struct btrfs_path *log_path,
				      struct inode *dir,
				      struct btrfs_key *dir_key)
{
	int ret;
	struct extent_buffer *eb;
	int slot;
	u32 item_size;
	struct btrfs_dir_item *di;
	struct btrfs_dir_item *log_di;
	int name_len;
	unsigned long ptr;
	unsigned long ptr_end;
	char *name;
	struct inode *inode;
	struct btrfs_key location;

again:
	eb = path->nodes[0];
	slot = path->slots[0];
	item_size = btrfs_item_size_nr(eb, slot);
	ptr = btrfs_item_ptr_offset(eb, slot);
	ptr_end = ptr + item_size;
	while (ptr < ptr_end) {
		di = (struct btrfs_dir_item *)ptr;
		name_len = btrfs_dir_name_len(eb, di);
		name = kmalloc(name_len, GFP_NOFS);
		if (!name) {
			ret = -ENOMEM;
			goto out;
		}
		read_extent_buffer(eb, name, (unsigned long)(di + 1),
				   name_len);
		log_di = NULL;
		if (log && dir_key->type == BTRFS_DIR_ITEM_KEY) {
			log_di = btrfs_lookup_dir_item(trans, log, log_path,
						       dir_key->objectid,
						       name, name_len, 0);
		} else if (log && dir_key->type == BTRFS_DIR_INDEX_KEY) {
			log_di = btrfs_lookup_dir_index_item(trans, log,
						     log_path,
						     dir_key->objectid,
						     dir_key->offset,
						     name, name_len, 0);
		}
		if (!log_di || log_di == ERR_PTR(-ENOENT)) {
			btrfs_dir_item_key_to_cpu(eb, di, &location);
			btrfs_release_path(path);
			btrfs_release_path(log_path);
			inode = read_one_inode(root, location.objectid);
			if (!inode) {
				kfree(name);
				return -EIO;
			}

			ret = link_to_fixup_dir(trans, root,
						path, location.objectid);
			if (ret) {
				kfree(name);
				iput(inode);
				goto out;
			}

			inc_nlink(inode);
			ret = btrfs_unlink_inode(trans, root, BTRFS_I(dir),
						 BTRFS_I(inode), name, name_len);
			if (!ret)
				ret = btrfs_run_delayed_items(trans);
			kfree(name);
			iput(inode);
			if (ret)
				goto out;

			/* there might still be more names under this key
			 * check and repeat if required
			 */
			ret = btrfs_search_slot(NULL, root, dir_key, path,
						0, 0);
			if (ret == 0)
				goto again;
			ret = 0;
			goto out;
		} else if (IS_ERR(log_di)) {
			kfree(name);
			return PTR_ERR(log_di);
		}
		btrfs_release_path(log_path);
		kfree(name);

		ptr = (unsigned long)(di + 1);
		ptr += name_len;
	}
	ret = 0;
out:
	btrfs_release_path(path);
	btrfs_release_path(log_path);
	return ret;
}

static int replay_xattr_deletes(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_root *log,
				struct btrfs_path *path,
				const u64 ino)
{
	struct btrfs_key search_key;
	struct btrfs_path *log_path;
	int i;
	int nritems;
	int ret;

	log_path = btrfs_alloc_path();
	if (!log_path)
		return -ENOMEM;

	search_key.objectid = ino;
	search_key.type = BTRFS_XATTR_ITEM_KEY;
	search_key.offset = 0;
again:
	ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
	if (ret < 0)
		goto out;
process_leaf:
	nritems = btrfs_header_nritems(path->nodes[0]);
	for (i = path->slots[0]; i < nritems; i++) {
		struct btrfs_key key;
		struct btrfs_dir_item *di;
		struct btrfs_dir_item *log_di;
		u32 total_size;
		u32 cur;

		btrfs_item_key_to_cpu(path->nodes[0], &key, i);
		if (key.objectid != ino || key.type != BTRFS_XATTR_ITEM_KEY) {
			ret = 0;
			goto out;
		}

		di = btrfs_item_ptr(path->nodes[0], i, struct btrfs_dir_item);
		total_size = btrfs_item_size_nr(path->nodes[0], i);
		cur = 0;
		while (cur < total_size) {
			u16 name_len = btrfs_dir_name_len(path->nodes[0], di);
			u16 data_len = btrfs_dir_data_len(path->nodes[0], di);
			u32 this_len = sizeof(*di) + name_len + data_len;
			char *name;

			name = kmalloc(name_len, GFP_NOFS);
			if (!name) {
				ret = -ENOMEM;
				goto out;
			}
			read_extent_buffer(path->nodes[0], name,
					   (unsigned long)(di + 1), name_len);

			log_di = btrfs_lookup_xattr(NULL, log, log_path, ino,
						    name, name_len, 0);
			btrfs_release_path(log_path);
			if (!log_di) {
				/* Doesn't exist in log tree, so delete it. */
				btrfs_release_path(path);
				di = btrfs_lookup_xattr(trans, root, path, ino,
							name, name_len, -1);
				kfree(name);
				if (IS_ERR(di)) {
					ret = PTR_ERR(di);
					goto out;
				}
				ASSERT(di);
				ret = btrfs_delete_one_dir_name(trans, root,
								path, di);
				if (ret)
					goto out;
				btrfs_release_path(path);
				search_key = key;
				goto again;
			}
			kfree(name);
			if (IS_ERR(log_di)) {
				ret = PTR_ERR(log_di);
				goto out;
			}
			cur += this_len;
			di = (struct btrfs_dir_item *)((char *)di + this_len);
		}
	}
	ret = btrfs_next_leaf(root, path);
	if (ret > 0)
		ret = 0;
	else if (ret == 0)
		goto process_leaf;
out:
	btrfs_free_path(log_path);
	btrfs_release_path(path);
	return ret;
}

/*
 * deletion replay happens before we copy any new directory items
 * out of the log or out of backreferences from inodes.  It
 * scans the log to find ranges of keys that log is authoritative for,
 * and then scans the directory to find items in those ranges that are
 * not present in the log.
 *
 * Anything we don't find in the log is unlinked and removed from the
 * directory.
 */
static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct btrfs_root *log,
				       struct btrfs_path *path,
				       u64 dirid, int del_all)
{
	u64 range_start;
	u64 range_end;
	int key_type = BTRFS_DIR_LOG_ITEM_KEY;
	int ret = 0;
	struct btrfs_key dir_key;
	struct btrfs_key found_key;
	struct btrfs_path *log_path;
	struct inode *dir;

	dir_key.objectid = dirid;
	dir_key.type = BTRFS_DIR_ITEM_KEY;
	log_path = btrfs_alloc_path();
	if (!log_path)
		return -ENOMEM;

	dir = read_one_inode(root, dirid);
	/* it isn't an error if the inode isn't there, that can happen
	 * because we replay the deletes before we copy in the inode item
	 * from the log
	 */
	if (!dir) {
		btrfs_free_path(log_path);
		return 0;
	}
again:
	range_start = 0;
	range_end = 0;
	while (1) {
		if (del_all)
			range_end = (u64)-1;
		else {
			ret = find_dir_range(log, path, dirid, key_type,
					     &range_start, &range_end);
			if (ret != 0)
				break;
		}

		dir_key.offset = range_start;
		while (1) {
			int nritems;
			ret = btrfs_search_slot(NULL, root, &dir_key, path,
						0, 0);
			if (ret < 0)
				goto out;

			nritems = btrfs_header_nritems(path->nodes[0]);
			if (path->slots[0] >= nritems) {
				ret = btrfs_next_leaf(root, path);
				if (ret == 1)
					break;
				else if (ret < 0)
					goto out;
			}
			btrfs_item_key_to_cpu(path->nodes[0], &found_key,
					      path->slots[0]);
			if (found_key.objectid != dirid ||
			    found_key.type != dir_key.type)
				goto next_type;

			if (found_key.offset > range_end)
				break;

			ret = check_item_in_log(trans, root, log, path,
						log_path, dir,
						&found_key);
			if (ret)
				goto out;
			if (found_key.offset == (u64)-1)
				break;
			dir_key.offset = found_key.offset + 1;
		}
		btrfs_release_path(path);
		if (range_end == (u64)-1)
			break;
		range_start = range_end + 1;
	}

next_type:
	ret = 0;
	if (key_type == BTRFS_DIR_LOG_ITEM_KEY) {
		key_type = BTRFS_DIR_LOG_INDEX_KEY;
		dir_key.type = BTRFS_DIR_INDEX_KEY;
		btrfs_release_path(path);
		goto again;
	}
out:
	btrfs_release_path(path);
	btrfs_free_path(log_path);
	iput(dir);
	return ret;
}

/*
 * the process_func used to replay items from the log tree.  This
 * gets called in two different stages.  The first stage just looks
 * for inodes and makes sure they are all copied into the subvolume.
 *
 * The second stage copies all the other item types from the log into
 * the subvolume.  The two stage approach is slower, but gets rid of
 * lots of complexity around inodes referencing other inodes that exist
 * only in the log (references come from either directory items or inode
 * back refs).
 */
static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
			     struct walk_control *wc, u64 gen, int level)
{
	int nritems;
	struct btrfs_path *path;
	struct btrfs_root *root = wc->replay_dest;
	struct btrfs_key key;
	int i;
	int ret;

	ret = btrfs_read_buffer(eb, gen, level, NULL);
	if (ret)
		return ret;

	level = btrfs_header_level(eb);

	if (level != 0)
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	nritems = btrfs_header_nritems(eb);
	for (i = 0; i < nritems; i++) {
		btrfs_item_key_to_cpu(eb, &key, i);

		/* inode keys are done during the first stage */
		if (key.type == BTRFS_INODE_ITEM_KEY &&
		    wc->stage == LOG_WALK_REPLAY_INODES) {
			struct btrfs_inode_item *inode_item;
			u32 mode;

			inode_item = btrfs_item_ptr(eb, i,
					    struct btrfs_inode_item);
			/*
			 * If we have a tmpfile (O_TMPFILE) that got fsync'ed
			 * and never got linked before the fsync, skip it, as
			 * replaying it is pointless since it would be deleted
			 * later. We skip logging tmpfiles, but it's always
			 * possible we are replaying a log created with a kernel
			 * that used to log tmpfiles.
			 */
			if (btrfs_inode_nlink(eb, inode_item) == 0) {
				wc->ignore_cur_inode = true;
				continue;
			} else {
				wc->ignore_cur_inode = false;
			}
			ret = replay_xattr_deletes(wc->trans, root, log,
						   path, key.objectid);
			if (ret)
				break;
			mode = btrfs_inode_mode(eb, inode_item);
			if (S_ISDIR(mode)) {
				ret = replay_dir_deletes(wc->trans,
					 root, log, path, key.objectid, 0);
				if (ret)
					break;
			}
			ret = overwrite_item(wc->trans, root, path,
					     eb, i, &key);
			if (ret)
				break;

			/*
			 * Before replaying extents, truncate the inode to its
			 * size. We need to do it now and not after log replay
			 * because before an fsync we can have prealloc extents
			 * added beyond the inode's i_size. If we did it after,
			 * through orphan cleanup for example, we would drop
			 * those prealloc extents just after replaying them.
			 */
			if (S_ISREG(mode)) {
				struct inode *inode;
				u64 from;

				inode = read_one_inode(root, key.objectid);
				if (!inode) {
					ret = -EIO;
					break;
				}
				from = ALIGN(i_size_read(inode),
					     root->fs_info->sectorsize);
				ret = btrfs_drop_extents(wc->trans, root, inode,
							 from, (u64)-1, 1);
				if (!ret) {
					/* Update the inode's nbytes. */
					ret = btrfs_update_inode(wc->trans,
								 root, inode);
				}
				iput(inode);
				if (ret)
					break;
			}

			ret = link_to_fixup_dir(wc->trans, root,
						path, key.objectid);
			if (ret)
				break;
		}

		if (wc->ignore_cur_inode)
			continue;

		if (key.type == BTRFS_DIR_INDEX_KEY &&
		    wc->stage == LOG_WALK_REPLAY_DIR_INDEX) {
			ret = replay_one_dir_item(wc->trans, root, path,
						  eb, i, &key);
			if (ret)
				break;
		}

		if (wc->stage < LOG_WALK_REPLAY_ALL)
			continue;

		/* these keys are simply copied */
		if (key.type == BTRFS_XATTR_ITEM_KEY) {
			ret = overwrite_item(wc->trans, root, path,
					     eb, i, &key);
			if (ret)
				break;
		} else if (key.type == BTRFS_INODE_REF_KEY ||
			   key.type == BTRFS_INODE_EXTREF_KEY) {
			ret = add_inode_ref(wc->trans, root, log, path,
					    eb, i, &key);
			if (ret && ret != -ENOENT)
				break;
			ret = 0;
		} else if (key.type == BTRFS_EXTENT_DATA_KEY) {
			ret = replay_one_extent(wc->trans, root, path,
						eb, i, &key);
			if (ret)
				break;
		} else if (key.type == BTRFS_DIR_ITEM_KEY) {
			ret = replay_one_dir_item(wc->trans, root, path,
						  eb, i, &key);
			if (ret)
				break;
		}
	}
	btrfs_free_path(path);
	return ret;
}

2662 static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
2663 struct btrfs_root *root,
2664 struct btrfs_path *path, int *level,
2665 struct walk_control *wc)
2667 struct btrfs_fs_info *fs_info = root->fs_info;
2671 struct extent_buffer *next;
2672 struct extent_buffer *cur;
2673 struct extent_buffer *parent;
2677 WARN_ON(*level < 0);
2678 WARN_ON(*level >= BTRFS_MAX_LEVEL);
2680 while (*level > 0) {
2681 struct btrfs_key first_key;
2683 WARN_ON(*level < 0);
2684 WARN_ON(*level >= BTRFS_MAX_LEVEL);
2685 cur = path->nodes[*level];
2687 WARN_ON(btrfs_header_level(cur) != *level);
2689 if (path->slots[*level] >=
2690 btrfs_header_nritems(cur))
2693 bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
2694 ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
2695 btrfs_node_key_to_cpu(cur, &first_key, path->slots[*level]);
2696 blocksize = fs_info->nodesize;
2698 parent = path->nodes[*level];
2699 root_owner = btrfs_header_owner(parent);
2701 next = btrfs_find_create_tree_block(fs_info, bytenr);
2703 return PTR_ERR(next);
2706 ret = wc->process_func(root, next, wc, ptr_gen,
2709 free_extent_buffer(next);
2713 path->slots[*level]++;
2715 ret = btrfs_read_buffer(next, ptr_gen,
2716 *level - 1, &first_key);
2718 free_extent_buffer(next);
2723 btrfs_tree_lock(next);
2724 btrfs_set_lock_blocking_write(next);
2725 btrfs_clean_tree_block(next);
2726 btrfs_wait_tree_block_writeback(next);
2727 btrfs_tree_unlock(next);
2729 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
2730 clear_extent_buffer_dirty(next);
2733 WARN_ON(root_owner !=
2734 BTRFS_TREE_LOG_OBJECTID);
2735 ret = btrfs_free_and_pin_reserved_extent(
2739 free_extent_buffer(next);
2743 free_extent_buffer(next);
2746 ret = btrfs_read_buffer(next, ptr_gen, *level - 1, &first_key);
2748 free_extent_buffer(next);
2752 WARN_ON(*level <= 0);
2753 if (path->nodes[*level-1])
2754 free_extent_buffer(path->nodes[*level-1]);
2755 path->nodes[*level-1] = next;
2756 *level = btrfs_header_level(next);
2757 path->slots[*level] = 0;
2760 WARN_ON(*level < 0);
2761 WARN_ON(*level >= BTRFS_MAX_LEVEL);
2763 path->slots[*level] = btrfs_header_nritems(path->nodes[*level]);
2769 static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans,
2770 struct btrfs_root *root,
2771 struct btrfs_path *path, int *level,
2772 struct walk_control *wc)
2774 struct btrfs_fs_info *fs_info = root->fs_info;
2780 for (i = *level; i < BTRFS_MAX_LEVEL - 1 && path->nodes[i]; i++) {
2781 slot = path->slots[i];
2782 if (slot + 1 < btrfs_header_nritems(path->nodes[i])) {
2785 WARN_ON(*level == 0);
2788 struct extent_buffer *parent;
2789 if (path->nodes[*level] == root->node)
2790 parent = path->nodes[*level];
2792 parent = path->nodes[*level + 1];
2794 root_owner = btrfs_header_owner(parent);
2795 ret = wc->process_func(root, path->nodes[*level], wc,
2796 btrfs_header_generation(path->nodes[*level]),
2802 struct extent_buffer *next;
2804 next = path->nodes[*level];
2807 btrfs_tree_lock(next);
2808 btrfs_set_lock_blocking_write(next);
2809 btrfs_clean_tree_block(next);
2810 btrfs_wait_tree_block_writeback(next);
2811 btrfs_tree_unlock(next);
2813 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
2814 clear_extent_buffer_dirty(next);
2817 WARN_ON(root_owner != BTRFS_TREE_LOG_OBJECTID);
2818 ret = btrfs_free_and_pin_reserved_extent(
2820 path->nodes[*level]->start,
2821 path->nodes[*level]->len);
2825 free_extent_buffer(path->nodes[*level]);
2826 path->nodes[*level] = NULL;
2834 * drop the reference count on the tree rooted at 'log'. This traverses
2835 * the tree freeing any blocks that have a ref count of zero after being
2838 static int walk_log_tree(struct btrfs_trans_handle *trans,
2839 struct btrfs_root *log, struct walk_control *wc)
2841 struct btrfs_fs_info *fs_info = log->fs_info;
2845 struct btrfs_path *path;
2848 path = btrfs_alloc_path();
2852 level = btrfs_header_level(log->node);
2854 path->nodes[level] = log->node;
2855 atomic_inc(&log->node->refs);
2856 path->slots[level] = 0;
2859 wret = walk_down_log_tree(trans, log, path, &level, wc);
2867 wret = walk_up_log_tree(trans, log, path, &level, wc);
2876 /* was the root node processed? if not, catch it here */
2877 if (path->nodes[orig_level]) {
2878 ret = wc->process_func(log, path->nodes[orig_level], wc,
2879 btrfs_header_generation(path->nodes[orig_level]),
2884 struct extent_buffer *next;
2886 next = path->nodes[orig_level];
2889 btrfs_tree_lock(next);
2890 btrfs_set_lock_blocking_write(next);
2891 btrfs_clean_tree_block(next);
2892 btrfs_wait_tree_block_writeback(next);
2893 btrfs_tree_unlock(next);
2895 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
2896 clear_extent_buffer_dirty(next);
2899 WARN_ON(log->root_key.objectid !=
2900 BTRFS_TREE_LOG_OBJECTID);
2901 ret = btrfs_free_and_pin_reserved_extent(fs_info,
2902 next->start, next->len);
2909 btrfs_free_path(path);
2914 * helper function to update the item for a given subvolume's log root
2915 * in the tree of log roots
2917 static int update_log_root(struct btrfs_trans_handle *trans,
2918 struct btrfs_root *log,
2919 struct btrfs_root_item *root_item)
2921 struct btrfs_fs_info *fs_info = log->fs_info;
2924 if (log->log_transid == 1) {
2925 /* insert root item on the first sync */
2926 ret = btrfs_insert_root(trans, fs_info->log_root_tree,
2927 &log->root_key, root_item);
2929 ret = btrfs_update_root(trans, fs_info->log_root_tree,
2930 &log->root_key, root_item);
2935 static void wait_log_commit(struct btrfs_root *root, int transid)
2938 int index = transid % 2;
2941 * we only allow two pending log transactions at a time,
2942 * so we know that if ours is more than 2 older than the
2943 * current transaction, we're done
2946 prepare_to_wait(&root->log_commit_wait[index],
2947 &wait, TASK_UNINTERRUPTIBLE);
2949 if (!(root->log_transid_committed < transid &&
2950 atomic_read(&root->log_commit[index])))
2953 mutex_unlock(&root->log_mutex);
2955 mutex_lock(&root->log_mutex);
2957 finish_wait(&root->log_commit_wait[index], &wait);
2960 static void wait_for_writer(struct btrfs_root *root)
2965 prepare_to_wait(&root->log_writer_wait, &wait,
2966 TASK_UNINTERRUPTIBLE);
2967 if (!atomic_read(&root->log_writers))
2970 mutex_unlock(&root->log_mutex);
2972 mutex_lock(&root->log_mutex);
2974 finish_wait(&root->log_writer_wait, &wait);
2977 static inline void btrfs_remove_log_ctx(struct btrfs_root *root,
2978 struct btrfs_log_ctx *ctx)
2983 mutex_lock(&root->log_mutex);
2984 list_del_init(&ctx->list);
2985 mutex_unlock(&root->log_mutex);
2989 * Invoked in log mutex context, or the caller must be sure there is no other task which
2990 * can access the list.
2992 static inline void btrfs_remove_all_log_ctxs(struct btrfs_root *root,
2993 int index, int error)
2995 struct btrfs_log_ctx *ctx;
2996 struct btrfs_log_ctx *safe;
2998 list_for_each_entry_safe(ctx, safe, &root->log_ctxs[index], list) {
2999 list_del_init(&ctx->list);
3000 ctx->log_ret = error;
3003 INIT_LIST_HEAD(&root->log_ctxs[index]);
3007 * btrfs_sync_log sends a given tree log down to the disk and
3008 * updates the super blocks to record it. When this call is done,
3009 * you know that any inodes previously logged are safely on disk only
3012 * Any other return value means you need to call btrfs_commit_transaction.
3013 * Some of the edge cases for fsyncing directories that have had unlinks
3014 * or renames done in the past mean that sometimes the only safe
3015 * fsync is to commit the whole FS. When btrfs_sync_log returns -EAGAIN,
3016 * that has happened.
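/*
 * A rough sketch (simplified, not verbatim from the fsync path) of how a
 * caller reacts to the return value:
 *
 *	ret = btrfs_sync_log(trans, root, ctx);
 *	if (ret == 0)
 *		ret = btrfs_end_transaction(trans);    <- log is safely on disk
 *	else
 *		ret = btrfs_commit_transaction(trans); <- fall back to a full commit
 */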
3018 int btrfs_sync_log(struct btrfs_trans_handle *trans,
3019 struct btrfs_root *root, struct btrfs_log_ctx *ctx)
3025 struct btrfs_fs_info *fs_info = root->fs_info;
3026 struct btrfs_root *log = root->log_root;
3027 struct btrfs_root *log_root_tree = fs_info->log_root_tree;
3028 struct btrfs_root_item new_root_item;
3029 int log_transid = 0;
3030 struct btrfs_log_ctx root_log_ctx;
3031 struct blk_plug plug;
3033 mutex_lock(&root->log_mutex);
3034 log_transid = ctx->log_transid;
3035 if (root->log_transid_committed >= log_transid) {
3036 mutex_unlock(&root->log_mutex);
3037 return ctx->log_ret;
3040 index1 = log_transid % 2;
3041 if (atomic_read(&root->log_commit[index1])) {
3042 wait_log_commit(root, log_transid);
3043 mutex_unlock(&root->log_mutex);
3044 return ctx->log_ret;
3046 ASSERT(log_transid == root->log_transid);
3047 atomic_set(&root->log_commit[index1], 1);
3049 /* wait for previous tree log sync to complete */
3050 if (atomic_read(&root->log_commit[(index1 + 1) % 2]))
3051 wait_log_commit(root, log_transid - 1);
3054 int batch = atomic_read(&root->log_batch);
3055 /* when we're on an ssd, just kick the log commit out */
3056 if (!btrfs_test_opt(fs_info, SSD) &&
3057 test_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state)) {
3058 mutex_unlock(&root->log_mutex);
3059 schedule_timeout_uninterruptible(1);
3060 mutex_lock(&root->log_mutex);
3062 wait_for_writer(root);
3063 if (batch == atomic_read(&root->log_batch))
3067 /* bail out if we need to do a full commit */
3068 if (btrfs_need_log_full_commit(trans)) {
3070 mutex_unlock(&root->log_mutex);
3074 if (log_transid % 2 == 0)
3075 mark = EXTENT_DIRTY;
3079 /* we start IO on all the marked extents here, but we don't actually
3080 * wait for them until later.
3082 blk_start_plug(&plug);
3083 ret = btrfs_write_marked_extents(fs_info, &log->dirty_log_pages, mark);
3085 blk_finish_plug(&plug);
3086 btrfs_abort_transaction(trans, ret);
3087 btrfs_set_log_full_commit(trans);
3088 mutex_unlock(&root->log_mutex);
3093 * We _must_ update under the root->log_mutex in order to make sure we
3094 * have a consistent view of the log root we are trying to commit at
3097 * We _must_ copy this into a local copy, because we are not holding the
3098 * log_root_tree->log_mutex yet. This is important because when we
3099 * commit the log_root_tree we must have a consistent view of the
3100 * log_root_tree when we update the super block to point at the
3101 * log_root_tree bytenr. If we update the log_root_tree here we'll race
3102 * with the commit and possibly point at the new block which we may not
3105 btrfs_set_root_node(&log->root_item, log->node);
3106 memcpy(&new_root_item, &log->root_item, sizeof(new_root_item));
3108 root->log_transid++;
3109 log->log_transid = root->log_transid;
3110 root->log_start_pid = 0;
3112 * IO has been started, blocks of the log tree have WRITTEN flag set
3113 * in their headers. New modifications of the log will be written to
3114 * new positions, so it's safe to allow log writers to go in.
3116 mutex_unlock(&root->log_mutex);
3118 btrfs_init_log_ctx(&root_log_ctx, NULL);
3120 mutex_lock(&log_root_tree->log_mutex);
3121 atomic_inc(&log_root_tree->log_batch);
3122 atomic_inc(&log_root_tree->log_writers);
3124 index2 = log_root_tree->log_transid % 2;
3125 list_add_tail(&root_log_ctx.list, &log_root_tree->log_ctxs[index2]);
3126 root_log_ctx.log_transid = log_root_tree->log_transid;
3128 mutex_unlock(&log_root_tree->log_mutex);
3130 mutex_lock(&log_root_tree->log_mutex);
3133 * Now we are safe to update the log_root_tree because we're under the
3134 * log_mutex, and we're a current writer so we're holding the commit
3135 * open until we drop the log_mutex.
3137 ret = update_log_root(trans, log, &new_root_item);
3139 if (atomic_dec_and_test(&log_root_tree->log_writers)) {
3140 /* atomic_dec_and_test implies a barrier */
3141 cond_wake_up_nomb(&log_root_tree->log_writer_wait);
3145 if (!list_empty(&root_log_ctx.list))
3146 list_del_init(&root_log_ctx.list);
3148 blk_finish_plug(&plug);
3149 btrfs_set_log_full_commit(trans);
3151 if (ret != -ENOSPC) {
3152 btrfs_abort_transaction(trans, ret);
3153 mutex_unlock(&log_root_tree->log_mutex);
3156 btrfs_wait_tree_log_extents(log, mark);
3157 mutex_unlock(&log_root_tree->log_mutex);
3162 if (log_root_tree->log_transid_committed >= root_log_ctx.log_transid) {
3163 blk_finish_plug(&plug);
3164 list_del_init(&root_log_ctx.list);
3165 mutex_unlock(&log_root_tree->log_mutex);
3166 ret = root_log_ctx.log_ret;
3170 index2 = root_log_ctx.log_transid % 2;
3171 if (atomic_read(&log_root_tree->log_commit[index2])) {
3172 blk_finish_plug(&plug);
3173 ret = btrfs_wait_tree_log_extents(log, mark);
3174 wait_log_commit(log_root_tree,
3175 root_log_ctx.log_transid);
3176 mutex_unlock(&log_root_tree->log_mutex);
3178 ret = root_log_ctx.log_ret;
3181 ASSERT(root_log_ctx.log_transid == log_root_tree->log_transid);
3182 atomic_set(&log_root_tree->log_commit[index2], 1);
3184 if (atomic_read(&log_root_tree->log_commit[(index2 + 1) % 2])) {
3185 wait_log_commit(log_root_tree,
3186 root_log_ctx.log_transid - 1);
3189 wait_for_writer(log_root_tree);
3192 * now that we've moved on to the tree of log tree roots,
3193 * check the full commit flag again
3195 if (btrfs_need_log_full_commit(trans)) {
3196 blk_finish_plug(&plug);
3197 btrfs_wait_tree_log_extents(log, mark);
3198 mutex_unlock(&log_root_tree->log_mutex);
3200 goto out_wake_log_root;
3203 ret = btrfs_write_marked_extents(fs_info,
3204 &log_root_tree->dirty_log_pages,
3205 EXTENT_DIRTY | EXTENT_NEW);
3206 blk_finish_plug(&plug);
3208 btrfs_set_log_full_commit(trans);
3209 btrfs_abort_transaction(trans, ret);
3210 mutex_unlock(&log_root_tree->log_mutex);
3211 goto out_wake_log_root;
3213 ret = btrfs_wait_tree_log_extents(log, mark);
3215 ret = btrfs_wait_tree_log_extents(log_root_tree,
3216 EXTENT_NEW | EXTENT_DIRTY);
3218 btrfs_set_log_full_commit(trans);
3219 mutex_unlock(&log_root_tree->log_mutex);
3220 goto out_wake_log_root;
3223 btrfs_set_super_log_root(fs_info->super_for_commit,
3224 log_root_tree->node->start);
3225 btrfs_set_super_log_root_level(fs_info->super_for_commit,
3226 btrfs_header_level(log_root_tree->node));
3228 log_root_tree->log_transid++;
3229 mutex_unlock(&log_root_tree->log_mutex);
3232 * Nobody else is going to jump in and write the ctree
3233 * super here because the log_commit atomic below is protecting
3234 * us. We must be called with a transaction handle pinning
3235 * the running transaction open, so a full commit can't hop
3236 * in and cause problems either.
3238 ret = write_all_supers(fs_info, 1);
3240 btrfs_set_log_full_commit(trans);
3241 btrfs_abort_transaction(trans, ret);
3242 goto out_wake_log_root;
3245 mutex_lock(&root->log_mutex);
3246 if (root->last_log_commit < log_transid)
3247 root->last_log_commit = log_transid;
3248 mutex_unlock(&root->log_mutex);
3251 mutex_lock(&log_root_tree->log_mutex);
3252 btrfs_remove_all_log_ctxs(log_root_tree, index2, ret);
3254 log_root_tree->log_transid_committed++;
3255 atomic_set(&log_root_tree->log_commit[index2], 0);
3256 mutex_unlock(&log_root_tree->log_mutex);
3259 * The barrier before waitqueue_active (in cond_wake_up) is needed so
3260 * all the updates above are seen by the woken threads. It might not be
3261 * necessary, but proving that seems to be hard.
3263 cond_wake_up(&log_root_tree->log_commit_wait[index2]);
3265 mutex_lock(&root->log_mutex);
3266 btrfs_remove_all_log_ctxs(root, index1, ret);
3267 root->log_transid_committed++;
3268 atomic_set(&root->log_commit[index1], 0);
3269 mutex_unlock(&root->log_mutex);
3272 * The barrier before waitqueue_active (in cond_wake_up) is needed so
3273 * all the updates above are seen by the woken threads. It might not be
3274 * necessary, but proving that seems to be hard.
3276 cond_wake_up(&root->log_commit_wait[index1]);
3280 static void free_log_tree(struct btrfs_trans_handle *trans,
3281 struct btrfs_root *log)
3284 struct walk_control wc = {
3286 .process_func = process_one_buffer
3289 ret = walk_log_tree(trans, log, &wc);
3292 btrfs_abort_transaction(trans, ret);
3294 btrfs_handle_fs_error(log->fs_info, ret, NULL);
3297 clear_extent_bits(&log->dirty_log_pages, 0, (u64)-1,
3298 EXTENT_DIRTY | EXTENT_NEW | EXTENT_NEED_WAIT);
3299 free_extent_buffer(log->node);
3304 * free all the extents used by the tree log. This should be called
3305 * at commit time of the full transaction
3307 int btrfs_free_log(struct btrfs_trans_handle *trans, struct btrfs_root *root)
3309 if (root->log_root) {
3310 free_log_tree(trans, root->log_root);
3311 root->log_root = NULL;
3316 int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans,
3317 struct btrfs_fs_info *fs_info)
3319 if (fs_info->log_root_tree) {
3320 free_log_tree(trans, fs_info->log_root_tree);
3321 fs_info->log_root_tree = NULL;
3327 * Check if an inode was logged in the current transaction. We can't always rely
3328 * on an inode's logged_trans value, because it's an in-memory only field and
3329 * therefore not persisted. This means that its value is lost if the inode gets
3330 * evicted and loaded again from disk (in which case it has a value of 0, and
3331 * certainly it is smaller than any possible transaction ID). When that happens,
3332 * the full_sync flag is set in the inode's runtime flags, so in that case we
3333 * assume eviction happened and ignore the logged_trans value, assuming the
3334 * worst case, that the inode was logged before in the current transaction.
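/*
 * Illustrative scenario (assumed): an inode is fsynced in transaction N,
 * then evicted and re-read from disk, and then one of its names is unlinked
 * while still in transaction N. Its logged_trans is now 0, but last_trans
 * is still N and the full_sync flag was set on load, so we conservatively
 * treat the inode as logged and keep the log up to date for the unlink.
 */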
3336 static bool inode_logged(struct btrfs_trans_handle *trans,
3337 struct btrfs_inode *inode)
3339 if (inode->logged_trans == trans->transid)
3342 if (inode->last_trans == trans->transid &&
3343 test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags) &&
3344 !test_bit(BTRFS_FS_LOG_RECOVERING, &trans->fs_info->flags))
3351 * If both a file and directory are logged, and unlinks or renames are
3352 * mixed in, we have a few interesting corners:
3354 * create file X in dir Y
3355 * link file X to X.link in dir Y
3357 * unlink file X but leave X.link
3360 * After a crash we would expect only X.link to exist. But file X
3361 * didn't get fsync'd again so the log has back refs for X and X.link.
3363 * We solve this by removing directory entries and inode backrefs from the
3364 * log when a file that was logged in the current transaction is
3365 * unlinked. Any later fsync will include the updated log entries, and
3366 * we'll be able to reconstruct the proper directory items from backrefs.
3368 * This optimization allows us to avoid relogging the entire inode
3369 * or the entire directory.
3371 int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
3372 struct btrfs_root *root,
3373 const char *name, int name_len,
3374 struct btrfs_inode *dir, u64 index)
3376 struct btrfs_root *log;
3377 struct btrfs_dir_item *di;
3378 struct btrfs_path *path;
3382 u64 dir_ino = btrfs_ino(dir);
3384 if (!inode_logged(trans, dir))
3387 ret = join_running_log_trans(root);
3391 mutex_lock(&dir->log_mutex);
3393 log = root->log_root;
3394 path = btrfs_alloc_path();
3400 di = btrfs_lookup_dir_item(trans, log, path, dir_ino,
3401 name, name_len, -1);
3407 ret = btrfs_delete_one_dir_name(trans, log, path, di);
3408 bytes_del += name_len;
3414 btrfs_release_path(path);
3415 di = btrfs_lookup_dir_index_item(trans, log, path, dir_ino,
3416 index, name, name_len, -1);
3422 ret = btrfs_delete_one_dir_name(trans, log, path, di);
3423 bytes_del += name_len;
3430 /* update the directory size in the log to reflect the names
3434 struct btrfs_key key;
3436 key.objectid = dir_ino;
3438 key.type = BTRFS_INODE_ITEM_KEY;
3439 btrfs_release_path(path);
3441 ret = btrfs_search_slot(trans, log, &key, path, 0, 1);
3447 struct btrfs_inode_item *item;
3450 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
3451 struct btrfs_inode_item);
3452 i_size = btrfs_inode_size(path->nodes[0], item);
3453 if (i_size > bytes_del)
3454 i_size -= bytes_del;
3457 btrfs_set_inode_size(path->nodes[0], item, i_size);
3458 btrfs_mark_buffer_dirty(path->nodes[0]);
3461 btrfs_release_path(path);
3464 btrfs_free_path(path);
3466 mutex_unlock(&dir->log_mutex);
3467 if (ret == -ENOSPC) {
3468 btrfs_set_log_full_commit(trans);
3471 btrfs_abort_transaction(trans, ret);
3473 btrfs_end_log_trans(root);
3478 /* see comments for btrfs_del_dir_entries_in_log */
3479 int btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans,
3480 struct btrfs_root *root,
3481 const char *name, int name_len,
3482 struct btrfs_inode *inode, u64 dirid)
3484 struct btrfs_root *log;
3488 if (!inode_logged(trans, inode))
3491 ret = join_running_log_trans(root);
3494 log = root->log_root;
3495 mutex_lock(&inode->log_mutex);
3497 ret = btrfs_del_inode_ref(trans, log, name, name_len, btrfs_ino(inode),
3499 mutex_unlock(&inode->log_mutex);
3500 if (ret == -ENOSPC) {
3501 btrfs_set_log_full_commit(trans);
3503 } else if (ret < 0 && ret != -ENOENT)
3504 btrfs_abort_transaction(trans, ret);
3505 btrfs_end_log_trans(root);
3511 * creates a range item in the log for 'dirid'. first_offset and
3512 * last_offset tell us which parts of the key space the log should
3513 * be considered authoritative for.
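/*
 * For example (hypothetical values): a BTRFS_DIR_LOG_INDEX_KEY item with
 * offset 3 and a dir_log_end of 7 tells replay that, for index keys 3
 * through 7 of this directory, any entry found in the subvolume but missing
 * from the log was deleted before the fsync and must be removed.
 */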
3515 static noinline int insert_dir_log_key(struct btrfs_trans_handle *trans,
3516 struct btrfs_root *log,
3517 struct btrfs_path *path,
3518 int key_type, u64 dirid,
3519 u64 first_offset, u64 last_offset)
3522 struct btrfs_key key;
3523 struct btrfs_dir_log_item *item;
3525 key.objectid = dirid;
3526 key.offset = first_offset;
3527 if (key_type == BTRFS_DIR_ITEM_KEY)
3528 key.type = BTRFS_DIR_LOG_ITEM_KEY;
3530 key.type = BTRFS_DIR_LOG_INDEX_KEY;
3531 ret = btrfs_insert_empty_item(trans, log, path, &key, sizeof(*item));
3535 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
3536 struct btrfs_dir_log_item);
3537 btrfs_set_dir_log_end(path->nodes[0], item, last_offset);
3538 btrfs_mark_buffer_dirty(path->nodes[0]);
3539 btrfs_release_path(path);
3544 * log all the items included in the current transaction for a given
3545 * directory. This also creates the range items in the log tree required
3546 * to replay anything deleted before the fsync
3548 static noinline int log_dir_items(struct btrfs_trans_handle *trans,
3549 struct btrfs_root *root, struct btrfs_inode *inode,
3550 struct btrfs_path *path,
3551 struct btrfs_path *dst_path, int key_type,
3552 struct btrfs_log_ctx *ctx,
3553 u64 min_offset, u64 *last_offset_ret)
3555 struct btrfs_key min_key;
3556 struct btrfs_root *log = root->log_root;
3557 struct extent_buffer *src;
3562 u64 first_offset = min_offset;
3563 u64 last_offset = (u64)-1;
3564 u64 ino = btrfs_ino(inode);
3566 log = root->log_root;
3568 min_key.objectid = ino;
3569 min_key.type = key_type;
3570 min_key.offset = min_offset;
3572 ret = btrfs_search_forward(root, &min_key, path, trans->transid);
3575 * we didn't find anything from this transaction, see if there
3576 * is anything at all
3578 if (ret != 0 || min_key.objectid != ino || min_key.type != key_type) {
3579 min_key.objectid = ino;
3580 min_key.type = key_type;
3581 min_key.offset = (u64)-1;
3582 btrfs_release_path(path);
3583 ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
3585 btrfs_release_path(path);
3588 ret = btrfs_previous_item(root, path, ino, key_type);
3590 /* if ret == 0 there are items for this type,
3591 * create a range to tell us the last key of this type.
3592 * otherwise, there are no items in this directory after
3593 * *min_offset, and we create a range to indicate that.
3596 struct btrfs_key tmp;
3597 btrfs_item_key_to_cpu(path->nodes[0], &tmp,
3599 if (key_type == tmp.type)
3600 first_offset = max(min_offset, tmp.offset) + 1;
3605 /* go backward to find any previous key */
3606 ret = btrfs_previous_item(root, path, ino, key_type);
3608 struct btrfs_key tmp;
3609 btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
3610 if (key_type == tmp.type) {
3611 first_offset = tmp.offset;
3612 ret = overwrite_item(trans, log, dst_path,
3613 path->nodes[0], path->slots[0],
3621 btrfs_release_path(path);
3624 * Find the first key from this transaction again. See the note for
3625 * log_new_dir_dentries: if we're logging a directory recursively, we
3626 * won't be holding its i_mutex, which means the directory can be modified
3627 * while we're logging it. If an entry is removed between our first
3628 * search and this search, we won't find the key again and can just
3631 ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
3636 * we have a block from this transaction, log every item in it
3637 * from our directory
3640 struct btrfs_key tmp;
3641 src = path->nodes[0];
3642 nritems = btrfs_header_nritems(src);
3643 for (i = path->slots[0]; i < nritems; i++) {
3644 struct btrfs_dir_item *di;
3646 btrfs_item_key_to_cpu(src, &min_key, i);
3648 if (min_key.objectid != ino || min_key.type != key_type)
3650 ret = overwrite_item(trans, log, dst_path, src, i,
3658 * We must make sure that when we log a directory entry,
3659 * the corresponding inode, after log replay, has a
3660 * matching link count. For example:
3666 * xfs_io -c "fsync" mydir
3668 * <mount fs and log replay>
3670 * Would result in an fsync log that, when replayed, leaves our
3671 * file inode with a link count of 1, but with
3672 * two directory entries pointing to the same inode.
3673 * After removing one of the names, it would not be
3674 * possible to remove the other name, which always resulted
3675 * in stale file handle errors, and it would not
3676 * be possible to rmdir the parent directory, since
3677 * its i_size could never decrement to the value
3678 * BTRFS_EMPTY_DIR_SIZE, resulting in -ENOTEMPTY errors.
3680 di = btrfs_item_ptr(src, i, struct btrfs_dir_item);
3681 btrfs_dir_item_key_to_cpu(src, di, &tmp);
3683 (btrfs_dir_transid(src, di) == trans->transid ||
3684 btrfs_dir_type(src, di) == BTRFS_FT_DIR) &&
3685 tmp.type != BTRFS_ROOT_ITEM_KEY)
3686 ctx->log_new_dentries = true;
3688 path->slots[0] = nritems;
3691 * look ahead to the next item and see if it is also
3692 * from this directory and from this transaction
3694 ret = btrfs_next_leaf(root, path);
3697 last_offset = (u64)-1;
3702 btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
3703 if (tmp.objectid != ino || tmp.type != key_type) {
3704 last_offset = (u64)-1;
3707 if (btrfs_header_generation(path->nodes[0]) != trans->transid) {
3708 ret = overwrite_item(trans, log, dst_path,
3709 path->nodes[0], path->slots[0],
3714 last_offset = tmp.offset;
3719 btrfs_release_path(path);
3720 btrfs_release_path(dst_path);
3723 *last_offset_ret = last_offset;
3725 * insert the log range keys to indicate where the log
3728 ret = insert_dir_log_key(trans, log, path, key_type,
3729 ino, first_offset, last_offset);
3737 * Logging directories is very similar to logging inodes: we find all the items
3738 * from the current transaction and write them to the log.
3740 * The recovery code scans the directory in the subvolume, and if it finds a
3741 * key in the range logged that is not present in the log tree, then it means
3742 * that dir entry was unlinked during the transaction.
3744 * In order for that scan to work, we must include one key smaller than
3745 * the smallest logged by this transaction and one key larger than the largest
3746 * key logged by this transaction.
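/*
 * Illustrative example (made up offsets): if this transaction only touched
 * dir index items 10 and 11, the inserted range items still cover offsets
 * below 10 and above 11, so the recovery scan can detect entries deleted in
 * this transaction anywhere in that part of the directory, not just between
 * the two logged keys.
 */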
3748 static noinline int log_directory_changes(struct btrfs_trans_handle *trans,
3749 struct btrfs_root *root, struct btrfs_inode *inode,
3750 struct btrfs_path *path,
3751 struct btrfs_path *dst_path,
3752 struct btrfs_log_ctx *ctx)
3757 int key_type = BTRFS_DIR_ITEM_KEY;
3763 ret = log_dir_items(trans, root, inode, path, dst_path, key_type,
3764 ctx, min_key, &max_key);
3767 if (max_key == (u64)-1)
3769 min_key = max_key + 1;
3772 if (key_type == BTRFS_DIR_ITEM_KEY) {
3773 key_type = BTRFS_DIR_INDEX_KEY;
3780 * a helper function to drop items from the log before we relog an
3781 * inode. max_key_type indicates the highest item type to remove.
3782 * This cannot be run for file data extents because it does not
3783 * free the extents they point to.
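/*
 * For example (assumed typical usage): when an inode is relogged with
 * LOG_INODE_ALL, its previously logged items up to and including
 * BTRFS_XATTR_ITEM_KEY are dropped first, while file extent items are
 * handled separately precisely because of the restriction above.
 */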
3785 static int drop_objectid_items(struct btrfs_trans_handle *trans,
3786 struct btrfs_root *log,
3787 struct btrfs_path *path,
3788 u64 objectid, int max_key_type)
3791 struct btrfs_key key;
3792 struct btrfs_key found_key;
3795 key.objectid = objectid;
3796 key.type = max_key_type;
3797 key.offset = (u64)-1;
3800 ret = btrfs_search_slot(trans, log, &key, path, -1, 1);
3801 BUG_ON(ret == 0); /* Logic error */
3805 if (path->slots[0] == 0)
3809 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
3812 if (found_key.objectid != objectid)
3815 found_key.offset = 0;
3817 ret = btrfs_bin_search(path->nodes[0], &found_key, 0,
3822 ret = btrfs_del_items(trans, log, path, start_slot,
3823 path->slots[0] - start_slot + 1);
3825 * If start slot isn't 0 then we don't need to re-search, we've
3826 * found the last guy with the objectid in this tree.
3828 if (ret || start_slot != 0)
3830 btrfs_release_path(path);
3832 btrfs_release_path(path);
3838 static void fill_inode_item(struct btrfs_trans_handle *trans,
3839 struct extent_buffer *leaf,
3840 struct btrfs_inode_item *item,
3841 struct inode *inode, int log_inode_only,
3844 struct btrfs_map_token token;
3846 btrfs_init_map_token(&token, leaf);
3848 if (log_inode_only) {
3849 /* set the generation to zero so the recovery code
3850 * can tell the difference between logging
3851 * just to say 'this inode exists' and logging
3852 * to say 'update this inode with these values'
3854 btrfs_set_token_inode_generation(leaf, item, 0, &token);
3855 btrfs_set_token_inode_size(leaf, item, logged_isize, &token);
3857 btrfs_set_token_inode_generation(leaf, item,
3858 BTRFS_I(inode)->generation,
3860 btrfs_set_token_inode_size(leaf, item, inode->i_size, &token);
3863 btrfs_set_token_inode_uid(leaf, item, i_uid_read(inode), &token);
3864 btrfs_set_token_inode_gid(leaf, item, i_gid_read(inode), &token);
3865 btrfs_set_token_inode_mode(leaf, item, inode->i_mode, &token);
3866 btrfs_set_token_inode_nlink(leaf, item, inode->i_nlink, &token);
3868 btrfs_set_token_timespec_sec(leaf, &item->atime,
3869 inode->i_atime.tv_sec, &token);
3870 btrfs_set_token_timespec_nsec(leaf, &item->atime,
3871 inode->i_atime.tv_nsec, &token);
3873 btrfs_set_token_timespec_sec(leaf, &item->mtime,
3874 inode->i_mtime.tv_sec, &token);
3875 btrfs_set_token_timespec_nsec(leaf, &item->mtime,
3876 inode->i_mtime.tv_nsec, &token);
3878 btrfs_set_token_timespec_sec(leaf, &item->ctime,
3879 inode->i_ctime.tv_sec, &token);
3880 btrfs_set_token_timespec_nsec(leaf, &item->ctime,
3881 inode->i_ctime.tv_nsec, &token);
3883 btrfs_set_token_inode_nbytes(leaf, item, inode_get_bytes(inode),
3886 btrfs_set_token_inode_sequence(leaf, item,
3887 inode_peek_iversion(inode), &token);
3888 btrfs_set_token_inode_transid(leaf, item, trans->transid, &token);
3889 btrfs_set_token_inode_rdev(leaf, item, inode->i_rdev, &token);
3890 btrfs_set_token_inode_flags(leaf, item, BTRFS_I(inode)->flags, &token);
3891 btrfs_set_token_inode_block_group(leaf, item, 0, &token);
3894 static int log_inode_item(struct btrfs_trans_handle *trans,
3895 struct btrfs_root *log, struct btrfs_path *path,
3896 struct btrfs_inode *inode)
3898 struct btrfs_inode_item *inode_item;
3901 ret = btrfs_insert_empty_item(trans, log, path,
3902 &inode->location, sizeof(*inode_item));
3903 if (ret && ret != -EEXIST)
3905 inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
3906 struct btrfs_inode_item);
3907 fill_inode_item(trans, path->nodes[0], inode_item, &inode->vfs_inode,
3909 btrfs_release_path(path);
3913 static int log_csums(struct btrfs_trans_handle *trans,
3914 struct btrfs_root *log_root,
3915 struct btrfs_ordered_sum *sums)
3920 * Due to extent cloning, we might have logged a csum item that covers a
3921 * subrange of a cloned extent, and later we can end up logging a csum
3922 * item for a larger subrange of the same extent or the entire range.
3923 * This would leave csum items in the log tree that cover the same range
3924 * and break the searches for checksums in the log tree, resulting in
3925 * some checksums missing in the fs/subvolume tree. So just delete (or
3926 * trim and adjust) any existing csum items in the log for this range.
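/*
 * Illustrative case (made up ranges): a clone operation may have already
 * caused a csum item for bytes [X, X + 16K) of an extent to be logged; if
 * this fsync now logs csums for [X, X + 64K) without first deleting the old
 * item, two csum items in the log would overlap [X, X + 16K) and searches
 * at replay time could miss part of the range.
 */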
3928 ret = btrfs_del_csums(trans, log_root, sums->bytenr, sums->len);
3932 return btrfs_csum_file_blocks(trans, log_root, sums);
3935 static noinline int copy_items(struct btrfs_trans_handle *trans,
3936 struct btrfs_inode *inode,
3937 struct btrfs_path *dst_path,
3938 struct btrfs_path *src_path, u64 *last_extent,
3939 int start_slot, int nr, int inode_only,
3942 struct btrfs_fs_info *fs_info = trans->fs_info;
3943 unsigned long src_offset;
3944 unsigned long dst_offset;
3945 struct btrfs_root *log = inode->root->log_root;
3946 struct btrfs_file_extent_item *extent;
3947 struct btrfs_inode_item *inode_item;
3948 struct extent_buffer *src = src_path->nodes[0];
3949 struct btrfs_key first_key, last_key, key;
3951 struct btrfs_key *ins_keys;
3955 struct list_head ordered_sums;
3956 int skip_csum = inode->flags & BTRFS_INODE_NODATASUM;
3957 bool has_extents = false;
3958 bool need_find_last_extent = true;
3961 INIT_LIST_HEAD(&ordered_sums);
3963 ins_data = kmalloc(nr * sizeof(struct btrfs_key) +
3964 nr * sizeof(u32), GFP_NOFS);
3968 first_key.objectid = (u64)-1;
3970 ins_sizes = (u32 *)ins_data;
3971 ins_keys = (struct btrfs_key *)(ins_data + nr * sizeof(u32));
3973 for (i = 0; i < nr; i++) {
3974 ins_sizes[i] = btrfs_item_size_nr(src, i + start_slot);
3975 btrfs_item_key_to_cpu(src, ins_keys + i, i + start_slot);
3977 ret = btrfs_insert_empty_items(trans, log, dst_path,
3978 ins_keys, ins_sizes, nr);
3984 for (i = 0; i < nr; i++, dst_path->slots[0]++) {
3985 dst_offset = btrfs_item_ptr_offset(dst_path->nodes[0],
3986 dst_path->slots[0]);
3988 src_offset = btrfs_item_ptr_offset(src, start_slot + i);
3991 last_key = ins_keys[i];
3993 if (ins_keys[i].type == BTRFS_INODE_ITEM_KEY) {
3994 inode_item = btrfs_item_ptr(dst_path->nodes[0],
3996 struct btrfs_inode_item);
3997 fill_inode_item(trans, dst_path->nodes[0], inode_item,
3999 inode_only == LOG_INODE_EXISTS,
4002 copy_extent_buffer(dst_path->nodes[0], src, dst_offset,
4003 src_offset, ins_sizes[i]);
4007 * We set need_find_last_extent here in case we know we were
4008 * processing other items and then walk into the first extent in
4009 * the inode. If we don't hit an extent then nothing changes,
4010 * we'll do the last search the next time around.
4012 if (ins_keys[i].type == BTRFS_EXTENT_DATA_KEY) {
4014 if (first_key.objectid == (u64)-1)
4015 first_key = ins_keys[i];
4017 need_find_last_extent = false;
4020 /* take a reference on file data extents so that truncates
4021 * or deletes of this inode don't have to relog the inode
4024 if (ins_keys[i].type == BTRFS_EXTENT_DATA_KEY &&
4027 extent = btrfs_item_ptr(src, start_slot + i,
4028 struct btrfs_file_extent_item);
4030 if (btrfs_file_extent_generation(src, extent) < trans->transid)
4033 found_type = btrfs_file_extent_type(src, extent);
4034 if (found_type == BTRFS_FILE_EXTENT_REG) {
4036 ds = btrfs_file_extent_disk_bytenr(src,
4038 /* ds == 0 is a hole */
4042 dl = btrfs_file_extent_disk_num_bytes(src,
4044 cs = btrfs_file_extent_offset(src, extent);
4045 cl = btrfs_file_extent_num_bytes(src,
4047 if (btrfs_file_extent_compression(src,
4053 ret = btrfs_lookup_csums_range(
4055 ds + cs, ds + cs + cl - 1,
4058 btrfs_release_path(dst_path);
4066 btrfs_mark_buffer_dirty(dst_path->nodes[0]);
4067 btrfs_release_path(dst_path);
4071 * we have to do this after the loop above: inserting the checksum items
4072 * would modify the log tree while we are still copying items into it.
4075 while (!list_empty(&ordered_sums)) {
4076 struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next,
4077 struct btrfs_ordered_sum,
4080 ret = log_csums(trans, log, sums);
4081 list_del(&sums->list);
4088 if (need_find_last_extent && *last_extent == first_key.offset) {
4090 * We don't have any leaves between our current one and the one
4091 * we processed before that can have file extent items for our
4092 * inode (and have a generation number smaller than our current
4095 need_find_last_extent = false;
4099 * Because we use btrfs_search_forward we could skip leaves that were
4100 * not modified and then assume *last_extent is valid when it really
4101 * isn't. So back up to the previous leaf and read the end of the last
4102 * extent before we go and fill in holes.
4104 if (need_find_last_extent) {
4107 ret = btrfs_prev_leaf(inode->root, src_path);
4112 if (src_path->slots[0])
4113 src_path->slots[0]--;
4114 src = src_path->nodes[0];
4115 btrfs_item_key_to_cpu(src, &key, src_path->slots[0]);
4116 if (key.objectid != btrfs_ino(inode) ||
4117 key.type != BTRFS_EXTENT_DATA_KEY)
4119 extent = btrfs_item_ptr(src, src_path->slots[0],
4120 struct btrfs_file_extent_item);
4121 if (btrfs_file_extent_type(src, extent) ==
4122 BTRFS_FILE_EXTENT_INLINE) {
4123 len = btrfs_file_extent_ram_bytes(src, extent);
4124 *last_extent = ALIGN(key.offset + len,
4125 fs_info->sectorsize);
4127 len = btrfs_file_extent_num_bytes(src, extent);
4128 *last_extent = key.offset + len;
4132 /* So we did prev_leaf, now we need to move to the next leaf, but a few
4133 * things could have happened
4135 * 1) A merge could have happened, so we could currently be on a leaf
4136 * that holds what we were copying in the first place.
4137 * 2) A split could have happened, and now not all of the items we want
4138 * are on the same leaf.
4140 * So we need to adjust how we search for holes, we need to drop the
4141 * path and re-search for the first extent key we found, and then walk
4142 * forward until we hit the last one we copied.
4144 if (need_find_last_extent) {
4145 /* btrfs_prev_leaf could return 1 without releasing the path */
4146 btrfs_release_path(src_path);
4147 ret = btrfs_search_slot(NULL, inode->root, &first_key,
4152 src = src_path->nodes[0];
4153 i = src_path->slots[0];
4159 * Ok, now we need to go through and fill in any holes we may have,
4160 * to make sure holes are punched for those areas in case they had
4161 * extents previously.
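/*
 * For example (made up offsets): if the previously copied extent ended at
 * file offset 16K and the next copied extent key starts at 64K, a 48K file
 * extent item with a zero disk_bytenr (an explicit hole) is inserted at
 * offset 16K below, so replay does not leave stale data in that gap.
 */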
4167 if (i >= btrfs_header_nritems(src_path->nodes[0])) {
4168 ret = btrfs_next_leaf(inode->root, src_path);
4172 src = src_path->nodes[0];
4174 need_find_last_extent = true;
4177 btrfs_item_key_to_cpu(src, &key, i);
4178 if (!btrfs_comp_cpu_keys(&key, &last_key))
4180 if (key.objectid != btrfs_ino(inode) ||
4181 key.type != BTRFS_EXTENT_DATA_KEY) {
4185 extent = btrfs_item_ptr(src, i, struct btrfs_file_extent_item);
4186 if (btrfs_file_extent_type(src, extent) ==
4187 BTRFS_FILE_EXTENT_INLINE) {
4188 len = btrfs_file_extent_ram_bytes(src, extent);
4189 extent_end = ALIGN(key.offset + len,
4190 fs_info->sectorsize);
4192 len = btrfs_file_extent_num_bytes(src, extent);
4193 extent_end = key.offset + len;
4197 if (*last_extent == key.offset) {
4198 *last_extent = extent_end;
4201 offset = *last_extent;
4202 len = key.offset - *last_extent;
4203 ret = btrfs_insert_file_extent(trans, log, btrfs_ino(inode),
4204 offset, 0, 0, len, 0, len, 0, 0, 0);
4207 *last_extent = extent_end;
4211 * Check if there is a hole between the last extent found in our leaf
4212 * and the first extent in the next leaf. If there is one, we need to
4213 * log an explicit hole so that at replay time we can punch the hole.
4216 key.objectid == btrfs_ino(inode) &&
4217 key.type == BTRFS_EXTENT_DATA_KEY &&
4218 i == btrfs_header_nritems(src_path->nodes[0])) {
4219 ret = btrfs_next_leaf(inode->root, src_path);
4220 need_find_last_extent = true;
4223 } else if (ret == 0) {
4224 btrfs_item_key_to_cpu(src_path->nodes[0], &key,
4225 src_path->slots[0]);
4226 if (key.objectid == btrfs_ino(inode) &&
4227 key.type == BTRFS_EXTENT_DATA_KEY &&
4228 *last_extent < key.offset) {
4229 const u64 len = key.offset - *last_extent;
4231 ret = btrfs_insert_file_extent(trans, log,
4236 *last_extent += len;
4241 * Need to let the callers know we dropped the path so they should
4244 if (!ret && need_find_last_extent)
4249 static int extent_cmp(void *priv, struct list_head *a, struct list_head *b)
4251 struct extent_map *em1, *em2;
4253 em1 = list_entry(a, struct extent_map, list);
4254 em2 = list_entry(b, struct extent_map, list);
4256 if (em1->start < em2->start)
4258 else if (em1->start > em2->start)
4263 static int log_extent_csums(struct btrfs_trans_handle *trans,
4264 struct btrfs_inode *inode,
4265 struct btrfs_root *log_root,
4266 const struct extent_map *em)
4270 LIST_HEAD(ordered_sums);
4273 if (inode->flags & BTRFS_INODE_NODATASUM ||
4274 test_bit(EXTENT_FLAG_PREALLOC, &em->flags) ||
4275 em->block_start == EXTENT_MAP_HOLE)
4278 /* If we're compressed we have to save the entire range of csums. */
4279 if (em->compress_type) {
4281 csum_len = max(em->block_len, em->orig_block_len);
4283 csum_offset = em->mod_start - em->start;
4284 csum_len = em->mod_len;
4287 /* block start is already adjusted for the file extent offset. */
4288 ret = btrfs_lookup_csums_range(trans->fs_info->csum_root,
4289 em->block_start + csum_offset,
4290 em->block_start + csum_offset +
4291 csum_len - 1, &ordered_sums, 0);
4295 while (!list_empty(&ordered_sums)) {
4296 struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next,
4297 struct btrfs_ordered_sum,
4300 ret = log_csums(trans, log_root, sums);
4301 list_del(&sums->list);
4308 static int log_one_extent(struct btrfs_trans_handle *trans,
4309 struct btrfs_inode *inode, struct btrfs_root *root,
4310 const struct extent_map *em,
4311 struct btrfs_path *path,
4312 struct btrfs_log_ctx *ctx)
4314 struct btrfs_root *log = root->log_root;
4315 struct btrfs_file_extent_item *fi;
4316 struct extent_buffer *leaf;
4317 struct btrfs_map_token token;
4318 struct btrfs_key key;
4319 u64 extent_offset = em->start - em->orig_start;
4322 int extent_inserted = 0;
4324 ret = log_extent_csums(trans, inode, log, em);
4328 ret = __btrfs_drop_extents(trans, log, &inode->vfs_inode, path, em->start,
4329 em->start + em->len, NULL, 0, 1,
4330 sizeof(*fi), &extent_inserted);
4334 if (!extent_inserted) {
4335 key.objectid = btrfs_ino(inode);
4336 key.type = BTRFS_EXTENT_DATA_KEY;
4337 key.offset = em->start;
4339 ret = btrfs_insert_empty_item(trans, log, path, &key,
4344 leaf = path->nodes[0];
4345 btrfs_init_map_token(&token, leaf);
4346 fi = btrfs_item_ptr(leaf, path->slots[0],
4347 struct btrfs_file_extent_item);
4349 btrfs_set_token_file_extent_generation(leaf, fi, trans->transid,
4351 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
4352 btrfs_set_token_file_extent_type(leaf, fi,
4353 BTRFS_FILE_EXTENT_PREALLOC,
4356 btrfs_set_token_file_extent_type(leaf, fi,
4357 BTRFS_FILE_EXTENT_REG,
4360 block_len = max(em->block_len, em->orig_block_len);
4361 if (em->compress_type != BTRFS_COMPRESS_NONE) {
4362 btrfs_set_token_file_extent_disk_bytenr(leaf, fi,
4365 btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, block_len,
4367 } else if (em->block_start < EXTENT_MAP_LAST_BYTE) {
4368 btrfs_set_token_file_extent_disk_bytenr(leaf, fi,
4370 extent_offset, &token);
4371 btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, block_len,
4374 btrfs_set_token_file_extent_disk_bytenr(leaf, fi, 0, &token);
4375 btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, 0,
4379 btrfs_set_token_file_extent_offset(leaf, fi, extent_offset, &token);
4380 btrfs_set_token_file_extent_num_bytes(leaf, fi, em->len, &token);
4381 btrfs_set_token_file_extent_ram_bytes(leaf, fi, em->ram_bytes, &token);
4382 btrfs_set_token_file_extent_compression(leaf, fi, em->compress_type,
4384 btrfs_set_token_file_extent_encryption(leaf, fi, 0, &token);
4385 btrfs_set_token_file_extent_other_encoding(leaf, fi, 0, &token);
4386 btrfs_mark_buffer_dirty(leaf);
4388 btrfs_release_path(path);
4394 * Log all prealloc extents beyond the inode's i_size to make sure we do not
4395 * lose them after doing a fast fsync and replaying the log. We scan the
4396 * subvolume's root instead of iterating the inode's extent map tree because
4397 * otherwise we can log incorrect extent items based on extent map conversion.
4398 * That can happen due to the fact that extent maps are merged when they
4399 * are not in the extent map tree's list of modified extents.
4401 static int btrfs_log_prealloc_extents(struct btrfs_trans_handle *trans,
4402 struct btrfs_inode *inode,
4403 struct btrfs_path *path)
4405 struct btrfs_root *root = inode->root;
4406 struct btrfs_key key;
4407 const u64 i_size = i_size_read(&inode->vfs_inode);
4408 const u64 ino = btrfs_ino(inode);
4409 struct btrfs_path *dst_path = NULL;
4410 u64 last_extent = (u64)-1;
4415 if (!(inode->flags & BTRFS_INODE_PREALLOC))
4419 key.type = BTRFS_EXTENT_DATA_KEY;
4420 key.offset = i_size;
4421 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4426 struct extent_buffer *leaf = path->nodes[0];
4427 int slot = path->slots[0];
4429 if (slot >= btrfs_header_nritems(leaf)) {
4431 ret = copy_items(trans, inode, dst_path, path,
4432 &last_extent, start_slot,
4438 ret = btrfs_next_leaf(root, path);
4448 btrfs_item_key_to_cpu(leaf, &key, slot);
4449 if (key.objectid > ino)
4451 if (WARN_ON_ONCE(key.objectid < ino) ||
4452 key.type < BTRFS_EXTENT_DATA_KEY ||
4453 key.offset < i_size) {
4457 if (last_extent == (u64)-1) {
4458 last_extent = key.offset;
4460 * Avoid relogging extent items that were logged in past fsync calls,
4461 * which would lead to duplicate keys in the log tree.
4464 ret = btrfs_truncate_inode_items(trans,
4468 BTRFS_EXTENT_DATA_KEY);
4469 } while (ret == -EAGAIN);
4478 dst_path = btrfs_alloc_path();
4486 ret = copy_items(trans, inode, dst_path, path, &last_extent,
4487 start_slot, ins_nr, 1, 0);
4492 btrfs_release_path(path);
4493 btrfs_free_path(dst_path);
4497 static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
4498 struct btrfs_root *root,
4499 struct btrfs_inode *inode,
4500 struct btrfs_path *path,
4501 struct btrfs_log_ctx *ctx,
4505 struct extent_map *em, *n;
4506 struct list_head extents;
4507 struct extent_map_tree *tree = &inode->extent_tree;
4512 INIT_LIST_HEAD(&extents);
4514 write_lock(&tree->lock);
4515 test_gen = root->fs_info->last_trans_committed;
4517 list_for_each_entry_safe(em, n, &tree->modified_extents, list) {
4519 * Skip extents outside our logging range. It's important to do
4520 * it for correctness because if we don't ignore them, we may
4521 * log them before their ordered extent completes, and therefore
4522 * we could log them without logging their respective checksums
4523 * (the checksum items are added to the csum tree at the very
4524 * end of btrfs_finish_ordered_io()). Also leave such extents
4525 * outside of our range in the list, since we may have another
4526 * ranged fsync in the near future that needs them. If an extent
4527 * outside our range corresponds to a hole, log it to avoid
4528 * leaving gaps between extents (fsck will complain when we are
4529 * not using the NO_HOLES feature).
4531 if ((em->start > end || em->start + em->len <= start) &&
4532 em->block_start != EXTENT_MAP_HOLE)
4535 list_del_init(&em->list);
4537 * Just an arbitrary number, this can be really CPU intensive
4538 * once we start getting a lot of extents, and really once we
4539 * have a bunch of extents we just want to commit since it will
4542 if (++num > 32768) {
4543 list_del_init(&tree->modified_extents);
4548 if (em->generation <= test_gen)
4551 /* We log prealloc extents beyond eof later. */
4552 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) &&
4553 em->start >= i_size_read(&inode->vfs_inode))
4556 /* Need a ref to keep it from getting evicted from cache */
4557 refcount_inc(&em->refs);
4558 set_bit(EXTENT_FLAG_LOGGING, &em->flags);
4559 list_add_tail(&em->list, &extents);
4563 list_sort(NULL, &extents, extent_cmp);
4565 while (!list_empty(&extents)) {
4566 em = list_entry(extents.next, struct extent_map, list);
4568 list_del_init(&em->list);
4571 * If we had an error we just need to delete everybody from our
4575 clear_em_logging(tree, em);
4576 free_extent_map(em);
4580 write_unlock(&tree->lock);
4582 ret = log_one_extent(trans, inode, root, em, path, ctx);
4583 write_lock(&tree->lock);
4584 clear_em_logging(tree, em);
4585 free_extent_map(em);
4587 WARN_ON(!list_empty(&extents));
4588 write_unlock(&tree->lock);
4590 btrfs_release_path(path);
4592 ret = btrfs_log_prealloc_extents(trans, inode, path);
4597 static int logged_inode_size(struct btrfs_root *log, struct btrfs_inode *inode,
4598 struct btrfs_path *path, u64 *size_ret)
4600 struct btrfs_key key;
4603 key.objectid = btrfs_ino(inode);
4604 key.type = BTRFS_INODE_ITEM_KEY;
4607 ret = btrfs_search_slot(NULL, log, &key, path, 0, 0);
4610 } else if (ret > 0) {
4613 struct btrfs_inode_item *item;
4615 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
4616 struct btrfs_inode_item);
4617 *size_ret = btrfs_inode_size(path->nodes[0], item);
4619 * If the in-memory inode's i_size is smaller than the inode
4620 * size stored in the btree, return the inode's i_size, so
4621 * that we get a correct inode size after replaying the log
4622 * when before a power failure we had a shrinking truncate
4623 * followed by addition of a new name (rename / new hard link).
4624 * Otherwise return the inode size from the btree, to avoid
4625 * data loss when replaying a log due to previously doing a
4626 * write that expands the inode's size and logging a new name
4627 * immediately after.
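/*
 * Illustrative sequence for the shrinking truncate case (assumed commands):
 *
 *	xfs_io -f -c "pwrite 0 8K" -c fsync /mnt/foo
 *	xfs_io -c "truncate 4K" /mnt/foo
 *	ln /mnt/foo /mnt/bar
 *	xfs_io -c fsync /mnt/foo
 *	<power failure and log replay>
 *
 * Returning the smaller in-memory i_size here keeps replay from resurrecting
 * the stale pre-truncate size still recorded in the log.
 */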
4629 if (*size_ret > inode->vfs_inode.i_size)
4630 *size_ret = inode->vfs_inode.i_size;
4633 btrfs_release_path(path);
4638 * At the moment we always log all xattrs. This is to figure out at log replay
4639 * time which xattrs must have their deletion replayed. If an xattr is missing
4640 * in the log tree and exists in the fs/subvol tree, we delete it. This is
4641 * because if an xattr is deleted, the inode is fsynced, and then a power
4642 * failure happens, the log is replayed on the next mount and
4643 * we want the xattr to no longer exist afterwards (same behaviour as other filesystems
4644 * with a journal, ext3/4, xfs, f2fs, etc).
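/*
 * Illustrative sequence (assumed commands):
 *
 *	setfattr -n user.foo -v bar /mnt/file
 *	xfs_io -c fsync /mnt/file
 *	setfattr -x user.foo /mnt/file
 *	xfs_io -c fsync /mnt/file
 *	<power failure and log replay>
 *
 * Because all xattrs are logged on the second fsync, replay notices that
 * user.foo exists in the subvolume tree but not in the log and deletes it,
 * matching what journaling filesystems do.
 */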
4646 static int btrfs_log_all_xattrs(struct btrfs_trans_handle *trans,
4647 struct btrfs_root *root,
4648 struct btrfs_inode *inode,
4649 struct btrfs_path *path,
4650 struct btrfs_path *dst_path)
4653 struct btrfs_key key;
4654 const u64 ino = btrfs_ino(inode);
4659 key.type = BTRFS_XATTR_ITEM_KEY;
4662 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4667 int slot = path->slots[0];
4668 struct extent_buffer *leaf = path->nodes[0];
4669 int nritems = btrfs_header_nritems(leaf);
4671 if (slot >= nritems) {
4673 u64 last_extent = 0;
4675 ret = copy_items(trans, inode, dst_path, path,
4676 &last_extent, start_slot,
4678 /* can't be 1, extent items aren't processed */
4684 ret = btrfs_next_leaf(root, path);
4692 btrfs_item_key_to_cpu(leaf, &key, slot);
4693 if (key.objectid != ino || key.type != BTRFS_XATTR_ITEM_KEY)
4703 u64 last_extent = 0;
4705 ret = copy_items(trans, inode, dst_path, path,
4706 &last_extent, start_slot,
4708 /* can't be 1, extent items aren't processed */
4718 * If the no holes feature is enabled we need to make sure any hole between the
4719 * last extent and the i_size of our inode is explicitly marked in the log. This
4720 * is to make sure that doing something like:
4722 * 1) create file with 128Kb of data
4723 * 2) truncate file to 64Kb
4724 * 3) truncate file to 256Kb
4726 * 5) <crash/power failure>
4727 * 6) mount fs and trigger log replay
4729 * Will give us a file with a size of 256Kb, the first 64Kb of data match what
4730 * the file had in its first 64Kb of data at step 1 and the last 192Kb of the
4731 * file correspond to a hole. The presence of explicit holes in a log tree is
4732 * what guarantees that log replay will remove/adjust file extent items in the
4735 * Here we do not need to care about holes between extents, that is already done
4736 * by copy_items(). We also only need to do this in the full sync path, where we
4737 * lookup for extents from the fs/subvol tree only. In the fast path case, we
4738 * lookup the list of modified extent maps and if any represents a hole, we
4739 * insert a corresponding extent representing a hole in the log tree.
4741 static int btrfs_log_trailing_hole(struct btrfs_trans_handle *trans,
4742 struct btrfs_root *root,
4743 struct btrfs_inode *inode,
4744 struct btrfs_path *path)
4746 struct btrfs_fs_info *fs_info = root->fs_info;
4748 struct btrfs_key key;
4751 struct extent_buffer *leaf;
4752 struct btrfs_root *log = root->log_root;
4753 const u64 ino = btrfs_ino(inode);
4754 const u64 i_size = i_size_read(&inode->vfs_inode);
4756 if (!btrfs_fs_incompat(fs_info, NO_HOLES))
4760 key.type = BTRFS_EXTENT_DATA_KEY;
4761 key.offset = (u64)-1;
4763 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4768 ASSERT(path->slots[0] > 0);
4770 leaf = path->nodes[0];
4771 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4773 if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY) {
4774 /* inode does not have any extents */
4778 struct btrfs_file_extent_item *extent;
4782 * If there's an extent beyond i_size, an explicit hole was
4783 * already inserted by copy_items().
4785 if (key.offset >= i_size)
4788 extent = btrfs_item_ptr(leaf, path->slots[0],
4789 struct btrfs_file_extent_item);
4791 if (btrfs_file_extent_type(leaf, extent) ==
4792 BTRFS_FILE_EXTENT_INLINE)
4795 len = btrfs_file_extent_num_bytes(leaf, extent);
4796 /* Last extent goes beyond i_size, no need to log a hole. */
4797 if (key.offset + len > i_size)
4799 hole_start = key.offset + len;
4800 hole_size = i_size - hole_start;
4802 btrfs_release_path(path);
4804 /* Last extent ends at i_size. */
4808 hole_size = ALIGN(hole_size, fs_info->sectorsize);
4809 ret = btrfs_insert_file_extent(trans, log, ino, hole_start, 0, 0,
4810 hole_size, 0, hole_size, 0, 0, 0);
4815 * When we are logging a new inode X, check if it doesn't have a reference that
4816 * matches the reference from some other inode Y created in a past transaction
4817 * and that was renamed in the current transaction. If we don't do this, then at
4818 * log replay time we can lose inode Y (and all its files if it's a directory):
4819 *
4820 * mkdir /mnt/x
4821 * echo "hello world" > /mnt/x/foobar
4822 * sync
4823 * mv /mnt/x /mnt/y
4824 * mkdir /mnt/x # or touch /mnt/x
4825 * xfs_io -c fsync /mnt/x
4826 * <power fail>
4827 * mount fs, trigger log replay
4828 *
4829 * After the log replay procedure, we would lose the first directory and all its
4830 * files (file foobar).
4831 * For the case where inode Y is not a directory we simply end up losing it:
4832 *
4833 * echo "123" > /mnt/foo
4834 * sync
4835 * mv /mnt/foo /mnt/bar
4836 * echo "abc" > /mnt/foo
4837 * xfs_io -c fsync /mnt/foo
4838 * <power fail>
4839 *
4840 * We also need this for cases where a snapshot entry is replaced by some other
4841 * entry (file or directory) otherwise we end up with an unreplayable log due to
4842 * attempts to delete the snapshot entry (entry of type BTRFS_ROOT_ITEM_KEY) as
4843 * if it were a regular entry:
4844 *
4845 * mkdir /mnt/x
4846 * btrfs subvolume snapshot /mnt /mnt/x/snap
4847 * btrfs subvolume delete /mnt/x/snap
4848 * rmdir /mnt/x
4849 * mkdir /mnt/x
4850 * fsync /mnt/x or fsync some new file inside it
4851 * <power fail>
4852 *
4853 * The snapshot delete, rmdir of x, mkdir of a new x and the fsync all happen in
4854 * the same transaction.
4855 */
4856 static int btrfs_check_ref_name_override(struct extent_buffer *eb,
4857 const int slot,
4858 const struct btrfs_key *key,
4859 struct btrfs_inode *inode,
4860 u64 *other_ino, u64 *other_parent)
4861 {
4862 int ret;
4863 struct btrfs_path *search_path;
4864 char *name = NULL;
4865 u32 name_len = 0;
4866 u32 item_size = btrfs_item_size_nr(eb, slot);
4867 u32 cur_offset = 0;
4868 unsigned long ptr = btrfs_item_ptr_offset(eb, slot);
4870 search_path = btrfs_alloc_path();
4871 if (!search_path)
4872 return -ENOMEM;
4873 search_path->search_commit_root = 1;
4874 search_path->skip_locking = 1;
4876 while (cur_offset < item_size) {
4877 u64 parent;
4878 u32 this_name_len;
4879 u32 this_len;
4880 unsigned long name_ptr;
4881 struct btrfs_dir_item *di;
4883 if (key->type == BTRFS_INODE_REF_KEY) {
4884 struct btrfs_inode_ref *iref;
4886 iref = (struct btrfs_inode_ref *)(ptr + cur_offset);
4887 parent = key->offset;
4888 this_name_len = btrfs_inode_ref_name_len(eb, iref);
4889 name_ptr = (unsigned long)(iref + 1);
4890 this_len = sizeof(*iref) + this_name_len;
4891 } else {
4892 struct btrfs_inode_extref *extref;
4894 extref = (struct btrfs_inode_extref *)(ptr +
4895 cur_offset);
4896 parent = btrfs_inode_extref_parent(eb, extref);
4897 this_name_len = btrfs_inode_extref_name_len(eb, extref);
4898 name_ptr = (unsigned long)&extref->name;
4899 this_len = sizeof(*extref) + this_name_len;
4900 }
4902 if (this_name_len > name_len) {
4903 char *new_name;
4905 new_name = krealloc(name, this_name_len, GFP_NOFS);
4906 if (!new_name) {
4907 ret = -ENOMEM;
4908 goto out;
4909 }
4910 name_len = this_name_len;
4911 name = new_name;
4912 }
4914 read_extent_buffer(eb, name, name_ptr, this_name_len);
4915 di = btrfs_lookup_dir_item(NULL, inode->root, search_path,
4916 parent, name, this_name_len, 0);
4917 if (di && !IS_ERR(di)) {
4918 struct btrfs_key di_key;
4920 btrfs_dir_item_key_to_cpu(search_path->nodes[0],
4921 di, &di_key);
4922 if (di_key.type == BTRFS_INODE_ITEM_KEY) {
4923 if (di_key.objectid != key->objectid) {
4924 ret = 1;
4925 *other_ino = di_key.objectid;
4926 *other_parent = parent;
4927 } else {
4928 ret = 0;
4929 }
4930 } else {
4931 ret = -EAGAIN;
4932 }
4933 goto out;
4934 } else if (IS_ERR(di)) {
4935 ret = PTR_ERR(di);
4936 goto out;
4937 }
4938 btrfs_release_path(search_path);
4940 cur_offset += this_len;
4941 }
4942 ret = 0;
4943 out:
4944 btrfs_free_path(search_path);
4945 kfree(name);
4946 return ret;
4947 }
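/*
 * Editor's note with an illustrative caller sketch (an assumption, not part
 * of the original file). The return contract implemented above is:
 *
 *	ret = btrfs_check_ref_name_override(eb, slot, &key, inode,
 *					    &other_ino, &other_parent);
 *	ret < 0   error (-ENOMEM, -EAGAIN for non-inode dir items, ...)
 *	ret == 0  no conflicting name was found in the commit root
 *	ret > 0   a dir entry with the same name points to a different inode:
 *	          *other_ino and *other_parent identify it
 *
 * btrfs_log_inode() reacts to ret > 0 by logging the conflicting inode via
 * log_conflicting_inodes() further below.
 */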
4949 struct btrfs_ino_list {
4950 u64 ino;
4951 u64 parent;
4952 struct list_head list;
4953 };
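/*
 * Editor's illustrative sketch (an assumption, not in the original source):
 * log_conflicting_inodes() below uses btrfs_ino_list as a FIFO work queue so
 * that conflicts found while logging one inode are processed iteratively
 * instead of recursively:
 *
 *	LIST_HEAD(inode_list);
 *	elem = kmalloc(sizeof(*elem), GFP_NOFS);
 *	elem->ino = ino;  elem->parent = parent;
 *	list_add_tail(&elem->list, &inode_list);
 *	while (!list_empty(&inode_list)) {
 *		elem = list_first_entry(&inode_list, struct btrfs_ino_list, list);
 *		list_del(&elem->list);
 *		...	// logging one inode may list_add_tail() more entries
 *	}
 */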
4955 static int log_conflicting_inodes(struct btrfs_trans_handle *trans,
4956 struct btrfs_root *root,
4957 struct btrfs_path *path,
4958 struct btrfs_log_ctx *ctx,
4959 u64 ino, u64 parent)
4960 {
4961 struct btrfs_ino_list *ino_elem;
4962 LIST_HEAD(inode_list);
4963 int ret = 0;
4965 ino_elem = kmalloc(sizeof(*ino_elem), GFP_NOFS);
4966 if (!ino_elem)
4967 return -ENOMEM;
4968 ino_elem->ino = ino;
4969 ino_elem->parent = parent;
4970 list_add_tail(&ino_elem->list, &inode_list);
4972 while (!list_empty(&inode_list)) {
4973 struct btrfs_fs_info *fs_info = root->fs_info;
4974 struct btrfs_key key;
4975 struct inode *inode;
4977 ino_elem = list_first_entry(&inode_list, struct btrfs_ino_list,
4978 list);
4979 ino = ino_elem->ino;
4980 parent = ino_elem->parent;
4981 list_del(&ino_elem->list);
4982 kfree(ino_elem);
4983 if (ret)
4984 continue;
4986 btrfs_release_path(path);
4988 key.objectid = ino;
4989 key.type = BTRFS_INODE_ITEM_KEY;
4990 key.offset = 0;
4991 inode = btrfs_iget(fs_info->sb, &key, root);
4992 /*
4993 * If the other inode that had a conflicting dir entry was
4994 * deleted in the current transaction, we need to log its parent
4995 * directory.
4996 */
4997 if (IS_ERR(inode)) {
4998 ret = PTR_ERR(inode);
4999 if (ret == -ENOENT) {
5000 key.objectid = parent;
5001 inode = btrfs_iget(fs_info->sb, &key, root);
5002 if (IS_ERR(inode)) {
5003 ret = PTR_ERR(inode);
5004 } else {
5005 ret = btrfs_log_inode(trans, root,
5006 BTRFS_I(inode),
5007 LOG_OTHER_INODE_ALL,
5008 0, LLONG_MAX, ctx);
5009 btrfs_add_delayed_iput(inode);
5010 }
5011 }
5012 continue;
5013 }
5014 /*
5015 * We are safe logging the other inode without acquiring its
5016 * lock as long as we log with the LOG_INODE_EXISTS mode. We
5017 * are safe against concurrent renames of the other inode as
5018 * well because during a rename we pin the log and update the
5019 * log with the new name before we unpin it.
5020 */
5021 ret = btrfs_log_inode(trans, root, BTRFS_I(inode),
5022 LOG_OTHER_INODE, 0, LLONG_MAX, ctx);
5023 if (ret) {
5024 btrfs_add_delayed_iput(inode);
5025 continue;
5026 }
5028 key.objectid = ino;
5029 key.type = BTRFS_INODE_REF_KEY;
5030 key.offset = 0;
5031 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5032 if (ret < 0) {
5033 btrfs_add_delayed_iput(inode);
5034 continue;
5035 }
5037 while (true) {
5038 struct extent_buffer *leaf = path->nodes[0];
5039 int slot = path->slots[0];
5040 u64 other_ino = 0;
5041 u64 other_parent = 0;
5043 if (slot >= btrfs_header_nritems(leaf)) {
5044 ret = btrfs_next_leaf(root, path);
5045 if (ret < 0) {
5046 break;
5047 } else if (ret > 0) {
5048 ret = 0;
5049 break;
5050 }
5051 continue;
5052 }
5054 btrfs_item_key_to_cpu(leaf, &key, slot);
5055 if (key.objectid != ino ||
5056 (key.type != BTRFS_INODE_REF_KEY &&
5057 key.type != BTRFS_INODE_EXTREF_KEY)) {
5058 ret = 0;
5059 break;
5060 }
5062 ret = btrfs_check_ref_name_override(leaf, slot, &key,
5063 BTRFS_I(inode), &other_ino,
5064 &other_parent);
5065 if (ret < 0)
5066 break;
5067 if (ret > 0) {
5068 ino_elem = kmalloc(sizeof(*ino_elem), GFP_NOFS);
5069 if (!ino_elem) {
5070 ret = -ENOMEM;
5071 break;
5072 }
5073 ino_elem->ino = other_ino;
5074 ino_elem->parent = other_parent;
5075 list_add_tail(&ino_elem->list, &inode_list);
5076 ret = 0;
5077 }
5078 path->slots[0]++;
5079 }
5080 btrfs_add_delayed_iput(inode);
5081 }
5083 return ret;
5084 }
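/*
 * Editor's illustrative example (an assumption, not from the original
 * source) of a sequence that ends up in log_conflicting_inodes(): the
 * rename case from the comment above btrfs_check_ref_name_override():
 *
 *   echo "123" > /mnt/foo
 *   sync
 *   mv /mnt/foo /mnt/bar      # inode Y keeps its old (ino, "foo") ref in
 *   echo "abc" > /mnt/foo     # the commit root; new inode X owns the name
 *   xfs_io -c fsync /mnt/foo  # logging X finds the conflict, logs Y too
 */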
5086 /* log a single inode in the tree log.
5087 * At least one parent directory for this inode must exist in the tree
5088 * or be logged already.
5089 *
5090 * Any items from this inode changed by the current transaction are copied
5091 * to the log tree. An extra reference is taken on any extents in this
5092 * file, allowing us to avoid a whole pile of corner cases around logging
5093 * blocks that have been removed from the tree.
5094 *
5095 * See LOG_INODE_ALL and related defines for a description of what inode_only
5096 * is used for.
5097 *
5098 * This handles both files and directories.
5099 */
5100 static int btrfs_log_inode(struct btrfs_trans_handle *trans,
5101 struct btrfs_root *root, struct btrfs_inode *inode,
5102 int inode_only,
5103 const loff_t start,
5104 const loff_t end,
5105 struct btrfs_log_ctx *ctx)
5106 {
5107 struct btrfs_fs_info *fs_info = root->fs_info;
5108 struct btrfs_path *path;
5109 struct btrfs_path *dst_path;
5110 struct btrfs_key min_key;
5111 struct btrfs_key max_key;
5112 struct btrfs_root *log = root->log_root;
5113 u64 last_extent = 0;
5114 int err = 0;
5115 int ret;
5116 int nritems;
5117 int ins_start_slot = 0;
5118 int ins_nr;
5119 bool fast_search = false;
5120 u64 ino = btrfs_ino(inode);
5121 struct extent_map_tree *em_tree = &inode->extent_tree;
5122 u64 logged_isize = 0;
5123 bool need_log_inode_item = true;
5124 bool xattrs_logged = false;
5125 bool recursive_logging = false;
5127 path = btrfs_alloc_path();
5128 if (!path)
5129 return -ENOMEM;
5130 dst_path = btrfs_alloc_path();
5131 if (!dst_path) {
5132 btrfs_free_path(path);
5133 return -ENOMEM;
5134 }
5136 min_key.objectid = ino;
5137 min_key.type = BTRFS_INODE_ITEM_KEY;
5138 min_key.offset = 0;
5140 max_key.objectid = ino;
5143 /* today the code can only do partial logging of directories */
5144 if (S_ISDIR(inode->vfs_inode.i_mode) ||
5145 (!test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
5146 &inode->runtime_flags) &&
5147 inode_only >= LOG_INODE_EXISTS))
5148 max_key.type = BTRFS_XATTR_ITEM_KEY;
5149 else
5150 max_key.type = (u8)-1;
5151 max_key.offset = (u64)-1;
5153 /*
5154 * Only run delayed items if we are a dir or a new file.
5155 * Otherwise commit the delayed inode only, which is needed in
5156 * order for the log replay code to mark inodes for link count
5157 * fixup (create temporary BTRFS_TREE_LOG_FIXUP_OBJECTID items).
5158 */
5159 if (S_ISDIR(inode->vfs_inode.i_mode) ||
5160 inode->generation > fs_info->last_trans_committed)
5161 ret = btrfs_commit_inode_delayed_items(trans, inode);
5162 else
5163 ret = btrfs_commit_inode_delayed_inode(inode);
5165 if (ret) {
5166 btrfs_free_path(path);
5167 btrfs_free_path(dst_path);
5168 return ret;
5169 }
5171 if (inode_only == LOG_OTHER_INODE || inode_only == LOG_OTHER_INODE_ALL) {
5172 recursive_logging = true;
5173 if (inode_only == LOG_OTHER_INODE)
5174 inode_only = LOG_INODE_EXISTS;
5175 else
5176 inode_only = LOG_INODE_ALL;
5177 mutex_lock_nested(&inode->log_mutex, SINGLE_DEPTH_NESTING);
5178 } else {
5179 mutex_lock(&inode->log_mutex);
5180 }
5182 /*
5183 * a brute force approach to making sure we get the most uptodate
5184 * copies of everything.
5185 */
5186 if (S_ISDIR(inode->vfs_inode.i_mode)) {
5187 int max_key_type = BTRFS_DIR_LOG_INDEX_KEY;
5189 if (inode_only == LOG_INODE_EXISTS)
5190 max_key_type = BTRFS_XATTR_ITEM_KEY;
5191 ret = drop_objectid_items(trans, log, path, ino, max_key_type);
5192 } else {
5193 if (inode_only == LOG_INODE_EXISTS) {
5194 /*
5195 * Make sure the new inode item we write to the log has
5196 * the same isize as the current one (if it exists).
5197 * This is necessary to prevent data loss after log
5198 * replay, and also to prevent doing a wrong expanding
5199 * truncate - e.g. create file, write 4K into offset
5200 * 0, fsync, write 4K into offset 4096, add hard link,
5201 * fsync some other file (to sync log), power fail - if
5202 * we use the inode's current i_size, after log replay
5203 * we get an 8Kb file, with the last 4Kb extent as a hole
5204 * (zeroes), as if an expanding truncate happened,
5205 * instead of getting a file of 4Kb only.
5206 */
5207 err = logged_inode_size(log, inode, path, &logged_isize);
5208 if (err)
5209 goto out_unlock;
5210 }
5211 if (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
5212 &inode->runtime_flags)) {
5213 if (inode_only == LOG_INODE_EXISTS) {
5214 max_key.type = BTRFS_XATTR_ITEM_KEY;
5215 ret = drop_objectid_items(trans, log, path, ino,
5216 max_key.type);
5217 } else {
5218 clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
5219 &inode->runtime_flags);
5220 clear_bit(BTRFS_INODE_COPY_EVERYTHING,
5221 &inode->runtime_flags);
5222 while (1) {
5223 ret = btrfs_truncate_inode_items(trans,
5224 log, &inode->vfs_inode, 0, 0);
5225 if (ret != -EAGAIN)
5226 break;
5227 }
5228 }
5229 } else if (test_and_clear_bit(BTRFS_INODE_COPY_EVERYTHING,
5230 &inode->runtime_flags) ||
5231 inode_only == LOG_INODE_EXISTS) {
5232 if (inode_only == LOG_INODE_ALL)
5233 fast_search = true;
5234 max_key.type = BTRFS_XATTR_ITEM_KEY;
5235 ret = drop_objectid_items(trans, log, path, ino,
5236 max_key.type);
5237 } else {
5238 if (inode_only == LOG_INODE_ALL)
5239 fast_search = true;
5240 goto log_extents;
5241 }
5243 if (ret) {
5244 err = ret;
5245 goto out_unlock;
5246 }
5248 while (1) {
5249 ins_nr = 0;
5251 ret = btrfs_search_forward(root, &min_key,
5252 path, trans->transid);
5253 if (ret < 0) {
5254 err = ret;
5255 goto out_unlock;
5256 }
5257 if (ret != 0)
5258 break;
5259 again:
5260 /* note, ins_nr might be > 0 here, cleanup outside the loop */
5261 if (min_key.objectid != ino)
5262 break;
5263 if (min_key.type > max_key.type)
5264 break;
5266 if (min_key.type == BTRFS_INODE_ITEM_KEY)
5267 need_log_inode_item = false;
5269 if ((min_key.type == BTRFS_INODE_REF_KEY ||
5270 min_key.type == BTRFS_INODE_EXTREF_KEY) &&
5271 inode->generation == trans->transid &&
5272 !recursive_logging) {
5273 u64 other_ino = 0;
5274 u64 other_parent = 0;
5276 ret = btrfs_check_ref_name_override(path->nodes[0],
5277 path->slots[0], &min_key, inode,
5278 &other_ino, &other_parent);
5279 if (ret < 0) {
5280 err = ret;
5281 goto out_unlock;
5282 } else if (ret > 0 && ctx &&
5283 other_ino != btrfs_ino(BTRFS_I(ctx->inode))) {
5284 if (ins_nr > 0) {
5285 ins_nr++;
5286 } else {
5287 ins_nr = 1;
5288 ins_start_slot = path->slots[0];
5289 }
5290 ret = copy_items(trans, inode, dst_path, path,
5291 &last_extent, ins_start_slot,
5292 ins_nr, inode_only, logged_isize);
5293 if (ret < 0) {
5294 err = ret;
5295 goto out_unlock;
5296 }
5297 ins_nr = 0;
5300 err = log_conflicting_inodes(trans, root, path,
5301 ctx, other_ino, other_parent);
5302 if (err)
5303 goto out_unlock;
5304 btrfs_release_path(path);
5305 goto next_key;
5306 }
5307 }
5309 /* Skip xattrs, we log them later with btrfs_log_all_xattrs() */
5310 if (min_key.type == BTRFS_XATTR_ITEM_KEY) {
5311 if (ins_nr == 0)
5312 goto next_slot;
5313 ret = copy_items(trans, inode, dst_path, path,
5314 &last_extent, ins_start_slot,
5315 ins_nr, inode_only, logged_isize);
5316 if (ret < 0) {
5317 err = ret;
5318 goto out_unlock;
5319 }
5320 ins_nr = 0;
5321 if (ret) {
5322 btrfs_release_path(path);
5323 continue;
5324 }
5325 goto next_slot;
5326 }
5328 if (ins_nr && ins_start_slot + ins_nr == path->slots[0]) {
5329 ins_nr++;
5330 goto next_slot;
5331 } else if (!ins_nr) {
5332 ins_start_slot = path->slots[0];
5333 ins_nr = 1;
5334 goto next_slot;
5335 }
5337 ret = copy_items(trans, inode, dst_path, path, &last_extent,
5338 ins_start_slot, ins_nr, inode_only,
5339 logged_isize);
5340 if (ret < 0) {
5341 err = ret;
5342 goto out_unlock;
5343 }
5344 if (ret) {
5345 ins_nr = 0;
5346 btrfs_release_path(path);
5347 continue;
5348 }
5349 ins_nr = 1;
5350 ins_start_slot = path->slots[0];
5351 next_slot:
5353 nritems = btrfs_header_nritems(path->nodes[0]);
5354 path->slots[0]++;
5355 if (path->slots[0] < nritems) {
5356 btrfs_item_key_to_cpu(path->nodes[0], &min_key,
5357 path->slots[0]);
5358 goto again;
5359 }
5360 if (ins_nr) {
5361 ret = copy_items(trans, inode, dst_path, path,
5362 &last_extent, ins_start_slot,
5363 ins_nr, inode_only, logged_isize);
5364 if (ret < 0) {
5365 err = ret;
5366 goto out_unlock;
5367 }
5368 ret = 0;
5369 ins_nr = 0;
5370 }
5371 btrfs_release_path(path);
5372 next_key:
5373 if (min_key.offset < (u64)-1) {
5374 min_key.offset++;
5375 } else if (min_key.type < max_key.type) {
5376 min_key.type++;
5377 min_key.offset = 0;
5378 } else {
5379 break;
5380 }
5381 }
5382 if (ins_nr) {
5383 ret = copy_items(trans, inode, dst_path, path, &last_extent,
5384 ins_start_slot, ins_nr, inode_only,
5385 logged_isize);
5386 if (ret < 0) {
5387 err = ret;
5388 goto out_unlock;
5389 }
5390 ret = 0;
5391 ins_nr = 0;
5392 }
5395 btrfs_release_path(dst_path);
5396 err = btrfs_log_all_xattrs(trans, root, inode, path, dst_path);
5397 if (err)
5398 goto out_unlock;
5399 xattrs_logged = true;
5400 if (max_key.type >= BTRFS_EXTENT_DATA_KEY && !fast_search) {
5401 btrfs_release_path(path);
5402 btrfs_release_path(dst_path);
5403 err = btrfs_log_trailing_hole(trans, root, inode, path);
5404 if (err)
5405 goto out_unlock;
5406 }
5407 log_extents:
5408 btrfs_release_path(path);
5409 btrfs_release_path(dst_path);
5410 if (need_log_inode_item) {
5411 err = log_inode_item(trans, log, dst_path, inode);
5412 if (!err && !xattrs_logged) {
5413 err = btrfs_log_all_xattrs(trans, root, inode, path,
5414 dst_path);
5415 btrfs_release_path(path);
5416 }
5417 if (err)
5418 goto out_unlock;
5419 }
5420 if (fast_search) {
5421 ret = btrfs_log_changed_extents(trans, root, inode, dst_path,
5422 ctx, start, end);
5423 if (ret) {
5424 err = ret;
5425 goto out_unlock;
5426 }
5427 } else if (inode_only == LOG_INODE_ALL) {
5428 struct extent_map *em, *n;
5430 write_lock(&em_tree->lock);
5431 /*
5432 * We can't just remove every em if we're called for a ranged
5433 * fsync - that is, one that doesn't cover the whole possible
5434 * file range (0 to LLONG_MAX). This is because we can have
5435 * em's that fall outside the range we're logging and therefore
5436 * their ordered operations haven't completed yet
5437 * (btrfs_finish_ordered_io() not invoked yet). This means we
5438 * didn't get their respective file extent item in the fs/subvol
5439 * tree yet, and need to let the next fast fsync (one which
5440 * consults the list of modified extent maps) find the em so
5441 * that it logs a matching file extent item and waits for the
5442 * respective ordered operation to complete (if it's still
5443 * running).
5444 *
5445 * Removing every em outside the range we're logging would make
5446 * the next fast fsync not log their matching file extent items,
5447 * therefore making us lose data after a log replay.
5448 */
5449 list_for_each_entry_safe(em, n, &em_tree->modified_extents,
5450 list) {
5451 const u64 mod_end = em->mod_start + em->mod_len - 1;
5453 if (em->mod_start >= start && mod_end <= end)
5454 list_del_init(&em->list);
5455 }
5456 write_unlock(&em_tree->lock);
5457 }
5459 if (inode_only == LOG_INODE_ALL && S_ISDIR(inode->vfs_inode.i_mode)) {
5460 ret = log_directory_changes(trans, root, inode, path, dst_path,
5461 ctx);
5462 if (ret) {
5463 err = ret;
5464 goto out_unlock;
5465 }
5466 }
5468 /*
5469 * Don't update last_log_commit if we logged that an inode exists after
5470 * it was loaded to memory (full_sync bit set).
5471 * This is to prevent data loss when we do a write to the inode, then
5472 * the inode gets evicted after all delalloc was flushed, then we log
5473 * it exists (due to a rename for example) and then fsync it. This last
5474 * fsync would do nothing (not logging the extents previously written).
5475 */
5476 spin_lock(&inode->lock);
5477 inode->logged_trans = trans->transid;
5478 if (inode_only != LOG_INODE_EXISTS ||
5479 !test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags))
5480 inode->last_log_commit = inode->last_sub_trans;
5481 spin_unlock(&inode->lock);
5482 out_unlock:
5483 mutex_unlock(&inode->log_mutex);
5485 btrfs_free_path(path);
5486 btrfs_free_path(dst_path);
5487 return err;
5488 }
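/*
 * Editor's illustrative sketch (an assumption, not part of the original
 * file): stripped of the batching and conflict handling, the core of
 * btrfs_log_inode() above is btrfs' usual "changed items" range walk:
 *
 *	min_key = (ino, 0, 0);  max_key = (ino, (u8)-1, (u64)-1);
 *	while (1) {
 *		ret = btrfs_search_forward(root, &min_key, path, trans->transid);
 *		if (ret != 0)
 *			break;		// no more items changed in this transaction
 *		copy_items(...);	// batch contiguous leaf slots into the log
 *		// advance min_key: offset++, then type++, else done
 *	}
 *
 * btrfs_search_forward() skips tree blocks older than the given transid,
 * which is what lets an fsync visit only the parts of the fs/subvol tree
 * modified in the current transaction.
 */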
5490 /*
5491 * Check if we must fall back to a transaction commit when logging an inode.
5492 * This must be called after logging the inode and is used only in the context
5493 * where fsyncing an inode requires logging some other inode - in which
5494 * case we can't lock the i_mutex of each other inode we need to log as that
5495 * can lead to deadlocks with concurrent fsync against other inodes (as we can
5496 * log inodes up or down in the hierarchy) or rename operations for example. So
5497 * we take the log_mutex of the inode after we have logged it and then check for
5498 * its last_unlink_trans value - this is safe because any task setting
5499 * last_unlink_trans must take the log_mutex and it must do this before it does
5500 * the actual unlink operation, so if we do this check before a concurrent task
5501 * sets last_unlink_trans it means we've logged a consistent version/state of
5502 * all the inode items, otherwise we are not sure and must do a transaction
5503 * commit (the concurrent task might have only updated last_unlink_trans before
5504 * we logged the inode or it might have also done the unlink).
5505 */
5506 static bool btrfs_must_commit_transaction(struct btrfs_trans_handle *trans,
5507 struct btrfs_inode *inode)
5508 {
5509 struct btrfs_fs_info *fs_info = inode->root->fs_info;
5510 bool ret = false;
5512 mutex_lock(&inode->log_mutex);
5513 if (inode->last_unlink_trans > fs_info->last_trans_committed) {
5514 /*
5515 * Make sure any commits to the log are forced to be full
5516 * commits.
5517 */
5518 btrfs_set_log_full_commit(trans);
5519 ret = true;
5520 }
5521 mutex_unlock(&inode->log_mutex);
5523 return ret;
5524 }
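/*
 * Editor's illustrative example (an assumption, not from the original
 * source): a sequence where the check above returns true and forces a
 * full transaction commit instead of a log sync:
 *
 *   touch /mnt/dir/foo
 *   sync                       # last_trans_committed moves forward
 *   unlink /mnt/dir/foo        # btrfs_record_unlink_dir() bumps the dir's
 *                              # last_unlink_trans to the current transid
 *   xfs_io -c fsync /mnt/dir   # last_unlink_trans > last_trans_committed
 */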
5526 /*
5527 * follow the dentry parent pointers up the chain and see if any
5528 * of the directories in it require a full commit before they can
5529 * be logged. Returns zero if nothing special needs to be done or 1 if
5530 * a full commit is required.
5531 */
5532 static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans,
5533 struct btrfs_inode *inode,
5534 struct dentry *parent,
5535 struct super_block *sb,
5536 u64 last_committed)
5537 {
5538 int ret = 0;
5539 struct dentry *old_parent = NULL;
5541 /*
5542 * for regular files, if its inode is already on disk, we don't
5543 * have to worry about the parents at all. This is because
5544 * we can use the last_unlink_trans field to record renames
5545 * and other fun in this file.
5546 */
5547 if (S_ISREG(inode->vfs_inode.i_mode) &&
5548 inode->generation <= last_committed &&
5549 inode->last_unlink_trans <= last_committed)
5550 goto out;
5552 if (!S_ISDIR(inode->vfs_inode.i_mode)) {
5553 if (!parent || d_really_is_negative(parent) || sb != parent->d_sb)
5554 goto out;
5555 inode = BTRFS_I(d_inode(parent));
5556 }
5558 while (1) {
5559 if (btrfs_must_commit_transaction(trans, inode)) {
5560 ret = 1;
5561 break;
5562 }
5564 if (!parent || d_really_is_negative(parent) || sb != parent->d_sb)
5565 break;
5567 if (IS_ROOT(parent)) {
5568 inode = BTRFS_I(d_inode(parent));
5569 if (btrfs_must_commit_transaction(trans, inode))
5570 ret = 1;
5571 break;
5572 }
5574 parent = dget_parent(parent);
5575 dput(old_parent);
5576 old_parent = parent;
5577 inode = BTRFS_I(d_inode(parent));
5578 }
5579 dput(old_parent);
5580 out:
5581 return ret;
5582 }
5585 struct btrfs_dir_list {
5586 u64 ino;
5587 struct list_head list;
5588 };
5590 /*
5591 * Log the inodes of the new dentries of a directory. See log_dir_items() for
5592 * details about why it is needed.
5593 * This is a recursive operation - if an existing dentry corresponds to a
5594 * directory, that directory's new entries are logged too (same behaviour as
5595 * ext3/4, xfs, f2fs, reiserfs, nilfs2). Note that when logging the inodes
5596 * the dentries point to we do not lock their i_mutex, otherwise lockdep
5597 * complains about the following circular lock dependency / possible deadlock:
5598 *
5599 *           CPU0                                        CPU1
5600 *           ----                                        ----
5601 * lock(&type->i_mutex_dir_key#3/2);
5602 *                                            lock(sb_internal#2);
5603 *                                            lock(&type->i_mutex_dir_key#3/2);
5604 * lock(&sb->s_type->i_mutex_key#14);
5606 * Where sb_internal is the lock (a counter that works as a lock) acquired by
5607 * sb_start_intwrite() in btrfs_start_transaction().
5608 * Not locking i_mutex of the inodes is still safe because:
5610 * 1) For regular files we log with a mode of LOG_INODE_EXISTS. It's possible
5611 * that while logging the inode new references (names) are added or removed
5612 * from the inode, leaving the logged inode item with a link count that does
5613 * not match the number of logged inode reference items. This is fine because
5614 * at log replay time we compute the real number of links and correct the
5615 * link count in the inode item (see replay_one_buffer() and
5616 * link_to_fixup_dir());
5618 * 2) For directories we log with a mode of LOG_INODE_ALL. It's possible that
5619 * while logging the inode's items new items with keys BTRFS_DIR_ITEM_KEY and
5620 * BTRFS_DIR_INDEX_KEY are added to fs/subvol tree and the logged inode item
5621 * has a size that doesn't match the sum of the lengths of all the logged
5622 * names. This does not result in a problem because if a dir_item key is
5623 * logged but its matching dir_index key is not logged, at log replay time we
5624 * don't use it to replay the respective name (see replay_one_name()). On the
5625 * other hand if only the dir_index key ends up being logged, the respective
5626 * name is added to the fs/subvol tree with both the dir_item and dir_index
5627 * keys created (see replay_one_name()).
5628 * The directory's inode item with a wrong i_size is not a problem as well,
5629 * since we don't use it at log replay time to set the i_size in the inode
5630 * item of the fs/subvol tree (see overwrite_item()).
5631 */
5632 static int log_new_dir_dentries(struct btrfs_trans_handle *trans,
5633 struct btrfs_root *root,
5634 struct btrfs_inode *start_inode,
5635 struct btrfs_log_ctx *ctx)
5636 {
5637 struct btrfs_fs_info *fs_info = root->fs_info;
5638 struct btrfs_root *log = root->log_root;
5639 struct btrfs_path *path;
5640 LIST_HEAD(dir_list);
5641 struct btrfs_dir_list *dir_elem;
5642 int ret = 0;
5644 path = btrfs_alloc_path();
5645 if (!path)
5646 return -ENOMEM;
5648 dir_elem = kmalloc(sizeof(*dir_elem), GFP_NOFS);
5649 if (!dir_elem) {
5650 btrfs_free_path(path);
5651 return -ENOMEM;
5652 }
5653 dir_elem->ino = btrfs_ino(start_inode);
5654 list_add_tail(&dir_elem->list, &dir_list);
5656 while (!list_empty(&dir_list)) {
5657 struct extent_buffer *leaf;
5658 struct btrfs_key min_key;
5659 int nritems;
5660 int i;
5662 dir_elem = list_first_entry(&dir_list, struct btrfs_dir_list,
5663 list);
5664 if (ret)
5665 goto next_dir_inode;
5667 min_key.objectid = dir_elem->ino;
5668 min_key.type = BTRFS_DIR_ITEM_KEY;
5669 min_key.offset = 0;
5670 again:
5671 btrfs_release_path(path);
5672 ret = btrfs_search_forward(log, &min_key, path, trans->transid);
5673 if (ret < 0) {
5674 goto next_dir_inode;
5675 } else if (ret > 0) {
5676 ret = 0;
5677 goto next_dir_inode;
5678 }
5680 process_leaf:
5681 leaf = path->nodes[0];
5682 nritems = btrfs_header_nritems(leaf);
5683 for (i = path->slots[0]; i < nritems; i++) {
5684 struct btrfs_dir_item *di;
5685 struct btrfs_key di_key;
5686 struct inode *di_inode;
5687 struct btrfs_dir_list *new_dir_elem;
5688 int log_mode = LOG_INODE_EXISTS;
5689 int type;
5691 btrfs_item_key_to_cpu(leaf, &min_key, i);
5692 if (min_key.objectid != dir_elem->ino ||
5693 min_key.type != BTRFS_DIR_ITEM_KEY)
5694 goto next_dir_inode;
5696 di = btrfs_item_ptr(leaf, i, struct btrfs_dir_item);
5697 type = btrfs_dir_type(leaf, di);
5698 if (btrfs_dir_transid(leaf, di) < trans->transid &&
5699 type != BTRFS_FT_DIR)
5700 continue;
5701 btrfs_dir_item_key_to_cpu(leaf, di, &di_key);
5702 if (di_key.type == BTRFS_ROOT_ITEM_KEY)
5703 continue;
5705 btrfs_release_path(path);
5706 di_inode = btrfs_iget(fs_info->sb, &di_key, root);
5707 if (IS_ERR(di_inode)) {
5708 ret = PTR_ERR(di_inode);
5709 goto next_dir_inode;
5710 }
5712 if (btrfs_inode_in_log(BTRFS_I(di_inode), trans->transid)) {
5713 btrfs_add_delayed_iput(di_inode);
5714 break;
5715 }
5717 ctx->log_new_dentries = false;
5718 if (type == BTRFS_FT_DIR || type == BTRFS_FT_SYMLINK)
5719 log_mode = LOG_INODE_ALL;
5720 ret = btrfs_log_inode(trans, root, BTRFS_I(di_inode),
5721 log_mode, 0, LLONG_MAX, ctx);
5722 if (!ret &&
5723 btrfs_must_commit_transaction(trans, BTRFS_I(di_inode)))
5724 ret = 1;
5725 btrfs_add_delayed_iput(di_inode);
5726 if (ret)
5727 goto next_dir_inode;
5728 if (ctx->log_new_dentries) {
5729 new_dir_elem = kmalloc(sizeof(*new_dir_elem),
5730 GFP_NOFS);
5731 if (!new_dir_elem) {
5732 ret = -ENOMEM;
5733 goto next_dir_inode;
5734 }
5735 new_dir_elem->ino = di_key.objectid;
5736 list_add_tail(&new_dir_elem->list, &dir_list);
5737 }
5738 break;
5739 }
5740 if (i == nritems) {
5741 ret = btrfs_next_leaf(log, path);
5742 if (ret < 0) {
5743 goto next_dir_inode;
5744 } else if (ret > 0) {
5745 ret = 0;
5746 goto next_dir_inode;
5747 }
5748 goto process_leaf;
5749 }
5750 if (min_key.offset < (u64)-1) {
5751 min_key.offset++;
5752 goto again;
5753 }
5754 next_dir_inode:
5755 list_del(&dir_elem->list);
5756 kfree(dir_elem);
5757 }
5759 btrfs_free_path(path);
5760 return ret;
5761 }
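/*
 * Editor's illustrative example (an assumption, not from the original
 * source) of the recursion this function replaces with the dir_list queue:
 *
 *   mkdir -p /mnt/A/B/C        # all created in the current transaction
 *   touch /mnt/A/B/C/foo
 *   xfs_io -c fsync /mnt/A
 *
 * Logging A's new dentry for B (a directory) queues B, logging B queues C,
 * and so on, so every new inode reachable through new dentries ends up in
 * the log, matching the behaviour of ext3/4, xfs, f2fs, reiserfs, nilfs2.
 */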
5763 static int btrfs_log_all_parents(struct btrfs_trans_handle *trans,
5764 struct btrfs_inode *inode,
5765 struct btrfs_log_ctx *ctx)
5766 {
5767 struct btrfs_fs_info *fs_info = trans->fs_info;
5768 int ret;
5769 struct btrfs_path *path;
5770 struct btrfs_key key;
5771 struct btrfs_root *root = inode->root;
5772 const u64 ino = btrfs_ino(inode);
5774 path = btrfs_alloc_path();
5775 if (!path)
5776 return -ENOMEM;
5777 path->skip_locking = 1;
5778 path->search_commit_root = 1;
5780 key.objectid = ino;
5781 key.type = BTRFS_INODE_REF_KEY;
5782 key.offset = 0;
5783 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5784 if (ret < 0)
5785 goto out;
5787 while (true) {
5788 struct extent_buffer *leaf = path->nodes[0];
5789 int slot = path->slots[0];
5790 u32 cur_offset = 0;
5791 u32 item_size;
5792 unsigned long ptr;
5794 if (slot >= btrfs_header_nritems(leaf)) {
5795 ret = btrfs_next_leaf(root, path);
5796 if (ret < 0)
5797 goto out;
5798 else if (ret > 0)
5799 break;
5800 continue;
5801 }
5803 btrfs_item_key_to_cpu(leaf, &key, slot);
5804 /* BTRFS_INODE_EXTREF_KEY is BTRFS_INODE_REF_KEY + 1 */
5805 if (key.objectid != ino || key.type > BTRFS_INODE_EXTREF_KEY)
5806 break;
5808 item_size = btrfs_item_size_nr(leaf, slot);
5809 ptr = btrfs_item_ptr_offset(leaf, slot);
5810 while (cur_offset < item_size) {
5811 struct btrfs_key inode_key;
5812 struct inode *dir_inode;
5814 inode_key.type = BTRFS_INODE_ITEM_KEY;
5815 inode_key.offset = 0;
5817 if (key.type == BTRFS_INODE_EXTREF_KEY) {
5818 struct btrfs_inode_extref *extref;
5820 extref = (struct btrfs_inode_extref *)
5821 (ptr + cur_offset);
5822 inode_key.objectid = btrfs_inode_extref_parent(
5823 leaf, extref);
5824 cur_offset += sizeof(*extref);
5825 cur_offset += btrfs_inode_extref_name_len(leaf,
5826 extref);
5827 } else {
5828 inode_key.objectid = key.offset;
5829 cur_offset = item_size;
5830 }
5832 dir_inode = btrfs_iget(fs_info->sb, &inode_key, root);
5833 /*
5834 * If the parent inode was deleted, return an error to
5835 * fallback to a transaction commit. This is to prevent
5836 * getting an inode that was moved from one parent A to
5837 * a parent B, got its former parent A deleted and then
5838 * it got fsync'ed, from existing at both parents after
5839 * a log replay (and the old parent still existing).
5840 * Example:
5841 *
5842 * mkdir /mnt/A
5843 * mkdir /mnt/B
5844 * touch /mnt/B/bar
5845 * sync
5846 * mv /mnt/B/bar /mnt/A/bar
5847 * mv -T /mnt/A /mnt/B
5848 * fsync /mnt/B/bar
5849 * <power fail>
5850 *
5851 * If we ignore the old parent B which got deleted,
5852 * after a log replay we would have file bar linked
5853 * at both parents and the old parent B would still
5854 * exist.
5855 */
5856 if (IS_ERR(dir_inode)) {
5857 ret = PTR_ERR(dir_inode);
5858 goto out;
5859 }
5861 if (ctx)
5862 ctx->log_new_dentries = false;
5863 ret = btrfs_log_inode(trans, root, BTRFS_I(dir_inode),
5864 LOG_INODE_ALL, 0, LLONG_MAX, ctx);
5865 if (!ret &&
5866 btrfs_must_commit_transaction(trans, BTRFS_I(dir_inode)))
5867 ret = 1;
5868 if (!ret && ctx && ctx->log_new_dentries)
5869 ret = log_new_dir_dentries(trans, root,
5870 BTRFS_I(dir_inode), ctx);
5871 btrfs_add_delayed_iput(dir_inode);
5872 if (ret)
5873 goto out;
5874 }
5875 path->slots[0]++;
5876 }
5877 ret = 0;
5878 out:
5879 btrfs_free_path(path);
5880 return ret;
5881 }
5883 static int log_new_ancestors(struct btrfs_trans_handle *trans,
5884 struct btrfs_root *root,
5885 struct btrfs_path *path,
5886 struct btrfs_log_ctx *ctx)
5887 {
5888 struct btrfs_key found_key;
5890 btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
5892 while (true) {
5893 struct btrfs_fs_info *fs_info = root->fs_info;
5894 const u64 last_committed = fs_info->last_trans_committed;
5895 struct extent_buffer *leaf = path->nodes[0];
5896 int slot = path->slots[0];
5897 struct btrfs_key search_key;
5898 struct inode *inode;
5899 int ret = 0;
5901 btrfs_release_path(path);
5903 search_key.objectid = found_key.offset;
5904 search_key.type = BTRFS_INODE_ITEM_KEY;
5905 search_key.offset = 0;
5906 inode = btrfs_iget(fs_info->sb, &search_key, root);
5907 if (IS_ERR(inode))
5908 return PTR_ERR(inode);
5910 if (BTRFS_I(inode)->generation > last_committed)
5911 ret = btrfs_log_inode(trans, root, BTRFS_I(inode),
5912 LOG_INODE_EXISTS,
5913 0, LLONG_MAX, ctx);
5914 btrfs_add_delayed_iput(inode);
5915 if (ret)
5916 return ret;
5918 if (search_key.objectid == BTRFS_FIRST_FREE_OBJECTID)
5919 break;
5921 search_key.type = BTRFS_INODE_REF_KEY;
5922 ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
5923 if (ret < 0)
5924 return ret;
5926 leaf = path->nodes[0];
5927 slot = path->slots[0];
5928 if (slot >= btrfs_header_nritems(leaf)) {
5929 ret = btrfs_next_leaf(root, path);
5930 if (ret < 0)
5931 return ret;
5932 else if (ret > 0)
5933 return -ENOENT;
5934 leaf = path->nodes[0];
5935 slot = path->slots[0];
5936 }
5938 btrfs_item_key_to_cpu(leaf, &found_key, slot);
5939 if (found_key.objectid != search_key.objectid ||
5940 found_key.type != BTRFS_INODE_REF_KEY)
5941 return -ENOENT;
5942 }
5943 return 0;
5944 }
5946 static int log_new_ancestors_fast(struct btrfs_trans_handle *trans,
5947 struct btrfs_inode *inode,
5948 struct dentry *parent,
5949 struct btrfs_log_ctx *ctx)
5950 {
5951 struct btrfs_root *root = inode->root;
5952 struct btrfs_fs_info *fs_info = root->fs_info;
5953 struct dentry *old_parent = NULL;
5954 struct super_block *sb = inode->vfs_inode.i_sb;
5955 int ret = 0;
5957 while (true) {
5958 if (!parent || d_really_is_negative(parent) ||
5959 sb != parent->d_sb)
5960 break;
5962 inode = BTRFS_I(d_inode(parent));
5963 if (root != inode->root)
5964 break;
5966 if (inode->generation > fs_info->last_trans_committed) {
5967 ret = btrfs_log_inode(trans, root, inode,
5968 LOG_INODE_EXISTS, 0, LLONG_MAX, ctx);
5969 if (ret)
5970 break;
5971 }
5972 if (IS_ROOT(parent))
5973 break;
5975 parent = dget_parent(parent);
5976 dput(old_parent);
5977 old_parent = parent;
5978 }
5979 dput(old_parent);
5981 return ret;
5982 }
5984 static int log_all_new_ancestors(struct btrfs_trans_handle *trans,
5985 struct btrfs_inode *inode,
5986 struct dentry *parent,
5987 struct btrfs_log_ctx *ctx)
5988 {
5989 struct btrfs_root *root = inode->root;
5990 const u64 ino = btrfs_ino(inode);
5991 struct btrfs_path *path;
5992 struct btrfs_key search_key;
5993 int ret;
5995 /*
5996 * For a single hard link case, go through a fast path that does not
5997 * need to iterate the fs/subvolume tree.
5998 */
5999 if (inode->vfs_inode.i_nlink < 2)
6000 return log_new_ancestors_fast(trans, inode, parent, ctx);
6002 path = btrfs_alloc_path();
6003 if (!path)
6004 return -ENOMEM;
6006 search_key.objectid = ino;
6007 search_key.type = BTRFS_INODE_REF_KEY;
6008 search_key.offset = 0;
6009 again:
6010 ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
6011 if (ret < 0)
6012 goto out;
6013 if (ret == 0)
6014 path->slots[0]++;
6016 while (true) {
6017 struct extent_buffer *leaf = path->nodes[0];
6018 int slot = path->slots[0];
6019 struct btrfs_key found_key;
6021 if (slot >= btrfs_header_nritems(leaf)) {
6022 ret = btrfs_next_leaf(root, path);
6023 if (ret < 0)
6024 goto out;
6025 else if (ret > 0)
6026 break;
6027 continue;
6028 }
6030 btrfs_item_key_to_cpu(leaf, &found_key, slot);
6031 if (found_key.objectid != ino ||
6032 found_key.type > BTRFS_INODE_EXTREF_KEY)
6033 break;
6035 /*
6036 * Don't deal with extended references because they are rare
6037 * cases and too complex to deal with (we would need to keep
6038 * track of which subitem we are processing for each item in
6039 * this loop, etc). So just return some error to fallback to
6040 * a transaction commit.
6041 */
6042 if (found_key.type == BTRFS_INODE_EXTREF_KEY) {
6043 ret = -EMLINK;
6044 goto out;
6045 }
6047 /*
6048 * Logging ancestors needs to do more searches on the fs/subvol
6049 * tree, so it releases the path as needed to avoid deadlocks.
6050 * Keep track of the last inode ref key and resume from that key
6051 * after logging all new ancestors for the current hard link.
6052 */
6053 memcpy(&search_key, &found_key, sizeof(search_key));
6055 ret = log_new_ancestors(trans, root, path, ctx);
6056 if (ret)
6057 goto out;
6058 btrfs_release_path(path);
6059 search_key.offset++;
6060 goto again;
6061 }
6062 ret = 0;
6063 out:
6064 btrfs_free_path(path);
6065 return ret;
6066 }
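/*
 * Editor's illustrative example (an assumption, not from the original
 * source) of why the slow path above iterates all inode reference items
 * instead of just following the dentry chain:
 *
 *   mkdir /mnt/A /mnt/B            # two independent parent directories
 *   touch /mnt/A/foo
 *   ln /mnt/A/foo /mnt/B/foo       # i_nlink == 2, two inode ref items
 *   xfs_io -c fsync /mnt/A/foo
 *
 * The dentry used for the fsync only reaches ancestors of one hard link
 * (/mnt/A), so new ancestors of the other link (/mnt/B) must be found via
 * the BTRFS_INODE_REF_KEY items in the fs/subvol tree.
 */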
6067 /*
6068 * helper function around btrfs_log_inode to make sure newly created
6069 * parent directories also end up in the log. Minimal, inode-and-backref-only
6070 * logging is done for any parent directories that are older than
6071 * the last committed transaction
6072 */
6073 static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
6074 struct btrfs_inode *inode,
6075 struct dentry *parent,
6076 const loff_t start,
6077 const loff_t end,
6078 int inode_only,
6079 struct btrfs_log_ctx *ctx)
6080 {
6081 struct btrfs_root *root = inode->root;
6082 struct btrfs_fs_info *fs_info = root->fs_info;
6083 struct super_block *sb;
6084 int ret = 0;
6085 u64 last_committed = fs_info->last_trans_committed;
6086 bool log_dentries = false;
6088 sb = inode->vfs_inode.i_sb;
6090 if (btrfs_test_opt(fs_info, NOTREELOG)) {
6091 ret = 1;
6092 goto end_no_trans;
6093 }
6095 /*
6096 * If the previous transaction commit didn't complete, we have to do a
6097 * full commit ourselves.
6098 */
6099 if (fs_info->last_trans_log_full_commit >
6100 fs_info->last_trans_committed) {
6101 ret = 1;
6102 goto end_no_trans;
6103 }
6105 if (btrfs_root_refs(&root->root_item) == 0) {
6106 ret = 1;
6107 goto end_no_trans;
6108 }
6110 ret = check_parent_dirs_for_sync(trans, inode, parent, sb,
6111 last_committed);
6112 if (ret)
6113 goto end_no_trans;
6115 /*
6116 * Skip already logged inodes or inodes corresponding to tmpfiles
6117 * (since logging them is pointless, a link count of 0 means they
6118 * will never be accessible).
6119 */
6120 if (btrfs_inode_in_log(inode, trans->transid) ||
6121 inode->vfs_inode.i_nlink == 0) {
6122 ret = BTRFS_NO_LOG_SYNC;
6123 goto end_no_trans;
6124 }
6126 ret = start_log_trans(trans, root, ctx);
6127 if (ret)
6128 goto end_no_trans;
6130 ret = btrfs_log_inode(trans, root, inode, inode_only, start, end, ctx);
6131 if (ret)
6132 goto end_trans;
6134 /*
6135 * for regular files, if its inode is already on disk, we don't
6136 * have to worry about the parents at all. This is because
6137 * we can use the last_unlink_trans field to record renames
6138 * and other fun in this file.
6139 */
6140 if (S_ISREG(inode->vfs_inode.i_mode) &&
6141 inode->generation <= last_committed &&
6142 inode->last_unlink_trans <= last_committed) {
6143 ret = 0;
6144 goto end_trans;
6145 }
6147 if (S_ISDIR(inode->vfs_inode.i_mode) && ctx && ctx->log_new_dentries)
6148 log_dentries = true;
6150 /*
6151 * On unlink we must make sure all our current and old parent directory
6152 * inodes are fully logged. This is to prevent leaving dangling
6153 * directory index entries in directories that were our parents but are
6154 * not anymore. Not doing this results in old parent directory being
6155 * impossible to delete after log replay (rmdir will always fail with
6156 * error -ENOTEMPTY).
6157 *
6158 * Example 1:
6159 *
6160 * mkdir testdir
6161 * touch testdir/foo
6162 * ln testdir/foo testdir/bar
6163 * sync
6164 * unlink testdir/bar
6165 * xfs_io -c fsync testdir/foo
6166 * <power failure>
6167 * mount fs, triggers log replay
6168 *
6169 * If we don't log the parent directory (testdir), after log replay the
6170 * directory still has an entry pointing to the file inode using the bar
6171 * name, but a matching BTRFS_INODE_[REF|EXTREF]_KEY does not exist and
6172 * the file inode has a link count of 1.
6173 *
6174 * Example 2:
6175 *
6176 * mkdir testdir
6177 * touch foo
6178 * ln foo testdir/foo2
6179 * ln foo testdir/foo3
6180 * sync
6181 * unlink testdir/foo3
6182 * xfs_io -c fsync foo
6183 * <power failure>
6184 * mount fs, triggers log replay
6185 *
6186 * Similar as the first example, after log replay the parent directory
6187 * testdir still has an entry pointing to the inode file with name foo3
6188 * but the file inode does not have a matching BTRFS_INODE_REF_KEY item
6189 * and has a link count of 2.
6190 */
6191 if (inode->last_unlink_trans > last_committed) {
6192 ret = btrfs_log_all_parents(trans, inode, ctx);
6193 if (ret)
6194 goto end_trans;
6195 }
6197 ret = log_all_new_ancestors(trans, inode, parent, ctx);
6198 if (ret)
6199 goto end_trans;
6201 if (log_dentries)
6202 ret = log_new_dir_dentries(trans, root, inode, ctx);
6203 else
6204 ret = 0;
6205 end_trans:
6206 if (ret < 0) {
6207 btrfs_set_log_full_commit(trans);
6208 ret = 1;
6209 }
6211 if (ret)
6212 btrfs_remove_log_ctx(root, ctx);
6213 btrfs_end_log_trans(root);
6214 end_no_trans:
6215 return ret;
6216 }
6218 /*
6219 * it is not safe to log dentry if the chunk root has added new
6220 * chunks. This returns 0 if the dentry was logged, and 1 otherwise.
6221 * If this returns 1, you must commit the transaction to safely get your
6222 * data on disk.
6223 */
6224 int btrfs_log_dentry_safe(struct btrfs_trans_handle *trans,
6225 struct dentry *dentry,
6226 u64 start,
6227 u64 end,
6228 struct btrfs_log_ctx *ctx)
6229 {
6230 struct dentry *parent = dget_parent(dentry);
6231 int ret;
6233 ret = btrfs_log_inode_parent(trans, BTRFS_I(d_inode(dentry)), parent,
6234 start, end, LOG_INODE_ALL, ctx);
6235 dput(parent);
6237 return ret;
6238 }
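/*
 * Editor's illustrative usage sketch (an assumption; the real caller is the
 * fsync path in fs/btrfs/file.c):
 *
 *	ret = btrfs_log_dentry_safe(trans, dentry, start, end, &ctx);
 *	if (ret == 0) {
 *		ret = btrfs_sync_log(trans, root, &ctx);  // write the log tree
 *		if (ret == 0)
 *			return btrfs_end_transaction(trans);
 *	}
 *	// ret != 0 (and not BTRFS_NO_LOG_SYNC): fall back to a full commit
 *	return btrfs_commit_transaction(trans);
 */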
6240 /*
6241 * should be called during mount to recover / replay any log trees
6242 * from the FS
6243 */
6244 int btrfs_recover_log_trees(struct btrfs_root *log_root_tree)
6245 {
6246 int ret;
6247 struct btrfs_path *path;
6248 struct btrfs_trans_handle *trans;
6249 struct btrfs_key key;
6250 struct btrfs_key found_key;
6251 struct btrfs_key tmp_key;
6252 struct btrfs_root *log;
6253 struct btrfs_fs_info *fs_info = log_root_tree->fs_info;
6254 struct walk_control wc = {
6255 .process_func = process_one_buffer,
6256 .stage = LOG_WALK_PIN_ONLY,
6257 };
6259 path = btrfs_alloc_path();
6260 if (!path)
6261 return -ENOMEM;
6263 set_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags);
6265 trans = btrfs_start_transaction(fs_info->tree_root, 0);
6266 if (IS_ERR(trans)) {
6267 ret = PTR_ERR(trans);
6268 goto error;
6269 }
6271 wc.trans = trans;
6272 wc.pin = 1;
6274 ret = walk_log_tree(trans, log_root_tree, &wc);
6275 if (ret) {
6276 btrfs_handle_fs_error(fs_info, ret,
6277 "Failed to pin buffers while recovering log root tree.");
6278 goto error;
6279 }
6281 again:
6282 key.objectid = BTRFS_TREE_LOG_OBJECTID;
6283 key.offset = (u64)-1;
6284 key.type = BTRFS_ROOT_ITEM_KEY;
6286 while (1) {
6287 ret = btrfs_search_slot(NULL, log_root_tree, &key, path, 0, 0);
6288 if (ret < 0) {
6289 btrfs_handle_fs_error(fs_info, ret,
6290 "Couldn't find tree log root.");
6291 goto error;
6292 }
6293 if (ret > 0) {
6294 if (path->slots[0] == 0)
6295 break;
6296 path->slots[0]--;
6297 }
6299 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
6300 path->slots[0]);
6301 btrfs_release_path(path);
6302 if (found_key.objectid != BTRFS_TREE_LOG_OBJECTID)
6303 break;
6305 log = btrfs_read_fs_root(log_root_tree, &found_key);
6306 if (IS_ERR(log)) {
6307 ret = PTR_ERR(log);
6308 btrfs_handle_fs_error(fs_info, ret,
6309 "Couldn't read tree log root.");
6310 goto error;
6311 }
6313 tmp_key.objectid = found_key.offset;
6314 tmp_key.type = BTRFS_ROOT_ITEM_KEY;
6315 tmp_key.offset = (u64)-1;
6317 wc.replay_dest = btrfs_read_fs_root_no_name(fs_info, &tmp_key);
6318 if (IS_ERR(wc.replay_dest)) {
6319 ret = PTR_ERR(wc.replay_dest);
6321 /*
6322 * We didn't find the subvol, likely because it was
6323 * deleted. This is ok, simply skip this log and go to
6324 * the next one.
6325 *
6326 * We need to exclude the root because we can't have
6327 * other log replays overwriting this log as we'll read
6328 * it back in a few more times. This will keep our
6329 * block from being modified, and we'll just bail for
6330 * each subsequent pass.
6331 */
6332 if (ret == -ENOENT)
6333 ret = btrfs_pin_extent_for_log_replay(fs_info,
6334 log->node->start,
6335 log->node->len);
6336 free_extent_buffer(log->node);
6337 free_extent_buffer(log->commit_root);
6338 kfree(log);
6340 if (!ret)
6341 goto next;
6342 btrfs_handle_fs_error(fs_info, ret,
6343 "Couldn't read target root for tree log recovery.");
6344 goto error;
6345 }
6347 wc.replay_dest->log_root = log;
6348 btrfs_record_root_in_trans(trans, wc.replay_dest);
6349 ret = walk_log_tree(trans, log, &wc);
6351 if (!ret && wc.stage == LOG_WALK_REPLAY_ALL) {
6352 ret = fixup_inode_link_counts(trans, wc.replay_dest,
6353 path);
6354 }
6356 if (!ret && wc.stage == LOG_WALK_REPLAY_ALL) {
6357 struct btrfs_root *root = wc.replay_dest;
6359 btrfs_release_path(path);
6361 /*
6362 * We have just replayed everything, and the highest
6363 * objectid of fs roots probably has changed in case
6364 * some inode_items got replayed.
6365 *
6366 * root->objectid_mutex is not acquired as log replay
6367 * could only happen during mount.
6368 */
6369 ret = btrfs_find_highest_objectid(root,
6370 &root->highest_objectid);
6371 }
6373 wc.replay_dest->log_root = NULL;
6374 free_extent_buffer(log->node);
6375 free_extent_buffer(log->commit_root);
6376 kfree(log);
6378 if (ret)
6379 goto error;
6380 next:
6381 if (found_key.offset == 0)
6382 break;
6383 key.offset = found_key.offset - 1;
6384 }
6385 btrfs_release_path(path);
6387 /* step one is to pin it all, step two is to replay just inodes */
6388 if (wc.pin) {
6389 wc.pin = 0;
6390 wc.process_func = replay_one_buffer;
6391 wc.stage = LOG_WALK_REPLAY_INODES;
6392 goto again;
6393 }
6394 /* step three is to replay everything */
6395 if (wc.stage < LOG_WALK_REPLAY_ALL) {
6396 wc.stage++;
6397 goto again;
6398 }
6400 btrfs_free_path(path);
6402 /* step 4: commit the transaction, which also unpins the blocks */
6403 ret = btrfs_commit_transaction(trans);
6404 if (ret)
6405 return ret;
6407 free_extent_buffer(log_root_tree->node);
6408 log_root_tree->log_root = NULL;
6409 clear_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags);
6410 kfree(log_root_tree);
6412 return 0;
6413 error:
6414 if (wc.trans)
6415 btrfs_end_transaction(wc.trans);
6416 btrfs_free_path(path);
6417 return ret;
6418 }
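/*
 * Editor's note (an assumption, summarising the control flow above, not
 * text from the original file): recovery walks every log tree several
 * times via the 'again:' label, driven by struct walk_control:
 *
 *	wc.stage == LOG_WALK_PIN_ONLY       first pass, pin all log blocks
 *	wc.stage == LOG_WALK_REPLAY_INODES  recreate inodes in the subvolume
 *	...                                 wc.stage++ until LOG_WALK_REPLAY_ALL
 *	wc.stage == LOG_WALK_REPLAY_ALL     replay dirs/links/extents, then
 *	                                    fixup_inode_link_counts()
 *
 * Only after all passes does it commit the transaction, which also unpins
 * the blocks.
 */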
6420 /*
6421 * there are some corner cases where we want to force a full
6422 * commit instead of allowing a directory to be logged.
6423 *
6424 * They revolve around files that were unlinked from the directory, and
6425 * this function updates the parent directory so that a full commit is
6426 * properly done if it is fsync'd later after the unlinks are done.
6427 *
6428 * Must be called before the unlink operations (updates to the subvolume tree,
6429 * inodes, etc) are done.
6430 */
6431 void btrfs_record_unlink_dir(struct btrfs_trans_handle *trans,
6432 struct btrfs_inode *dir, struct btrfs_inode *inode,
6433 int for_rename)
6434 {
6435 /*
6436 * when we're logging a file, if it hasn't been renamed
6437 * or unlinked, and its inode is fully committed on disk,
6438 * we don't have to worry about walking up the directory chain
6439 * to log its parents.
6440 *
6441 * So, we use the last_unlink_trans field to put this transid
6442 * into the file. When the file is logged we check it and
6443 * don't log the parents if the file is fully on disk.
6444 */
6445 mutex_lock(&inode->log_mutex);
6446 inode->last_unlink_trans = trans->transid;
6447 mutex_unlock(&inode->log_mutex);
6449 /*
6450 * if this directory was already logged any new
6451 * names for this file/dir will get recorded
6452 */
6453 if (dir->logged_trans == trans->transid)
6454 return;
6456 /*
6457 * if the inode we're about to unlink was logged,
6458 * the log will be properly updated for any new names
6459 */
6460 if (inode->logged_trans == trans->transid)
6461 return;
6463 /*
6464 * when renaming files across directories, if the directory
6465 * we're unlinking from gets fsync'd later on, there's
6466 * no way to find the destination directory later and fsync it
6467 * properly. So, we have to be conservative and force commits
6468 * so the new name gets discovered.
6469 */
6470 if (for_rename)
6471 goto record;
6473 /* we can safely do the unlink without any special recording */
6474 return;
6476 record:
6477 mutex_lock(&dir->log_mutex);
6478 dir->last_unlink_trans = trans->transid;
6479 mutex_unlock(&dir->log_mutex);
6480 }
6482 /*
6483 * Make sure that if someone attempts to fsync the parent directory of a deleted
6484 * snapshot, it ends up triggering a transaction commit. This is to guarantee
6485 * that after replaying the log tree of the parent directory's root we will not
6486 * see the snapshot anymore and at log replay time we will not see any log tree
6487 * corresponding to the deleted snapshot's root, which could lead to replaying
6488 * it after replaying the log tree of the parent directory (which would replay
6489 * the snapshot delete operation).
6490 *
6491 * Must be called before the actual snapshot destroy operation (updates to the
6492 * parent root and the tree of tree roots, etc) are done.
6493 */
6494 void btrfs_record_snapshot_destroy(struct btrfs_trans_handle *trans,
6495 struct btrfs_inode *dir)
6496 {
6497 mutex_lock(&dir->log_mutex);
6498 dir->last_unlink_trans = trans->transid;
6499 mutex_unlock(&dir->log_mutex);
6500 }
6502 /*
6503 * Call this after adding a new name for a file and it will properly
6504 * update the log to reflect the new name.
6505 *
6506 * @ctx cannot be NULL when @sync_log is false, and should be NULL when it's
6507 * true (because it's not used).
6508 *
6509 * Return value depends on whether @sync_log is true or false.
6510 * When true: returns BTRFS_NEED_TRANS_COMMIT if the transaction needs to be
6511 * committed by the caller, and BTRFS_DONT_NEED_TRANS_COMMIT
6512 * otherwise.
6513 * When false: returns BTRFS_DONT_NEED_LOG_SYNC if the caller does not need
6514 * to sync the log, BTRFS_NEED_LOG_SYNC if it needs to sync the log,
6515 * or BTRFS_NEED_TRANS_COMMIT if the transaction needs to be
6516 * committed (without attempting to sync the log).
6517 */
6518 int btrfs_log_new_name(struct btrfs_trans_handle *trans,
6519 struct btrfs_inode *inode, struct btrfs_inode *old_dir,
6520 struct dentry *parent,
6521 bool sync_log, struct btrfs_log_ctx *ctx)
6522 {
6523 struct btrfs_fs_info *fs_info = trans->fs_info;
6524 int ret;
6526 /*
6527 * this will force the logging code to walk the dentry chain
6528 * up to the root
6529 */
6530 if (!S_ISDIR(inode->vfs_inode.i_mode))
6531 inode->last_unlink_trans = trans->transid;
6533 /*
6534 * if this inode hasn't been logged and the directory we're renaming it
6535 * from hasn't been logged, we don't need to log it
6536 */
6537 if (inode->logged_trans <= fs_info->last_trans_committed &&
6538 (!old_dir || old_dir->logged_trans <= fs_info->last_trans_committed))
6539 return sync_log ? BTRFS_DONT_NEED_TRANS_COMMIT :
6540 BTRFS_DONT_NEED_LOG_SYNC;
6542 if (sync_log) {
6543 struct btrfs_log_ctx ctx2;
6545 btrfs_init_log_ctx(&ctx2, &inode->vfs_inode);
6546 ret = btrfs_log_inode_parent(trans, inode, parent, 0, LLONG_MAX,
6547 LOG_INODE_EXISTS, &ctx2);
6548 if (ret == BTRFS_NO_LOG_SYNC)
6549 return BTRFS_DONT_NEED_TRANS_COMMIT;
6550 else if (ret)
6551 return BTRFS_NEED_TRANS_COMMIT;
6553 ret = btrfs_sync_log(trans, inode->root, &ctx2);
6554 if (ret)
6555 return BTRFS_NEED_TRANS_COMMIT;
6556 return BTRFS_DONT_NEED_TRANS_COMMIT;
6557 }
6559 ASSERT(ctx);
6560 ret = btrfs_log_inode_parent(trans, inode, parent, 0, LLONG_MAX,
6561 LOG_INODE_EXISTS, ctx);
6562 if (ret == BTRFS_NO_LOG_SYNC)
6563 return BTRFS_DONT_NEED_LOG_SYNC;
6564 else if (ret)
6565 return BTRFS_NEED_TRANS_COMMIT;
6567 return BTRFS_NEED_LOG_SYNC;
6568 }
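/*
 * Editor's illustrative caller sketch (an assumption; the real callers live
 * in the rename and link paths of fs/btrfs/inode.c):
 *
 *	ret = btrfs_log_new_name(trans, BTRFS_I(inode), BTRFS_I(old_dir),
 *				 parent, true, NULL);
 *	if (ret == BTRFS_NEED_TRANS_COMMIT)
 *		ret = btrfs_commit_transaction(trans);
 *	// BTRFS_DONT_NEED_TRANS_COMMIT: the log is already up to date
 */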