1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (C) 2008 Oracle. All rights reserved.
6 #include <linux/sched.h>
7 #include <linux/slab.h>
8 #include <linux/blkdev.h>
9 #include <linux/list_sort.h>
10 #include <linux/iversion.h>
15 #include "print-tree.h"
17 #include "compression.h"
19 #include "inode-map.h"
21 /* magic values for the inode_only field in btrfs_log_inode:
23 * LOG_INODE_ALL means to log everything
24 * LOG_INODE_EXISTS means to log just enough to recreate the inode
27 #define LOG_INODE_ALL 0
28 #define LOG_INODE_EXISTS 1
29 #define LOG_OTHER_INODE 2
30 #define LOG_OTHER_INODE_ALL 3
33 * directory trouble cases
35 * 1) on rename or unlink, if the inode being unlinked isn't in the fsync
36 * log, we must force a full commit before doing an fsync of the directory
37 * where the unlink was done.
38 * ---> record transid of last unlink/rename per directory
42 * rename foo/some_dir foo2/some_dir
44 * fsync foo/some_dir/some_file
46 * The fsync above will unlink the original some_dir without recording
47 * it in its new location (foo2). After a crash, some_dir will be gone
48 * unless the fsync of some_file forces a full commit
50 * 2) we must log any new names for any file or dir that is in the fsync
51 * log. ---> check inode while renaming/linking.
53 * 2a) we must log any new names for any file or dir during rename
54 * when the directory they are being removed from was logged.
55 * ---> check inode and old parent dir during rename
57 * 2a is actually the more important variant. Without the extra logging
58 * a crash might unlink the old name without recreating the new one
60 * 3) after a crash, we must go through any directories with a link count
61 * of zero and redo the rm -rf
68 * The directory f1 was fully removed from the FS, but fsync was never
69 * called on f1, only its parent dir. After a crash the rm -rf must
70 * be replayed. This must be able to recurse down the entire
71 * directory tree. The inode link count fixup code takes care of the
76 * stages for the tree walking. The first
77 * stage (0) is to only pin down the blocks we find.
78 * The second stage (1) is to make sure that all the inodes
79 * we find in the log are created in the subvolume.
81 * The last stage is to deal with directories and links and extents
82 * and all the other fun semantics
84 #define LOG_WALK_PIN_ONLY 0
85 #define LOG_WALK_REPLAY_INODES 1
86 #define LOG_WALK_REPLAY_DIR_INDEX 2
87 #define LOG_WALK_REPLAY_ALL 3
89 static int btrfs_log_inode(struct btrfs_trans_handle *trans,
90 struct btrfs_root *root, struct btrfs_inode *inode,
94 struct btrfs_log_ctx *ctx);
95 static int link_to_fixup_dir(struct btrfs_trans_handle *trans,
96 struct btrfs_root *root,
97 struct btrfs_path *path, u64 objectid);
98 static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
99 struct btrfs_root *root,
100 struct btrfs_root *log,
101 struct btrfs_path *path,
102 u64 dirid, int del_all);
105 * tree logging is a special write ahead log used to make sure that
106 * fsyncs and O_SYNCs can happen without doing full tree commits.
108 * Full tree commits are expensive because they require commonly
109 * modified blocks to be recowed, creating many dirty pages in the
110 * extent tree and a 4x-6x higher write load than ext3.
112 * Instead of doing a tree commit on every fsync, we use the
113 * key ranges and transaction ids to find items for a given file or directory
114 * that have changed in this transaction. Those items are copied into
115 * a special tree (one per subvolume root), that tree is written to disk
116 * and then the fsync is considered complete.
118 * After a crash, items are copied out of the log-tree back into the
119 * subvolume tree. Any file data extents found are recorded in the extent
120 * allocation tree, and the log-tree freed.
122 * The log tree is read three times: once to pin down all the extents it is
123 * using in ram, once to create all the inodes logged in the tree,
124 * and once to do all the other items.
128 * start a sub transaction and setup the log tree
129 * this increments the log tree writer count to make the people
130 * syncing the tree wait for us to finish
132 static int start_log_trans(struct btrfs_trans_handle *trans,
133 struct btrfs_root *root,
134 struct btrfs_log_ctx *ctx)
136 struct btrfs_fs_info *fs_info = root->fs_info;
139 mutex_lock(&root->log_mutex);
141 if (root->log_root) {
142 if (btrfs_need_log_full_commit(trans)) {
147 if (!root->log_start_pid) {
148 clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
149 root->log_start_pid = current->pid;
150 } else if (root->log_start_pid != current->pid) {
151 set_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
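/*
 * A second task is logging into this root in the same transaction;
 * the log sync code uses this bit to decide whether it is worth
 * briefly waiting for more writers to batch up before committing.
 */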
154 mutex_lock(&fs_info->tree_log_mutex);
155 if (!fs_info->log_root_tree)
156 ret = btrfs_init_log_root_tree(trans, fs_info);
157 mutex_unlock(&fs_info->tree_log_mutex);
161 ret = btrfs_add_log_tree(trans, root);
165 clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
166 root->log_start_pid = current->pid;
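/*
 * Account ourselves as an active writer of this log transaction and
 * queue our context so the task that ends up committing the log can
 * report the result back to us.
 */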
169 atomic_inc(&root->log_batch);
170 atomic_inc(&root->log_writers);
172 int index = root->log_transid % 2;
173 list_add_tail(&ctx->list, &root->log_ctxs[index]);
174 ctx->log_transid = root->log_transid;
178 mutex_unlock(&root->log_mutex);
183 * returns 0 if there was a log transaction running and we were able
184 * to join, or returns -ENOENT if there was no transaction in progress
187 static int join_running_log_trans(struct btrfs_root *root)
195 mutex_lock(&root->log_mutex);
196 if (root->log_root) {
198 atomic_inc(&root->log_writers);
200 mutex_unlock(&root->log_mutex);
205 * This either makes the current running log transaction wait
206 * until you call btrfs_end_log_trans() or it makes any future
207 * log transactions wait until you call btrfs_end_log_trans()
209 void btrfs_pin_log_trans(struct btrfs_root *root)
211 mutex_lock(&root->log_mutex);
212 atomic_inc(&root->log_writers);
213 mutex_unlock(&root->log_mutex);
217 * indicate we're done making changes to the log tree
218 * and wake up anyone waiting to do a sync
220 void btrfs_end_log_trans(struct btrfs_root *root)
222 if (atomic_dec_and_test(&root->log_writers)) {
223 /* atomic_dec_and_test implies a barrier */
224 cond_wake_up_nomb(&root->log_writer_wait);
228 static int btrfs_write_tree_block(struct extent_buffer *buf)
230 return filemap_fdatawrite_range(buf->pages[0]->mapping, buf->start,
231 buf->start + buf->len - 1);
234 static void btrfs_wait_tree_block_writeback(struct extent_buffer *buf)
236 filemap_fdatawait_range(buf->pages[0]->mapping,
237 buf->start, buf->start + buf->len - 1);
241 * the walk control struct is used to pass state down the chain when
242 * processing the log tree. The stage field tells us which part
243 * of the log tree processing we are currently doing. The others
244 * are state fields used for that specific part
246 struct walk_control {
247 /* should we free the extent on disk when done? This is used
248 * at transaction commit time while freeing a log tree
252 /* should we write out the extent buffer? This is used
253 * while flushing the log tree to disk during a sync
257 /* should we wait for the extent buffer io to finish? Also used
258 * while flushing the log tree to disk for a sync
262 /* pin only walk, we record which extents on disk belong to the
267 /* what stage of the replay code we're currently in */
271 * Ignore any items from the inode currently being processed. Needs
272 * to be set every time we find a BTRFS_INODE_ITEM_KEY and we are in
273 * the LOG_WALK_REPLAY_INODES stage.
275 bool ignore_cur_inode;
277 /* the root we are currently replaying */
278 struct btrfs_root *replay_dest;
280 /* the trans handle for the current replay */
281 struct btrfs_trans_handle *trans;
283 /* the function that gets used to process blocks we find in the
284 * tree. Note the extent_buffer might not be up to date when it is
285 * passed in, and it must be checked or read if you need the data
288 int (*process_func)(struct btrfs_root *log, struct extent_buffer *eb,
289 struct walk_control *wc, u64 gen, int level);
293 * process_func used to pin down extents, write them or wait on them
295 static int process_one_buffer(struct btrfs_root *log,
296 struct extent_buffer *eb,
297 struct walk_control *wc, u64 gen, int level)
299 struct btrfs_fs_info *fs_info = log->fs_info;
303 * If this fs is mixed then we need to be able to process the leaves to
304 * pin down any logged extents, so we have to read the block.
306 if (btrfs_fs_incompat(fs_info, MIXED_GROUPS)) {
307 ret = btrfs_read_buffer(eb, gen, level, NULL);
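/*
 * In the pin-only stage we pin every block the log tree uses so its
 * space is not reallocated before replay is finished with it.
 */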
313 ret = btrfs_pin_extent_for_log_replay(fs_info, eb->start,
316 if (!ret && btrfs_buffer_uptodate(eb, gen, 0)) {
317 if (wc->pin && btrfs_header_level(eb) == 0)
318 ret = btrfs_exclude_logged_extents(eb);
320 btrfs_write_tree_block(eb);
322 btrfs_wait_tree_block_writeback(eb);
328 * Item overwrite used by replay and tree logging. eb, slot and key all refer
329 * to the src data we are copying out.
331 * root is the tree we are copying into, and path is a scratch
332 * path for use in this function (it should be released on entry and
333 * will be released on exit).
335 * If the key is already in the destination tree the existing item is
336 * overwritten. If the existing item isn't big enough, it is extended.
337 * If it is too large, it is truncated.
339 * If the key isn't in the destination yet, a new item is inserted.
341 static noinline int overwrite_item(struct btrfs_trans_handle *trans,
342 struct btrfs_root *root,
343 struct btrfs_path *path,
344 struct extent_buffer *eb, int slot,
345 struct btrfs_key *key)
349 u64 saved_i_size = 0;
350 int save_old_i_size = 0;
351 unsigned long src_ptr;
352 unsigned long dst_ptr;
353 int overwrite_root = 0;
354 bool inode_item = key->type == BTRFS_INODE_ITEM_KEY;
356 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
359 item_size = btrfs_item_size_nr(eb, slot);
360 src_ptr = btrfs_item_ptr_offset(eb, slot);
362 /* look for the key in the destination tree */
363 ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
370 u32 dst_size = btrfs_item_size_nr(path->nodes[0],
372 if (dst_size != item_size)
375 if (item_size == 0) {
376 btrfs_release_path(path);
379 dst_copy = kmalloc(item_size, GFP_NOFS);
380 src_copy = kmalloc(item_size, GFP_NOFS);
381 if (!dst_copy || !src_copy) {
382 btrfs_release_path(path);
388 read_extent_buffer(eb, src_copy, src_ptr, item_size);
390 dst_ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
391 read_extent_buffer(path->nodes[0], dst_copy, dst_ptr,
393 ret = memcmp(dst_copy, src_copy, item_size);
398 * they have the same contents, just return, this saves
399 * us from cowing blocks in the destination tree and doing
400 * extra writes that may not have been done by a previous
404 btrfs_release_path(path);
409 * We need to load the old nbytes into the inode so when we
410 * replay the extents we've logged we get the right nbytes.
413 struct btrfs_inode_item *item;
417 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
418 struct btrfs_inode_item);
419 nbytes = btrfs_inode_nbytes(path->nodes[0], item);
420 item = btrfs_item_ptr(eb, slot,
421 struct btrfs_inode_item);
422 btrfs_set_inode_nbytes(eb, item, nbytes);
425 * If this is a directory we need to reset the i_size to
426 * 0 so that we can set it up properly when replaying
427 * the rest of the items in this log.
429 mode = btrfs_inode_mode(eb, item);
431 btrfs_set_inode_size(eb, item, 0);
433 } else if (inode_item) {
434 struct btrfs_inode_item *item;
438 * New inode, set nbytes to 0 so that the nbytes comes out
439 * properly when we replay the extents.
441 item = btrfs_item_ptr(eb, slot, struct btrfs_inode_item);
442 btrfs_set_inode_nbytes(eb, item, 0);
445 * If this is a directory we need to reset the i_size to 0 so
446 * that we can set it up properly when replaying the rest of
447 * the items in this log.
449 mode = btrfs_inode_mode(eb, item);
451 btrfs_set_inode_size(eb, item, 0);
454 btrfs_release_path(path);
455 /* try to insert the key into the destination tree */
456 path->skip_release_on_error = 1;
457 ret = btrfs_insert_empty_item(trans, root, path,
459 path->skip_release_on_error = 0;
461 /* make sure any existing item is the correct size */
462 if (ret == -EEXIST || ret == -EOVERFLOW) {
464 found_size = btrfs_item_size_nr(path->nodes[0],
466 if (found_size > item_size)
467 btrfs_truncate_item(path, item_size, 1);
468 else if (found_size < item_size)
469 btrfs_extend_item(path, item_size - found_size);
473 dst_ptr = btrfs_item_ptr_offset(path->nodes[0],
476 /* don't overwrite an existing inode if the generation number
477 * was logged as zero. This is done when the tree logging code
478 * is just logging an inode to make sure it exists after recovery.
480 * Also, don't overwrite i_size on directories during replay.
481 * log replay inserts and removes directory items based on the
482 * state of the tree found in the subvolume, and i_size is modified
485 if (key->type == BTRFS_INODE_ITEM_KEY && ret == -EEXIST) {
486 struct btrfs_inode_item *src_item;
487 struct btrfs_inode_item *dst_item;
489 src_item = (struct btrfs_inode_item *)src_ptr;
490 dst_item = (struct btrfs_inode_item *)dst_ptr;
492 if (btrfs_inode_generation(eb, src_item) == 0) {
493 struct extent_buffer *dst_eb = path->nodes[0];
494 const u64 ino_size = btrfs_inode_size(eb, src_item);
497 * For regular files an ino_size == 0 is used only when
498 * logging that an inode exists, as part of a directory
499 * fsync, and the inode wasn't fsynced before. In this
500 * case don't set the size of the inode in the fs/subvol
501 * tree, otherwise we would be throwing valid data away.
503 if (S_ISREG(btrfs_inode_mode(eb, src_item)) &&
504 S_ISREG(btrfs_inode_mode(dst_eb, dst_item)) &&
506 struct btrfs_map_token token;
508 btrfs_init_map_token(&token);
509 btrfs_set_token_inode_size(dst_eb, dst_item,
515 if (overwrite_root &&
516 S_ISDIR(btrfs_inode_mode(eb, src_item)) &&
517 S_ISDIR(btrfs_inode_mode(path->nodes[0], dst_item))) {
519 saved_i_size = btrfs_inode_size(path->nodes[0],
524 copy_extent_buffer(path->nodes[0], eb, dst_ptr,
527 if (save_old_i_size) {
528 struct btrfs_inode_item *dst_item;
529 dst_item = (struct btrfs_inode_item *)dst_ptr;
530 btrfs_set_inode_size(path->nodes[0], dst_item, saved_i_size);
533 /* make sure the generation is filled in */
534 if (key->type == BTRFS_INODE_ITEM_KEY) {
535 struct btrfs_inode_item *dst_item;
536 dst_item = (struct btrfs_inode_item *)dst_ptr;
537 if (btrfs_inode_generation(path->nodes[0], dst_item) == 0) {
538 btrfs_set_inode_generation(path->nodes[0], dst_item,
543 btrfs_mark_buffer_dirty(path->nodes[0]);
544 btrfs_release_path(path);
549 * simple helper to read an inode off the disk from a given root
550 * This can only be called for subvolume roots and not for the log
552 static noinline struct inode *read_one_inode(struct btrfs_root *root,
555 struct btrfs_key key;
558 key.objectid = objectid;
559 key.type = BTRFS_INODE_ITEM_KEY;
561 inode = btrfs_iget(root->fs_info->sb, &key, root, NULL);
567 /* replays a single extent in 'eb' at 'slot' with 'key' into the
568 * subvolume 'root'. path is released on entry and should be released
571 * extents in the log tree have not been allocated out of the extent
572 * tree yet. So, this completes the allocation, taking a reference
573 * as required if the extent already exists or creating a new extent
574 * if it isn't in the extent allocation tree yet.
576 * The extent is inserted into the file, dropping any existing extents
577 * from the file that overlap the new one.
579 static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
580 struct btrfs_root *root,
581 struct btrfs_path *path,
582 struct extent_buffer *eb, int slot,
583 struct btrfs_key *key)
585 struct btrfs_fs_info *fs_info = root->fs_info;
588 u64 start = key->offset;
590 struct btrfs_file_extent_item *item;
591 struct inode *inode = NULL;
595 item = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
596 found_type = btrfs_file_extent_type(eb, item);
598 if (found_type == BTRFS_FILE_EXTENT_REG ||
599 found_type == BTRFS_FILE_EXTENT_PREALLOC) {
600 nbytes = btrfs_file_extent_num_bytes(eb, item);
601 extent_end = start + nbytes;
604 * We don't add to the inode's nbytes if we are prealloc or a hole.
607 if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
609 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
610 size = btrfs_file_extent_ram_bytes(eb, item);
611 nbytes = btrfs_file_extent_ram_bytes(eb, item);
612 extent_end = ALIGN(start + size,
613 fs_info->sectorsize);
619 inode = read_one_inode(root, key->objectid);
626 * first check to see if we already have this extent in the
627 * file. This must be done before the btrfs_drop_extents run
628 * so we don't try to drop this extent.
630 ret = btrfs_lookup_file_extent(trans, root, path,
631 btrfs_ino(BTRFS_I(inode)), start, 0);
634 (found_type == BTRFS_FILE_EXTENT_REG ||
635 found_type == BTRFS_FILE_EXTENT_PREALLOC)) {
636 struct btrfs_file_extent_item cmp1;
637 struct btrfs_file_extent_item cmp2;
638 struct btrfs_file_extent_item *existing;
639 struct extent_buffer *leaf;
641 leaf = path->nodes[0];
642 existing = btrfs_item_ptr(leaf, path->slots[0],
643 struct btrfs_file_extent_item);
645 read_extent_buffer(eb, &cmp1, (unsigned long)item,
647 read_extent_buffer(leaf, &cmp2, (unsigned long)existing,
651 * we already have a pointer to this exact extent,
652 * we don't have to do anything
654 if (memcmp(&cmp1, &cmp2, sizeof(cmp1)) == 0) {
655 btrfs_release_path(path);
659 btrfs_release_path(path);
661 /* drop any overlapping extents */
662 ret = btrfs_drop_extents(trans, root, inode, start, extent_end, 1);
666 if (found_type == BTRFS_FILE_EXTENT_REG ||
667 found_type == BTRFS_FILE_EXTENT_PREALLOC) {
669 unsigned long dest_offset;
670 struct btrfs_key ins;
672 if (btrfs_file_extent_disk_bytenr(eb, item) == 0 &&
673 btrfs_fs_incompat(fs_info, NO_HOLES))
676 ret = btrfs_insert_empty_item(trans, root, path, key,
680 dest_offset = btrfs_item_ptr_offset(path->nodes[0],
682 copy_extent_buffer(path->nodes[0], eb, dest_offset,
683 (unsigned long)item, sizeof(*item));
685 ins.objectid = btrfs_file_extent_disk_bytenr(eb, item);
686 ins.offset = btrfs_file_extent_disk_num_bytes(eb, item);
687 ins.type = BTRFS_EXTENT_ITEM_KEY;
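/*
 * The data backref offset is the file offset at which the full on-disk
 * extent would start, so back out this item's offset into the extent.
 */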
688 offset = key->offset - btrfs_file_extent_offset(eb, item);
691 * Manually record the dirty extent: we did a shallow copy of the
692 * file extent item and skipped the normal backref update, modifying
693 * the extent tree all by ourselves.
694 * So we need to record the dirty extent for qgroups manually,
695 * as the owner of the file extent changed from the log tree
696 * (doesn't affect qgroups) to the fs/file tree (affects qgroups).
698 ret = btrfs_qgroup_trace_extent(trans,
699 btrfs_file_extent_disk_bytenr(eb, item),
700 btrfs_file_extent_disk_num_bytes(eb, item),
705 if (ins.objectid > 0) {
706 struct btrfs_ref ref = { 0 };
709 LIST_HEAD(ordered_sums);
712 * is this extent already allocated in the extent
713 * allocation tree? If so, just add a reference
715 ret = btrfs_lookup_data_extent(fs_info, ins.objectid,
718 btrfs_init_generic_ref(&ref,
719 BTRFS_ADD_DELAYED_REF,
720 ins.objectid, ins.offset, 0);
721 btrfs_init_data_ref(&ref,
722 root->root_key.objectid,
723 key->objectid, offset);
724 ret = btrfs_inc_extent_ref(trans, &ref);
729 * insert the extent pointer in the extent
732 ret = btrfs_alloc_logged_file_extent(trans,
733 root->root_key.objectid,
734 key->objectid, offset, &ins);
738 btrfs_release_path(path);
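/*
 * For compressed extents the checksums cover the whole on-disk extent,
 * otherwise they only cover the sub-range this file extent item references.
 */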
740 if (btrfs_file_extent_compression(eb, item)) {
741 csum_start = ins.objectid;
742 csum_end = csum_start + ins.offset;
744 csum_start = ins.objectid +
745 btrfs_file_extent_offset(eb, item);
746 csum_end = csum_start +
747 btrfs_file_extent_num_bytes(eb, item);
750 ret = btrfs_lookup_csums_range(root->log_root,
751 csum_start, csum_end - 1,
756 * Now delete all existing csums in the csum root that
757 * cover our range. We do this because we can have an
758 * extent that is completely referenced by one file
759 * extent item and partially referenced by another
760 * file extent item (like after using the clone or
761 * extent_same ioctls). In this case if we end up doing
762 * the replay of the one that partially references the
763 * extent first, and we do not do the csum deletion
764 * below, we can get 2 csum items in the csum tree that
765 * overlap each other. For example, imagine our log has
766 * the two following file extent items:
768 * key (257 EXTENT_DATA 409600)
769 * extent data disk byte 12845056 nr 102400
770 * extent data offset 20480 nr 20480 ram 102400
772 * key (257 EXTENT_DATA 819200)
773 * extent data disk byte 12845056 nr 102400
774 * extent data offset 0 nr 102400 ram 102400
776 * Where the second one fully references the 100K extent
777 * that starts at disk byte 12845056, and the log tree
778 * has a single csum item that covers the entire range
781 * key (EXTENT_CSUM EXTENT_CSUM 12845056) itemsize 100
783 * After the first file extent item is replayed, the
784 * csum tree gets the following csum item:
786 * key (EXTENT_CSUM EXTENT_CSUM 12865536) itemsize 20
788 * Which covers the 20K sub-range starting at offset 20K
789 * of our extent. Now when we replay the second file
790 * extent item, if we do not delete existing csum items
791 * that cover any of its blocks, we end up getting two
792 * csum items in our csum tree that overlap each other:
794 * key (EXTENT_CSUM EXTENT_CSUM 12845056) itemsize 100
795 * key (EXTENT_CSUM EXTENT_CSUM 12865536) itemsize 20
797 * Which is a problem, because after this anyone trying
798 * to look up the checksum of any block of our
799 * extent starting at an offset of 40K or higher, will
800 * end up looking at the second csum item only, which
801 * does not contain the checksum for any block starting
802 * at offset 40K or higher of our extent.
804 while (!list_empty(&ordered_sums)) {
805 struct btrfs_ordered_sum *sums;
806 sums = list_entry(ordered_sums.next,
807 struct btrfs_ordered_sum,
810 ret = btrfs_del_csums(trans, fs_info,
814 ret = btrfs_csum_file_blocks(trans,
815 fs_info->csum_root, sums);
816 list_del(&sums->list);
822 btrfs_release_path(path);
824 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
825 /* inline extents are easy, we just overwrite them */
826 ret = overwrite_item(trans, root, path, eb, slot, key);
831 inode_add_bytes(inode, nbytes);
833 ret = btrfs_update_inode(trans, root, inode);
841 * when cleaning up conflicts between the directory names in the
842 * subvolume, directory names in the log and directory names in the
843 * inode back references, we may have to unlink inodes from directories.
845 * This is a helper function to do the unlink of a specific directory
848 static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans,
849 struct btrfs_root *root,
850 struct btrfs_path *path,
851 struct btrfs_inode *dir,
852 struct btrfs_dir_item *di)
857 struct extent_buffer *leaf;
858 struct btrfs_key location;
861 leaf = path->nodes[0];
863 btrfs_dir_item_key_to_cpu(leaf, di, &location);
864 name_len = btrfs_dir_name_len(leaf, di);
865 name = kmalloc(name_len, GFP_NOFS);
869 read_extent_buffer(leaf, name, (unsigned long)(di + 1), name_len);
870 btrfs_release_path(path);
872 inode = read_one_inode(root, location.objectid);
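/*
 * Put the victim inode in the link count fixup tree before unlinking it,
 * so that if this removes its last name the inode is kept around and its
 * link count is corrected once replay finishes.
 */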
878 ret = link_to_fixup_dir(trans, root, path, location.objectid);
882 ret = btrfs_unlink_inode(trans, root, dir, BTRFS_I(inode), name,
887 ret = btrfs_run_delayed_items(trans);
895 * helper function to see if a given name and sequence number found
896 * in an inode back reference are already in a directory and correctly
897 * point to this inode
899 static noinline int inode_in_dir(struct btrfs_root *root,
900 struct btrfs_path *path,
901 u64 dirid, u64 objectid, u64 index,
902 const char *name, int name_len)
904 struct btrfs_dir_item *di;
905 struct btrfs_key location;
908 di = btrfs_lookup_dir_index_item(NULL, root, path, dirid,
909 index, name, name_len, 0);
910 if (di && !IS_ERR(di)) {
911 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
912 if (location.objectid != objectid)
916 btrfs_release_path(path);
918 di = btrfs_lookup_dir_item(NULL, root, path, dirid, name, name_len, 0);
919 if (di && !IS_ERR(di)) {
920 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
921 if (location.objectid != objectid)
927 btrfs_release_path(path);
932 * helper function to check a log tree for a named back reference in
933 * an inode. This is used to decide if a back reference that is
934 * found in the subvolume conflicts with what we find in the log.
936 * inode backreferences may have multiple refs in a single item,
937 * during replay we process one reference at a time, and we don't
938 * want to delete valid links to a file from the subvolume if that
939 * link is also in the log.
941 static noinline int backref_in_log(struct btrfs_root *log,
942 struct btrfs_key *key,
944 const char *name, int namelen)
946 struct btrfs_path *path;
947 struct btrfs_inode_ref *ref;
949 unsigned long ptr_end;
950 unsigned long name_ptr;
956 path = btrfs_alloc_path();
960 ret = btrfs_search_slot(NULL, log, key, path, 0, 0);
964 ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
966 if (key->type == BTRFS_INODE_EXTREF_KEY) {
967 if (btrfs_find_name_in_ext_backref(path->nodes[0],
970 name, namelen, NULL))
976 item_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);
977 ptr_end = ptr + item_size;
978 while (ptr < ptr_end) {
979 ref = (struct btrfs_inode_ref *)ptr;
980 found_name_len = btrfs_inode_ref_name_len(path->nodes[0], ref);
981 if (found_name_len == namelen) {
982 name_ptr = (unsigned long)(ref + 1);
983 ret = memcmp_extent_buffer(path->nodes[0], name,
990 ptr = (unsigned long)(ref + 1) + found_name_len;
993 btrfs_free_path(path);
997 static inline int __add_inode_ref(struct btrfs_trans_handle *trans,
998 struct btrfs_root *root,
999 struct btrfs_path *path,
1000 struct btrfs_root *log_root,
1001 struct btrfs_inode *dir,
1002 struct btrfs_inode *inode,
1003 u64 inode_objectid, u64 parent_objectid,
1004 u64 ref_index, char *name, int namelen,
1009 int victim_name_len;
1010 struct extent_buffer *leaf;
1011 struct btrfs_dir_item *di;
1012 struct btrfs_key search_key;
1013 struct btrfs_inode_extref *extref;
1016 /* Search old style refs */
1017 search_key.objectid = inode_objectid;
1018 search_key.type = BTRFS_INODE_REF_KEY;
1019 search_key.offset = parent_objectid;
1020 ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
1022 struct btrfs_inode_ref *victim_ref;
1024 unsigned long ptr_end;
1026 leaf = path->nodes[0];
1028 /* are we trying to overwrite a back ref for the root directory
1029 * if so, just jump out, we're done
1031 if (search_key.objectid == search_key.offset)
1034 /* check all the names in this back reference to see
1035 * if they are in the log. if so, we allow them to stay
1036 * otherwise they must be unlinked as a conflict
1038 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
1039 ptr_end = ptr + btrfs_item_size_nr(leaf, path->slots[0]);
1040 while (ptr < ptr_end) {
1041 victim_ref = (struct btrfs_inode_ref *)ptr;
1042 victim_name_len = btrfs_inode_ref_name_len(leaf,
1044 victim_name = kmalloc(victim_name_len, GFP_NOFS);
1048 read_extent_buffer(leaf, victim_name,
1049 (unsigned long)(victim_ref + 1),
1052 if (!backref_in_log(log_root, &search_key,
1056 inc_nlink(&inode->vfs_inode);
1057 btrfs_release_path(path);
1059 ret = btrfs_unlink_inode(trans, root, dir, inode,
1060 victim_name, victim_name_len);
1064 ret = btrfs_run_delayed_items(trans);
1072 ptr = (unsigned long)(victim_ref + 1) + victim_name_len;
1076 * NOTE: we have already searched the root tree and checked the
1077 * corresponding ref, there is no need to check it again.
1081 btrfs_release_path(path);
1083 /* Same search but for extended refs */
1084 extref = btrfs_lookup_inode_extref(NULL, root, path, name, namelen,
1085 inode_objectid, parent_objectid, 0,
1087 if (!IS_ERR_OR_NULL(extref)) {
1091 struct inode *victim_parent;
1093 leaf = path->nodes[0];
1095 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1096 base = btrfs_item_ptr_offset(leaf, path->slots[0]);
1098 while (cur_offset < item_size) {
1099 extref = (struct btrfs_inode_extref *)(base + cur_offset);
1101 victim_name_len = btrfs_inode_extref_name_len(leaf, extref);
1103 if (btrfs_inode_extref_parent(leaf, extref) != parent_objectid)
1106 victim_name = kmalloc(victim_name_len, GFP_NOFS);
1109 read_extent_buffer(leaf, victim_name, (unsigned long)&extref->name,
1112 search_key.objectid = inode_objectid;
1113 search_key.type = BTRFS_INODE_EXTREF_KEY;
1114 search_key.offset = btrfs_extref_hash(parent_objectid,
1118 if (!backref_in_log(log_root, &search_key,
1119 parent_objectid, victim_name,
1122 victim_parent = read_one_inode(root,
1124 if (victim_parent) {
1125 inc_nlink(&inode->vfs_inode);
1126 btrfs_release_path(path);
1128 ret = btrfs_unlink_inode(trans, root,
1129 BTRFS_I(victim_parent),
1134 ret = btrfs_run_delayed_items(
1137 iput(victim_parent);
1146 cur_offset += victim_name_len + sizeof(*extref);
1150 btrfs_release_path(path);
1152 /* look for a conflicting sequence number */
1153 di = btrfs_lookup_dir_index_item(trans, root, path, btrfs_ino(dir),
1154 ref_index, name, namelen, 0);
1155 if (di && !IS_ERR(di)) {
1156 ret = drop_one_dir_item(trans, root, path, dir, di);
1160 btrfs_release_path(path);
1162 /* look for a conflicting name */
1163 di = btrfs_lookup_dir_item(trans, root, path, btrfs_ino(dir),
1165 if (di && !IS_ERR(di)) {
1166 ret = drop_one_dir_item(trans, root, path, dir, di);
1170 btrfs_release_path(path);
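/*
 * Pull one name out of an extended inode ref item: allocates the name
 * buffer (caller must kfree it) and returns the dir index and, optionally,
 * the parent directory's objectid.
 */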
1175 static int extref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr,
1176 u32 *namelen, char **name, u64 *index,
1177 u64 *parent_objectid)
1179 struct btrfs_inode_extref *extref;
1181 extref = (struct btrfs_inode_extref *)ref_ptr;
1183 *namelen = btrfs_inode_extref_name_len(eb, extref);
1184 *name = kmalloc(*namelen, GFP_NOFS);
1188 read_extent_buffer(eb, *name, (unsigned long)&extref->name,
1192 *index = btrfs_inode_extref_index(eb, extref);
1193 if (parent_objectid)
1194 *parent_objectid = btrfs_inode_extref_parent(eb, extref);
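/*
 * Same as extref_get_fields() but for an old style inode ref item, where
 * the parent directory's objectid comes from the item's key offset instead.
 */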
1199 static int ref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr,
1200 u32 *namelen, char **name, u64 *index)
1202 struct btrfs_inode_ref *ref;
1204 ref = (struct btrfs_inode_ref *)ref_ptr;
1206 *namelen = btrfs_inode_ref_name_len(eb, ref);
1207 *name = kmalloc(*namelen, GFP_NOFS);
1211 read_extent_buffer(eb, *name, (unsigned long)(ref + 1), *namelen);
1214 *index = btrfs_inode_ref_index(eb, ref);
1220 * Take an inode reference item from the log tree and iterate all names from the
1221 * inode reference item in the subvolume tree with the same key (if it exists).
1222 * For any name that is not in the inode reference item from the log tree, do a
1223 * proper unlink of that name (that is, remove its entry from the inode
1224 * reference item and both dir index keys).
1226 static int unlink_old_inode_refs(struct btrfs_trans_handle *trans,
1227 struct btrfs_root *root,
1228 struct btrfs_path *path,
1229 struct btrfs_inode *inode,
1230 struct extent_buffer *log_eb,
1232 struct btrfs_key *key)
1235 unsigned long ref_ptr;
1236 unsigned long ref_end;
1237 struct extent_buffer *eb;
1240 btrfs_release_path(path);
1241 ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
1249 eb = path->nodes[0];
1250 ref_ptr = btrfs_item_ptr_offset(eb, path->slots[0]);
1251 ref_end = ref_ptr + btrfs_item_size_nr(eb, path->slots[0]);
1252 while (ref_ptr < ref_end) {
1257 if (key->type == BTRFS_INODE_EXTREF_KEY) {
1258 ret = extref_get_fields(eb, ref_ptr, &namelen, &name,
1261 parent_id = key->offset;
1262 ret = ref_get_fields(eb, ref_ptr, &namelen, &name,
1268 if (key->type == BTRFS_INODE_EXTREF_KEY)
1269 ret = btrfs_find_name_in_ext_backref(log_eb, log_slot,
1273 ret = btrfs_find_name_in_backref(log_eb, log_slot, name,
1279 btrfs_release_path(path);
1280 dir = read_one_inode(root, parent_id);
1286 ret = btrfs_unlink_inode(trans, root, BTRFS_I(dir),
1287 inode, name, namelen);
1297 if (key->type == BTRFS_INODE_EXTREF_KEY)
1298 ref_ptr += sizeof(struct btrfs_inode_extref);
1300 ref_ptr += sizeof(struct btrfs_inode_ref);
1304 btrfs_release_path(path);
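/*
 * Check whether an inode ref (or extref) for the given parent directory
 * and name already exists in the subvolume tree: returns 1 if it does,
 * 0 if not, and a negative errno on error.
 */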
1308 static int btrfs_inode_ref_exists(struct inode *inode, struct inode *dir,
1309 const u8 ref_type, const char *name,
1312 struct btrfs_key key;
1313 struct btrfs_path *path;
1314 const u64 parent_id = btrfs_ino(BTRFS_I(dir));
1317 path = btrfs_alloc_path();
1321 key.objectid = btrfs_ino(BTRFS_I(inode));
1322 key.type = ref_type;
1323 if (key.type == BTRFS_INODE_REF_KEY)
1324 key.offset = parent_id;
1326 key.offset = btrfs_extref_hash(parent_id, name, namelen);
1328 ret = btrfs_search_slot(NULL, BTRFS_I(inode)->root, &key, path, 0, 0);
1335 if (key.type == BTRFS_INODE_EXTREF_KEY)
1336 ret = btrfs_find_name_in_ext_backref(path->nodes[0],
1337 path->slots[0], parent_id,
1338 name, namelen, NULL);
1340 ret = btrfs_find_name_in_backref(path->nodes[0], path->slots[0],
1341 name, namelen, NULL);
1344 btrfs_free_path(path);
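/*
 * Replay helper: link 'inode' into 'dir' with the given name, first
 * deleting any conflicting dentry that points to a different inode which
 * is in the log but has not been processed yet.
 */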
1348 static int add_link(struct btrfs_trans_handle *trans, struct btrfs_root *root,
1349 struct inode *dir, struct inode *inode, const char *name,
1350 int namelen, u64 ref_index)
1352 struct btrfs_dir_item *dir_item;
1353 struct btrfs_key key;
1354 struct btrfs_path *path;
1355 struct inode *other_inode = NULL;
1358 path = btrfs_alloc_path();
1362 dir_item = btrfs_lookup_dir_item(NULL, root, path,
1363 btrfs_ino(BTRFS_I(dir)),
1366 btrfs_release_path(path);
1368 } else if (IS_ERR(dir_item)) {
1369 ret = PTR_ERR(dir_item);
1374 * Our inode's dentry collides with the dentry of another inode which is
1375 * in the log but not yet processed since it has a higher inode number.
1376 * So delete that other dentry.
1378 btrfs_dir_item_key_to_cpu(path->nodes[0], dir_item, &key);
1379 btrfs_release_path(path);
1380 other_inode = read_one_inode(root, key.objectid);
1385 ret = btrfs_unlink_inode(trans, root, BTRFS_I(dir), BTRFS_I(other_inode),
1390 * If we dropped the link count to 0, bump it so that later the iput()
1391 * on the inode will not free it. We will fixup the link count later.
1393 if (other_inode->i_nlink == 0)
1394 inc_nlink(other_inode);
1396 ret = btrfs_run_delayed_items(trans);
1400 ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode),
1401 name, namelen, 0, ref_index);
1404 btrfs_free_path(path);
1410 * replay one inode back reference item found in the log tree.
1411 * eb, slot and key refer to the buffer and key found in the log tree.
1412 * root is the destination we are replaying into, and path is for temp
1413 * use by this function. (it should be released on return).
1415 static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
1416 struct btrfs_root *root,
1417 struct btrfs_root *log,
1418 struct btrfs_path *path,
1419 struct extent_buffer *eb, int slot,
1420 struct btrfs_key *key)
1422 struct inode *dir = NULL;
1423 struct inode *inode = NULL;
1424 unsigned long ref_ptr;
1425 unsigned long ref_end;
1429 int search_done = 0;
1430 int log_ref_ver = 0;
1431 u64 parent_objectid;
1434 int ref_struct_size;
1436 ref_ptr = btrfs_item_ptr_offset(eb, slot);
1437 ref_end = ref_ptr + btrfs_item_size_nr(eb, slot);
1439 if (key->type == BTRFS_INODE_EXTREF_KEY) {
1440 struct btrfs_inode_extref *r;
1442 ref_struct_size = sizeof(struct btrfs_inode_extref);
1444 r = (struct btrfs_inode_extref *)ref_ptr;
1445 parent_objectid = btrfs_inode_extref_parent(eb, r);
1447 ref_struct_size = sizeof(struct btrfs_inode_ref);
1448 parent_objectid = key->offset;
1450 inode_objectid = key->objectid;
1453 * it is possible that we didn't log all the parent directories
1454 * for a given inode. If we don't find the dir, just don't
1455 * copy the back ref in. The link count fixup code will take
1458 dir = read_one_inode(root, parent_objectid);
1464 inode = read_one_inode(root, inode_objectid);
1470 while (ref_ptr < ref_end) {
1472 ret = extref_get_fields(eb, ref_ptr, &namelen, &name,
1473 &ref_index, &parent_objectid);
1475 * parent object can change from one array
1479 dir = read_one_inode(root, parent_objectid);
1485 ret = ref_get_fields(eb, ref_ptr, &namelen, &name,
1491 /* if we already have a perfect match, we're done */
1492 if (!inode_in_dir(root, path, btrfs_ino(BTRFS_I(dir)),
1493 btrfs_ino(BTRFS_I(inode)), ref_index,
1496 * look for a conflicting back reference in the
1497 * metadata. if we find one we have to unlink that name
1498 * of the file before we add our new link. Later on, we
1499 * overwrite any existing back reference, and we don't
1500 * want to create dangling pointers in the directory.
1504 ret = __add_inode_ref(trans, root, path, log,
1509 ref_index, name, namelen,
1519 * If a reference item already exists for this inode
1520 * with the same parent and name, but different index,
1521 * drop it and the corresponding directory index entries
1522 * from the parent before adding the new reference item
1523 * and dir index entries, otherwise we would fail with
1524 * -EEXIST returned from btrfs_add_link() below.
1526 ret = btrfs_inode_ref_exists(inode, dir, key->type,
1529 ret = btrfs_unlink_inode(trans, root,
1534 * If we dropped the link count to 0, bump it so
1535 * that later the iput() on the inode will not
1536 * free it. We will fixup the link count later.
1538 if (!ret && inode->i_nlink == 0)
1544 /* insert our name */
1545 ret = add_link(trans, root, dir, inode, name, namelen,
1550 btrfs_update_inode(trans, root, inode);
1553 ref_ptr = (unsigned long)(ref_ptr + ref_struct_size) + namelen;
1563 * Before we overwrite the inode reference item in the subvolume tree
1564 * with the item from the log tree, we must unlink all names from the
1565 * parent directory that are in the subvolume's tree inode reference
1566 * item, otherwise we end up with an inconsistent subvolume tree where
1567 * dir index entries exist for a name but there is no inode reference
1568 * item with the same name.
1570 ret = unlink_old_inode_refs(trans, root, path, BTRFS_I(inode), eb, slot,
1575 /* finally write the back reference in the inode */
1576 ret = overwrite_item(trans, root, path, eb, slot, key);
1578 btrfs_release_path(path);
1585 static int insert_orphan_item(struct btrfs_trans_handle *trans,
1586 struct btrfs_root *root, u64 ino)
1590 ret = btrfs_insert_orphan_item(trans, root, ino);
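/*
 * Count how many names refer to the inode through extended inode refs, so
 * that its link count can be rebuilt after replay.
 */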
1597 static int count_inode_extrefs(struct btrfs_root *root,
1598 struct btrfs_inode *inode, struct btrfs_path *path)
1602 unsigned int nlink = 0;
1605 u64 inode_objectid = btrfs_ino(inode);
1608 struct btrfs_inode_extref *extref;
1609 struct extent_buffer *leaf;
1612 ret = btrfs_find_one_extref(root, inode_objectid, offset, path,
1617 leaf = path->nodes[0];
1618 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1619 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
1622 while (cur_offset < item_size) {
1623 extref = (struct btrfs_inode_extref *) (ptr + cur_offset);
1624 name_len = btrfs_inode_extref_name_len(leaf, extref);
1628 cur_offset += name_len + sizeof(*extref);
1632 btrfs_release_path(path);
1634 btrfs_release_path(path);
1636 if (ret < 0 && ret != -ENOENT)
1641 static int count_inode_refs(struct btrfs_root *root,
1642 struct btrfs_inode *inode, struct btrfs_path *path)
1645 struct btrfs_key key;
1646 unsigned int nlink = 0;
1648 unsigned long ptr_end;
1650 u64 ino = btrfs_ino(inode);
1653 key.type = BTRFS_INODE_REF_KEY;
1654 key.offset = (u64)-1;
1657 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1661 if (path->slots[0] == 0)
1666 btrfs_item_key_to_cpu(path->nodes[0], &key,
1668 if (key.objectid != ino ||
1669 key.type != BTRFS_INODE_REF_KEY)
1671 ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
1672 ptr_end = ptr + btrfs_item_size_nr(path->nodes[0],
1674 while (ptr < ptr_end) {
1675 struct btrfs_inode_ref *ref;
1677 ref = (struct btrfs_inode_ref *)ptr;
1678 name_len = btrfs_inode_ref_name_len(path->nodes[0],
1680 ptr = (unsigned long)(ref + 1) + name_len;
1684 if (key.offset == 0)
1686 if (path->slots[0] > 0) {
1691 btrfs_release_path(path);
1693 btrfs_release_path(path);
1699 * There are a few corner cases where the link count of the file can't
1700 * be properly maintained during replay. So, instead of adding
1701 * lots of complexity to the log code, we just scan the backrefs
1702 * for any file that has been through replay.
1704 * The scan will update the link count on the inode to reflect the
1705 * number of back refs found. If it goes down to zero, the iput
1706 * will free the inode.
1708 static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
1709 struct btrfs_root *root,
1710 struct inode *inode)
1712 struct btrfs_path *path;
1715 u64 ino = btrfs_ino(BTRFS_I(inode));
1717 path = btrfs_alloc_path();
1721 ret = count_inode_refs(root, BTRFS_I(inode), path);
1727 ret = count_inode_extrefs(root, BTRFS_I(inode), path);
1735 if (nlink != inode->i_nlink) {
1736 set_nlink(inode, nlink);
1737 btrfs_update_inode(trans, root, inode);
1739 BTRFS_I(inode)->index_cnt = (u64)-1;
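/*
 * If no names are left: for a directory, replay the removal of all of its
 * entries; then insert an orphan item so the inode is deleted by orphan
 * cleanup.
 */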
1741 if (inode->i_nlink == 0) {
1742 if (S_ISDIR(inode->i_mode)) {
1743 ret = replay_dir_deletes(trans, root, NULL, path,
1748 ret = insert_orphan_item(trans, root, ino);
1752 btrfs_free_path(path);
1756 static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
1757 struct btrfs_root *root,
1758 struct btrfs_path *path)
1761 struct btrfs_key key;
1762 struct inode *inode;
1764 key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
1765 key.type = BTRFS_ORPHAN_ITEM_KEY;
1766 key.offset = (u64)-1;
1768 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1773 if (path->slots[0] == 0)
1778 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
1779 if (key.objectid != BTRFS_TREE_LOG_FIXUP_OBJECTID ||
1780 key.type != BTRFS_ORPHAN_ITEM_KEY)
1783 ret = btrfs_del_item(trans, root, path);
1787 btrfs_release_path(path);
1788 inode = read_one_inode(root, key.offset);
1792 ret = fixup_inode_link_count(trans, root, inode);
1798 * fixup on a directory may create new entries,
1799 * make sure we always look for the highest possible
1802 key.offset = (u64)-1;
1806 btrfs_release_path(path);
1812 * record a given inode in the fixup dir so we can check its link
1813 * count when replay is done. The link count is incremented here
1814 * so the inode won't go away until we check it
1816 static noinline int link_to_fixup_dir(struct btrfs_trans_handle *trans,
1817 struct btrfs_root *root,
1818 struct btrfs_path *path,
1821 struct btrfs_key key;
1823 struct inode *inode;
1825 inode = read_one_inode(root, objectid);
1829 key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
1830 key.type = BTRFS_ORPHAN_ITEM_KEY;
1831 key.offset = objectid;
1833 ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
1835 btrfs_release_path(path);
1837 if (!inode->i_nlink)
1838 set_nlink(inode, 1);
1841 ret = btrfs_update_inode(trans, root, inode);
1842 } else if (ret == -EEXIST) {
1845 BUG(); /* Logic Error */
1853 * when replaying the log for a directory, we only insert names
1854 * for inodes that actually exist. This means an fsync on a directory
1855 * does not implicitly fsync all the new files in it
1857 static noinline int insert_one_name(struct btrfs_trans_handle *trans,
1858 struct btrfs_root *root,
1859 u64 dirid, u64 index,
1860 char *name, int name_len,
1861 struct btrfs_key *location)
1863 struct inode *inode;
1867 inode = read_one_inode(root, location->objectid);
1871 dir = read_one_inode(root, dirid);
1877 ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode), name,
1878 name_len, 1, index);
1880 /* FIXME, put inode into FIXUP list */
1888 * Return true if an inode reference exists in the log for the given name,
1889 * inode and parent inode.
1891 static bool name_in_log_ref(struct btrfs_root *log_root,
1892 const char *name, const int name_len,
1893 const u64 dirid, const u64 ino)
1895 struct btrfs_key search_key;
1897 search_key.objectid = ino;
1898 search_key.type = BTRFS_INODE_REF_KEY;
1899 search_key.offset = dirid;
1900 if (backref_in_log(log_root, &search_key, dirid, name, name_len))
1903 search_key.type = BTRFS_INODE_EXTREF_KEY;
1904 search_key.offset = btrfs_extref_hash(dirid, name, name_len);
1905 if (backref_in_log(log_root, &search_key, dirid, name, name_len))
1912 * take a single entry in a log directory item and replay it into
1915 * if a conflicting item exists in the subdirectory already,
1916 * the inode it points to is unlinked and put into the link count
1919 * If a name from the log points to a file or directory that does
1920 * not exist in the FS, it is skipped. fsyncs on directories
1921 * do not force down inodes inside that directory, just changes to the
1922 * names or unlinks in a directory.
1924 * Returns < 0 on error, 0 if the name wasn't replayed (dentry points to a
1925 * non-existing inode) and 1 if the name was replayed.
1927 static noinline int replay_one_name(struct btrfs_trans_handle *trans,
1928 struct btrfs_root *root,
1929 struct btrfs_path *path,
1930 struct extent_buffer *eb,
1931 struct btrfs_dir_item *di,
1932 struct btrfs_key *key)
1936 struct btrfs_dir_item *dst_di;
1937 struct btrfs_key found_key;
1938 struct btrfs_key log_key;
1943 bool update_size = (key->type == BTRFS_DIR_INDEX_KEY);
1944 bool name_added = false;
1946 dir = read_one_inode(root, key->objectid);
1950 name_len = btrfs_dir_name_len(eb, di);
1951 name = kmalloc(name_len, GFP_NOFS);
1957 log_type = btrfs_dir_type(eb, di);
1958 read_extent_buffer(eb, name, (unsigned long)(di + 1),
1961 btrfs_dir_item_key_to_cpu(eb, di, &log_key);
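/* does the inode this name points to already exist in the subvolume? */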
1962 exists = btrfs_lookup_inode(trans, root, path, &log_key, 0);
1967 btrfs_release_path(path);
1969 if (key->type == BTRFS_DIR_ITEM_KEY) {
1970 dst_di = btrfs_lookup_dir_item(trans, root, path, key->objectid,
1972 } else if (key->type == BTRFS_DIR_INDEX_KEY) {
1973 dst_di = btrfs_lookup_dir_index_item(trans, root, path,
1982 if (IS_ERR_OR_NULL(dst_di)) {
1983 /* we need a sequence number to insert, so we only
1984 * do inserts for the BTRFS_DIR_INDEX_KEY types
1986 if (key->type != BTRFS_DIR_INDEX_KEY)
1991 btrfs_dir_item_key_to_cpu(path->nodes[0], dst_di, &found_key);
1992 /* the existing item matches the logged item */
1993 if (found_key.objectid == log_key.objectid &&
1994 found_key.type == log_key.type &&
1995 found_key.offset == log_key.offset &&
1996 btrfs_dir_type(path->nodes[0], dst_di) == log_type) {
1997 update_size = false;
2002 * don't drop the conflicting directory entry if the inode
2003 * for the new entry doesn't exist
2008 ret = drop_one_dir_item(trans, root, path, BTRFS_I(dir), dst_di);
2012 if (key->type == BTRFS_DIR_INDEX_KEY)
2015 btrfs_release_path(path);
2016 if (!ret && update_size) {
2017 btrfs_i_size_write(BTRFS_I(dir), dir->i_size + name_len * 2);
2018 ret = btrfs_update_inode(trans, root, dir);
2022 if (!ret && name_added)
2027 if (name_in_log_ref(root->log_root, name, name_len,
2028 key->objectid, log_key.objectid)) {
2029 /* The dentry will be added later. */
2031 update_size = false;
2034 btrfs_release_path(path);
2035 ret = insert_one_name(trans, root, key->objectid, key->offset,
2036 name, name_len, &log_key);
2037 if (ret && ret != -ENOENT && ret != -EEXIST)
2041 update_size = false;
2047 * find all the names in a directory item and reconcile them into
2048 * the subvolume. Only BTRFS_DIR_ITEM_KEY types will have more than
2049 * one name in a directory item, but the same code gets used for
2050 * both directory index types
2052 static noinline int replay_one_dir_item(struct btrfs_trans_handle *trans,
2053 struct btrfs_root *root,
2054 struct btrfs_path *path,
2055 struct extent_buffer *eb, int slot,
2056 struct btrfs_key *key)
2059 u32 item_size = btrfs_item_size_nr(eb, slot);
2060 struct btrfs_dir_item *di;
2063 unsigned long ptr_end;
2064 struct btrfs_path *fixup_path = NULL;
2066 ptr = btrfs_item_ptr_offset(eb, slot);
2067 ptr_end = ptr + item_size;
2068 while (ptr < ptr_end) {
2069 di = (struct btrfs_dir_item *)ptr;
2070 name_len = btrfs_dir_name_len(eb, di);
2071 ret = replay_one_name(trans, root, path, eb, di, key);
2074 ptr = (unsigned long)(di + 1);
2078 * If this entry refers to a non-directory (directories can not
2079 * have a link count > 1) and it was added in the transaction
2080 * that was not committed, make sure we fixup the link count of
2081 * the inode the entry points to. Otherwise something like
2082 * the following would result in a directory pointing to an
2083 * inode with a wrong link count that does not account for this dir
2091 * ln testdir/bar testdir/bar_link
2092 * ln testdir/foo testdir/foo_link
2093 * xfs_io -c "fsync" testdir/bar
2097 * mount fs, log replay happens
2099 * File foo would remain with a link count of 1 when it has two
2100 * entries pointing to it in the directory testdir. This would
2101 * make it impossible to ever delete the parent directory, as
2102 * it would result in stale dentries that can never be deleted.
2104 if (ret == 1 && btrfs_dir_type(eb, di) != BTRFS_FT_DIR) {
2105 struct btrfs_key di_key;
2108 fixup_path = btrfs_alloc_path();
2115 btrfs_dir_item_key_to_cpu(eb, di, &di_key);
2116 ret = link_to_fixup_dir(trans, root, fixup_path,
2123 btrfs_free_path(fixup_path);
2128 * directory replay has two parts. There are the standard directory
2129 * items in the log copied from the subvolume, and range items
2130 * created in the log while the subvolume was logged.
2132 * The range items tell us which parts of the key space the log
2133 * is authoritative for. During replay, if a key in the subvolume
2134 * directory is in a logged range item, but not actually in the log
2135 * that means it was deleted from the directory before the fsync
2136 * and should be removed.
2138 static noinline int find_dir_range(struct btrfs_root *root,
2139 struct btrfs_path *path,
2140 u64 dirid, int key_type,
2141 u64 *start_ret, u64 *end_ret)
2143 struct btrfs_key key;
2145 struct btrfs_dir_log_item *item;
2149 if (*start_ret == (u64)-1)
2152 key.objectid = dirid;
2153 key.type = key_type;
2154 key.offset = *start_ret;
2156 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2160 if (path->slots[0] == 0)
2165 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
2167 if (key.type != key_type || key.objectid != dirid) {
2171 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
2172 struct btrfs_dir_log_item);
2173 found_end = btrfs_dir_log_end(path->nodes[0], item);
2175 if (*start_ret >= key.offset && *start_ret <= found_end) {
2177 *start_ret = key.offset;
2178 *end_ret = found_end;
2183 /* check the next slot in the tree to see if it is a valid item */
2184 nritems = btrfs_header_nritems(path->nodes[0]);
2186 if (path->slots[0] >= nritems) {
2187 ret = btrfs_next_leaf(root, path);
2192 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
2194 if (key.type != key_type || key.objectid != dirid) {
2198 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
2199 struct btrfs_dir_log_item);
2200 found_end = btrfs_dir_log_end(path->nodes[0], item);
2201 *start_ret = key.offset;
2202 *end_ret = found_end;
2205 btrfs_release_path(path);
2210 * this looks for a given directory item in the log. If the directory
2211 * item is not in the log, the item is removed and the inode it points
2214 static noinline int check_item_in_log(struct btrfs_trans_handle *trans,
2215 struct btrfs_root *root,
2216 struct btrfs_root *log,
2217 struct btrfs_path *path,
2218 struct btrfs_path *log_path,
2220 struct btrfs_key *dir_key)
2223 struct extent_buffer *eb;
2226 struct btrfs_dir_item *di;
2227 struct btrfs_dir_item *log_di;
2230 unsigned long ptr_end;
2232 struct inode *inode;
2233 struct btrfs_key location;
2236 eb = path->nodes[0];
2237 slot = path->slots[0];
2238 item_size = btrfs_item_size_nr(eb, slot);
2239 ptr = btrfs_item_ptr_offset(eb, slot);
2240 ptr_end = ptr + item_size;
2241 while (ptr < ptr_end) {
2242 di = (struct btrfs_dir_item *)ptr;
2243 name_len = btrfs_dir_name_len(eb, di);
2244 name = kmalloc(name_len, GFP_NOFS);
2249 read_extent_buffer(eb, name, (unsigned long)(di + 1),
2252 if (log && dir_key->type == BTRFS_DIR_ITEM_KEY) {
2253 log_di = btrfs_lookup_dir_item(trans, log, log_path,
2256 } else if (log && dir_key->type == BTRFS_DIR_INDEX_KEY) {
2257 log_di = btrfs_lookup_dir_index_item(trans, log,
2263 if (!log_di || log_di == ERR_PTR(-ENOENT)) {
2264 btrfs_dir_item_key_to_cpu(eb, di, &location);
2265 btrfs_release_path(path);
2266 btrfs_release_path(log_path);
2267 inode = read_one_inode(root, location.objectid);
2273 ret = link_to_fixup_dir(trans, root,
2274 path, location.objectid);
2282 ret = btrfs_unlink_inode(trans, root, BTRFS_I(dir),
2283 BTRFS_I(inode), name, name_len);
2285 ret = btrfs_run_delayed_items(trans);
2291 /* there might still be more names under this key
2292 * check and repeat if required
2294 ret = btrfs_search_slot(NULL, root, dir_key, path,
2300 } else if (IS_ERR(log_di)) {
2302 return PTR_ERR(log_di);
2304 btrfs_release_path(log_path);
2307 ptr = (unsigned long)(di + 1);
2312 btrfs_release_path(path);
2313 btrfs_release_path(log_path);
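/*
 * The log holds the full set of xattrs for a logged inode, so delete any
 * xattr found on the inode in the subvolume tree that is not present in
 * the log tree.
 */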
2317 static int replay_xattr_deletes(struct btrfs_trans_handle *trans,
2318 struct btrfs_root *root,
2319 struct btrfs_root *log,
2320 struct btrfs_path *path,
2323 struct btrfs_key search_key;
2324 struct btrfs_path *log_path;
2329 log_path = btrfs_alloc_path();
2333 search_key.objectid = ino;
2334 search_key.type = BTRFS_XATTR_ITEM_KEY;
2335 search_key.offset = 0;
2337 ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
2341 nritems = btrfs_header_nritems(path->nodes[0]);
2342 for (i = path->slots[0]; i < nritems; i++) {
2343 struct btrfs_key key;
2344 struct btrfs_dir_item *di;
2345 struct btrfs_dir_item *log_di;
2349 btrfs_item_key_to_cpu(path->nodes[0], &key, i);
2350 if (key.objectid != ino || key.type != BTRFS_XATTR_ITEM_KEY) {
2355 di = btrfs_item_ptr(path->nodes[0], i, struct btrfs_dir_item);
2356 total_size = btrfs_item_size_nr(path->nodes[0], i);
2358 while (cur < total_size) {
2359 u16 name_len = btrfs_dir_name_len(path->nodes[0], di);
2360 u16 data_len = btrfs_dir_data_len(path->nodes[0], di);
2361 u32 this_len = sizeof(*di) + name_len + data_len;
2364 name = kmalloc(name_len, GFP_NOFS);
2369 read_extent_buffer(path->nodes[0], name,
2370 (unsigned long)(di + 1), name_len);
2372 log_di = btrfs_lookup_xattr(NULL, log, log_path, ino,
2374 btrfs_release_path(log_path);
2376 /* Doesn't exist in log tree, so delete it. */
2377 btrfs_release_path(path);
2378 di = btrfs_lookup_xattr(trans, root, path, ino,
2379 name, name_len, -1);
2386 ret = btrfs_delete_one_dir_name(trans, root,
2390 btrfs_release_path(path);
2395 if (IS_ERR(log_di)) {
2396 ret = PTR_ERR(log_di);
2400 di = (struct btrfs_dir_item *)((char *)di + this_len);
2403 ret = btrfs_next_leaf(root, path);
2409 btrfs_free_path(log_path);
2410 btrfs_release_path(path);
2416 * deletion replay happens before we copy any new directory items
2417 * out of the log or out of backreferences from inodes. It
2418 * scans the log to find ranges of keys that the log is authoritative for,
2419 * and then scans the directory to find items in those ranges that are
2420 * not present in the log.
2422 * Anything we don't find in the log is unlinked and removed from the
2425 static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
2426 struct btrfs_root *root,
2427 struct btrfs_root *log,
2428 struct btrfs_path *path,
2429 u64 dirid, int del_all)
2433 int key_type = BTRFS_DIR_LOG_ITEM_KEY;
2435 struct btrfs_key dir_key;
2436 struct btrfs_key found_key;
2437 struct btrfs_path *log_path;
2440 dir_key.objectid = dirid;
2441 dir_key.type = BTRFS_DIR_ITEM_KEY;
2442 log_path = btrfs_alloc_path();
2446 dir = read_one_inode(root, dirid);
2447 /* it isn't an error if the inode isn't there, that can happen
2448 * because we replay the deletes before we copy in the inode item
2452 btrfs_free_path(log_path);
2460 range_end = (u64)-1;
2462 ret = find_dir_range(log, path, dirid, key_type,
2463 &range_start, &range_end);
2468 dir_key.offset = range_start;
2471 ret = btrfs_search_slot(NULL, root, &dir_key, path,
2476 nritems = btrfs_header_nritems(path->nodes[0]);
2477 if (path->slots[0] >= nritems) {
2478 ret = btrfs_next_leaf(root, path);
2484 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
2486 if (found_key.objectid != dirid ||
2487 found_key.type != dir_key.type)
2490 if (found_key.offset > range_end)
2493 ret = check_item_in_log(trans, root, log, path,
2498 if (found_key.offset == (u64)-1)
2500 dir_key.offset = found_key.offset + 1;
2502 btrfs_release_path(path);
2503 if (range_end == (u64)-1)
2505 range_start = range_end + 1;
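/*
 * Deletion replay runs twice: once over the DIR_ITEM key space and then
 * again over the DIR_INDEX key space.
 */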
2510 if (key_type == BTRFS_DIR_LOG_ITEM_KEY) {
2511 key_type = BTRFS_DIR_LOG_INDEX_KEY;
2512 dir_key.type = BTRFS_DIR_INDEX_KEY;
2513 btrfs_release_path(path);
2517 btrfs_release_path(path);
2518 btrfs_free_path(log_path);
2524 * the process_func used to replay items from the log tree. This
2525 * gets called in two different stages. The first stage just looks
2526 * for inodes and makes sure they are all copied into the subvolume.
2528 * The second stage copies all the other item types from the log into
2529 * the subvolume. The two stage approach is slower, but gets rid of
2530 * lots of complexity around inodes referencing other inodes that exist
2531 * only in the log (references come from either directory items or inode
2534 static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
2535 struct walk_control *wc, u64 gen, int level)
2538 struct btrfs_path *path;
2539 struct btrfs_root *root = wc->replay_dest;
2540 struct btrfs_key key;
2544 ret = btrfs_read_buffer(eb, gen, level, NULL);
2548 level = btrfs_header_level(eb);
2553 path = btrfs_alloc_path();
2557 nritems = btrfs_header_nritems(eb);
2558 for (i = 0; i < nritems; i++) {
2559 btrfs_item_key_to_cpu(eb, &key, i);
2561 /* inode keys are done during the first stage */
2562 if (key.type == BTRFS_INODE_ITEM_KEY &&
2563 wc->stage == LOG_WALK_REPLAY_INODES) {
2564 struct btrfs_inode_item *inode_item;
2567 inode_item = btrfs_item_ptr(eb, i,
2568 struct btrfs_inode_item);
2570 * If we have a tmpfile (O_TMPFILE) that got fsync'ed
2571 * and never got linked before the fsync, skip it, as
2572 * replaying it is pointless since it would be deleted
2573 * later. We skip logging tmpfiles, but it's always
2574 * possible we are replaying a log created with a kernel
2575 * that used to log tmpfiles.
2577 if (btrfs_inode_nlink(eb, inode_item) == 0) {
2578 wc->ignore_cur_inode = true;
2581 wc->ignore_cur_inode = false;
2583 ret = replay_xattr_deletes(wc->trans, root, log,
2584 path, key.objectid);
2587 mode = btrfs_inode_mode(eb, inode_item);
2588 if (S_ISDIR(mode)) {
2589 ret = replay_dir_deletes(wc->trans,
2590 root, log, path, key.objectid, 0);
2594 ret = overwrite_item(wc->trans, root, path,
2600 * Before replaying extents, truncate the inode to its
2601 * size. We need to do it now and not after log replay
2602 * because before an fsync we can have prealloc extents
2603 * added beyond the inode's i_size. If we did it after,
2604 * through orphan cleanup for example, we would drop
2605 * those prealloc extents just after replaying them.
2607 if (S_ISREG(mode)) {
2608 struct inode *inode;
2611 inode = read_one_inode(root, key.objectid);
2616 from = ALIGN(i_size_read(inode),
2617 root->fs_info->sectorsize);
2618 ret = btrfs_drop_extents(wc->trans, root, inode,
2621 /* Update the inode's nbytes. */
2622 ret = btrfs_update_inode(wc->trans,
2630 ret = link_to_fixup_dir(wc->trans, root,
2631 path, key.objectid);
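/*
 * Everything below must be skipped for inodes flagged above as ignored
 * (unlinked tmpfiles): none of their items should be replayed.
 */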
2636 if (wc->ignore_cur_inode)
2639 if (key.type == BTRFS_DIR_INDEX_KEY &&
2640 wc->stage == LOG_WALK_REPLAY_DIR_INDEX) {
2641 ret = replay_one_dir_item(wc->trans, root, path,
2647 if (wc->stage < LOG_WALK_REPLAY_ALL)
2650 /* these keys are simply copied */
2651 if (key.type == BTRFS_XATTR_ITEM_KEY) {
2652 ret = overwrite_item(wc->trans, root, path,
2656 } else if (key.type == BTRFS_INODE_REF_KEY ||
2657 key.type == BTRFS_INODE_EXTREF_KEY) {
2658 ret = add_inode_ref(wc->trans, root, log, path,
2660 if (ret && ret != -ENOENT)
2663 } else if (key.type == BTRFS_EXTENT_DATA_KEY) {
2664 ret = replay_one_extent(wc->trans, root, path,
2668 } else if (key.type == BTRFS_DIR_ITEM_KEY) {
2669 ret = replay_one_dir_item(wc->trans, root, path,
2675 btrfs_free_path(path);
2679 static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
2680 struct btrfs_root *root,
2681 struct btrfs_path *path, int *level,
2682 struct walk_control *wc)
2684 struct btrfs_fs_info *fs_info = root->fs_info;
2688 struct extent_buffer *next;
2689 struct extent_buffer *cur;
2690 struct extent_buffer *parent;
2694 WARN_ON(*level < 0);
2695 WARN_ON(*level >= BTRFS_MAX_LEVEL);
2697 while (*level > 0) {
2698 struct btrfs_key first_key;
2700 WARN_ON(*level < 0);
2701 WARN_ON(*level >= BTRFS_MAX_LEVEL);
2702 cur = path->nodes[*level];
2704 WARN_ON(btrfs_header_level(cur) != *level);
2706 if (path->slots[*level] >=
2707 btrfs_header_nritems(cur))
2710 bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
2711 ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
2712 btrfs_node_key_to_cpu(cur, &first_key, path->slots[*level]);
2713 blocksize = fs_info->nodesize;
2715 parent = path->nodes[*level];
2716 root_owner = btrfs_header_owner(parent);
2718 next = btrfs_find_create_tree_block(fs_info, bytenr);
2720 return PTR_ERR(next);
2723 ret = wc->process_func(root, next, wc, ptr_gen,
2726 free_extent_buffer(next);
2730 path->slots[*level]++;
2732 ret = btrfs_read_buffer(next, ptr_gen,
2733 *level - 1, &first_key);
2735 free_extent_buffer(next);
2740 btrfs_tree_lock(next);
2741 btrfs_set_lock_blocking_write(next);
2742 btrfs_clean_tree_block(next);
2743 btrfs_wait_tree_block_writeback(next);
2744 btrfs_tree_unlock(next);
2746 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
2747 clear_extent_buffer_dirty(next);
2750 WARN_ON(root_owner !=
2751 BTRFS_TREE_LOG_OBJECTID);
2752 ret = btrfs_free_and_pin_reserved_extent(
2756 free_extent_buffer(next);
2760 free_extent_buffer(next);
2763 ret = btrfs_read_buffer(next, ptr_gen, *level - 1, &first_key);
2765 free_extent_buffer(next);
2769 WARN_ON(*level <= 0);
2770 if (path->nodes[*level-1])
2771 free_extent_buffer(path->nodes[*level-1]);
2772 path->nodes[*level-1] = next;
2773 *level = btrfs_header_level(next);
2774 path->slots[*level] = 0;
2777 WARN_ON(*level < 0);
2778 WARN_ON(*level >= BTRFS_MAX_LEVEL);
2780 path->slots[*level] = btrfs_header_nritems(path->nodes[*level]);
2786 static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans,
2787 struct btrfs_root *root,
2788 struct btrfs_path *path, int *level,
2789 struct walk_control *wc)
2791 struct btrfs_fs_info *fs_info = root->fs_info;
2797 for (i = *level; i < BTRFS_MAX_LEVEL - 1 && path->nodes[i]; i++) {
2798 slot = path->slots[i];
2799 if (slot + 1 < btrfs_header_nritems(path->nodes[i])) {
2802 WARN_ON(*level == 0);
2805 struct extent_buffer *parent;
2806 if (path->nodes[*level] == root->node)
2807 parent = path->nodes[*level];
2809 parent = path->nodes[*level + 1];
2811 root_owner = btrfs_header_owner(parent);
2812 ret = wc->process_func(root, path->nodes[*level], wc,
2813 btrfs_header_generation(path->nodes[*level]),
2819 struct extent_buffer *next;
2821 next = path->nodes[*level];
2824 btrfs_tree_lock(next);
2825 btrfs_set_lock_blocking_write(next);
2826 btrfs_clean_tree_block(next);
2827 btrfs_wait_tree_block_writeback(next);
2828 btrfs_tree_unlock(next);
2830 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
2831 clear_extent_buffer_dirty(next);
2834 WARN_ON(root_owner != BTRFS_TREE_LOG_OBJECTID);
2835 ret = btrfs_free_and_pin_reserved_extent(
2837 path->nodes[*level]->start,
2838 path->nodes[*level]->len);
2842 free_extent_buffer(path->nodes[*level]);
2843 path->nodes[*level] = NULL;
2851 * drop the reference count on the tree rooted at 'log'. This traverses
2852 * the tree freeing any blocks that have a ref count of zero after being
2855 static int walk_log_tree(struct btrfs_trans_handle *trans,
2856 struct btrfs_root *log, struct walk_control *wc)
2858 struct btrfs_fs_info *fs_info = log->fs_info;
2862 struct btrfs_path *path;
2865 path = btrfs_alloc_path();
2869 level = btrfs_header_level(log->node);
2871 path->nodes[level] = log->node;
2872 extent_buffer_get(log->node);
2873 path->slots[level] = 0;
2876 wret = walk_down_log_tree(trans, log, path, &level, wc);
2884 wret = walk_up_log_tree(trans, log, path, &level, wc);
2893 /* was the root node processed? if not, catch it here */
2894 if (path->nodes[orig_level]) {
2895 ret = wc->process_func(log, path->nodes[orig_level], wc,
2896 btrfs_header_generation(path->nodes[orig_level]),
2901 struct extent_buffer *next;
2903 next = path->nodes[orig_level];
2906 btrfs_tree_lock(next);
2907 btrfs_set_lock_blocking_write(next);
2908 btrfs_clean_tree_block(next);
2909 btrfs_wait_tree_block_writeback(next);
2910 btrfs_tree_unlock(next);
2912 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
2913 clear_extent_buffer_dirty(next);
2916 WARN_ON(log->root_key.objectid !=
2917 BTRFS_TREE_LOG_OBJECTID);
2918 ret = btrfs_free_and_pin_reserved_extent(fs_info,
2919 next->start, next->len);
2926 btrfs_free_path(path);
2931 * helper function to update the item for a given subvolume's log root
2932 * in the tree of log roots
2934 static int update_log_root(struct btrfs_trans_handle *trans,
2935 struct btrfs_root *log)
2937 struct btrfs_fs_info *fs_info = log->fs_info;
2940 if (log->log_transid == 1) {
2941 /* insert root item on the first sync */
2942 ret = btrfs_insert_root(trans, fs_info->log_root_tree,
2943 &log->root_key, &log->root_item);
2945 ret = btrfs_update_root(trans, fs_info->log_root_tree,
2946 &log->root_key, &log->root_item);
2951 static void wait_log_commit(struct btrfs_root *root, int transid)
2954 int index = transid % 2;
2957 * we only allow two pending log transactions at a time,
2958 * so we know that if ours is more than 2 older than the
2959 * current transaction, we're done
2962 prepare_to_wait(&root->log_commit_wait[index],
2963 &wait, TASK_UNINTERRUPTIBLE);
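/*
 * Open-coded wait loop: register on the wait queue, re-check the
 * condition, and sleep with log_mutex released so the committing task
 * can make progress; the mutex is retaken before checking again.
 */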
2965 if (!(root->log_transid_committed < transid &&
2966 atomic_read(&root->log_commit[index])))
2969 mutex_unlock(&root->log_mutex);
2971 mutex_lock(&root->log_mutex);
2973 finish_wait(&root->log_commit_wait[index], &wait);
2976 static void wait_for_writer(struct btrfs_root *root)
2981 prepare_to_wait(&root->log_writer_wait, &wait,
2982 TASK_UNINTERRUPTIBLE);
2983 if (!atomic_read(&root->log_writers))
2986 mutex_unlock(&root->log_mutex);
2988 mutex_lock(&root->log_mutex);
2990 finish_wait(&root->log_writer_wait, &wait);
2993 static inline void btrfs_remove_log_ctx(struct btrfs_root *root,
2994 struct btrfs_log_ctx *ctx)
2999 mutex_lock(&root->log_mutex);
3000 list_del_init(&ctx->list);
3001 mutex_unlock(&root->log_mutex);
3005 * Invoked in log mutex context, or the caller must otherwise ensure that no
3006 * other task can access the list.
3008 static inline void btrfs_remove_all_log_ctxs(struct btrfs_root *root,
3009 int index, int error)
3011 struct btrfs_log_ctx *ctx;
3012 struct btrfs_log_ctx *safe;
3014 list_for_each_entry_safe(ctx, safe, &root->log_ctxs[index], list) {
3015 list_del_init(&ctx->list);
3016 ctx->log_ret = error;
3019 INIT_LIST_HEAD(&root->log_ctxs[index]);
3023 * btrfs_sync_log sends a given tree log down to the disk and
3024 * updates the super blocks to record it. When this call is done,
3025 * you know that any inodes previously logged are safely on disk only
3028 * Any other return value means you need to call btrfs_commit_transaction.
3029 * Some of the edge cases for fsyncing directories that have had unlinks
3030 * or renames done in the past mean that sometimes the only safe
3031 * fsync is to commit the whole FS. When btrfs_sync_log returns -EAGAIN,
3032 * that has happened.
3034 int btrfs_sync_log(struct btrfs_trans_handle *trans,
3035 struct btrfs_root *root, struct btrfs_log_ctx *ctx)
3041 struct btrfs_fs_info *fs_info = root->fs_info;
3042 struct btrfs_root *log = root->log_root;
3043 struct btrfs_root *log_root_tree = fs_info->log_root_tree;
3044 int log_transid = 0;
3045 struct btrfs_log_ctx root_log_ctx;
3046 struct blk_plug plug;
3048 mutex_lock(&root->log_mutex);
3049 log_transid = ctx->log_transid;
3050 if (root->log_transid_committed >= log_transid) {
3051 mutex_unlock(&root->log_mutex);
3052 return ctx->log_ret;
3055 index1 = log_transid % 2;
3056 if (atomic_read(&root->log_commit[index1])) {
3057 wait_log_commit(root, log_transid);
3058 mutex_unlock(&root->log_mutex);
3059 return ctx->log_ret;
3061 ASSERT(log_transid == root->log_transid);
3062 atomic_set(&root->log_commit[index1], 1);
3064 /* wait for previous tree log sync to complete */
3065 if (atomic_read(&root->log_commit[(index1 + 1) % 2]))
3066 wait_log_commit(root, log_transid - 1);
3069 int batch = atomic_read(&root->log_batch);
3070 /* on an ssd, just kick the log commit out without waiting for more writers */
3071 if (!btrfs_test_opt(fs_info, SSD) &&
3072 test_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state)) {
3073 mutex_unlock(&root->log_mutex);
3074 schedule_timeout_uninterruptible(1);
3075 mutex_lock(&root->log_mutex);
3077 wait_for_writer(root);
3078 if (batch == atomic_read(&root->log_batch))
3082 /* bail out if we need to do a full commit */
3083 if (btrfs_need_log_full_commit(trans)) {
3085 mutex_unlock(&root->log_mutex);
3089 if (log_transid % 2 == 0)
3090 mark = EXTENT_DIRTY;
3094 /* we start IO on all the marked extents here, but we don't actually
3095 * wait for them until later.
3097 blk_start_plug(&plug);
3098 ret = btrfs_write_marked_extents(fs_info, &log->dirty_log_pages, mark);
3100 blk_finish_plug(&plug);
3101 btrfs_abort_transaction(trans, ret);
3102 btrfs_set_log_full_commit(trans);
3103 mutex_unlock(&root->log_mutex);
3107 btrfs_set_root_node(&log->root_item, log->node);
3109 root->log_transid++;
3110 log->log_transid = root->log_transid;
3111 root->log_start_pid = 0;
3113 * Update or create the log root item under the root's log_mutex to prevent
3114 * races with concurrent log syncs that can lead to a failure to update the
3115 * log root item because it was not created yet.
3117 ret = update_log_root(trans, log);
3119 * IO has been started and blocks of the log tree have the WRITTEN flag set
3120 * in their headers. New modifications of the log will be written to
3121 * new positions, so it's safe to allow log writers to go in.
3123 mutex_unlock(&root->log_mutex);
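/*
 * The subvolume's log tree is now up to date and its IO is in flight.
 * What follows commits the log root tree itself, using the same
 * two-slot commit protocol; our root_log_ctx is queued so that a
 * concurrent committer of the log root tree can hand us its result.
 */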
3125 btrfs_init_log_ctx(&root_log_ctx, NULL);
3127 mutex_lock(&log_root_tree->log_mutex);
3128 atomic_inc(&log_root_tree->log_batch);
3129 atomic_inc(&log_root_tree->log_writers);
3131 index2 = log_root_tree->log_transid % 2;
3132 list_add_tail(&root_log_ctx.list, &log_root_tree->log_ctxs[index2]);
3133 root_log_ctx.log_transid = log_root_tree->log_transid;
3135 mutex_unlock(&log_root_tree->log_mutex);
3137 mutex_lock(&log_root_tree->log_mutex);
3138 if (atomic_dec_and_test(&log_root_tree->log_writers)) {
3139 /* atomic_dec_and_test implies a barrier */
3140 cond_wake_up_nomb(&log_root_tree->log_writer_wait);
3144 if (!list_empty(&root_log_ctx.list))
3145 list_del_init(&root_log_ctx.list);
3147 blk_finish_plug(&plug);
3148 btrfs_set_log_full_commit(trans);
3150 if (ret != -ENOSPC) {
3151 btrfs_abort_transaction(trans, ret);
3152 mutex_unlock(&log_root_tree->log_mutex);
3155 btrfs_wait_tree_log_extents(log, mark);
3156 mutex_unlock(&log_root_tree->log_mutex);
3161 if (log_root_tree->log_transid_committed >= root_log_ctx.log_transid) {
3162 blk_finish_plug(&plug);
3163 list_del_init(&root_log_ctx.list);
3164 mutex_unlock(&log_root_tree->log_mutex);
3165 ret = root_log_ctx.log_ret;
3169 index2 = root_log_ctx.log_transid % 2;
3170 if (atomic_read(&log_root_tree->log_commit[index2])) {
3171 blk_finish_plug(&plug);
3172 ret = btrfs_wait_tree_log_extents(log, mark);
3173 wait_log_commit(log_root_tree,
3174 root_log_ctx.log_transid);
3175 mutex_unlock(&log_root_tree->log_mutex);
3177 ret = root_log_ctx.log_ret;
3180 ASSERT(root_log_ctx.log_transid == log_root_tree->log_transid);
3181 atomic_set(&log_root_tree->log_commit[index2], 1);
3183 if (atomic_read(&log_root_tree->log_commit[(index2 + 1) % 2])) {
3184 wait_log_commit(log_root_tree,
3185 root_log_ctx.log_transid - 1);
3188 wait_for_writer(log_root_tree);
3191 * now that we've moved on to the tree of log tree roots,
3192 * check the full commit flag again
3194 if (btrfs_need_log_full_commit(trans)) {
3195 blk_finish_plug(&plug);
3196 btrfs_wait_tree_log_extents(log, mark);
3197 mutex_unlock(&log_root_tree->log_mutex);
3199 goto out_wake_log_root;
3202 ret = btrfs_write_marked_extents(fs_info,
3203 &log_root_tree->dirty_log_pages,
3204 EXTENT_DIRTY | EXTENT_NEW);
3205 blk_finish_plug(&plug);
3207 btrfs_set_log_full_commit(trans);
3208 btrfs_abort_transaction(trans, ret);
3209 mutex_unlock(&log_root_tree->log_mutex);
3210 goto out_wake_log_root;
3212 ret = btrfs_wait_tree_log_extents(log, mark);
3214 ret = btrfs_wait_tree_log_extents(log_root_tree,
3215 EXTENT_NEW | EXTENT_DIRTY);
3217 btrfs_set_log_full_commit(trans);
3218 mutex_unlock(&log_root_tree->log_mutex);
3219 goto out_wake_log_root;
3222 btrfs_set_super_log_root(fs_info->super_for_commit,
3223 log_root_tree->node->start);
3224 btrfs_set_super_log_root_level(fs_info->super_for_commit,
3225 btrfs_header_level(log_root_tree->node));
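/*
 * super_for_commit now points at the new log root; the write_all_supers()
 * call below is what makes this log commit durable.
 */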
3227 log_root_tree->log_transid++;
3228 mutex_unlock(&log_root_tree->log_mutex);
3231 * Nobody else is going to jump in and write the ctree
3232 * super here because the log_commit atomic below is protecting
3233 * us. We must be called with a transaction handle pinning
3234 * the running transaction open, so a full commit can't hop
3235 * in and cause problems either.
3237 ret = write_all_supers(fs_info, 1);
3239 btrfs_set_log_full_commit(trans);
3240 btrfs_abort_transaction(trans, ret);
3241 goto out_wake_log_root;
3244 mutex_lock(&root->log_mutex);
3245 if (root->last_log_commit < log_transid)
3246 root->last_log_commit = log_transid;
3247 mutex_unlock(&root->log_mutex);
3250 mutex_lock(&log_root_tree->log_mutex);
3251 btrfs_remove_all_log_ctxs(log_root_tree, index2, ret);
3253 log_root_tree->log_transid_committed++;
3254 atomic_set(&log_root_tree->log_commit[index2], 0);
3255 mutex_unlock(&log_root_tree->log_mutex);
3258 * The barrier before waitqueue_active (in cond_wake_up) is needed so
3259 * all the updates above are seen by the woken threads. It might not be
3260 * necessary, but proving that seems to be hard.
3262 cond_wake_up(&log_root_tree->log_commit_wait[index2]);
3264 mutex_lock(&root->log_mutex);
3265 btrfs_remove_all_log_ctxs(root, index1, ret);
3266 root->log_transid_committed++;
3267 atomic_set(&root->log_commit[index1], 0);
3268 mutex_unlock(&root->log_mutex);
3271 * The barrier before waitqueue_active (in cond_wake_up) is needed so
3272 * all the updates above are seen by the woken threads. It might not be
3273 * necessary, but proving that seems to be hard.
3275 cond_wake_up(&root->log_commit_wait[index1]);
3279 static void free_log_tree(struct btrfs_trans_handle *trans,
3280 struct btrfs_root *log)
3283 struct walk_control wc = {
3285 .process_func = process_one_buffer
3288 ret = walk_log_tree(trans, log, &wc);
3291 btrfs_abort_transaction(trans, ret);
3293 btrfs_handle_fs_error(log->fs_info, ret, NULL);
3296 clear_extent_bits(&log->dirty_log_pages, 0, (u64)-1,
3297 EXTENT_DIRTY | EXTENT_NEW | EXTENT_NEED_WAIT);
3298 free_extent_buffer(log->node);
3303 * free all the extents used by the tree log. This should be called
3304 * at commit time of the full transaction
3306 int btrfs_free_log(struct btrfs_trans_handle *trans, struct btrfs_root *root)
3308 if (root->log_root) {
3309 free_log_tree(trans, root->log_root);
3310 root->log_root = NULL;
3315 int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans,
3316 struct btrfs_fs_info *fs_info)
3318 if (fs_info->log_root_tree) {
3319 free_log_tree(trans, fs_info->log_root_tree);
3320 fs_info->log_root_tree = NULL;
3326 * If both a file and directory are logged, and unlinks or renames are
3327 * mixed in, we have a few interesting corners:
3329 * create file X in dir Y
3330 * link file X to X.link in dir Y
3332 * unlink file X but leave X.link
3335 * After a crash we would expect only X.link to exist. But file X
3336 * didn't get fsync'd again so the log has back refs for X and X.link.
3338 * We solve this by removing directory entries and inode backrefs from the
3339 * log when a file that was logged in the current transaction is
3340 * unlinked. Any later fsync will include the updated log entries, and
3341 * we'll be able to reconstruct the proper directory items from backrefs.
3343 * This optimization allows us to avoid relogging the entire inode
3344 * or the entire directory.
3346 int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
3347 struct btrfs_root *root,
3348 const char *name, int name_len,
3349 struct btrfs_inode *dir, u64 index)
3351 struct btrfs_root *log;
3352 struct btrfs_dir_item *di;
3353 struct btrfs_path *path;
3357 u64 dir_ino = btrfs_ino(dir);
3359 if (dir->logged_trans < trans->transid)
3362 ret = join_running_log_trans(root);
3366 mutex_lock(&dir->log_mutex);
3368 log = root->log_root;
3369 path = btrfs_alloc_path();
3375 di = btrfs_lookup_dir_item(trans, log, path, dir_ino,
3376 name, name_len, -1);
3382 ret = btrfs_delete_one_dir_name(trans, log, path, di);
3383 bytes_del += name_len;
3389 btrfs_release_path(path);
3390 di = btrfs_lookup_dir_index_item(trans, log, path, dir_ino,
3391 index, name, name_len, -1);
3397 ret = btrfs_delete_one_dir_name(trans, log, path, di);
3398 bytes_del += name_len;
3405 /* update the directory size in the log to reflect the names
3409 struct btrfs_key key;
3411 key.objectid = dir_ino;
3413 key.type = BTRFS_INODE_ITEM_KEY;
3414 btrfs_release_path(path);
3416 ret = btrfs_search_slot(trans, log, &key, path, 0, 1);
3422 struct btrfs_inode_item *item;
3425 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
3426 struct btrfs_inode_item);
3427 i_size = btrfs_inode_size(path->nodes[0], item);
3428 if (i_size > bytes_del)
3429 i_size -= bytes_del;
3432 btrfs_set_inode_size(path->nodes[0], item, i_size);
3433 btrfs_mark_buffer_dirty(path->nodes[0]);
3436 btrfs_release_path(path);
3439 btrfs_free_path(path);
3441 mutex_unlock(&dir->log_mutex);
3442 if (ret == -ENOSPC) {
3443 btrfs_set_log_full_commit(trans);
3446 btrfs_abort_transaction(trans, ret);
3448 btrfs_end_log_trans(root);
3453 /* see comments for btrfs_del_dir_entries_in_log */
3454 int btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans,
3455 struct btrfs_root *root,
3456 const char *name, int name_len,
3457 struct btrfs_inode *inode, u64 dirid)
3459 struct btrfs_root *log;
3463 if (inode->logged_trans < trans->transid)
3466 ret = join_running_log_trans(root);
3469 log = root->log_root;
3470 mutex_lock(&inode->log_mutex);
3472 ret = btrfs_del_inode_ref(trans, log, name, name_len, btrfs_ino(inode),
3474 mutex_unlock(&inode->log_mutex);
3475 if (ret == -ENOSPC) {
3476 btrfs_set_log_full_commit(trans);
3478 } else if (ret < 0 && ret != -ENOENT)
3479 btrfs_abort_transaction(trans, ret);
3480 btrfs_end_log_trans(root);
3486 * creates a range item in the log for 'dirid'. first_offset and
3487 * last_offset tell us which parts of the key space the log should
3488 * be considered authoritative for.
3490 static noinline int insert_dir_log_key(struct btrfs_trans_handle *trans,
3491 struct btrfs_root *log,
3492 struct btrfs_path *path,
3493 int key_type, u64 dirid,
3494 u64 first_offset, u64 last_offset)
3497 struct btrfs_key key;
3498 struct btrfs_dir_log_item *item;
3500 key.objectid = dirid;
3501 key.offset = first_offset;
3502 if (key_type == BTRFS_DIR_ITEM_KEY)
3503 key.type = BTRFS_DIR_LOG_ITEM_KEY;
3505 key.type = BTRFS_DIR_LOG_INDEX_KEY;
3506 ret = btrfs_insert_empty_item(trans, log, path, &key, sizeof(*item));
3510 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
3511 struct btrfs_dir_log_item);
3512 btrfs_set_dir_log_end(path->nodes[0], item, last_offset);
3513 btrfs_mark_buffer_dirty(path->nodes[0]);
3514 btrfs_release_path(path);
3519 * log all the items included in the current transaction for a given
3520 * directory. This also creates the range items in the log tree required
3521 * to replay anything deleted before the fsync
3523 static noinline int log_dir_items(struct btrfs_trans_handle *trans,
3524 struct btrfs_root *root, struct btrfs_inode *inode,
3525 struct btrfs_path *path,
3526 struct btrfs_path *dst_path, int key_type,
3527 struct btrfs_log_ctx *ctx,
3528 u64 min_offset, u64 *last_offset_ret)
3530 struct btrfs_key min_key;
3531 struct btrfs_root *log = root->log_root;
3532 struct extent_buffer *src;
3537 u64 first_offset = min_offset;
3538 u64 last_offset = (u64)-1;
3539 u64 ino = btrfs_ino(inode);
3541 log = root->log_root;
3543 min_key.objectid = ino;
3544 min_key.type = key_type;
3545 min_key.offset = min_offset;
3547 ret = btrfs_search_forward(root, &min_key, path, trans->transid);
3550 * we didn't find anything from this transaction, see if there
3551 * is anything at all
3553 if (ret != 0 || min_key.objectid != ino || min_key.type != key_type) {
3554 min_key.objectid = ino;
3555 min_key.type = key_type;
3556 min_key.offset = (u64)-1;
3557 btrfs_release_path(path);
3558 ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
3560 btrfs_release_path(path);
3563 ret = btrfs_previous_item(root, path, ino, key_type);
3565 /* if ret == 0 there are items for this type,
3566 * create a range to tell us the last key of this type.
3567 * otherwise, there are no items in this directory after
3568 * *min_offset, and we create a range to indicate that.
3571 struct btrfs_key tmp;
3572 btrfs_item_key_to_cpu(path->nodes[0], &tmp,
3574 if (key_type == tmp.type)
3575 first_offset = max(min_offset, tmp.offset) + 1;
3580 /* go backward to find any previous key */
3581 ret = btrfs_previous_item(root, path, ino, key_type);
3583 struct btrfs_key tmp;
3584 btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
3585 if (key_type == tmp.type) {
3586 first_offset = tmp.offset;
3587 ret = overwrite_item(trans, log, dst_path,
3588 path->nodes[0], path->slots[0],
3596 btrfs_release_path(path);
3599 * Find the first key from this transaction again. See the note for
3600 * log_new_dir_dentries, if we're logging a directory recursively we
3601 * won't be holding its i_mutex, which means we can modify the directory
3602 * while we're logging it. If we remove an entry between our first
3603 * search and this search we'll not find the key again and can just
3606 ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
3611 * we have a block from this transaction, log every item in it
3612 * from our directory
3615 struct btrfs_key tmp;
3616 src = path->nodes[0];
3617 nritems = btrfs_header_nritems(src);
3618 for (i = path->slots[0]; i < nritems; i++) {
3619 struct btrfs_dir_item *di;
3621 btrfs_item_key_to_cpu(src, &min_key, i);
3623 if (min_key.objectid != ino || min_key.type != key_type)
3625 ret = overwrite_item(trans, log, dst_path, src, i,
3633 * We must make sure that when we log a directory entry,
3634 * the corresponding inode, after log replay, has a
3635 * matching link count. For example:
3641 * xfs_io -c "fsync" mydir
3643 * <mount fs and log replay>
3645 * Would result in an fsync log that, when replayed,
3646 * leaves our file inode with a link count of 1 but
3647 * two directory entries pointing to the same inode.
3648 * After removing one of the names, it would not be
3649 * possible to remove the other name, which always
3650 * resulted in stale file handle errors, and it would
3651 * not be possible to rmdir the parent directory, since
3652 * its i_size could never decrement to the value
3653 * BTRFS_EMPTY_DIR_SIZE, resulting in -ENOTEMPTY errors.
3655 di = btrfs_item_ptr(src, i, struct btrfs_dir_item);
3656 btrfs_dir_item_key_to_cpu(src, di, &tmp);
3658 (btrfs_dir_transid(src, di) == trans->transid ||
3659 btrfs_dir_type(src, di) == BTRFS_FT_DIR) &&
3660 tmp.type != BTRFS_ROOT_ITEM_KEY)
3661 ctx->log_new_dentries = true;
3663 path->slots[0] = nritems;
3666 * look ahead to the next item and see if it is also
3667 * from this directory and from this transaction
3669 ret = btrfs_next_leaf(root, path);
3672 last_offset = (u64)-1;
3677 btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
3678 if (tmp.objectid != ino || tmp.type != key_type) {
3679 last_offset = (u64)-1;
3682 if (btrfs_header_generation(path->nodes[0]) != trans->transid) {
3683 ret = overwrite_item(trans, log, dst_path,
3684 path->nodes[0], path->slots[0],
3689 last_offset = tmp.offset;
3694 btrfs_release_path(path);
3695 btrfs_release_path(dst_path);
3698 *last_offset_ret = last_offset;
3700 * insert the log range keys to indicate where the log
3703 ret = insert_dir_log_key(trans, log, path, key_type,
3704 ino, first_offset, last_offset);
3712 * logging directories is very similar to logging inodes. We find all the items
3713 * from the current transaction and write them to the log.
3715 * The recovery code scans the directory in the subvolume, and if it finds a
3716 * key in the range logged that is not present in the log tree, then it means
3717 * that dir entry was unlinked during the transaction.
3719 * In order for that scan to work, we must include one key smaller than
3720 * the smallest logged by this transaction and one key larger than the largest
3721 * key logged by this transaction.
3723 static noinline int log_directory_changes(struct btrfs_trans_handle *trans,
3724 struct btrfs_root *root, struct btrfs_inode *inode,
3725 struct btrfs_path *path,
3726 struct btrfs_path *dst_path,
3727 struct btrfs_log_ctx *ctx)
3732 int key_type = BTRFS_DIR_ITEM_KEY;
3738 ret = log_dir_items(trans, root, inode, path, dst_path, key_type,
3739 ctx, min_key, &max_key);
3742 if (max_key == (u64)-1)
3744 min_key = max_key + 1;
3747 if (key_type == BTRFS_DIR_ITEM_KEY) {
3748 key_type = BTRFS_DIR_INDEX_KEY;
3755 * a helper function to drop items from the log before we relog an
3756 * inode. max_key_type indicates the highest item type to remove.
3757 * This cannot be run for file data extents because it does not
3758 * free the extents they point to.
3760 static int drop_objectid_items(struct btrfs_trans_handle *trans,
3761 struct btrfs_root *log,
3762 struct btrfs_path *path,
3763 u64 objectid, int max_key_type)
3766 struct btrfs_key key;
3767 struct btrfs_key found_key;
3770 key.objectid = objectid;
3771 key.type = max_key_type;
3772 key.offset = (u64)-1;
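/*
 * Walk backwards from the end of the key space: each search lands just
 * past the last matching item, and every item in that leaf belonging to
 * this objectid (with type <= max_key_type) is deleted in one batch.
 */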
3775 ret = btrfs_search_slot(trans, log, &key, path, -1, 1);
3776 BUG_ON(ret == 0); /* Logic error */
3780 if (path->slots[0] == 0)
3784 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
3787 if (found_key.objectid != objectid)
3790 found_key.offset = 0;
3792 ret = btrfs_bin_search(path->nodes[0], &found_key, 0,
3797 ret = btrfs_del_items(trans, log, path, start_slot,
3798 path->slots[0] - start_slot + 1);
4800 * If start_slot isn't 0 then we don't need to re-search; we've
4801 * found the last item with this objectid in the tree.
3803 if (ret || start_slot != 0)
3805 btrfs_release_path(path);
3807 btrfs_release_path(path);
3813 static void fill_inode_item(struct btrfs_trans_handle *trans,
3814 struct extent_buffer *leaf,
3815 struct btrfs_inode_item *item,
3816 struct inode *inode, int log_inode_only,
3819 struct btrfs_map_token token;
3821 btrfs_init_map_token(&token);
3823 if (log_inode_only) {
3824 /* set the generation to zero so the recovery code
3825 * can tell the difference between logging the inode
3826 * just to say 'this inode exists' and logging it
3827 * to say 'update this inode with these values'
3829 btrfs_set_token_inode_generation(leaf, item, 0, &token);
3830 btrfs_set_token_inode_size(leaf, item, logged_isize, &token);
3832 btrfs_set_token_inode_generation(leaf, item,
3833 BTRFS_I(inode)->generation,
3835 btrfs_set_token_inode_size(leaf, item, inode->i_size, &token);
3838 btrfs_set_token_inode_uid(leaf, item, i_uid_read(inode), &token);
3839 btrfs_set_token_inode_gid(leaf, item, i_gid_read(inode), &token);
3840 btrfs_set_token_inode_mode(leaf, item, inode->i_mode, &token);
3841 btrfs_set_token_inode_nlink(leaf, item, inode->i_nlink, &token);
3843 btrfs_set_token_timespec_sec(leaf, &item->atime,
3844 inode->i_atime.tv_sec, &token);
3845 btrfs_set_token_timespec_nsec(leaf, &item->atime,
3846 inode->i_atime.tv_nsec, &token);
3848 btrfs_set_token_timespec_sec(leaf, &item->mtime,
3849 inode->i_mtime.tv_sec, &token);
3850 btrfs_set_token_timespec_nsec(leaf, &item->mtime,
3851 inode->i_mtime.tv_nsec, &token);
3853 btrfs_set_token_timespec_sec(leaf, &item->ctime,
3854 inode->i_ctime.tv_sec, &token);
3855 btrfs_set_token_timespec_nsec(leaf, &item->ctime,
3856 inode->i_ctime.tv_nsec, &token);
3858 btrfs_set_token_inode_nbytes(leaf, item, inode_get_bytes(inode),
3861 btrfs_set_token_inode_sequence(leaf, item,
3862 inode_peek_iversion(inode), &token);
3863 btrfs_set_token_inode_transid(leaf, item, trans->transid, &token);
3864 btrfs_set_token_inode_rdev(leaf, item, inode->i_rdev, &token);
3865 btrfs_set_token_inode_flags(leaf, item, BTRFS_I(inode)->flags, &token);
3866 btrfs_set_token_inode_block_group(leaf, item, 0, &token);
3869 static int log_inode_item(struct btrfs_trans_handle *trans,
3870 struct btrfs_root *log, struct btrfs_path *path,
3871 struct btrfs_inode *inode)
3873 struct btrfs_inode_item *inode_item;
3876 ret = btrfs_insert_empty_item(trans, log, path,
3877 &inode->location, sizeof(*inode_item));
3878 if (ret && ret != -EEXIST)
3880 inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
3881 struct btrfs_inode_item);
3882 fill_inode_item(trans, path->nodes[0], inode_item, &inode->vfs_inode,
3884 btrfs_release_path(path);
3888 static noinline int copy_items(struct btrfs_trans_handle *trans,
3889 struct btrfs_inode *inode,
3890 struct btrfs_path *dst_path,
3891 struct btrfs_path *src_path, u64 *last_extent,
3892 int start_slot, int nr, int inode_only,
3895 struct btrfs_fs_info *fs_info = trans->fs_info;
3896 unsigned long src_offset;
3897 unsigned long dst_offset;
3898 struct btrfs_root *log = inode->root->log_root;
3899 struct btrfs_file_extent_item *extent;
3900 struct btrfs_inode_item *inode_item;
3901 struct extent_buffer *src = src_path->nodes[0];
3902 struct btrfs_key first_key, last_key, key;
3904 struct btrfs_key *ins_keys;
3908 struct list_head ordered_sums;
3909 int skip_csum = inode->flags & BTRFS_INODE_NODATASUM;
3910 bool has_extents = false;
3911 bool need_find_last_extent = true;
3914 INIT_LIST_HEAD(&ordered_sums);
3916 ins_data = kmalloc(nr * sizeof(struct btrfs_key) +
3917 nr * sizeof(u32), GFP_NOFS);
3921 first_key.objectid = (u64)-1;
3923 ins_sizes = (u32 *)ins_data;
3924 ins_keys = (struct btrfs_key *)(ins_data + nr * sizeof(u32));
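/*
 * Two passes: first collect the key and size of every source item so the
 * destination items can be reserved with a single btrfs_insert_empty_items()
 * call, then copy each item body in the loop below.
 */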
3926 for (i = 0; i < nr; i++) {
3927 ins_sizes[i] = btrfs_item_size_nr(src, i + start_slot);
3928 btrfs_item_key_to_cpu(src, ins_keys + i, i + start_slot);
3930 ret = btrfs_insert_empty_items(trans, log, dst_path,
3931 ins_keys, ins_sizes, nr);
3937 for (i = 0; i < nr; i++, dst_path->slots[0]++) {
3938 dst_offset = btrfs_item_ptr_offset(dst_path->nodes[0],
3939 dst_path->slots[0]);
3941 src_offset = btrfs_item_ptr_offset(src, start_slot + i);
3944 last_key = ins_keys[i];
3946 if (ins_keys[i].type == BTRFS_INODE_ITEM_KEY) {
3947 inode_item = btrfs_item_ptr(dst_path->nodes[0],
3949 struct btrfs_inode_item);
3950 fill_inode_item(trans, dst_path->nodes[0], inode_item,
3952 inode_only == LOG_INODE_EXISTS,
3955 copy_extent_buffer(dst_path->nodes[0], src, dst_offset,
3956 src_offset, ins_sizes[i]);
3960 * We set need_find_last_extent here in case we know we were
3961 * processing other items and then walk into the first extent in
3962 * the inode. If we don't hit an extent then nothing changes,
3963 * we'll do the last search the next time around.
3965 if (ins_keys[i].type == BTRFS_EXTENT_DATA_KEY) {
3967 if (first_key.objectid == (u64)-1)
3968 first_key = ins_keys[i];
3970 need_find_last_extent = false;
3973 /* take a reference on file data extents so that truncates
3974 * or deletes of this inode don't have to relog the inode
3977 if (ins_keys[i].type == BTRFS_EXTENT_DATA_KEY &&
3980 extent = btrfs_item_ptr(src, start_slot + i,
3981 struct btrfs_file_extent_item);
3983 if (btrfs_file_extent_generation(src, extent) < trans->transid)
3986 found_type = btrfs_file_extent_type(src, extent);
3987 if (found_type == BTRFS_FILE_EXTENT_REG) {
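/*
 * Look up the data checksums covering the byte range this file
 * extent references so they can be copied into the log's csum
 * tree once the item copy loop has finished.
 */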
3989 ds = btrfs_file_extent_disk_bytenr(src,
3991 /* ds == 0 is a hole */
3995 dl = btrfs_file_extent_disk_num_bytes(src,
3997 cs = btrfs_file_extent_offset(src, extent);
3998 cl = btrfs_file_extent_num_bytes(src,
4000 if (btrfs_file_extent_compression(src,
4006 ret = btrfs_lookup_csums_range(
4008 ds + cs, ds + cs + cl - 1,
4011 btrfs_release_path(dst_path);
4019 btrfs_mark_buffer_dirty(dst_path->nodes[0]);
4020 btrfs_release_path(dst_path);
4024 * we have to do this after the loop above to avoid changing the
4025 * log tree while trying to change the log tree.
4028 while (!list_empty(&ordered_sums)) {
4029 struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next,
4030 struct btrfs_ordered_sum,
4033 ret = btrfs_csum_file_blocks(trans, log, sums);
4034 list_del(&sums->list);
4041 if (need_find_last_extent && *last_extent == first_key.offset) {
4043 * We don't have any leaves between our current one and the one
4044 * we processed before that can have file extent items for our
4045 * inode (and have a generation number smaller than our current
4048 need_find_last_extent = false;
4052 * Because we use btrfs_search_forward we could skip leaves that were
4053 * not modified and then assume *last_extent is valid when it really
4054 * isn't. So back up to the previous leaf and read the end of the last
4055 * extent before we go and fill in holes.
4057 if (need_find_last_extent) {
4060 ret = btrfs_prev_leaf(inode->root, src_path);
4065 if (src_path->slots[0])
4066 src_path->slots[0]--;
4067 src = src_path->nodes[0];
4068 btrfs_item_key_to_cpu(src, &key, src_path->slots[0]);
4069 if (key.objectid != btrfs_ino(inode) ||
4070 key.type != BTRFS_EXTENT_DATA_KEY)
4072 extent = btrfs_item_ptr(src, src_path->slots[0],
4073 struct btrfs_file_extent_item);
4074 if (btrfs_file_extent_type(src, extent) ==
4075 BTRFS_FILE_EXTENT_INLINE) {
4076 len = btrfs_file_extent_ram_bytes(src, extent);
4077 *last_extent = ALIGN(key.offset + len,
4078 fs_info->sectorsize);
4080 len = btrfs_file_extent_num_bytes(src, extent);
4081 *last_extent = key.offset + len;
4085 /* So we did prev_leaf, now we need to move to the next leaf, but a few
4086 * things could have happened
4088 * 1) A merge could have happened, so we could currently be on a leaf
4089 * that holds what we were copying in the first place.
4090 * 2) A split could have happened, and now not all of the items we want
4091 * are on the same leaf.
4093 * So we need to adjust how we search for holes, we need to drop the
4094 * path and re-search for the first extent key we found, and then walk
4095 * forward until we hit the last one we copied.
4097 if (need_find_last_extent) {
4098 /* btrfs_prev_leaf could return 1 without releasing the path */
4099 btrfs_release_path(src_path);
4100 ret = btrfs_search_slot(NULL, inode->root, &first_key,
4105 src = src_path->nodes[0];
4106 i = src_path->slots[0];
4112 * Now we need to go through and fill in any holes we may have,
4113 * to make sure that holes are punched for those areas in case they
4114 * previously had extents.
4120 if (i >= btrfs_header_nritems(src_path->nodes[0])) {
4121 ret = btrfs_next_leaf(inode->root, src_path);
4125 src = src_path->nodes[0];
4127 need_find_last_extent = true;
4130 btrfs_item_key_to_cpu(src, &key, i);
4131 if (!btrfs_comp_cpu_keys(&key, &last_key))
4133 if (key.objectid != btrfs_ino(inode) ||
4134 key.type != BTRFS_EXTENT_DATA_KEY) {
4138 extent = btrfs_item_ptr(src, i, struct btrfs_file_extent_item);
4139 if (btrfs_file_extent_type(src, extent) ==
4140 BTRFS_FILE_EXTENT_INLINE) {
4141 len = btrfs_file_extent_ram_bytes(src, extent);
4142 extent_end = ALIGN(key.offset + len,
4143 fs_info->sectorsize);
4145 len = btrfs_file_extent_num_bytes(src, extent);
4146 extent_end = key.offset + len;
4150 if (*last_extent == key.offset) {
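/* the previous extent ends exactly where this one starts: no hole to fill */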
4151 *last_extent = extent_end;
4154 offset = *last_extent;
4155 len = key.offset - *last_extent;
4156 ret = btrfs_insert_file_extent(trans, log, btrfs_ino(inode),
4157 offset, 0, 0, len, 0, len, 0, 0, 0);
4160 *last_extent = extent_end;
4164 * Check if there is a hole between the last extent found in our leaf
4165 * and the first extent in the next leaf. If there is one, we need to
4166 * log an explicit hole so that at replay time we can punch the hole.
4169 key.objectid == btrfs_ino(inode) &&
4170 key.type == BTRFS_EXTENT_DATA_KEY &&
4171 i == btrfs_header_nritems(src_path->nodes[0])) {
4172 ret = btrfs_next_leaf(inode->root, src_path);
4173 need_find_last_extent = true;
4176 } else if (ret == 0) {
4177 btrfs_item_key_to_cpu(src_path->nodes[0], &key,
4178 src_path->slots[0]);
4179 if (key.objectid == btrfs_ino(inode) &&
4180 key.type == BTRFS_EXTENT_DATA_KEY &&
4181 *last_extent < key.offset) {
4182 const u64 len = key.offset - *last_extent;
4184 ret = btrfs_insert_file_extent(trans, log,
4189 *last_extent += len;
4194 * Need to let the callers know we dropped the path so they should
4197 if (!ret && need_find_last_extent)
4202 static int extent_cmp(void *priv, struct list_head *a, struct list_head *b)
4204 struct extent_map *em1, *em2;
4206 em1 = list_entry(a, struct extent_map, list);
4207 em2 = list_entry(b, struct extent_map, list);
4209 if (em1->start < em2->start)
4211 else if (em1->start > em2->start)
4216 static int log_extent_csums(struct btrfs_trans_handle *trans,
4217 struct btrfs_inode *inode,
4218 struct btrfs_root *log_root,
4219 const struct extent_map *em)
4223 LIST_HEAD(ordered_sums);
4226 if (inode->flags & BTRFS_INODE_NODATASUM ||
4227 test_bit(EXTENT_FLAG_PREALLOC, &em->flags) ||
4228 em->block_start == EXTENT_MAP_HOLE)
4231 /* If we're compressed we have to save the entire range of csums. */
4232 if (em->compress_type) {
4234 csum_len = max(em->block_len, em->orig_block_len);
4236 csum_offset = em->mod_start - em->start;
4237 csum_len = em->mod_len;
4240 /* block start is already adjusted for the file extent offset. */
4241 ret = btrfs_lookup_csums_range(trans->fs_info->csum_root,
4242 em->block_start + csum_offset,
4243 em->block_start + csum_offset +
4244 csum_len - 1, &ordered_sums, 0);
4248 while (!list_empty(&ordered_sums)) {
4249 struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next,
4250 struct btrfs_ordered_sum,
4253 ret = btrfs_csum_file_blocks(trans, log_root, sums);
4254 list_del(&sums->list);
4261 static int log_one_extent(struct btrfs_trans_handle *trans,
4262 struct btrfs_inode *inode, struct btrfs_root *root,
4263 const struct extent_map *em,
4264 struct btrfs_path *path,
4265 struct btrfs_log_ctx *ctx)
4267 struct btrfs_root *log = root->log_root;
4268 struct btrfs_file_extent_item *fi;
4269 struct extent_buffer *leaf;
4270 struct btrfs_map_token token;
4271 struct btrfs_key key;
4272 u64 extent_offset = em->start - em->orig_start;
4275 int extent_inserted = 0;
4277 ret = log_extent_csums(trans, inode, log, em);
4281 btrfs_init_map_token(&token);
4283 ret = __btrfs_drop_extents(trans, log, &inode->vfs_inode, path, em->start,
4284 em->start + em->len, NULL, 0, 1,
4285 sizeof(*fi), &extent_inserted);
4289 if (!extent_inserted) {
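/*
 * __btrfs_drop_extents() did not leave an empty slot for us, so
 * insert a fresh file extent item at (ino, EXTENT_DATA, em->start).
 */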
4290 key.objectid = btrfs_ino(inode);
4291 key.type = BTRFS_EXTENT_DATA_KEY;
4292 key.offset = em->start;
4294 ret = btrfs_insert_empty_item(trans, log, path, &key,
4299 leaf = path->nodes[0];
4300 fi = btrfs_item_ptr(leaf, path->slots[0],
4301 struct btrfs_file_extent_item);
4303 btrfs_set_token_file_extent_generation(leaf, fi, trans->transid,
4305 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
4306 btrfs_set_token_file_extent_type(leaf, fi,
4307 BTRFS_FILE_EXTENT_PREALLOC,
4310 btrfs_set_token_file_extent_type(leaf, fi,
4311 BTRFS_FILE_EXTENT_REG,
4314 block_len = max(em->block_len, em->orig_block_len);
4315 if (em->compress_type != BTRFS_COMPRESS_NONE) {
4316 btrfs_set_token_file_extent_disk_bytenr(leaf, fi,
4319 btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, block_len,
4321 } else if (em->block_start < EXTENT_MAP_LAST_BYTE) {
4322 btrfs_set_token_file_extent_disk_bytenr(leaf, fi,
4324 extent_offset, &token);
4325 btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, block_len,
4328 btrfs_set_token_file_extent_disk_bytenr(leaf, fi, 0, &token);
4329 btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, 0,
4333 btrfs_set_token_file_extent_offset(leaf, fi, extent_offset, &token);
4334 btrfs_set_token_file_extent_num_bytes(leaf, fi, em->len, &token);
4335 btrfs_set_token_file_extent_ram_bytes(leaf, fi, em->ram_bytes, &token);
4336 btrfs_set_token_file_extent_compression(leaf, fi, em->compress_type,
4338 btrfs_set_token_file_extent_encryption(leaf, fi, 0, &token);
4339 btrfs_set_token_file_extent_other_encoding(leaf, fi, 0, &token);
4340 btrfs_mark_buffer_dirty(leaf);
4342 btrfs_release_path(path);
4348 * Log all prealloc extents beyond the inode's i_size to make sure we do not
4349 * lose them after doing a fast fsync and replaying the log. We scan the
4350 * subvolume's root instead of iterating the inode's extent map tree because
4351 * otherwise we can log incorrect extent items based on extent map conversion.
4352 * That can happen due to the fact that extent maps are merged when they
4353 * are not in the extent map tree's list of modified extents.
4355 static int btrfs_log_prealloc_extents(struct btrfs_trans_handle *trans,
4356 struct btrfs_inode *inode,
4357 struct btrfs_path *path)
4359 struct btrfs_root *root = inode->root;
4360 struct btrfs_key key;
4361 const u64 i_size = i_size_read(&inode->vfs_inode);
4362 const u64 ino = btrfs_ino(inode);
4363 struct btrfs_path *dst_path = NULL;
4364 u64 last_extent = (u64)-1;
4369 if (!(inode->flags & BTRFS_INODE_PREALLOC))
4373 key.type = BTRFS_EXTENT_DATA_KEY;
4374 key.offset = i_size;
4375 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4380 struct extent_buffer *leaf = path->nodes[0];
4381 int slot = path->slots[0];
4383 if (slot >= btrfs_header_nritems(leaf)) {
4385 ret = copy_items(trans, inode, dst_path, path,
4386 &last_extent, start_slot,
4392 ret = btrfs_next_leaf(root, path);
4402 btrfs_item_key_to_cpu(leaf, &key, slot);
4403 if (key.objectid > ino)
4405 if (WARN_ON_ONCE(key.objectid < ino) ||
4406 key.type < BTRFS_EXTENT_DATA_KEY ||
4407 key.offset < i_size) {
4411 if (last_extent == (u64)-1) {
4412 last_extent = key.offset;
4414 * Avoid logging extent items that were already logged in past fsync
4415 * calls, which would lead to duplicate keys in the log tree.
4418 ret = btrfs_truncate_inode_items(trans,
4422 BTRFS_EXTENT_DATA_KEY);
4423 } while (ret == -EAGAIN);
4432 dst_path = btrfs_alloc_path();
4440 ret = copy_items(trans, inode, dst_path, path, &last_extent,
4441 start_slot, ins_nr, 1, 0);
4446 btrfs_release_path(path);
4447 btrfs_free_path(dst_path);
4451 static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
4452 struct btrfs_root *root,
4453 struct btrfs_inode *inode,
4454 struct btrfs_path *path,
4455 struct btrfs_log_ctx *ctx,
4459 struct extent_map *em, *n;
4460 struct list_head extents;
4461 struct extent_map_tree *tree = &inode->extent_tree;
4466 INIT_LIST_HEAD(&extents);
4468 write_lock(&tree->lock);
4469 test_gen = root->fs_info->last_trans_committed;
4471 list_for_each_entry_safe(em, n, &tree->modified_extents, list) {
4473 * Skip extents outside our logging range. It's important to do
4474 * it for correctness because if we don't ignore them, we may
4475 * log them before their ordered extent completes, and therefore
4476 * we could log them without logging their respective checksums
4477 * (the checksum items are added to the csum tree at the very
4478 * end of btrfs_finish_ordered_io()). Also leave such extents
4479 * outside of our range in the list, since we may have another
4480 * ranged fsync in the near future that needs them. If an extent
4481 * outside our range corresponds to a hole, log it to avoid
4482 * leaving gaps between extents (fsck will complain when we are
4483 * not using the NO_HOLES feature).
4485 if ((em->start > end || em->start + em->len <= start) &&
4486 em->block_start != EXTENT_MAP_HOLE)
4489 list_del_init(&em->list);
4491 * Just an arbitrary number, this can be really CPU intensive
4492 * once we start getting a lot of extents, and really once we
4493 * have a bunch of extents we just want to commit since it will
4496 if (++num > 32768) {
4497 list_del_init(&tree->modified_extents);
4502 if (em->generation <= test_gen)
4505 /* We log prealloc extents beyond eof later. */
4506 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) &&
4507 em->start >= i_size_read(&inode->vfs_inode))
4510 /* Need a ref to keep it from getting evicted from cache */
4511 refcount_inc(&em->refs);
4512 set_bit(EXTENT_FLAG_LOGGING, &em->flags);
4513 list_add_tail(&em->list, &extents);
4517 list_sort(NULL, &extents, extent_cmp);
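/*
 * The list was just sorted by em->start (see extent_cmp), so the extents
 * are logged below in file offset order.
 */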
4519 while (!list_empty(&extents)) {
4520 em = list_entry(extents.next, struct extent_map, list);
4522 list_del_init(&em->list);
4525 * If we had an error we just need to delete everybody from our
4529 clear_em_logging(tree, em);
4530 free_extent_map(em);
4534 write_unlock(&tree->lock);
4536 ret = log_one_extent(trans, inode, root, em, path, ctx);
4537 write_lock(&tree->lock);
4538 clear_em_logging(tree, em);
4539 free_extent_map(em);
4541 WARN_ON(!list_empty(&extents));
4542 write_unlock(&tree->lock);
4544 btrfs_release_path(path);
4546 ret = btrfs_log_prealloc_extents(trans, inode, path);
4551 static int logged_inode_size(struct btrfs_root *log, struct btrfs_inode *inode,
4552 struct btrfs_path *path, u64 *size_ret)
4554 struct btrfs_key key;
4557 key.objectid = btrfs_ino(inode);
4558 key.type = BTRFS_INODE_ITEM_KEY;
4561 ret = btrfs_search_slot(NULL, log, &key, path, 0, 0);
4564 } else if (ret > 0) {
4567 struct btrfs_inode_item *item;
4569 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
4570 struct btrfs_inode_item);
4571 *size_ret = btrfs_inode_size(path->nodes[0], item);
4573 * If the in-memory inode's i_size is smaller than the inode
4574 * size stored in the btree, return the inode's i_size, so
4575 * that we get a correct inode size after replaying the log
4576 * when before a power failure we had a shrinking truncate
4577 * followed by addition of a new name (rename / new hard link).
4578 * Otherwise return the inode size from the btree, to avoid
4579 * data loss when replaying a log due to previously doing a
4580 * write that expands the inode's size and logging a new name
4581 * immediately after.
4583 if (*size_ret > inode->vfs_inode.i_size)
4584 *size_ret = inode->vfs_inode.i_size;
4587 btrfs_release_path(path);
4592 * At the moment we always log all xattrs. This is to figure out at log replay
4593 * time which xattrs must have their deletion replayed. If an xattr is missing
4594 * in the log tree and exists in the fs/subvol tree, we delete it. This is
4595 * because if an xattr is deleted, the inode fsynced, and then a power failure
4596 * happens, causing the log to be replayed the next time the fs is mounted,
4597 * we want the xattr to no longer exist (the same behaviour as other filesystems
4598 * with a journal: ext3/4, xfs, f2fs, etc).
4600 static int btrfs_log_all_xattrs(struct btrfs_trans_handle *trans,
4601 struct btrfs_root *root,
4602 struct btrfs_inode *inode,
4603 struct btrfs_path *path,
4604 struct btrfs_path *dst_path)
4607 struct btrfs_key key;
4608 const u64 ino = btrfs_ino(inode);
4613 key.type = BTRFS_XATTR_ITEM_KEY;
4616 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4621 int slot = path->slots[0];
4622 struct extent_buffer *leaf = path->nodes[0];
4623 int nritems = btrfs_header_nritems(leaf);
4625 if (slot >= nritems) {
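/*
 * End of this leaf: flush any pending run of xattr items into the
 * log before moving on to the next leaf.
 */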
4627 u64 last_extent = 0;
4629 ret = copy_items(trans, inode, dst_path, path,
4630 &last_extent, start_slot,
4632 /* can't be 1, extent items aren't processed */
4638 ret = btrfs_next_leaf(root, path);
4646 btrfs_item_key_to_cpu(leaf, &key, slot);
4647 if (key.objectid != ino || key.type != BTRFS_XATTR_ITEM_KEY)
4657 u64 last_extent = 0;
4659 ret = copy_items(trans, inode, dst_path, path,
4660 &last_extent, start_slot,
4662 /* can't be 1, extent items aren't processed */
4672 * If the no holes feature is enabled we need to make sure any hole between the
4673 * last extent and the i_size of our inode is explicitly marked in the log. This
4674 * is to make sure that doing something like:
4676 * 1) create file with 128Kb of data
4677 * 2) truncate file to 64Kb
4678 * 3) truncate file to 256Kb
4680 * 5) <crash/power failure>
4681 * 6) mount fs and trigger log replay
4683 * Will give us a file with a size of 256Kb, where the first 64Kb of data
4684 * match what the file had at step 1 and the last 192Kb of the
4685 * file correspond to a hole. The presence of explicit holes in a log tree is
4686 * what guarantees that log replay will remove/adjust file extent items in the
4689 * Here we do not need to care about holes between extents; that is already done
4690 * by copy_items(). We also only need to do this in the full sync path, where we
4691 * look up extents from the fs/subvol tree only. In the fast path case, we
4692 * lookup the list of modified extent maps and if any represents a hole, we
4693 * insert a corresponding extent representing a hole in the log tree.
4695 static int btrfs_log_trailing_hole(struct btrfs_trans_handle *trans,
4696 struct btrfs_root *root,
4697 struct btrfs_inode *inode,
4698 struct btrfs_path *path)
4700 struct btrfs_fs_info *fs_info = root->fs_info;
4702 struct btrfs_key key;
4705 struct extent_buffer *leaf;
4706 struct btrfs_root *log = root->log_root;
4707 const u64 ino = btrfs_ino(inode);
4708 const u64 i_size = i_size_read(&inode->vfs_inode);
4710 if (!btrfs_fs_incompat(fs_info, NO_HOLES))
4714 key.type = BTRFS_EXTENT_DATA_KEY;
4715 key.offset = (u64)-1;
4717 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4722 ASSERT(path->slots[0] > 0);
4724 leaf = path->nodes[0];
4725 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4727 if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY) {
4728 /* inode does not have any extents */
4732 struct btrfs_file_extent_item *extent;
4736 * If there's an extent beyond i_size, an explicit hole was
4737 * already inserted by copy_items().
4739 if (key.offset >= i_size)
4742 extent = btrfs_item_ptr(leaf, path->slots[0],
4743 struct btrfs_file_extent_item);
4745 if (btrfs_file_extent_type(leaf, extent) ==
4746 BTRFS_FILE_EXTENT_INLINE)
4749 len = btrfs_file_extent_num_bytes(leaf, extent);
4750 /* Last extent goes beyond i_size, no need to log a hole. */
4751 if (key.offset + len > i_size)
4753 hole_start = key.offset + len;
4754 hole_size = i_size - hole_start;
4756 btrfs_release_path(path);
4758 /* Last extent ends at i_size. */
4762 hole_size = ALIGN(hole_size, fs_info->sectorsize);
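/*
 * Insert a file extent item with disk_bytenr == 0 (a hole) covering
 * hole_size bytes starting at hole_start, so that log replay punches
 * the hole.
 */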
4763 ret = btrfs_insert_file_extent(trans, log, ino, hole_start, 0, 0,
4764 hole_size, 0, hole_size, 0, 0, 0);
4769 * When we are logging a new inode X, check whether it has a reference that
4770 * matches a reference from some other inode Y created in a past transaction
4771 * and renamed in the current transaction. If we don't do this, then at
4772 * log replay time we can lose inode Y (and all its files if it's a directory):
4775 * echo "hello world" > /mnt/x/foobar
4778 * mkdir /mnt/x # or touch /mnt/x
4779 * xfs_io -c fsync /mnt/x
4781 * mount fs, trigger log replay
4783 * After the log replay procedure, we would lose the first directory and all its
4784 * files (file foobar).
4785 * For the case where inode Y is not a directory we simply end up losing it:
4787 * echo "123" > /mnt/foo
4789 * mv /mnt/foo /mnt/bar
4790 * echo "abc" > /mnt/foo
4791 * xfs_io -c fsync /mnt/foo
4794 * We also need this for cases where a snapshot entry is replaced by some other
4795 * entry (file or directory) otherwise we end up with an unreplayable log due to
4796 * attempts to delete the snapshot entry (entry of type BTRFS_ROOT_ITEM_KEY) as
4797 * if it were a regular entry:
4800 * btrfs subvolume snapshot /mnt /mnt/x/snap
4801 * btrfs subvolume delete /mnt/x/snap
4804 * fsync /mnt/x or fsync some new file inside it
4807 * The snapshot delete, rmdir of x, mkdir of a new x and the fsync all happen in
4808 * the same transaction.
4810 static int btrfs_check_ref_name_override(struct extent_buffer *eb,
4812 const struct btrfs_key *key,
4813 struct btrfs_inode *inode,
4814 u64 *other_ino, u64 *other_parent)
4817 struct btrfs_path *search_path;
4820 u32 item_size = btrfs_item_size_nr(eb, slot);
4822 unsigned long ptr = btrfs_item_ptr_offset(eb, slot);
4824 search_path = btrfs_alloc_path();
4827 search_path->search_commit_root = 1;
4828 search_path->skip_locking = 1;
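/*
 * Walk every name packed into this INODE_REF / INODE_EXTREF item and
 * look it up in the parent directory using the commit root. If the
 * name resolves to a different inode, report that inode (and its
 * parent) as a conflict to the caller.
 */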
4830 while (cur_offset < item_size) {
4834 unsigned long name_ptr;
4835 struct btrfs_dir_item *di;
4837 if (key->type == BTRFS_INODE_REF_KEY) {
4838 struct btrfs_inode_ref *iref;
4840 iref = (struct btrfs_inode_ref *)(ptr + cur_offset);
4841 parent = key->offset;
4842 this_name_len = btrfs_inode_ref_name_len(eb, iref);
4843 name_ptr = (unsigned long)(iref + 1);
4844 this_len = sizeof(*iref) + this_name_len;
4846 struct btrfs_inode_extref *extref;
4848 extref = (struct btrfs_inode_extref *)(ptr +
4850 parent = btrfs_inode_extref_parent(eb, extref);
4851 this_name_len = btrfs_inode_extref_name_len(eb, extref);
4852 name_ptr = (unsigned long)&extref->name;
4853 this_len = sizeof(*extref) + this_name_len;
4856 if (this_name_len > name_len) {
4859 new_name = krealloc(name, this_name_len, GFP_NOFS);
4864 name_len = this_name_len;
4868 read_extent_buffer(eb, name, name_ptr, this_name_len);
4869 di = btrfs_lookup_dir_item(NULL, inode->root, search_path,
4870 parent, name, this_name_len, 0);
4871 if (di && !IS_ERR(di)) {
4872 struct btrfs_key di_key;
4874 btrfs_dir_item_key_to_cpu(search_path->nodes[0],
4876 if (di_key.type == BTRFS_INODE_ITEM_KEY) {
4877 if (di_key.objectid != key->objectid) {
4879 *other_ino = di_key.objectid;
4880 *other_parent = parent;
4888 } else if (IS_ERR(di)) {
4892 btrfs_release_path(search_path);
4894 cur_offset += this_len;
4898 btrfs_free_path(search_path);
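/*
 * Going by the callers (log_conflicting_inodes() and btrfs_log_inode()), the
 * return convention here appears to be: a value > 0 means one of the names of
 * the inode being logged already belongs, in the commit root, to a different
 * inode (reported through *other_ino and *other_parent), 0 means no conflict
 * was found and a negative value is an error.
 */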
4903 struct btrfs_ino_list {
4906 struct list_head list;
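/*
 * log_conflicting_inodes() below drains a small work list of (ino, parent)
 * pairs built from btrfs_ino_list entries: the inode that triggered the
 * conflict is queued first and, whenever logging an inode reveals yet another
 * conflicting name, that other inode is appended to the list, so conflicts
 * discovered along the way are handled breadth-first.
 */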
4909 static int log_conflicting_inodes(struct btrfs_trans_handle *trans,
4910 struct btrfs_root *root,
4911 struct btrfs_path *path,
4912 struct btrfs_log_ctx *ctx,
4913 u64 ino, u64 parent)
4915 struct btrfs_ino_list *ino_elem;
4916 LIST_HEAD(inode_list);
4919 ino_elem = kmalloc(sizeof(*ino_elem), GFP_NOFS);
4922 ino_elem->ino = ino;
4923 ino_elem->parent = parent;
4924 list_add_tail(&ino_elem->list, &inode_list);
4926 while (!list_empty(&inode_list)) {
4927 struct btrfs_fs_info *fs_info = root->fs_info;
4928 struct btrfs_key key;
4929 struct inode *inode;
4931 ino_elem = list_first_entry(&inode_list, struct btrfs_ino_list,
4933 ino = ino_elem->ino;
4934 parent = ino_elem->parent;
4935 list_del(&ino_elem->list);
4940 btrfs_release_path(path);
4943 key.type = BTRFS_INODE_ITEM_KEY;
4945 inode = btrfs_iget(fs_info->sb, &key, root, NULL);
4947 * If the other inode that had a conflicting dir entry was
4948 * deleted in the current transaction, we need to log its parent directory.
4951 if (IS_ERR(inode)) {
4952 ret = PTR_ERR(inode);
4953 if (ret == -ENOENT) {
4954 key.objectid = parent;
4955 inode = btrfs_iget(fs_info->sb, &key, root,
4957 if (IS_ERR(inode)) {
4958 ret = PTR_ERR(inode);
4960 ret = btrfs_log_inode(trans, root,
4962 LOG_OTHER_INODE_ALL,
4970 * We are safe logging the other inode without acquiring its
4971 * lock as long as we log with the LOG_INODE_EXISTS mode. We
4972 * are safe against concurrent renames of the other inode as
4973 * well because during a rename we pin the log and update the
4974 * log with the new name before we unpin it.
4976 ret = btrfs_log_inode(trans, root, BTRFS_I(inode),
4977 LOG_OTHER_INODE, 0, LLONG_MAX, ctx);
4984 key.type = BTRFS_INODE_REF_KEY;
4986 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4993 struct extent_buffer *leaf = path->nodes[0];
4994 int slot = path->slots[0];
4996 u64 other_parent = 0;
4998 if (slot >= btrfs_header_nritems(leaf)) {
4999 ret = btrfs_next_leaf(root, path);
5002 } else if (ret > 0) {
5009 btrfs_item_key_to_cpu(leaf, &key, slot);
5010 if (key.objectid != ino ||
5011 (key.type != BTRFS_INODE_REF_KEY &&
5012 key.type != BTRFS_INODE_EXTREF_KEY)) {
5017 ret = btrfs_check_ref_name_override(leaf, slot, &key,
5018 BTRFS_I(inode), &other_ino,
5023 ino_elem = kmalloc(sizeof(*ino_elem), GFP_NOFS);
5028 ino_elem->ino = other_ino;
5029 ino_elem->parent = other_parent;
5030 list_add_tail(&ino_elem->list, &inode_list);
5041 /* log a single inode in the tree log.
5042 * At least one parent directory for this inode must exist in the tree
5043 * or be logged already.
5045 * Any items from this inode changed by the current transaction are copied
5046 * to the log tree. An extra reference is taken on any extents in this
5047 * file, allowing us to avoid a whole pile of corner cases around logging
5048 * blocks that have been removed from the tree.
5050 * See LOG_INODE_ALL and related defines for a description of how inode_only is used.
5053 * This handles both files and directories.
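/*
 * Rough order of operations, as far as it can be followed in the code below:
 * commit or flush the inode's delayed items, drop any previously logged items
 * for this inode from the log tree, copy the changed items in batches with
 * copy_items() while walking forward with btrfs_search_forward(), then log
 * xattrs, a possible trailing hole, the inode item itself and the changed
 * extents, and finally, for directories, the dir items/indexes, before
 * updating logged_trans and last_log_commit.
 */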
5055 static int btrfs_log_inode(struct btrfs_trans_handle *trans,
5056 struct btrfs_root *root, struct btrfs_inode *inode,
5060 struct btrfs_log_ctx *ctx)
5062 struct btrfs_fs_info *fs_info = root->fs_info;
5063 struct btrfs_path *path;
5064 struct btrfs_path *dst_path;
5065 struct btrfs_key min_key;
5066 struct btrfs_key max_key;
5067 struct btrfs_root *log = root->log_root;
5068 u64 last_extent = 0;
5072 int ins_start_slot = 0;
5074 bool fast_search = false;
5075 u64 ino = btrfs_ino(inode);
5076 struct extent_map_tree *em_tree = &inode->extent_tree;
5077 u64 logged_isize = 0;
5078 bool need_log_inode_item = true;
5079 bool xattrs_logged = false;
5080 bool recursive_logging = false;
5082 path = btrfs_alloc_path();
5085 dst_path = btrfs_alloc_path();
5087 btrfs_free_path(path);
5091 min_key.objectid = ino;
5092 min_key.type = BTRFS_INODE_ITEM_KEY;
5095 max_key.objectid = ino;
5098 /* today the code can only do partial logging of directories */
5099 if (S_ISDIR(inode->vfs_inode.i_mode) ||
5100 (!test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
5101 &inode->runtime_flags) &&
5102 inode_only >= LOG_INODE_EXISTS))
5103 max_key.type = BTRFS_XATTR_ITEM_KEY;
5105 max_key.type = (u8)-1;
5106 max_key.offset = (u64)-1;
5109 * Only run delayed items if we are a dir or a new file.
5110 * Otherwise commit the delayed inode only, which is needed in
5111 * order for the log replay code to mark inodes for link count
5112 * fixup (create temporary BTRFS_TREE_LOG_FIXUP_OBJECTID items).
5114 if (S_ISDIR(inode->vfs_inode.i_mode) ||
5115 inode->generation > fs_info->last_trans_committed)
5116 ret = btrfs_commit_inode_delayed_items(trans, inode);
5118 ret = btrfs_commit_inode_delayed_inode(inode);
5121 btrfs_free_path(path);
5122 btrfs_free_path(dst_path);
5126 if (inode_only == LOG_OTHER_INODE || inode_only == LOG_OTHER_INODE_ALL) {
5127 recursive_logging = true;
5128 if (inode_only == LOG_OTHER_INODE)
5129 inode_only = LOG_INODE_EXISTS;
5131 inode_only = LOG_INODE_ALL;
5132 mutex_lock_nested(&inode->log_mutex, SINGLE_DEPTH_NESTING);
5134 mutex_lock(&inode->log_mutex);
5138 * a brute force approach to making sure we get the most uptodate
5139 * copies of everything.
5141 if (S_ISDIR(inode->vfs_inode.i_mode)) {
5142 int max_key_type = BTRFS_DIR_LOG_INDEX_KEY;
5144 if (inode_only == LOG_INODE_EXISTS)
5145 max_key_type = BTRFS_XATTR_ITEM_KEY;
5146 ret = drop_objectid_items(trans, log, path, ino, max_key_type);
5148 if (inode_only == LOG_INODE_EXISTS) {
5150 * Make sure the new inode item we write to the log has
5151 * the same isize as the current one (if it exists).
5152 * This is necessary to prevent data loss after log
5153 * replay, and also to prevent doing a wrong expanding
5154 * truncate - for e.g. create file, write 4K into offset
5155 * 0, fsync, write 4K into offset 4096, add hard link,
5156 * fsync some other file (to sync log), power fail - if
5157 * we use the inode's current i_size, after log replay
5158 * we get an 8Kb file, with the last 4Kb extent as a hole
5159 * (zeroes), as if an expanding truncate happened,
5160 * instead of getting a file of 4Kb only.
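/*
 * The scenario above as a command sequence (illustrative only, the file
 * names are made up):
 *
 *   xfs_io -f -c "pwrite 0 4K" -c "fsync" /mnt/foo
 *   xfs_io -c "pwrite 4K 4K" /mnt/foo
 *   ln /mnt/foo /mnt/foo-link
 *   xfs_io -c "fsync" /mnt/bar    # fsync of some other file, syncs the log
 *   <power fail>
 */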
5162 err = logged_inode_size(log, inode, path, &logged_isize);
5166 if (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
5167 &inode->runtime_flags)) {
5168 if (inode_only == LOG_INODE_EXISTS) {
5169 max_key.type = BTRFS_XATTR_ITEM_KEY;
5170 ret = drop_objectid_items(trans, log, path, ino,
5173 clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
5174 &inode->runtime_flags);
5175 clear_bit(BTRFS_INODE_COPY_EVERYTHING,
5176 &inode->runtime_flags);
5178 ret = btrfs_truncate_inode_items(trans,
5179 log, &inode->vfs_inode, 0, 0);
5184 } else if (test_and_clear_bit(BTRFS_INODE_COPY_EVERYTHING,
5185 &inode->runtime_flags) ||
5186 inode_only == LOG_INODE_EXISTS) {
5187 if (inode_only == LOG_INODE_ALL)
5189 max_key.type = BTRFS_XATTR_ITEM_KEY;
5190 ret = drop_objectid_items(trans, log, path, ino,
5193 if (inode_only == LOG_INODE_ALL)
5206 ret = btrfs_search_forward(root, &min_key,
5207 path, trans->transid);
5215 /* note, ins_nr might be > 0 here, cleanup outside the loop */
5216 if (min_key.objectid != ino)
5218 if (min_key.type > max_key.type)
5221 if (min_key.type == BTRFS_INODE_ITEM_KEY)
5222 need_log_inode_item = false;
5224 if ((min_key.type == BTRFS_INODE_REF_KEY ||
5225 min_key.type == BTRFS_INODE_EXTREF_KEY) &&
5226 inode->generation == trans->transid &&
5227 !recursive_logging) {
5229 u64 other_parent = 0;
5231 ret = btrfs_check_ref_name_override(path->nodes[0],
5232 path->slots[0], &min_key, inode,
5233 &other_ino, &other_parent);
5237 } else if (ret > 0 && ctx &&
5238 other_ino != btrfs_ino(BTRFS_I(ctx->inode))) {
5243 ins_start_slot = path->slots[0];
5245 ret = copy_items(trans, inode, dst_path, path,
5246 &last_extent, ins_start_slot,
5255 err = log_conflicting_inodes(trans, root, path,
5256 ctx, other_ino, other_parent);
5259 btrfs_release_path(path);
5264 /* Skip xattrs, we log them later with btrfs_log_all_xattrs() */
5265 if (min_key.type == BTRFS_XATTR_ITEM_KEY) {
5268 ret = copy_items(trans, inode, dst_path, path,
5269 &last_extent, ins_start_slot,
5270 ins_nr, inode_only, logged_isize);
5277 btrfs_release_path(path);
5283 if (ins_nr && ins_start_slot + ins_nr == path->slots[0]) {
5286 } else if (!ins_nr) {
5287 ins_start_slot = path->slots[0];
5292 ret = copy_items(trans, inode, dst_path, path, &last_extent,
5293 ins_start_slot, ins_nr, inode_only,
5301 btrfs_release_path(path);
5305 ins_start_slot = path->slots[0];
5308 nritems = btrfs_header_nritems(path->nodes[0]);
5310 if (path->slots[0] < nritems) {
5311 btrfs_item_key_to_cpu(path->nodes[0], &min_key,
5316 ret = copy_items(trans, inode, dst_path, path,
5317 &last_extent, ins_start_slot,
5318 ins_nr, inode_only, logged_isize);
5326 btrfs_release_path(path);
5328 if (min_key.offset < (u64)-1) {
5330 } else if (min_key.type < max_key.type) {
5338 ret = copy_items(trans, inode, dst_path, path, &last_extent,
5339 ins_start_slot, ins_nr, inode_only,
5349 btrfs_release_path(path);
5350 btrfs_release_path(dst_path);
5351 err = btrfs_log_all_xattrs(trans, root, inode, path, dst_path);
5354 xattrs_logged = true;
5355 if (max_key.type >= BTRFS_EXTENT_DATA_KEY && !fast_search) {
5356 btrfs_release_path(path);
5357 btrfs_release_path(dst_path);
5358 err = btrfs_log_trailing_hole(trans, root, inode, path);
5363 btrfs_release_path(path);
5364 btrfs_release_path(dst_path);
5365 if (need_log_inode_item) {
5366 err = log_inode_item(trans, log, dst_path, inode);
5367 if (!err && !xattrs_logged) {
5368 err = btrfs_log_all_xattrs(trans, root, inode, path,
5370 btrfs_release_path(path);
5376 ret = btrfs_log_changed_extents(trans, root, inode, dst_path,
5382 } else if (inode_only == LOG_INODE_ALL) {
5383 struct extent_map *em, *n;
5385 write_lock(&em_tree->lock);
5387 * We can't just remove every em if we're called for a ranged
5388 * fsync - that is, one that doesn't cover the whole possible
5389 * file range (0 to LLONG_MAX). This is because we can have
5390 * em's that fall outside the range we're logging and therefore
5391 * their ordered operations haven't completed yet
5392 * (btrfs_finish_ordered_io() not invoked yet). This means we
5393 * didn't get their respective file extent item in the fs/subvol
5394 * tree yet, and need to let the next fast fsync (one which
5395 * consults the list of modified extent maps) find the em so
5396 * that it logs a matching file extent item and waits for the
5397 * respective ordered operation to complete (if it's still running).
5400 * Removing every em outside the range we're logging would make
5401 * the next fast fsync not log their matching file extent items,
5402 * therefore making us lose data after a log replay.
5404 list_for_each_entry_safe(em, n, &em_tree->modified_extents,
5406 const u64 mod_end = em->mod_start + em->mod_len - 1;
5408 if (em->mod_start >= start && mod_end <= end)
5409 list_del_init(&em->list);
5411 write_unlock(&em_tree->lock);
5414 if (inode_only == LOG_INODE_ALL && S_ISDIR(inode->vfs_inode.i_mode)) {
5415 ret = log_directory_changes(trans, root, inode, path, dst_path,
5423 spin_lock(&inode->lock);
5424 inode->logged_trans = trans->transid;
5425 inode->last_log_commit = inode->last_sub_trans;
5426 spin_unlock(&inode->lock);
5428 mutex_unlock(&inode->log_mutex);
5430 btrfs_free_path(path);
5431 btrfs_free_path(dst_path);
5436 * Check if we must fall back to a transaction commit when logging an inode.
5437 * This must be called after logging the inode and is used only in the context
5438 * when fsyncing an inode requires logging some other inode - in which
5439 * case we can't lock the i_mutex of each other inode we need to log as that
5440 * can lead to deadlocks with concurrent fsync against other inodes (as we can
5441 * log inodes up or down in the hierarchy) or rename operations for example. So
5442 * we take the log_mutex of the inode after we have logged it and then check for
5443 * its last_unlink_trans value - this is safe because any task setting
5444 * last_unlink_trans must take the log_mutex and it must do this before it does
5445 * the actual unlink operation, so if we do this check before a concurrent task
5446 * sets last_unlink_trans it means we've logged a consistent version/state of
5447 * all the inode items, otherwise we are not sure and must do a transaction
5448 * commit (the concurrent task might have only updated last_unlink_trans before
5449 * we logged the inode or it might have also done the unlink).
5451 static bool btrfs_must_commit_transaction(struct btrfs_trans_handle *trans,
5452 struct btrfs_inode *inode)
5454 struct btrfs_fs_info *fs_info = inode->root->fs_info;
5457 mutex_lock(&inode->log_mutex);
5458 if (inode->last_unlink_trans > fs_info->last_trans_committed) {
5460 * Make sure any commits to the log are forced to be full
5463 btrfs_set_log_full_commit(trans);
5466 mutex_unlock(&inode->log_mutex);
5472 * follow the dentry parent pointers up the chain and see if any
5473 * of the directories in it require a full commit before they can
5474 * be logged. Returns zero if nothing special needs to be done or 1 if
5475 * a full commit is required.
5477 static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans,
5478 struct btrfs_inode *inode,
5479 struct dentry *parent,
5480 struct super_block *sb,
5484 struct dentry *old_parent = NULL;
5487 * for a regular file, if its inode is already on disk, we don't
5488 * have to worry about the parents at all. This is because
5489 * we can use the last_unlink_trans field to record renames
5490 * and other fun in this file.
5492 if (S_ISREG(inode->vfs_inode.i_mode) &&
5493 inode->generation <= last_committed &&
5494 inode->last_unlink_trans <= last_committed)
5497 if (!S_ISDIR(inode->vfs_inode.i_mode)) {
5498 if (!parent || d_really_is_negative(parent) || sb != parent->d_sb)
5500 inode = BTRFS_I(d_inode(parent));
5504 if (btrfs_must_commit_transaction(trans, inode)) {
5509 if (!parent || d_really_is_negative(parent) || sb != parent->d_sb)
5512 if (IS_ROOT(parent)) {
5513 inode = BTRFS_I(d_inode(parent));
5514 if (btrfs_must_commit_transaction(trans, inode))
5519 parent = dget_parent(parent);
5521 old_parent = parent;
5522 inode = BTRFS_I(d_inode(parent));
5530 struct btrfs_dir_list {
5532 struct list_head list;
5536 * Log the inodes of the new dentries of a directory. See log_dir_items() for
5537 * details about why it is needed.
5538 * This is a recursive operation - if an existing dentry corresponds to a
5539 * directory, that directory's new entries are logged too (same behaviour as
5540 * ext3/4, xfs, f2fs, reiserfs, nilfs2). Note that when logging the inodes
5541 * the dentries point to we do not lock their i_mutex, otherwise lockdep
5542 * complains about the following circular lock dependency / possible deadlock:
5546 * lock(&type->i_mutex_dir_key#3/2);
5547 * lock(sb_internal#2);
5548 * lock(&type->i_mutex_dir_key#3/2);
5549 * lock(&sb->s_type->i_mutex_key#14);
5551 * Where sb_internal is the lock (a counter that works as a lock) acquired by
5552 * sb_start_intwrite() in btrfs_start_transaction().
5553 * Not locking i_mutex of the inodes is still safe because:
5555 * 1) For regular files we log with a mode of LOG_INODE_EXISTS. It's possible
5556 * that while logging the inode new references (names) are added or removed
5557 * from the inode, leaving the logged inode item with a link count that does
5558 * not match the number of logged inode reference items. This is fine because
5559 * at log replay time we compute the real number of links and correct the
5560 * link count in the inode item (see replay_one_buffer() and
5561 * link_to_fixup_dir());
5563 * 2) For directories we log with a mode of LOG_INODE_ALL. It's possible that
5564 * while logging the inode's items new items with keys BTRFS_DIR_ITEM_KEY and
5565 * BTRFS_DIR_INDEX_KEY are added to fs/subvol tree and the logged inode item
5566 * has a size that doesn't match the sum of the lengths of all the logged
5567 * names. This does not result in a problem because if a dir_item key is
5568 * logged but its matching dir_index key is not logged, at log replay time we
5569 * don't use it to replay the respective name (see replay_one_name()). On the
5570 * other hand if only the dir_index key ends up being logged, the respective
5571 * name is added to the fs/subvol tree with both the dir_item and dir_index
5572 * keys created (see replay_one_name()).
5573 * The directory's inode item with a wrong i_size is not a problem either,
5574 * since we don't use it at log replay time to set the i_size in the inode
5575 * item of the fs/subvol tree (see overwrite_item()).
5577 static int log_new_dir_dentries(struct btrfs_trans_handle *trans,
5578 struct btrfs_root *root,
5579 struct btrfs_inode *start_inode,
5580 struct btrfs_log_ctx *ctx)
5582 struct btrfs_fs_info *fs_info = root->fs_info;
5583 struct btrfs_root *log = root->log_root;
5584 struct btrfs_path *path;
5585 LIST_HEAD(dir_list);
5586 struct btrfs_dir_list *dir_elem;
5589 path = btrfs_alloc_path();
5593 dir_elem = kmalloc(sizeof(*dir_elem), GFP_NOFS);
5595 btrfs_free_path(path);
5598 dir_elem->ino = btrfs_ino(start_inode);
5599 list_add_tail(&dir_elem->list, &dir_list);
5601 while (!list_empty(&dir_list)) {
5602 struct extent_buffer *leaf;
5603 struct btrfs_key min_key;
5607 dir_elem = list_first_entry(&dir_list, struct btrfs_dir_list,
5610 goto next_dir_inode;
5612 min_key.objectid = dir_elem->ino;
5613 min_key.type = BTRFS_DIR_ITEM_KEY;
5616 btrfs_release_path(path);
5617 ret = btrfs_search_forward(log, &min_key, path, trans->transid);
5619 goto next_dir_inode;
5620 } else if (ret > 0) {
5622 goto next_dir_inode;
5626 leaf = path->nodes[0];
5627 nritems = btrfs_header_nritems(leaf);
5628 for (i = path->slots[0]; i < nritems; i++) {
5629 struct btrfs_dir_item *di;
5630 struct btrfs_key di_key;
5631 struct inode *di_inode;
5632 struct btrfs_dir_list *new_dir_elem;
5633 int log_mode = LOG_INODE_EXISTS;
5636 btrfs_item_key_to_cpu(leaf, &min_key, i);
5637 if (min_key.objectid != dir_elem->ino ||
5638 min_key.type != BTRFS_DIR_ITEM_KEY)
5639 goto next_dir_inode;
5641 di = btrfs_item_ptr(leaf, i, struct btrfs_dir_item);
5642 type = btrfs_dir_type(leaf, di);
5643 if (btrfs_dir_transid(leaf, di) < trans->transid &&
5644 type != BTRFS_FT_DIR)
5646 btrfs_dir_item_key_to_cpu(leaf, di, &di_key);
5647 if (di_key.type == BTRFS_ROOT_ITEM_KEY)
5650 btrfs_release_path(path);
5651 di_inode = btrfs_iget(fs_info->sb, &di_key, root, NULL);
5652 if (IS_ERR(di_inode)) {
5653 ret = PTR_ERR(di_inode);
5654 goto next_dir_inode;
5657 if (btrfs_inode_in_log(BTRFS_I(di_inode), trans->transid)) {
5662 ctx->log_new_dentries = false;
5663 if (type == BTRFS_FT_DIR || type == BTRFS_FT_SYMLINK)
5664 log_mode = LOG_INODE_ALL;
5665 ret = btrfs_log_inode(trans, root, BTRFS_I(di_inode),
5666 log_mode, 0, LLONG_MAX, ctx);
5668 btrfs_must_commit_transaction(trans, BTRFS_I(di_inode)))
5672 goto next_dir_inode;
5673 if (ctx->log_new_dentries) {
5674 new_dir_elem = kmalloc(sizeof(*new_dir_elem),
5676 if (!new_dir_elem) {
5678 goto next_dir_inode;
5680 new_dir_elem->ino = di_key.objectid;
5681 list_add_tail(&new_dir_elem->list, &dir_list);
5686 ret = btrfs_next_leaf(log, path);
5688 goto next_dir_inode;
5689 } else if (ret > 0) {
5691 goto next_dir_inode;
5695 if (min_key.offset < (u64)-1) {
5700 list_del(&dir_elem->list);
5704 btrfs_free_path(path);
5708 static int btrfs_log_all_parents(struct btrfs_trans_handle *trans,
5709 struct btrfs_inode *inode,
5710 struct btrfs_log_ctx *ctx)
5712 struct btrfs_fs_info *fs_info = trans->fs_info;
5714 struct btrfs_path *path;
5715 struct btrfs_key key;
5716 struct btrfs_root *root = inode->root;
5717 const u64 ino = btrfs_ino(inode);
5719 path = btrfs_alloc_path();
5722 path->skip_locking = 1;
5723 path->search_commit_root = 1;
5726 key.type = BTRFS_INODE_REF_KEY;
5728 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5733 struct extent_buffer *leaf = path->nodes[0];
5734 int slot = path->slots[0];
5739 if (slot >= btrfs_header_nritems(leaf)) {
5740 ret = btrfs_next_leaf(root, path);
5748 btrfs_item_key_to_cpu(leaf, &key, slot);
5749 /* BTRFS_INODE_EXTREF_KEY is BTRFS_INODE_REF_KEY + 1 */
5750 if (key.objectid != ino || key.type > BTRFS_INODE_EXTREF_KEY)
5753 item_size = btrfs_item_size_nr(leaf, slot);
5754 ptr = btrfs_item_ptr_offset(leaf, slot);
5755 while (cur_offset < item_size) {
5756 struct btrfs_key inode_key;
5757 struct inode *dir_inode;
5759 inode_key.type = BTRFS_INODE_ITEM_KEY;
5760 inode_key.offset = 0;
5762 if (key.type == BTRFS_INODE_EXTREF_KEY) {
5763 struct btrfs_inode_extref *extref;
5765 extref = (struct btrfs_inode_extref *)
5767 inode_key.objectid = btrfs_inode_extref_parent(
5769 cur_offset += sizeof(*extref);
5770 cur_offset += btrfs_inode_extref_name_len(leaf,
5773 inode_key.objectid = key.offset;
5774 cur_offset = item_size;
5777 dir_inode = btrfs_iget(fs_info->sb, &inode_key,
5780 * If the parent inode was deleted, return an error to
5781 * fall back to a transaction commit. This is to prevent an
5782 * inode that was moved from one parent A to a parent B,
5783 * then had its former parent A deleted and was then
5784 * fsync'ed, from existing at both parents after a
5785 * log replay (with the old parent still existing).
5792 * mv /mnt/B/bar /mnt/A/bar
5793 * mv -T /mnt/A /mnt/B
5797 * If we ignore the old parent B which got deleted,
5798 * after a log replay we would have file bar linked
5799 * at both parents and the old parent B would still exist.
5802 if (IS_ERR(dir_inode)) {
5803 ret = PTR_ERR(dir_inode);
5808 ctx->log_new_dentries = false;
5809 ret = btrfs_log_inode(trans, root, BTRFS_I(dir_inode),
5810 LOG_INODE_ALL, 0, LLONG_MAX, ctx);
5812 btrfs_must_commit_transaction(trans, BTRFS_I(dir_inode)))
5814 if (!ret && ctx && ctx->log_new_dentries)
5815 ret = log_new_dir_dentries(trans, root,
5816 BTRFS_I(dir_inode), ctx);
5825 btrfs_free_path(path);
5829 static int log_new_ancestors(struct btrfs_trans_handle *trans,
5830 struct btrfs_root *root,
5831 struct btrfs_path *path,
5832 struct btrfs_log_ctx *ctx)
5834 struct btrfs_key found_key;
5836 btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
5839 struct btrfs_fs_info *fs_info = root->fs_info;
5840 const u64 last_committed = fs_info->last_trans_committed;
5841 struct extent_buffer *leaf = path->nodes[0];
5842 int slot = path->slots[0];
5843 struct btrfs_key search_key;
5844 struct inode *inode;
5847 btrfs_release_path(path);
5849 search_key.objectid = found_key.offset;
5850 search_key.type = BTRFS_INODE_ITEM_KEY;
5851 search_key.offset = 0;
5852 inode = btrfs_iget(fs_info->sb, &search_key, root, NULL);
5854 return PTR_ERR(inode);
5856 if (BTRFS_I(inode)->generation > last_committed)
5857 ret = btrfs_log_inode(trans, root, BTRFS_I(inode),
5864 if (search_key.objectid == BTRFS_FIRST_FREE_OBJECTID)
5867 search_key.type = BTRFS_INODE_REF_KEY;
5868 ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
5872 leaf = path->nodes[0];
5873 slot = path->slots[0];
5874 if (slot >= btrfs_header_nritems(leaf)) {
5875 ret = btrfs_next_leaf(root, path);
5880 leaf = path->nodes[0];
5881 slot = path->slots[0];
5884 btrfs_item_key_to_cpu(leaf, &found_key, slot);
5885 if (found_key.objectid != search_key.objectid ||
5886 found_key.type != BTRFS_INODE_REF_KEY)
5892 static int log_new_ancestors_fast(struct btrfs_trans_handle *trans,
5893 struct btrfs_inode *inode,
5894 struct dentry *parent,
5895 struct btrfs_log_ctx *ctx)
5897 struct btrfs_root *root = inode->root;
5898 struct btrfs_fs_info *fs_info = root->fs_info;
5899 struct dentry *old_parent = NULL;
5900 struct super_block *sb = inode->vfs_inode.i_sb;
5904 if (!parent || d_really_is_negative(parent) ||
5908 inode = BTRFS_I(d_inode(parent));
5909 if (root != inode->root)
5912 if (inode->generation > fs_info->last_trans_committed) {
5913 ret = btrfs_log_inode(trans, root, inode,
5914 LOG_INODE_EXISTS, 0, LLONG_MAX, ctx);
5918 if (IS_ROOT(parent))
5921 parent = dget_parent(parent);
5923 old_parent = parent;
5930 static int log_all_new_ancestors(struct btrfs_trans_handle *trans,
5931 struct btrfs_inode *inode,
5932 struct dentry *parent,
5933 struct btrfs_log_ctx *ctx)
5935 struct btrfs_root *root = inode->root;
5936 const u64 ino = btrfs_ino(inode);
5937 struct btrfs_path *path;
5938 struct btrfs_key search_key;
5942 * For a single hard link case, go through a fast path that does not
5943 * need to iterate the fs/subvolume tree.
5945 if (inode->vfs_inode.i_nlink < 2)
5946 return log_new_ancestors_fast(trans, inode, parent, ctx);
5948 path = btrfs_alloc_path();
5952 search_key.objectid = ino;
5953 search_key.type = BTRFS_INODE_REF_KEY;
5954 search_key.offset = 0;
5956 ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
5963 struct extent_buffer *leaf = path->nodes[0];
5964 int slot = path->slots[0];
5965 struct btrfs_key found_key;
5967 if (slot >= btrfs_header_nritems(leaf)) {
5968 ret = btrfs_next_leaf(root, path);
5976 btrfs_item_key_to_cpu(leaf, &found_key, slot);
5977 if (found_key.objectid != ino ||
5978 found_key.type > BTRFS_INODE_EXTREF_KEY)
5982 * Don't deal with extended references because they are rare
5983 * cases and too complex to deal with (we would need to keep
5984 * track of which subitem we are processing for each item in
5985 * this loop, etc). So just return some error to fall back to
5986 * a transaction commit.
5988 if (found_key.type == BTRFS_INODE_EXTREF_KEY) {
5994 * Logging ancestors needs to do more searches on the fs/subvol
5995 * tree, so it releases the path as needed to avoid deadlocks.
5996 * Keep track of the last inode ref key and resume from that key
5997 * after logging all new ancestors for the current hard link.
5999 memcpy(&search_key, &found_key, sizeof(search_key));
6001 ret = log_new_ancestors(trans, root, path, ctx);
6004 btrfs_release_path(path);
6009 btrfs_free_path(path);
6014 * helper function around btrfs_log_inode to make sure newly created
6015 * parent directories also end up in the log. A minimal inode and backref
6016 * only logging is done for any parent directories that are older than
6017 * the last committed transaction
6019 static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
6020 struct btrfs_inode *inode,
6021 struct dentry *parent,
6025 struct btrfs_log_ctx *ctx)
6027 struct btrfs_root *root = inode->root;
6028 struct btrfs_fs_info *fs_info = root->fs_info;
6029 struct super_block *sb;
6031 u64 last_committed = fs_info->last_trans_committed;
6032 bool log_dentries = false;
6034 sb = inode->vfs_inode.i_sb;
6036 if (btrfs_test_opt(fs_info, NOTREELOG)) {
6042 * If the previous transaction commit didn't complete, we have to do a
6043 * full transaction commit ourselves.
6045 if (fs_info->last_trans_log_full_commit >
6046 fs_info->last_trans_committed) {
6051 if (btrfs_root_refs(&root->root_item) == 0) {
6056 ret = check_parent_dirs_for_sync(trans, inode, parent, sb,
6062 * Skip already logged inodes or inodes corresponding to tmpfiles
6063 * (since logging them is pointless, a link count of 0 means they
6064 * will never be accessible).
6066 if (btrfs_inode_in_log(inode, trans->transid) ||
6067 inode->vfs_inode.i_nlink == 0) {
6068 ret = BTRFS_NO_LOG_SYNC;
6072 ret = start_log_trans(trans, root, ctx);
6076 ret = btrfs_log_inode(trans, root, inode, inode_only, start, end, ctx);
6081 * for a regular file, if its inode is already on disk, we don't
6082 * have to worry about the parents at all. This is because
6083 * we can use the last_unlink_trans field to record renames
6084 * and other fun in this file.
6086 if (S_ISREG(inode->vfs_inode.i_mode) &&
6087 inode->generation <= last_committed &&
6088 inode->last_unlink_trans <= last_committed) {
6093 if (S_ISDIR(inode->vfs_inode.i_mode) && ctx && ctx->log_new_dentries)
6094 log_dentries = true;
6097 * On unlink we must make sure all our current and old parent directory
6098 * inodes are fully logged. This is to prevent leaving dangling
6099 * directory index entries in directories that were our parents but are
6100 * not anymore. Not doing this results in the old parent directory being
6101 * impossible to delete after log replay (rmdir will always fail with
6102 * error -ENOTEMPTY).
6108 * ln testdir/foo testdir/bar
6110 * unlink testdir/bar
6111 * xfs_io -c fsync testdir/foo
6113 * mount fs, triggers log replay
6115 * If we don't log the parent directory (testdir), after log replay the
6116 * directory still has an entry pointing to the file inode using the bar
6117 * name, but a matching BTRFS_INODE_[REF|EXTREF]_KEY does not exist and
6118 * the file inode has a link count of 1.
6124 * ln foo testdir/foo2
6125 * ln foo testdir/foo3
6127 * unlink testdir/foo3
6128 * xfs_io -c fsync foo
6130 * mount fs, triggers log replay
6132 * Similar to the first example, after log replay the parent directory
6133 * testdir still has an entry pointing to the file inode using the name foo3
6134 * but the file inode does not have a matching BTRFS_INODE_REF_KEY item
6135 * and has a link count of 2.
6137 if (inode->last_unlink_trans > last_committed) {
6138 ret = btrfs_log_all_parents(trans, inode, ctx);
6143 ret = log_all_new_ancestors(trans, inode, parent, ctx);
6148 ret = log_new_dir_dentries(trans, root, inode, ctx);
6153 btrfs_set_log_full_commit(trans);
6158 btrfs_remove_log_ctx(root, ctx);
6159 btrfs_end_log_trans(root);
6165 * it is not safe to log a dentry if the chunk root has added new
6166 * chunks. This returns 0 if the dentry was logged, and 1 otherwise.
6167 * If this returns 1, you must commit the transaction to safely get your
6168 * data on disk.
6170 int btrfs_log_dentry_safe(struct btrfs_trans_handle *trans,
6171 struct dentry *dentry,
6174 struct btrfs_log_ctx *ctx)
6176 struct dentry *parent = dget_parent(dentry);
6179 ret = btrfs_log_inode_parent(trans, BTRFS_I(d_inode(dentry)), parent,
6180 start, end, LOG_INODE_ALL, ctx);
6187 * should be called during mount to recover and replay any log trees from the FS
6190 int btrfs_recover_log_trees(struct btrfs_root *log_root_tree)
6193 struct btrfs_path *path;
6194 struct btrfs_trans_handle *trans;
6195 struct btrfs_key key;
6196 struct btrfs_key found_key;
6197 struct btrfs_key tmp_key;
6198 struct btrfs_root *log;
6199 struct btrfs_fs_info *fs_info = log_root_tree->fs_info;
6200 struct walk_control wc = {
6201 .process_func = process_one_buffer,
6205 path = btrfs_alloc_path();
6209 set_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags);
6211 trans = btrfs_start_transaction(fs_info->tree_root, 0);
6212 if (IS_ERR(trans)) {
6213 ret = PTR_ERR(trans);
6220 ret = walk_log_tree(trans, log_root_tree, &wc);
6222 btrfs_handle_fs_error(fs_info, ret,
6223 "Failed to pin buffers while recovering log root tree.");
6228 key.objectid = BTRFS_TREE_LOG_OBJECTID;
6229 key.offset = (u64)-1;
6230 key.type = BTRFS_ROOT_ITEM_KEY;
6233 ret = btrfs_search_slot(NULL, log_root_tree, &key, path, 0, 0);
6236 btrfs_handle_fs_error(fs_info, ret,
6237 "Couldn't find tree log root.");
6241 if (path->slots[0] == 0)
6245 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
6247 btrfs_release_path(path);
6248 if (found_key.objectid != BTRFS_TREE_LOG_OBJECTID)
6251 log = btrfs_read_fs_root(log_root_tree, &found_key);
6254 btrfs_handle_fs_error(fs_info, ret,
6255 "Couldn't read tree log root.");
6259 tmp_key.objectid = found_key.offset;
6260 tmp_key.type = BTRFS_ROOT_ITEM_KEY;
6261 tmp_key.offset = (u64)-1;
6263 wc.replay_dest = btrfs_read_fs_root_no_name(fs_info, &tmp_key);
6264 if (IS_ERR(wc.replay_dest)) {
6265 ret = PTR_ERR(wc.replay_dest);
6266 free_extent_buffer(log->node);
6267 free_extent_buffer(log->commit_root);
6269 btrfs_handle_fs_error(fs_info, ret,
6270 "Couldn't read target root for tree log recovery.");
6274 wc.replay_dest->log_root = log;
6275 btrfs_record_root_in_trans(trans, wc.replay_dest);
6276 ret = walk_log_tree(trans, log, &wc);
6278 if (!ret && wc.stage == LOG_WALK_REPLAY_ALL) {
6279 ret = fixup_inode_link_counts(trans, wc.replay_dest,
6283 if (!ret && wc.stage == LOG_WALK_REPLAY_ALL) {
6284 struct btrfs_root *root = wc.replay_dest;
6286 btrfs_release_path(path);
6289 * We have just replayed everything, and the highest
6290 * objectid of fs roots probably has changed in case
6291 * some inode_item's got replayed.
6293 * root->objectid_mutex is not acquired as log replay
6294 * could only happen during mount.
6296 ret = btrfs_find_highest_objectid(root,
6297 &root->highest_objectid);
6300 key.offset = found_key.offset - 1;
6301 wc.replay_dest->log_root = NULL;
6302 free_extent_buffer(log->node);
6303 free_extent_buffer(log->commit_root);
6309 if (found_key.offset == 0)
6312 btrfs_release_path(path);
6314 /* step one is to pin it all, step two is to replay just inodes */
6317 wc.process_func = replay_one_buffer;
6318 wc.stage = LOG_WALK_REPLAY_INODES;
6321 /* step three is to replay everything */
6322 if (wc.stage < LOG_WALK_REPLAY_ALL) {
6327 btrfs_free_path(path);
6329 /* step 4: commit the transaction, which also unpins the blocks */
6330 ret = btrfs_commit_transaction(trans);
6334 free_extent_buffer(log_root_tree->node);
6335 log_root_tree->log_root = NULL;
6336 clear_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags);
6337 kfree(log_root_tree);
6342 btrfs_end_transaction(wc.trans);
6343 btrfs_free_path(path);
6348 * there are some corner cases where we want to force a full
6349 * commit instead of allowing a directory to be logged.
6351 * They revolve around files that were unlinked from the directory, and
6352 * this function updates the parent directory so that a full commit is
6353 * properly done if it is fsync'd later after the unlinks are done.
6355 * Must be called before the unlink operations (updates to the subvolume tree,
6356 * inodes, etc) are done.
6358 void btrfs_record_unlink_dir(struct btrfs_trans_handle *trans,
6359 struct btrfs_inode *dir, struct btrfs_inode *inode,
6363 * when we're logging a file, if it hasn't been renamed
6364 * or unlinked, and its inode is fully committed on disk,
6365 * we don't have to worry about walking up the directory chain
6366 * to log its parents.
6368 * So, we use the last_unlink_trans field to put this transid
6369 * into the file. When the file is logged we check it and
6370 * don't log the parents if the file is fully on disk.
6372 mutex_lock(&inode->log_mutex);
6373 inode->last_unlink_trans = trans->transid;
6374 mutex_unlock(&inode->log_mutex);
6377 * if this directory was already logged any new
6378 * names for this file/dir will get recorded
6380 if (dir->logged_trans == trans->transid)
6384 * if the inode we're about to unlink was logged,
6385 * the log will be properly updated for any new names
6387 if (inode->logged_trans == trans->transid)
6391 * when renaming files across directories, if the directory
6392 * we're unlinking from gets fsync'd later on, there's
6393 * no way to find the destination directory later and fsync it
6394 * properly. So, we have to be conservative and force commits
6395 * so the new name gets discovered.
6400 /* we can safely do the unlink without any special recording */
6404 mutex_lock(&dir->log_mutex);
6405 dir->last_unlink_trans = trans->transid;
6406 mutex_unlock(&dir->log_mutex);
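/*
 * As the comment above notes, this must run before the unlink itself: callers
 * such as the unlink and rename paths are expected to call
 * btrfs_record_unlink_dir() first and only then remove the name from the
 * subvolume tree (e.g. via btrfs_unlink_inode()).
 */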
6410 * Make sure that if someone attempts to fsync the parent directory of a deleted
6411 * snapshot, it ends up triggering a transaction commit. This is to guarantee
6412 * that after replaying the log tree of the parent directory's root we will not
6413 * see the snapshot anymore and at log replay time we will not see any log tree
6414 * corresponding to the deleted snapshot's root, which could lead to replaying
6415 * it after replaying the log tree of the parent directory (which would replay
6416 * the snapshot delete operation).
6418 * Must be called before the actual snapshot destroy operation (updates to the
6419 * parent root and tree of tree roots trees, etc) are done.
6421 void btrfs_record_snapshot_destroy(struct btrfs_trans_handle *trans,
6422 struct btrfs_inode *dir)
6424 mutex_lock(&dir->log_mutex);
6425 dir->last_unlink_trans = trans->transid;
6426 mutex_unlock(&dir->log_mutex);
6430 * Call this after adding a new name for a file and it will properly
6431 * update the log to reflect the new name.
6433 * @ctx can not be NULL when @sync_log is false, and should be NULL when it's
6434 * true (because it's not used).
6436 * Return value depends on whether @sync_log is true or false.
6437 * When true: returns BTRFS_NEED_TRANS_COMMIT if the transaction needs to be
6438 * committed by the caller, and BTRFS_DONT_NEED_TRANS_COMMIT otherwise.
6440 * When false: returns BTRFS_DONT_NEED_LOG_SYNC if the caller does not need to
6441 * sync the log, BTRFS_NEED_LOG_SYNC if it needs to sync the log,
6442 * or BTRFS_NEED_TRANS_COMMIT if the transaction needs to be
6443 * committed (without attempting to sync the log).
6445 int btrfs_log_new_name(struct btrfs_trans_handle *trans,
6446 struct btrfs_inode *inode, struct btrfs_inode *old_dir,
6447 struct dentry *parent,
6448 bool sync_log, struct btrfs_log_ctx *ctx)
6450 struct btrfs_fs_info *fs_info = trans->fs_info;
6454 * this will force the logging code to walk the dentry chain up to the parent.
6457 if (!S_ISDIR(inode->vfs_inode.i_mode))
6458 inode->last_unlink_trans = trans->transid;
6461 * if this inode hasn't been logged and the directory we're renaming it
6462 * from hasn't been logged, we don't need to log it
6464 if (inode->logged_trans <= fs_info->last_trans_committed &&
6465 (!old_dir || old_dir->logged_trans <= fs_info->last_trans_committed))
6466 return sync_log ? BTRFS_DONT_NEED_TRANS_COMMIT :
6467 BTRFS_DONT_NEED_LOG_SYNC;
6470 struct btrfs_log_ctx ctx2;
6472 btrfs_init_log_ctx(&ctx2, &inode->vfs_inode);
6473 ret = btrfs_log_inode_parent(trans, inode, parent, 0, LLONG_MAX,
6474 LOG_INODE_EXISTS, &ctx2);
6475 if (ret == BTRFS_NO_LOG_SYNC)
6476 return BTRFS_DONT_NEED_TRANS_COMMIT;
6478 return BTRFS_NEED_TRANS_COMMIT;
6480 ret = btrfs_sync_log(trans, inode->root, &ctx2);
6482 return BTRFS_NEED_TRANS_COMMIT;
6483 return BTRFS_DONT_NEED_TRANS_COMMIT;
6487 ret = btrfs_log_inode_parent(trans, inode, parent, 0, LLONG_MAX,
6488 LOG_INODE_EXISTS, ctx);
6489 if (ret == BTRFS_NO_LOG_SYNC)
6490 return BTRFS_DONT_NEED_LOG_SYNC;
6492 return BTRFS_NEED_TRANS_COMMIT;
6494 return BTRFS_NEED_LOG_SYNC;
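/*
 * Illustrative caller pattern for the return values documented above (a
 * sketch only; the variable names are made up and not taken from the real
 * rename code):
 *
 *	ret = btrfs_log_new_name(trans, inode, old_dir, parent, false, &ctx);
 *	if (ret == BTRFS_NEED_LOG_SYNC)
 *		sync_log = true;
 *	else if (ret == BTRFS_NEED_TRANS_COMMIT)
 *		commit_transaction = true;
 *	// else BTRFS_DONT_NEED_LOG_SYNC: nothing else to do for the log
 */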