1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* -*- mode: c; c-basic-offset: 8; -*-
3 * vim: noexpandtab sw=8 ts=8 sts=0:
7 * Defines functions of the journalling API
9 * Copyright (C) 2003, 2004 Oracle. All rights reserved.
13 #include <linux/types.h>
14 #include <linux/slab.h>
15 #include <linux/highmem.h>
16 #include <linux/kthread.h>
17 #include <linux/time.h>
18 #include <linux/random.h>
19 #include <linux/delay.h>
21 #include <cluster/masklog.h>
26 #include "blockcheck.h"
29 #include "extent_map.h"
30 #include "heartbeat.h"
33 #include "localalloc.h"
42 #include "buffer_head_io.h"
43 #include "ocfs2_trace.h"
45 DEFINE_SPINLOCK(trans_inc_lock);
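/* Interval between orphan scans, in milliseconds (five minutes); see
* ocfs2_orphan_scan_timeout(), which adds up to five seconds of random
* jitter on top of this value. */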
47 #define ORPHAN_SCAN_SCHEDULE_TIMEOUT 300000
49 static int ocfs2_force_read_journal(struct inode *inode);
50 static int ocfs2_recover_node(struct ocfs2_super *osb,
51 int node_num, int slot_num);
52 static int __ocfs2_recovery_thread(void *arg);
53 static int ocfs2_commit_cache(struct ocfs2_super *osb);
54 static int __ocfs2_wait_on_mount(struct ocfs2_super *osb, int quota);
55 static int ocfs2_journal_toggle_dirty(struct ocfs2_super *osb,
56 int dirty, int replayed);
57 static int ocfs2_trylock_journal(struct ocfs2_super *osb,
59 static int ocfs2_recover_orphans(struct ocfs2_super *osb,
61 enum ocfs2_orphan_reco_type orphan_reco_type);
62 static int ocfs2_commit_thread(void *arg);
63 static void ocfs2_queue_recovery_completion(struct ocfs2_journal *journal,
65 struct ocfs2_dinode *la_dinode,
66 struct ocfs2_dinode *tl_dinode,
67 struct ocfs2_quota_recovery *qrec,
68 enum ocfs2_orphan_reco_type orphan_reco_type);
70 static inline int ocfs2_wait_on_mount(struct ocfs2_super *osb)
72 return __ocfs2_wait_on_mount(osb, 0);
75 static inline int ocfs2_wait_on_quotas(struct ocfs2_super *osb)
77 return __ocfs2_wait_on_mount(osb, 1);
81 * This replay_map is used to track online/offline slots, so we can recover
82 * offline slots during recovery and mount. */
85 enum ocfs2_replay_state {
86 REPLAY_UNNEEDED = 0, /* Replay is not needed, so ignore this map */
87 REPLAY_NEEDED, /* Replay slots marked in rm_replay_slots */
88 REPLAY_DONE /* Replay was already queued */
91 struct ocfs2_replay_map {
92 unsigned int rm_slots;
93 enum ocfs2_replay_state rm_state;
94 unsigned char rm_replay_slots[0];
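/* Flexible array with one flag per slot; the whole map is allocated as
* sizeof(struct ocfs2_replay_map) + max_slots bytes in
* ocfs2_compute_replay_slots(). */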
97 static void ocfs2_replay_map_set_state(struct ocfs2_super *osb, int state)
102 /* If we've already queued the replay, we don't have any more to do */
103 if (osb->replay_map->rm_state == REPLAY_DONE)
106 osb->replay_map->rm_state = state;
109 int ocfs2_compute_replay_slots(struct ocfs2_super *osb)
111 struct ocfs2_replay_map *replay_map;
114 /* If replay map is already set, we don't do it again */
118 replay_map = kzalloc(sizeof(struct ocfs2_replay_map) +
119 (osb->max_slots * sizeof(char)), GFP_KERNEL);
126 spin_lock(&osb->osb_lock);
128 replay_map->rm_slots = osb->max_slots;
129 replay_map->rm_state = REPLAY_UNNEEDED;
131 /* set rm_replay_slots for offline slot(s) */
132 for (i = 0; i < replay_map->rm_slots; i++) {
133 if (ocfs2_slot_to_node_num_locked(osb, i, &node_num) == -ENOENT)
134 replay_map->rm_replay_slots[i] = 1;
137 osb->replay_map = replay_map;
138 spin_unlock(&osb->osb_lock);
142 static void ocfs2_queue_replay_slots(struct ocfs2_super *osb,
143 enum ocfs2_orphan_reco_type orphan_reco_type)
145 struct ocfs2_replay_map *replay_map = osb->replay_map;
151 if (replay_map->rm_state != REPLAY_NEEDED)
154 for (i = 0; i < replay_map->rm_slots; i++)
155 if (replay_map->rm_replay_slots[i])
156 ocfs2_queue_recovery_completion(osb->journal, i, NULL,
159 replay_map->rm_state = REPLAY_DONE;
162 static void ocfs2_free_replay_slots(struct ocfs2_super *osb)
164 struct ocfs2_replay_map *replay_map = osb->replay_map;
166 if (!osb->replay_map)
170 osb->replay_map = NULL;
173 int ocfs2_recovery_init(struct ocfs2_super *osb)
175 struct ocfs2_recovery_map *rm;
177 mutex_init(&osb->recovery_lock);
178 osb->disable_recovery = 0;
179 osb->recovery_thread_task = NULL;
180 init_waitqueue_head(&osb->recovery_event);
182 rm = kzalloc(sizeof(struct ocfs2_recovery_map) +
183 osb->max_slots * sizeof(unsigned int),
190 rm->rm_entries = (unsigned int *)((char *)rm +
191 sizeof(struct ocfs2_recovery_map));
192 osb->recovery_map = rm;
197 /* we can't grab the goofy sem lock from inside wait_event, so we use
198 * memory barriers to make sure that we'll see the null task before
199 * being woken up */
200 static int ocfs2_recovery_thread_running(struct ocfs2_super *osb)
203 return osb->recovery_thread_task != NULL;
206 void ocfs2_recovery_exit(struct ocfs2_super *osb)
208 struct ocfs2_recovery_map *rm;
210 /* disable any new recovery threads and wait for any currently
211 * running ones to exit. Do this before setting the vol_state. */
212 mutex_lock(&osb->recovery_lock);
213 osb->disable_recovery = 1;
214 mutex_unlock(&osb->recovery_lock);
215 wait_event(osb->recovery_event, !ocfs2_recovery_thread_running(osb));
217 /* At this point, we know that no more recovery threads can be
218 * launched, so wait for any recovery completion work to
219 * complete. */
221 flush_workqueue(osb->ocfs2_wq);
224 * Now that recovery is shut down, and the osb is about to be
225 * freed, the osb_lock is not taken here.
227 rm = osb->recovery_map;
228 /* XXX: Should we bug if there are dirty entries? */
233 static int __ocfs2_recovery_map_test(struct ocfs2_super *osb,
234 unsigned int node_num)
237 struct ocfs2_recovery_map *rm = osb->recovery_map;
239 assert_spin_locked(&osb->osb_lock);
241 for (i = 0; i < rm->rm_used; i++) {
242 if (rm->rm_entries[i] == node_num)
249 /* Behaves like test-and-set. Returns the previous value */
250 static int ocfs2_recovery_map_set(struct ocfs2_super *osb,
251 unsigned int node_num)
253 struct ocfs2_recovery_map *rm = osb->recovery_map;
255 spin_lock(&osb->osb_lock);
256 if (__ocfs2_recovery_map_test(osb, node_num)) {
257 spin_unlock(&osb->osb_lock);
261 /* XXX: Can this be exploited? Not from o2dlm... */
262 BUG_ON(rm->rm_used >= osb->max_slots);
264 rm->rm_entries[rm->rm_used] = node_num;
266 spin_unlock(&osb->osb_lock);
271 static void ocfs2_recovery_map_clear(struct ocfs2_super *osb,
272 unsigned int node_num)
275 struct ocfs2_recovery_map *rm = osb->recovery_map;
277 spin_lock(&osb->osb_lock);
279 for (i = 0; i < rm->rm_used; i++) {
280 if (rm->rm_entries[i] == node_num)
284 if (i < rm->rm_used) {
285 /* XXX: be careful with the pointer math */
286 memmove(&(rm->rm_entries[i]), &(rm->rm_entries[i + 1]),
287 (rm->rm_used - i - 1) * sizeof(unsigned int));
291 spin_unlock(&osb->osb_lock);
294 static int ocfs2_commit_cache(struct ocfs2_super *osb)
297 unsigned int flushed;
298 struct ocfs2_journal *journal = NULL;
300 journal = osb->journal;
302 /* Flush all pending commits and checkpoint the journal. */
303 down_write(&journal->j_trans_barrier);
305 flushed = atomic_read(&journal->j_num_trans);
306 trace_ocfs2_commit_cache_begin(flushed);
308 up_write(&journal->j_trans_barrier);
312 jbd2_journal_lock_updates(journal->j_journal);
313 status = jbd2_journal_flush(journal->j_journal);
314 jbd2_journal_unlock_updates(journal->j_journal);
316 up_write(&journal->j_trans_barrier);
321 ocfs2_inc_trans_id(journal);
323 flushed = atomic_read(&journal->j_num_trans);
324 atomic_set(&journal->j_num_trans, 0);
325 up_write(&journal->j_trans_barrier);
327 trace_ocfs2_commit_cache_end(journal->j_trans_id, flushed);
329 ocfs2_wake_downconvert_thread(osb);
330 wake_up(&journal->j_checkpointed);
335 handle_t *ocfs2_start_trans(struct ocfs2_super *osb, int max_buffs)
337 journal_t *journal = osb->journal->j_journal;
340 BUG_ON(!osb || !osb->journal->j_journal);
342 if (ocfs2_is_hard_readonly(osb))
343 return ERR_PTR(-EROFS);
345 BUG_ON(osb->journal->j_state == OCFS2_JOURNAL_FREE);
346 BUG_ON(max_buffs <= 0);
348 /* Nested transaction? Just return the handle... */
349 if (journal_current_handle())
350 return jbd2_journal_start(journal, max_buffs);
352 sb_start_intwrite(osb->sb);
354 down_read(&osb->journal->j_trans_barrier);
356 handle = jbd2_journal_start(journal, max_buffs);
357 if (IS_ERR(handle)) {
358 up_read(&osb->journal->j_trans_barrier);
359 sb_end_intwrite(osb->sb);
361 mlog_errno(PTR_ERR(handle));
363 if (is_journal_aborted(journal)) {
364 ocfs2_abort(osb->sb, "Detected aborted journal\n");
365 handle = ERR_PTR(-EROFS);
368 if (!ocfs2_mount_local(osb))
369 atomic_inc(&(osb->journal->j_num_trans));
375 int ocfs2_commit_trans(struct ocfs2_super *osb,
379 struct ocfs2_journal *journal = osb->journal;
383 nested = handle->h_ref > 1;
384 ret = jbd2_journal_stop(handle);
389 up_read(&journal->j_trans_barrier);
390 sb_end_intwrite(osb->sb);
397 * 'nblocks' is what you want to add to the current transaction.
399 * This might call jbd2_journal_restart() which will commit dirty buffers
400 * and then restart the transaction. Before calling
401 * ocfs2_extend_trans(), any changed blocks should have been
402 * dirtied. After calling it, all blocks which need to be changed must
403 * go through another set of journal_access/journal_dirty calls.
405 * WARNING: This will not release any semaphores or disk locks taken
406 * during the transaction, so make sure they were taken *before*
407 * start_trans or we'll have ordering deadlocks.
409 * WARNING2: Note that we do *not* drop j_trans_barrier here. This is
410 * good because transaction ids haven't yet been recorded on the
411 * cluster locks associated with this handle.
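*
* A minimal caller-side sketch (illustrative only, not code taken from
* this file; "inode", "bh" and "extra_credits" are hypothetical):
*
*	status = ocfs2_extend_trans(handle, extra_credits);
*	if (status == 0) {
*		status = ocfs2_journal_access_di(handle, INODE_CACHE(inode),
*						 bh, OCFS2_JOURNAL_ACCESS_WRITE);
*		...modify the block through bh...
*		ocfs2_journal_dirty(handle, bh);
*	}
*
* That is, after a successful extend (which may have restarted the
* handle underneath), re-request journal access and re-dirty every
* block that still needs to be modified, per the warnings above.
*/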
413 int ocfs2_extend_trans(handle_t *handle, int nblocks)
415 int status, old_nblocks;
423 old_nblocks = handle->h_buffer_credits;
425 trace_ocfs2_extend_trans(old_nblocks, nblocks);
427 #ifdef CONFIG_OCFS2_DEBUG_FS
430 status = jbd2_journal_extend(handle, nblocks);
438 trace_ocfs2_extend_trans_restart(old_nblocks + nblocks);
439 status = jbd2_journal_restart(handle,
440 old_nblocks + nblocks);
453 * If we have fewer than thresh credits, extend by OCFS2_MAX_TRANS_DATA.
454 * If that fails, restart the transaction & regain write access for the
455 * buffer head which is used for metadata modifications.
456 * Taken from Ext4: extend_or_restart_transaction()
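*
* Illustrative use only (not code from this file; "credits_per_iter" is a
* hypothetical per-iteration credit estimate): a caller dirtying blocks
* in a long loop can call
*
*	status = ocfs2_allocate_extend_trans(handle, credits_per_iter);
*
* at the top of each iteration, so the handle is only extended (or
* restarted) once fewer than credits_per_iter credits remain.
*/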
458 int ocfs2_allocate_extend_trans(handle_t *handle, int thresh)
460 int status, old_nblks;
464 old_nblks = handle->h_buffer_credits;
465 trace_ocfs2_allocate_extend_trans(old_nblks, thresh);
467 if (old_nblks < thresh)
470 status = jbd2_journal_extend(handle, OCFS2_MAX_TRANS_DATA);
477 status = jbd2_journal_restart(handle, OCFS2_MAX_TRANS_DATA);
487 struct ocfs2_triggers {
488 struct jbd2_buffer_trigger_type ot_triggers;
492 static inline struct ocfs2_triggers *to_ocfs2_trigger(struct jbd2_buffer_trigger_type *triggers)
494 return container_of(triggers, struct ocfs2_triggers, ot_triggers);
497 static void ocfs2_frozen_trigger(struct jbd2_buffer_trigger_type *triggers,
498 struct buffer_head *bh,
499 void *data, size_t size)
501 struct ocfs2_triggers *ot = to_ocfs2_trigger(triggers);
504 * We aren't guaranteed to have the superblock here, so we
505 * must unconditionally compute the ecc data.
506 * __ocfs2_journal_access() will only set the triggers if
507 * metaecc is enabled.
509 ocfs2_block_check_compute(data, size, data + ot->ot_offset);
513 * Quota blocks have their own trigger because the struct ocfs2_block_check
514 * offset depends on the blocksize.
516 static void ocfs2_dq_frozen_trigger(struct jbd2_buffer_trigger_type *triggers,
517 struct buffer_head *bh,
518 void *data, size_t size)
520 struct ocfs2_disk_dqtrailer *dqt =
521 ocfs2_block_dqtrailer(size, data);
524 * We aren't guaranteed to have the superblock here, so we
525 * must unconditionally compute the ecc data.
526 * __ocfs2_journal_access() will only set the triggers if
527 * metaecc is enabled.
529 ocfs2_block_check_compute(data, size, &dqt->dq_check);
533 * Directory blocks also have their own trigger because the
534 * struct ocfs2_block_check offset depends on the blocksize.
536 static void ocfs2_db_frozen_trigger(struct jbd2_buffer_trigger_type *triggers,
537 struct buffer_head *bh,
538 void *data, size_t size)
540 struct ocfs2_dir_block_trailer *trailer =
541 ocfs2_dir_trailer_from_size(size, data);
544 * We aren't guaranteed to have the superblock here, so we
545 * must unconditionally compute the ecc data.
546 * __ocfs2_journal_access() will only set the triggers if
547 * metaecc is enabled.
549 ocfs2_block_check_compute(data, size, &trailer->db_check);
552 static void ocfs2_abort_trigger(struct jbd2_buffer_trigger_type *triggers,
553 struct buffer_head *bh)
556 "ocfs2_abort_trigger called by JBD2. bh = 0x%lx, "
557 "bh->b_blocknr = %llu\n",
559 (unsigned long long)bh->b_blocknr);
561 ocfs2_error(bh->b_bdev->bd_super,
562 "JBD2 has aborted our journal, ocfs2 cannot continue\n");
565 static struct ocfs2_triggers di_triggers = {
567 .t_frozen = ocfs2_frozen_trigger,
568 .t_abort = ocfs2_abort_trigger,
570 .ot_offset = offsetof(struct ocfs2_dinode, i_check),
573 static struct ocfs2_triggers eb_triggers = {
575 .t_frozen = ocfs2_frozen_trigger,
576 .t_abort = ocfs2_abort_trigger,
578 .ot_offset = offsetof(struct ocfs2_extent_block, h_check),
581 static struct ocfs2_triggers rb_triggers = {
583 .t_frozen = ocfs2_frozen_trigger,
584 .t_abort = ocfs2_abort_trigger,
586 .ot_offset = offsetof(struct ocfs2_refcount_block, rf_check),
589 static struct ocfs2_triggers gd_triggers = {
591 .t_frozen = ocfs2_frozen_trigger,
592 .t_abort = ocfs2_abort_trigger,
594 .ot_offset = offsetof(struct ocfs2_group_desc, bg_check),
597 static struct ocfs2_triggers db_triggers = {
599 .t_frozen = ocfs2_db_frozen_trigger,
600 .t_abort = ocfs2_abort_trigger,
604 static struct ocfs2_triggers xb_triggers = {
606 .t_frozen = ocfs2_frozen_trigger,
607 .t_abort = ocfs2_abort_trigger,
609 .ot_offset = offsetof(struct ocfs2_xattr_block, xb_check),
612 static struct ocfs2_triggers dq_triggers = {
614 .t_frozen = ocfs2_dq_frozen_trigger,
615 .t_abort = ocfs2_abort_trigger,
619 static struct ocfs2_triggers dr_triggers = {
621 .t_frozen = ocfs2_frozen_trigger,
622 .t_abort = ocfs2_abort_trigger,
624 .ot_offset = offsetof(struct ocfs2_dx_root_block, dr_check),
627 static struct ocfs2_triggers dl_triggers = {
629 .t_frozen = ocfs2_frozen_trigger,
630 .t_abort = ocfs2_abort_trigger,
632 .ot_offset = offsetof(struct ocfs2_dx_leaf, dl_check),
635 static int __ocfs2_journal_access(handle_t *handle,
636 struct ocfs2_caching_info *ci,
637 struct buffer_head *bh,
638 struct ocfs2_triggers *triggers,
642 struct ocfs2_super *osb =
643 OCFS2_SB(ocfs2_metadata_cache_get_super(ci));
645 BUG_ON(!ci || !ci->ci_ops);
649 trace_ocfs2_journal_access(
650 (unsigned long long)ocfs2_metadata_cache_owner(ci),
651 (unsigned long long)bh->b_blocknr, type, bh->b_size);
653 /* we can safely remove this assertion after testing. */
654 if (!buffer_uptodate(bh)) {
655 mlog(ML_ERROR, "giving me a buffer that's not uptodate!\n");
656 mlog(ML_ERROR, "b_blocknr=%llu, b_state=0x%lx\n",
657 (unsigned long long)bh->b_blocknr, bh->b_state);
661 * A previous transaction with a couple of buffer heads failed
662 * to checkpoint, so all of its bhs were marked as BH_Write_EIO.
663 * For the current transaction, this bh is just one of those error
664 * bhs that the previous transaction handled. We can't simply clear
665 * its BH_Write_EIO and reuse it directly, since the other bhs are
666 * not written to disk yet and that would cause metadata
667 * inconsistency. So we should set the fs read-only to avoid
668 * further damage. */
670 if (buffer_write_io_error(bh) && !buffer_uptodate(bh)) {
672 return ocfs2_error(osb->sb, "A previous attempt to "
673 "write this buffer head failed\n");
678 /* Set the current transaction information on the ci so
679 * that the locking code knows whether it can drop its locks
680 * on this ci or not. We're protected from the commit
681 * thread updating the current transaction id until
682 * ocfs2_commit_trans() because ocfs2_start_trans() took
683 * j_trans_barrier for us. */
684 ocfs2_set_ci_lock_trans(osb->journal, ci);
686 ocfs2_metadata_cache_io_lock(ci);
688 case OCFS2_JOURNAL_ACCESS_CREATE:
689 case OCFS2_JOURNAL_ACCESS_WRITE:
690 status = jbd2_journal_get_write_access(handle, bh);
693 case OCFS2_JOURNAL_ACCESS_UNDO:
694 status = jbd2_journal_get_undo_access(handle, bh);
699 mlog(ML_ERROR, "Unknown access type!\n");
701 if (!status && ocfs2_meta_ecc(osb) && triggers)
702 jbd2_journal_set_triggers(bh, &triggers->ot_triggers);
703 ocfs2_metadata_cache_io_unlock(ci);
706 mlog(ML_ERROR, "Error %d getting %d access to buffer!\n",
712 int ocfs2_journal_access_di(handle_t *handle, struct ocfs2_caching_info *ci,
713 struct buffer_head *bh, int type)
715 return __ocfs2_journal_access(handle, ci, bh, &di_triggers, type);
718 int ocfs2_journal_access_eb(handle_t *handle, struct ocfs2_caching_info *ci,
719 struct buffer_head *bh, int type)
721 return __ocfs2_journal_access(handle, ci, bh, &eb_triggers, type);
724 int ocfs2_journal_access_rb(handle_t *handle, struct ocfs2_caching_info *ci,
725 struct buffer_head *bh, int type)
727 return __ocfs2_journal_access(handle, ci, bh, &rb_triggers,
731 int ocfs2_journal_access_gd(handle_t *handle, struct ocfs2_caching_info *ci,
732 struct buffer_head *bh, int type)
734 return __ocfs2_journal_access(handle, ci, bh, &gd_triggers, type);
737 int ocfs2_journal_access_db(handle_t *handle, struct ocfs2_caching_info *ci,
738 struct buffer_head *bh, int type)
740 return __ocfs2_journal_access(handle, ci, bh, &db_triggers, type);
743 int ocfs2_journal_access_xb(handle_t *handle, struct ocfs2_caching_info *ci,
744 struct buffer_head *bh, int type)
746 return __ocfs2_journal_access(handle, ci, bh, &xb_triggers, type);
749 int ocfs2_journal_access_dq(handle_t *handle, struct ocfs2_caching_info *ci,
750 struct buffer_head *bh, int type)
752 return __ocfs2_journal_access(handle, ci, bh, &dq_triggers, type);
755 int ocfs2_journal_access_dr(handle_t *handle, struct ocfs2_caching_info *ci,
756 struct buffer_head *bh, int type)
758 return __ocfs2_journal_access(handle, ci, bh, &dr_triggers, type);
761 int ocfs2_journal_access_dl(handle_t *handle, struct ocfs2_caching_info *ci,
762 struct buffer_head *bh, int type)
764 return __ocfs2_journal_access(handle, ci, bh, &dl_triggers, type);
767 int ocfs2_journal_access(handle_t *handle, struct ocfs2_caching_info *ci,
768 struct buffer_head *bh, int type)
770 return __ocfs2_journal_access(handle, ci, bh, NULL, type);
773 void ocfs2_journal_dirty(handle_t *handle, struct buffer_head *bh)
777 trace_ocfs2_journal_dirty((unsigned long long)bh->b_blocknr);
779 status = jbd2_journal_dirty_metadata(handle, bh);
782 if (!is_handle_aborted(handle)) {
783 journal_t *journal = handle->h_transaction->t_journal;
784 struct super_block *sb = bh->b_bdev->bd_super;
786 mlog(ML_ERROR, "jbd2_journal_dirty_metadata failed. "
787 "Aborting transaction and journal.\n");
788 handle->h_err = status;
789 jbd2_journal_abort_handle(handle);
790 jbd2_journal_abort(journal, status);
791 ocfs2_abort(sb, "Journal already aborted.\n");
796 #define OCFS2_DEFAULT_COMMIT_INTERVAL (HZ * JBD2_DEFAULT_MAX_COMMIT_AGE)
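/* With the stock JBD2_DEFAULT_MAX_COMMIT_AGE of 5 this works out to a
* five second default commit interval; a non-zero
* osb->osb_commit_interval overrides it in ocfs2_set_journal_params()
* below. */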
798 void ocfs2_set_journal_params(struct ocfs2_super *osb)
800 journal_t *journal = osb->journal->j_journal;
801 unsigned long commit_interval = OCFS2_DEFAULT_COMMIT_INTERVAL;
803 if (osb->osb_commit_interval)
804 commit_interval = osb->osb_commit_interval;
806 write_lock(&journal->j_state_lock);
807 journal->j_commit_interval = commit_interval;
808 if (osb->s_mount_opt & OCFS2_MOUNT_BARRIER)
809 journal->j_flags |= JBD2_BARRIER;
811 journal->j_flags &= ~JBD2_BARRIER;
812 write_unlock(&journal->j_state_lock);
815 int ocfs2_journal_init(struct ocfs2_journal *journal, int *dirty)
818 struct inode *inode = NULL; /* the journal inode */
819 journal_t *j_journal = NULL;
820 struct ocfs2_dinode *di = NULL;
821 struct buffer_head *bh = NULL;
822 struct ocfs2_super *osb;
827 osb = journal->j_osb;
829 /* already have the inode for our journal */
830 inode = ocfs2_get_system_file_inode(osb, JOURNAL_SYSTEM_INODE,
837 if (is_bad_inode(inode)) {
838 mlog(ML_ERROR, "access error (bad inode)\n");
845 SET_INODE_JOURNAL(inode);
846 OCFS2_I(inode)->ip_open_count++;
848 /* Skip recovery waits here - journal inode metadata never
849 * changes in a live cluster so it can be considered an
850 * exception to the rule. */
851 status = ocfs2_inode_lock_full(inode, &bh, 1, OCFS2_META_LOCK_RECOVERY);
853 if (status != -ERESTARTSYS)
854 mlog(ML_ERROR, "Could not get lock on journal!\n");
859 di = (struct ocfs2_dinode *)bh->b_data;
861 if (i_size_read(inode) < OCFS2_MIN_JOURNAL_SIZE) {
862 mlog(ML_ERROR, "Journal file size (%lld) is too small!\n",
868 trace_ocfs2_journal_init(i_size_read(inode),
869 (unsigned long long)inode->i_blocks,
870 OCFS2_I(inode)->ip_clusters);
872 /* call the kernel's journal init function now */
873 j_journal = jbd2_journal_init_inode(inode);
874 if (j_journal == NULL) {
875 mlog(ML_ERROR, "Linux journal layer error\n");
880 trace_ocfs2_journal_init_maxlen(j_journal->j_maxlen);
882 *dirty = (le32_to_cpu(di->id1.journal1.ij_flags) &
883 OCFS2_JOURNAL_DIRTY_FL);
885 journal->j_journal = j_journal;
886 journal->j_inode = inode;
889 ocfs2_set_journal_params(osb);
891 journal->j_state = OCFS2_JOURNAL_LOADED;
897 ocfs2_inode_unlock(inode, 1);
900 OCFS2_I(inode)->ip_open_count--;
908 static void ocfs2_bump_recovery_generation(struct ocfs2_dinode *di)
910 le32_add_cpu(&(di->id1.journal1.ij_recovery_generation), 1);
913 static u32 ocfs2_get_recovery_generation(struct ocfs2_dinode *di)
915 return le32_to_cpu(di->id1.journal1.ij_recovery_generation);
918 static int ocfs2_journal_toggle_dirty(struct ocfs2_super *osb,
919 int dirty, int replayed)
923 struct ocfs2_journal *journal = osb->journal;
924 struct buffer_head *bh = journal->j_bh;
925 struct ocfs2_dinode *fe;
927 fe = (struct ocfs2_dinode *)bh->b_data;
929 /* The journal bh on the osb always comes from ocfs2_journal_init()
930 * and was validated there inside ocfs2_inode_lock_full(). It's a
931 * code bug if we mess it up. */
932 BUG_ON(!OCFS2_IS_VALID_DINODE(fe));
934 flags = le32_to_cpu(fe->id1.journal1.ij_flags);
936 flags |= OCFS2_JOURNAL_DIRTY_FL;
938 flags &= ~OCFS2_JOURNAL_DIRTY_FL;
939 fe->id1.journal1.ij_flags = cpu_to_le32(flags);
942 ocfs2_bump_recovery_generation(fe);
944 ocfs2_compute_meta_ecc(osb->sb, bh->b_data, &fe->i_check);
945 status = ocfs2_write_block(osb, bh, INODE_CACHE(journal->j_inode));
953 * If the journal has been kmalloc'd it needs to be freed after this call. */
956 void ocfs2_journal_shutdown(struct ocfs2_super *osb)
958 struct ocfs2_journal *journal = NULL;
960 struct inode *inode = NULL;
961 int num_running_trans = 0;
965 journal = osb->journal;
969 inode = journal->j_inode;
971 if (journal->j_state != OCFS2_JOURNAL_LOADED)
974 /* need to inc inode use count - jbd2_journal_destroy will iput. */
978 num_running_trans = atomic_read(&(osb->journal->j_num_trans));
979 trace_ocfs2_journal_shutdown(num_running_trans);
981 /* Do a commit_cache here. It will flush our journal, *and*
982 * release any locks that are still held.
983 * Set the SHUTDOWN flag and release the trans lock;
984 * the commit thread will take the trans lock for us below. */
985 journal->j_state = OCFS2_JOURNAL_IN_SHUTDOWN;
987 /* The OCFS2_JOURNAL_IN_SHUTDOWN state will signal commit_cache not to
988 * drop the trans_lock (which we want to hold until we
989 * completely destroy the journal). */
990 if (osb->commit_task) {
991 /* Wait for the commit thread */
992 trace_ocfs2_journal_shutdown_wait(osb->commit_task);
993 kthread_stop(osb->commit_task);
994 osb->commit_task = NULL;
997 BUG_ON(atomic_read(&(osb->journal->j_num_trans)) != 0);
999 if (ocfs2_mount_local(osb)) {
1000 jbd2_journal_lock_updates(journal->j_journal);
1001 status = jbd2_journal_flush(journal->j_journal);
1002 jbd2_journal_unlock_updates(journal->j_journal);
1007 /* Shutdown the kernel journal system */
1008 if (!jbd2_journal_destroy(journal->j_journal) && !status) {
1010 * Do not toggle if the flush was unsuccessful, otherwise
1011 * we will leave dirty metadata in a "clean" journal
1013 status = ocfs2_journal_toggle_dirty(osb, 0, 0);
1017 journal->j_journal = NULL;
1019 OCFS2_I(inode)->ip_open_count--;
1021 /* unlock our journal */
1022 ocfs2_inode_unlock(inode, 1);
1024 brelse(journal->j_bh);
1025 journal->j_bh = NULL;
1027 journal->j_state = OCFS2_JOURNAL_FREE;
1029 // up_write(&journal->j_trans_barrier);
1034 static void ocfs2_clear_journal_error(struct super_block *sb,
1040 olderr = jbd2_journal_errno(journal);
1042 mlog(ML_ERROR, "File system error %d recorded in "
1043 "journal %u.\n", olderr, slot);
1044 mlog(ML_ERROR, "File system on device %s needs checking.\n",
1047 jbd2_journal_ack_err(journal);
1048 jbd2_journal_clear_err(journal);
1052 int ocfs2_journal_load(struct ocfs2_journal *journal, int local, int replayed)
1055 struct ocfs2_super *osb;
1059 osb = journal->j_osb;
1061 status = jbd2_journal_load(journal->j_journal);
1063 mlog(ML_ERROR, "Failed to load journal!\n");
1067 ocfs2_clear_journal_error(osb->sb, journal->j_journal, osb->slot_num);
1069 status = ocfs2_journal_toggle_dirty(osb, 1, replayed);
1075 /* Launch the commit thread */
1077 osb->commit_task = kthread_run(ocfs2_commit_thread, osb,
1078 "ocfs2cmt-%s", osb->uuid_str);
1079 if (IS_ERR(osb->commit_task)) {
1080 status = PTR_ERR(osb->commit_task);
1081 osb->commit_task = NULL;
1082 mlog(ML_ERROR, "unable to launch ocfs2commit thread, "
1083 "error=%d", status);
1087 osb->commit_task = NULL;
1094 /* 'full' flag tells us whether we clear out all blocks or if we just
1095 * mark the journal clean */
1096 int ocfs2_journal_wipe(struct ocfs2_journal *journal, int full)
1102 status = jbd2_journal_wipe(journal->j_journal, full);
1108 status = ocfs2_journal_toggle_dirty(journal->j_osb, 0, 0);
1116 static int ocfs2_recovery_completed(struct ocfs2_super *osb)
1119 struct ocfs2_recovery_map *rm = osb->recovery_map;
1121 spin_lock(&osb->osb_lock);
1122 empty = (rm->rm_used == 0);
1123 spin_unlock(&osb->osb_lock);
1128 void ocfs2_wait_for_recovery(struct ocfs2_super *osb)
1130 wait_event(osb->recovery_event, ocfs2_recovery_completed(osb));
1134 * JBD2 might read a cached version of another node's journal file. We
1135 * don't want this as this file changes often and we get no
1136 * notification on those changes. The only way to be sure that we've
1137 * got the most up to date version of those blocks then is to force
1138 * read them off disk. Just searching through the buffer cache won't
1139 * work as there may be pages backing this file which are still marked
1140 * up to date. We know things can't change on this file underneath us
1141 * as we have the lock by now :)
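*
* In practice this bypasses the page cache: the loop below maps each
* virtual block to its physical location with
* ocfs2_extent_map_get_blocks() and reads it synchronously via
* ocfs2_read_blocks_sync(), without populating the uptodate cache.
*/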
1143 static int ocfs2_force_read_journal(struct inode *inode)
1147 u64 v_blkno, p_blkno, p_blocks, num_blocks;
1148 struct buffer_head *bh = NULL;
1149 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1151 num_blocks = ocfs2_blocks_for_bytes(inode->i_sb, i_size_read(inode));
1153 while (v_blkno < num_blocks) {
1154 status = ocfs2_extent_map_get_blocks(inode, v_blkno,
1155 &p_blkno, &p_blocks, NULL);
1161 for (i = 0; i < p_blocks; i++, p_blkno++) {
1162 bh = __find_get_block(osb->sb->s_bdev, p_blkno,
1163 osb->sb->s_blocksize);
1164 /* block not cached. */
1170 /* We are reading journal data which should not
1171 * be put in the uptodate cache.
1173 status = ocfs2_read_blocks_sync(osb, p_blkno, 1, &bh);
1183 v_blkno += p_blocks;
1190 struct ocfs2_la_recovery_item {
1191 struct list_head lri_list;
1193 struct ocfs2_dinode *lri_la_dinode;
1194 struct ocfs2_dinode *lri_tl_dinode;
1195 struct ocfs2_quota_recovery *lri_qrec;
1196 enum ocfs2_orphan_reco_type lri_orphan_reco_type;
1199 /* Does the second half of the recovery process. By this point, the
1200 * node is marked clean and can actually be considered recovered,
1201 * hence it's no longer in the recovery map, but there's still some
1202 * cleanup we can do which shouldn't happen within the recovery thread
1203 * as locking in that context becomes very difficult if we are to take
1204 * recovering nodes into account.
1206 * NOTE: This function can and will sleep on recovery of other nodes
1207 * during cluster locking, just like any other ocfs2 process.
1209 void ocfs2_complete_recovery(struct work_struct *work)
1212 struct ocfs2_journal *journal =
1213 container_of(work, struct ocfs2_journal, j_recovery_work);
1214 struct ocfs2_super *osb = journal->j_osb;
1215 struct ocfs2_dinode *la_dinode, *tl_dinode;
1216 struct ocfs2_la_recovery_item *item, *n;
1217 struct ocfs2_quota_recovery *qrec;
1218 enum ocfs2_orphan_reco_type orphan_reco_type;
1219 LIST_HEAD(tmp_la_list);
1221 trace_ocfs2_complete_recovery(
1222 (unsigned long long)OCFS2_I(journal->j_inode)->ip_blkno);
1224 spin_lock(&journal->j_lock);
1225 list_splice_init(&journal->j_la_cleanups, &tmp_la_list);
1226 spin_unlock(&journal->j_lock);
1228 list_for_each_entry_safe(item, n, &tmp_la_list, lri_list) {
1229 list_del_init(&item->lri_list);
1231 ocfs2_wait_on_quotas(osb);
1233 la_dinode = item->lri_la_dinode;
1234 tl_dinode = item->lri_tl_dinode;
1235 qrec = item->lri_qrec;
1236 orphan_reco_type = item->lri_orphan_reco_type;
1238 trace_ocfs2_complete_recovery_slot(item->lri_slot,
1239 la_dinode ? le64_to_cpu(la_dinode->i_blkno) : 0,
1240 tl_dinode ? le64_to_cpu(tl_dinode->i_blkno) : 0,
1244 ret = ocfs2_complete_local_alloc_recovery(osb,
1253 ret = ocfs2_complete_truncate_log_recovery(osb,
1261 ret = ocfs2_recover_orphans(osb, item->lri_slot,
1267 ret = ocfs2_finish_quota_recovery(osb, qrec,
1271 /* Recovery info is already freed now */
1277 trace_ocfs2_complete_recovery_end(ret);
1280 /* NOTE: This function always eats your references to la_dinode and
1281 * tl_dinode, either manually on error, or by passing them to
1282 * ocfs2_complete_recovery */
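/* In other words, callers hand off ownership here: on allocation failure
* the dinodes (and any quota recovery context) are freed immediately,
* otherwise ocfs2_complete_recovery() frees them once the queued work
* item has run. */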
1283 static void ocfs2_queue_recovery_completion(struct ocfs2_journal *journal,
1285 struct ocfs2_dinode *la_dinode,
1286 struct ocfs2_dinode *tl_dinode,
1287 struct ocfs2_quota_recovery *qrec,
1288 enum ocfs2_orphan_reco_type orphan_reco_type)
1290 struct ocfs2_la_recovery_item *item;
1292 item = kmalloc(sizeof(struct ocfs2_la_recovery_item), GFP_NOFS);
1294 /* Though we wish to avoid it, we are in fact safe in
1295 * skipping local alloc cleanup as fsck.ocfs2 is more
1296 * than capable of reclaiming unused space. */
1301 ocfs2_free_quota_recovery(qrec);
1303 mlog_errno(-ENOMEM);
1307 INIT_LIST_HEAD(&item->lri_list);
1308 item->lri_la_dinode = la_dinode;
1309 item->lri_slot = slot_num;
1310 item->lri_tl_dinode = tl_dinode;
1311 item->lri_qrec = qrec;
1312 item->lri_orphan_reco_type = orphan_reco_type;
1314 spin_lock(&journal->j_lock);
1315 list_add_tail(&item->lri_list, &journal->j_la_cleanups);
1316 queue_work(journal->j_osb->ocfs2_wq, &journal->j_recovery_work);
1317 spin_unlock(&journal->j_lock);
1320 /* Called by the mount code to queue the last part of
1321 * recovery for its own and offline slot(s). */
1322 void ocfs2_complete_mount_recovery(struct ocfs2_super *osb)
1324 struct ocfs2_journal *journal = osb->journal;
1326 if (ocfs2_is_hard_readonly(osb))
1329 /* No need to queue up our truncate_log as regular cleanup will catch that. */
1331 ocfs2_queue_recovery_completion(journal, osb->slot_num,
1332 osb->local_alloc_copy, NULL, NULL,
1333 ORPHAN_NEED_TRUNCATE);
1334 ocfs2_schedule_truncate_log_flush(osb, 0);
1336 osb->local_alloc_copy = NULL;
1338 /* queue orphan recovery for all offline slots */
1339 ocfs2_replay_map_set_state(osb, REPLAY_NEEDED);
1340 ocfs2_queue_replay_slots(osb, ORPHAN_NEED_TRUNCATE);
1341 ocfs2_free_replay_slots(osb);
1344 void ocfs2_complete_quota_recovery(struct ocfs2_super *osb)
1346 if (osb->quota_rec) {
1347 ocfs2_queue_recovery_completion(osb->journal,
1352 ORPHAN_NEED_TRUNCATE);
1353 osb->quota_rec = NULL;
1357 static int __ocfs2_recovery_thread(void *arg)
1359 int status, node_num, slot_num;
1360 struct ocfs2_super *osb = arg;
1361 struct ocfs2_recovery_map *rm = osb->recovery_map;
1362 int *rm_quota = NULL;
1363 int rm_quota_used = 0, i;
1364 struct ocfs2_quota_recovery *qrec;
1366 /* Whether quota is supported. */
1367 int quota_enabled = OCFS2_HAS_RO_COMPAT_FEATURE(osb->sb,
1368 OCFS2_FEATURE_RO_COMPAT_USRQUOTA)
1369 || OCFS2_HAS_RO_COMPAT_FEATURE(osb->sb,
1370 OCFS2_FEATURE_RO_COMPAT_GRPQUOTA);
1372 status = ocfs2_wait_on_mount(osb);
1377 if (quota_enabled) {
1378 rm_quota = kcalloc(osb->max_slots, sizeof(int), GFP_NOFS);
1385 status = ocfs2_super_lock(osb, 1);
1391 status = ocfs2_compute_replay_slots(osb);
1395 /* queue recovery for our own slot */
1396 ocfs2_queue_recovery_completion(osb->journal, osb->slot_num, NULL,
1397 NULL, NULL, ORPHAN_NO_NEED_TRUNCATE);
1399 spin_lock(&osb->osb_lock);
1400 while (rm->rm_used) {
1401 /* It's always safe to remove entry zero, as we won't
1402 * clear it until ocfs2_recover_node() has succeeded. */
1403 node_num = rm->rm_entries[0];
1404 spin_unlock(&osb->osb_lock);
1405 slot_num = ocfs2_node_num_to_slot(osb, node_num);
1406 trace_ocfs2_recovery_thread_node(node_num, slot_num);
1407 if (slot_num == -ENOENT) {
1412 /* It is a bit subtle with quota recovery. We cannot do it
1413 * immediately because we have to obtain cluster locks from
1414 * quota files and we also don't want to just skip it because
1415 * then quota usage would be out of sync until some node takes
1416 * the slot. So we remember which nodes need quota recovery
1417 * and when everything else is done, we recover quotas. */
1418 if (quota_enabled) {
1419 for (i = 0; i < rm_quota_used
1420 && rm_quota[i] != slot_num; i++)
1423 if (i == rm_quota_used)
1424 rm_quota[rm_quota_used++] = slot_num;
1427 status = ocfs2_recover_node(osb, node_num, slot_num);
1430 ocfs2_recovery_map_clear(osb, node_num);
1433 "Error %d recovering node %d on device (%u,%u)!\n",
1435 MAJOR(osb->sb->s_dev), MINOR(osb->sb->s_dev));
1436 mlog(ML_ERROR, "Volume requires unmount.\n");
1439 spin_lock(&osb->osb_lock);
1441 spin_unlock(&osb->osb_lock);
1442 trace_ocfs2_recovery_thread_end(status);
1444 /* Refresh all journal recovery generations from disk */
1445 status = ocfs2_check_journals_nolocks(osb);
1446 status = (status == -EROFS) ? 0 : status;
1450 /* Now it is the right time to recover quotas... We have to do this under
1451 * the superblock lock so that no one can start using the slot (and crash)
1452 * before we recover it. */
1453 if (quota_enabled) {
1454 for (i = 0; i < rm_quota_used; i++) {
1455 qrec = ocfs2_begin_quota_recovery(osb, rm_quota[i]);
1457 status = PTR_ERR(qrec);
1461 ocfs2_queue_recovery_completion(osb->journal,
1464 ORPHAN_NEED_TRUNCATE);
1468 ocfs2_super_unlock(osb, 1);
1470 /* queue recovery for offline slots */
1471 ocfs2_queue_replay_slots(osb, ORPHAN_NEED_TRUNCATE);
1474 mutex_lock(&osb->recovery_lock);
1475 if (!status && !ocfs2_recovery_completed(osb)) {
1476 mutex_unlock(&osb->recovery_lock);
1480 ocfs2_free_replay_slots(osb);
1481 osb->recovery_thread_task = NULL;
1482 mb(); /* sync with ocfs2_recovery_thread_running */
1483 wake_up(&osb->recovery_event);
1485 mutex_unlock(&osb->recovery_lock);
1490 /* no one is calling kthread_stop() for us so the kthread() API
1491 * requires that we call do_exit(). And it isn't exported, but
1492 * complete_and_exit() seems to be a minimal wrapper around it. */
1493 complete_and_exit(NULL, status);
1496 void ocfs2_recovery_thread(struct ocfs2_super *osb, int node_num)
1498 mutex_lock(&osb->recovery_lock);
1500 trace_ocfs2_recovery_thread(node_num, osb->node_num,
1501 osb->disable_recovery, osb->recovery_thread_task,
1502 osb->disable_recovery ?
1503 -1 : ocfs2_recovery_map_set(osb, node_num));
1505 if (osb->disable_recovery)
1508 if (osb->recovery_thread_task)
1511 osb->recovery_thread_task = kthread_run(__ocfs2_recovery_thread, osb,
1512 "ocfs2rec-%s", osb->uuid_str);
1513 if (IS_ERR(osb->recovery_thread_task)) {
1514 mlog_errno((int)PTR_ERR(osb->recovery_thread_task));
1515 osb->recovery_thread_task = NULL;
1519 mutex_unlock(&osb->recovery_lock);
1520 wake_up(&osb->recovery_event);
1523 static int ocfs2_read_journal_inode(struct ocfs2_super *osb,
1525 struct buffer_head **bh,
1526 struct inode **ret_inode)
1528 int status = -EACCES;
1529 struct inode *inode = NULL;
1531 BUG_ON(slot_num >= osb->max_slots);
1533 inode = ocfs2_get_system_file_inode(osb, JOURNAL_SYSTEM_INODE,
1535 if (!inode || is_bad_inode(inode)) {
1539 SET_INODE_JOURNAL(inode);
1541 status = ocfs2_read_inode_block_full(inode, bh, OCFS2_BH_IGNORE_CACHE);
1551 if (status || !ret_inode)
1559 /* Does the actual journal replay and marks the journal inode as
1560 * clean. Will only replay if the journal inode is marked dirty. */
1561 static int ocfs2_replay_journal(struct ocfs2_super *osb,
1568 struct inode *inode = NULL;
1569 struct ocfs2_dinode *fe;
1570 journal_t *journal = NULL;
1571 struct buffer_head *bh = NULL;
1574 status = ocfs2_read_journal_inode(osb, slot_num, &bh, &inode);
1580 fe = (struct ocfs2_dinode *)bh->b_data;
1581 slot_reco_gen = ocfs2_get_recovery_generation(fe);
1586 * As the fs recovery is asynchronous, there is a small chance that
1587 * another node mounted (and recovered) the slot before the recovery
1588 * thread could get the lock. To handle that, we dirty read the journal
1589 * inode for that slot to get the recovery generation. If it is
1590 * different than what we expected, the slot has been recovered.
1591 * If not, it needs recovery.
1593 if (osb->slot_recovery_generations[slot_num] != slot_reco_gen) {
1594 trace_ocfs2_replay_journal_recovered(slot_num,
1595 osb->slot_recovery_generations[slot_num], slot_reco_gen);
1596 osb->slot_recovery_generations[slot_num] = slot_reco_gen;
1601 /* Continue with recovery as the journal has not yet been recovered */
1603 status = ocfs2_inode_lock_full(inode, &bh, 1, OCFS2_META_LOCK_RECOVERY);
1605 trace_ocfs2_replay_journal_lock_err(status);
1606 if (status != -ERESTARTSYS)
1607 mlog(ML_ERROR, "Could not lock journal!\n");
1612 fe = (struct ocfs2_dinode *) bh->b_data;
1614 flags = le32_to_cpu(fe->id1.journal1.ij_flags);
1615 slot_reco_gen = ocfs2_get_recovery_generation(fe);
1617 if (!(flags & OCFS2_JOURNAL_DIRTY_FL)) {
1618 trace_ocfs2_replay_journal_skip(node_num);
1619 /* Refresh recovery generation for the slot */
1620 osb->slot_recovery_generations[slot_num] = slot_reco_gen;
1624 /* we need to run complete recovery for offline orphan slots */
1625 ocfs2_replay_map_set_state(osb, REPLAY_NEEDED);
1627 printk(KERN_NOTICE "ocfs2: Begin replay journal (node %d, slot %d) on "\
1628 "device (%u,%u)\n", node_num, slot_num, MAJOR(osb->sb->s_dev),
1629 MINOR(osb->sb->s_dev));
1631 OCFS2_I(inode)->ip_clusters = le32_to_cpu(fe->i_clusters);
1633 status = ocfs2_force_read_journal(inode);
1639 journal = jbd2_journal_init_inode(inode);
1640 if (journal == NULL) {
1641 mlog(ML_ERROR, "Linux journal layer error\n");
1646 status = jbd2_journal_load(journal);
1651 jbd2_journal_destroy(journal);
1655 ocfs2_clear_journal_error(osb->sb, journal, slot_num);
1657 /* wipe the journal */
1658 jbd2_journal_lock_updates(journal);
1659 status = jbd2_journal_flush(journal);
1660 jbd2_journal_unlock_updates(journal);
1664 /* This will mark the node clean */
1665 flags = le32_to_cpu(fe->id1.journal1.ij_flags);
1666 flags &= ~OCFS2_JOURNAL_DIRTY_FL;
1667 fe->id1.journal1.ij_flags = cpu_to_le32(flags);
1669 /* Increment recovery generation to indicate successful recovery */
1670 ocfs2_bump_recovery_generation(fe);
1671 osb->slot_recovery_generations[slot_num] =
1672 ocfs2_get_recovery_generation(fe);
1674 ocfs2_compute_meta_ecc(osb->sb, bh->b_data, &fe->i_check);
1675 status = ocfs2_write_block(osb, bh, INODE_CACHE(inode));
1682 jbd2_journal_destroy(journal);
1684 printk(KERN_NOTICE "ocfs2: End replay journal (node %d, slot %d) on "\
1685 "device (%u,%u)\n", node_num, slot_num, MAJOR(osb->sb->s_dev),
1686 MINOR(osb->sb->s_dev));
1688 /* drop the lock on this node's journal */
1690 ocfs2_inode_unlock(inode, 1);
1699 * Do the most important parts of node recovery:
1700 * - Replay its journal
1701 * - Stamp a clean local allocator file
1702 * - Stamp a clean truncate log
1703 * - Mark the node clean
1705 * If this function completes without error, a node in OCFS2 can be
1706 * said to have been safely recovered. As a result, failure during the
1707 * second part of a node's recovery process (local alloc recovery) is
1708 * far less concerning.
1710 static int ocfs2_recover_node(struct ocfs2_super *osb,
1711 int node_num, int slot_num)
1714 struct ocfs2_dinode *la_copy = NULL;
1715 struct ocfs2_dinode *tl_copy = NULL;
1717 trace_ocfs2_recover_node(node_num, slot_num, osb->node_num);
1719 /* Should not ever be called to recover ourselves -- in that
1720 * case we should've called ocfs2_journal_load instead. */
1721 BUG_ON(osb->node_num == node_num);
1723 status = ocfs2_replay_journal(osb, node_num, slot_num);
1725 if (status == -EBUSY) {
1726 trace_ocfs2_recover_node_skip(slot_num, node_num);
1734 /* Stamp a clean local alloc file AFTER recovering the journal... */
1735 status = ocfs2_begin_local_alloc_recovery(osb, slot_num, &la_copy);
1741 /* An error from begin_truncate_log_recovery is not
1742 * serious enough to warrant halting the rest of
1743 * recovery. */
1744 status = ocfs2_begin_truncate_log_recovery(osb, slot_num, &tl_copy);
1748 /* Likewise, this would be a strange but ultimately not so
1749 * harmful place to get an error... */
1750 status = ocfs2_clear_slot(osb, slot_num);
1754 /* This will kfree the memory pointed to by la_copy and tl_copy */
1755 ocfs2_queue_recovery_completion(osb->journal, slot_num, la_copy,
1756 tl_copy, NULL, ORPHAN_NEED_TRUNCATE);
1764 /* Test node liveness by trylocking its journal. If we get the lock,
1765 * we drop it here. Return 0 if we got the lock, -EAGAIN if the node is
1766 * still alive (we couldn't get the lock) and < 0 on error. */
1767 static int ocfs2_trylock_journal(struct ocfs2_super *osb,
1771 struct inode *inode = NULL;
1773 inode = ocfs2_get_system_file_inode(osb, JOURNAL_SYSTEM_INODE,
1775 if (inode == NULL) {
1776 mlog(ML_ERROR, "access error\n");
1780 if (is_bad_inode(inode)) {
1781 mlog(ML_ERROR, "access error (bad inode)\n");
1787 SET_INODE_JOURNAL(inode);
1789 flags = OCFS2_META_LOCK_RECOVERY | OCFS2_META_LOCK_NOQUEUE;
1790 status = ocfs2_inode_lock_full(inode, NULL, 1, flags);
1792 if (status != -EAGAIN)
1797 ocfs2_inode_unlock(inode, 1);
1804 /* Call this underneath ocfs2_super_lock. It also assumes that the
1805 * slot info struct has been updated from disk. */
1806 int ocfs2_mark_dead_nodes(struct ocfs2_super *osb)
1808 unsigned int node_num;
1811 struct buffer_head *bh = NULL;
1812 struct ocfs2_dinode *di;
1814 /* This is called with the super block cluster lock, so we
1815 * know that the slot map can't change underneath us. */
1817 for (i = 0; i < osb->max_slots; i++) {
1818 /* Read journal inode to get the recovery generation */
1819 status = ocfs2_read_journal_inode(osb, i, &bh, NULL);
1824 di = (struct ocfs2_dinode *)bh->b_data;
1825 gen = ocfs2_get_recovery_generation(di);
1829 spin_lock(&osb->osb_lock);
1830 osb->slot_recovery_generations[i] = gen;
1832 trace_ocfs2_mark_dead_nodes(i,
1833 osb->slot_recovery_generations[i]);
1835 if (i == osb->slot_num) {
1836 spin_unlock(&osb->osb_lock);
1840 status = ocfs2_slot_to_node_num_locked(osb, i, &node_num);
1841 if (status == -ENOENT) {
1842 spin_unlock(&osb->osb_lock);
1846 if (__ocfs2_recovery_map_test(osb, node_num)) {
1847 spin_unlock(&osb->osb_lock);
1850 spin_unlock(&osb->osb_lock);
1852 /* Ok, we have a slot occupied by another node which
1853 * is not in the recovery map. We trylock its journal
1854 * file here to test if it's alive. */
1855 status = ocfs2_trylock_journal(osb, i);
1857 /* Since we're called from mount, we know that
1858 * the recovery thread can't race us on
1859 * setting / checking the recovery bits. */
1860 ocfs2_recovery_thread(osb, node_num);
1861 } else if ((status < 0) && (status != -EAGAIN)) {
1873 * The scan timer should fire every ORPHAN_SCAN_SCHEDULE_TIMEOUT milliseconds.
1874 * Add some randomness (up to five seconds) to the timeout to minimize multiple
1875 * nodes firing the timer at the same time. */
1877 static inline unsigned long ocfs2_orphan_scan_timeout(void)
1881 get_random_bytes(&time, sizeof(time));
1882 time = ORPHAN_SCAN_SCHEDULE_TIMEOUT + (time % 5000);
1883 return msecs_to_jiffies(time);
1887 * ocfs2_queue_orphan_scan calls ocfs2_queue_recovery_completion for
1888 * every slot, queuing a recovery of the slot on the ocfs2_wq thread. This
1889 * is done to catch any orphans that are left over in orphan directories.
1891 * It scans all slots, even ones that are in use. It does so to handle the
1892 * case described below:
1894 * Node 1 has an inode it was using. The dentry went away due to memory
1895 * pressure. Node 1 closes the inode, but it's on the free list. The node
1896 * has the open lock.
1897 * Node 2 unlinks the inode. It grabs the dentry lock to notify others,
1898 * but node 1 has no dentry and doesn't get the message. It trylocks the
1899 * open lock, sees that another node has a PR, and does nothing.
1900 * Later node 2 runs its orphan dir. It igets the inode, trylocks the
1901 * open lock, sees the PR still, and does nothing.
1902 * Basically, we have to trigger an orphan iput on node 1. The only way
1903 * for this to happen is if node 1 runs node 2's orphan dir.
1905 * ocfs2_queue_orphan_scan gets called every ORPHAN_SCAN_SCHEDULE_TIMEOUT
1906 * milliseconds. It gets an EX lock on os_lockres and checks the sequence number
1907 * stored in LVB. If the sequence number has changed, it means some other
1908 * node has done the scan. This node skips the scan and tracks the
1909 * sequence number. If the sequence number didn't change, it means a scan
1910 * hasn't happened. The node queues a scan and increments the
1911 * sequence number in the LVB.
1913 static void ocfs2_queue_orphan_scan(struct ocfs2_super *osb)
1915 struct ocfs2_orphan_scan *os;
1919 os = &osb->osb_orphan_scan;
1921 if (atomic_read(&os->os_state) == ORPHAN_SCAN_INACTIVE)
1924 trace_ocfs2_queue_orphan_scan_begin(os->os_count, os->os_seqno,
1925 atomic_read(&os->os_state));
1927 status = ocfs2_orphan_scan_lock(osb, &seqno);
1929 if (status != -EAGAIN)
1934 /* Do not queue the tasks if the volume is being unmounted */
1935 if (atomic_read(&os->os_state) == ORPHAN_SCAN_INACTIVE)
1938 if (os->os_seqno != seqno) {
1939 os->os_seqno = seqno;
1943 for (i = 0; i < osb->max_slots; i++)
1944 ocfs2_queue_recovery_completion(osb->journal, i, NULL, NULL,
1945 NULL, ORPHAN_NO_NEED_TRUNCATE);
1947 * We queued a recovery on the orphan slots, increment the sequence
1948 * number and update the LVB so other nodes will skip the scan for a while */
1952 os->os_scantime = ktime_get_seconds();
1954 ocfs2_orphan_scan_unlock(osb, seqno);
1956 trace_ocfs2_queue_orphan_scan_end(os->os_count, os->os_seqno,
1957 atomic_read(&os->os_state));
1961 /* Worker task that gets fired every ORPHAN_SCAN_SCHEDULE_TIMEOUT milliseconds */
1962 static void ocfs2_orphan_scan_work(struct work_struct *work)
1964 struct ocfs2_orphan_scan *os;
1965 struct ocfs2_super *osb;
1967 os = container_of(work, struct ocfs2_orphan_scan,
1968 os_orphan_scan_work.work);
1971 mutex_lock(&os->os_lock);
1972 ocfs2_queue_orphan_scan(osb);
1973 if (atomic_read(&os->os_state) == ORPHAN_SCAN_ACTIVE)
1974 queue_delayed_work(osb->ocfs2_wq, &os->os_orphan_scan_work,
1975 ocfs2_orphan_scan_timeout());
1976 mutex_unlock(&os->os_lock);
1979 void ocfs2_orphan_scan_stop(struct ocfs2_super *osb)
1981 struct ocfs2_orphan_scan *os;
1983 os = &osb->osb_orphan_scan;
1984 if (atomic_read(&os->os_state) == ORPHAN_SCAN_ACTIVE) {
1985 atomic_set(&os->os_state, ORPHAN_SCAN_INACTIVE);
1986 mutex_lock(&os->os_lock);
1987 cancel_delayed_work(&os->os_orphan_scan_work);
1988 mutex_unlock(&os->os_lock);
1992 void ocfs2_orphan_scan_init(struct ocfs2_super *osb)
1994 struct ocfs2_orphan_scan *os;
1996 os = &osb->osb_orphan_scan;
2000 mutex_init(&os->os_lock);
2001 INIT_DELAYED_WORK(&os->os_orphan_scan_work, ocfs2_orphan_scan_work);
2004 void ocfs2_orphan_scan_start(struct ocfs2_super *osb)
2006 struct ocfs2_orphan_scan *os;
2008 os = &osb->osb_orphan_scan;
2009 os->os_scantime = ktime_get_seconds();
2010 if (ocfs2_is_hard_readonly(osb) || ocfs2_mount_local(osb))
2011 atomic_set(&os->os_state, ORPHAN_SCAN_INACTIVE);
2013 atomic_set(&os->os_state, ORPHAN_SCAN_ACTIVE);
2014 queue_delayed_work(osb->ocfs2_wq, &os->os_orphan_scan_work,
2015 ocfs2_orphan_scan_timeout());
2019 struct ocfs2_orphan_filldir_priv {
2020 struct dir_context ctx;
2022 struct ocfs2_super *osb;
2023 enum ocfs2_orphan_reco_type orphan_reco_type;
2026 static int ocfs2_orphan_filldir(struct dir_context *ctx, const char *name,
2027 int name_len, loff_t pos, u64 ino,
2030 struct ocfs2_orphan_filldir_priv *p =
2031 container_of(ctx, struct ocfs2_orphan_filldir_priv, ctx);
2034 if (name_len == 1 && !strncmp(".", name, 1))
2036 if (name_len == 2 && !strncmp("..", name, 2))
2039 /* do not include dio entry in case of orphan scan */
2040 if ((p->orphan_reco_type == ORPHAN_NO_NEED_TRUNCATE) &&
2041 (!strncmp(name, OCFS2_DIO_ORPHAN_PREFIX,
2042 OCFS2_DIO_ORPHAN_PREFIX_LEN)))
2045 /* Skip bad inodes so that recovery can continue */
2046 iter = ocfs2_iget(p->osb, ino,
2047 OCFS2_FI_FLAG_ORPHAN_RECOVERY, 0);
2051 if (!strncmp(name, OCFS2_DIO_ORPHAN_PREFIX,
2052 OCFS2_DIO_ORPHAN_PREFIX_LEN))
2053 OCFS2_I(iter)->ip_flags |= OCFS2_INODE_DIO_ORPHAN_ENTRY;
2055 /* Skip inodes which are already added to the recovery list, since dio may
2056 * happen concurrently with unlink/rename */
2057 if (OCFS2_I(iter)->ip_next_orphan) {
2062 trace_ocfs2_orphan_filldir((unsigned long long)OCFS2_I(iter)->ip_blkno);
2063 /* No locking is required for the next_orphan queue as there
2064 * is only ever a single process doing orphan recovery. */
2065 OCFS2_I(iter)->ip_next_orphan = p->head;
2071 static int ocfs2_queue_orphans(struct ocfs2_super *osb,
2073 struct inode **head,
2074 enum ocfs2_orphan_reco_type orphan_reco_type)
2077 struct inode *orphan_dir_inode = NULL;
2078 struct ocfs2_orphan_filldir_priv priv = {
2079 .ctx.actor = ocfs2_orphan_filldir,
2082 .orphan_reco_type = orphan_reco_type
2085 orphan_dir_inode = ocfs2_get_system_file_inode(osb,
2086 ORPHAN_DIR_SYSTEM_INODE,
2088 if (!orphan_dir_inode) {
2094 inode_lock(orphan_dir_inode);
2095 status = ocfs2_inode_lock(orphan_dir_inode, NULL, 0);
2101 status = ocfs2_dir_foreach(orphan_dir_inode, &priv.ctx);
2110 ocfs2_inode_unlock(orphan_dir_inode, 0);
2112 inode_unlock(orphan_dir_inode);
2113 iput(orphan_dir_inode);
2117 static int ocfs2_orphan_recovery_can_continue(struct ocfs2_super *osb,
2122 spin_lock(&osb->osb_lock);
2123 ret = !osb->osb_orphan_wipes[slot];
2124 spin_unlock(&osb->osb_lock);
2128 static void ocfs2_mark_recovering_orphan_dir(struct ocfs2_super *osb,
2131 spin_lock(&osb->osb_lock);
2132 /* Mark ourselves such that new processes in delete_inode()
2133 * know to quit early. */
2134 ocfs2_node_map_set_bit(osb, &osb->osb_recovering_orphan_dirs, slot);
2135 while (osb->osb_orphan_wipes[slot]) {
2136 /* If any processes are already in the middle of an
2137 * orphan wipe on this dir, then we need to wait for
2138 * them. */
2139 spin_unlock(&osb->osb_lock);
2140 wait_event_interruptible(osb->osb_wipe_event,
2141 ocfs2_orphan_recovery_can_continue(osb, slot));
2142 spin_lock(&osb->osb_lock);
2144 spin_unlock(&osb->osb_lock);
2147 static void ocfs2_clear_recovering_orphan_dir(struct ocfs2_super *osb,
2150 ocfs2_node_map_clear_bit(osb, &osb->osb_recovering_orphan_dirs, slot);
2154 * Orphan recovery. Each mounted node has its own orphan dir which we
2155 * must run during recovery. Our strategy here is to build a list of
2156 * the inodes in the orphan dir and iget/iput them. The VFS does
2157 * (most) of the rest of the work.
2159 * Orphan recovery can happen at any time, not just mount so we have a
2160 * couple of extra considerations.
2162 * - We grab as many inodes as we can under the orphan dir lock -
2163 * doing iget() outside the orphan dir risks getting a reference on
2164 * an invalid inode.
2165 * - We must be sure not to deadlock with other processes on the
2166 * system wanting to run delete_inode(). This can happen when they go
2167 * to lock the orphan dir and the orphan recovery process attempts to
2168 * iget() inside the orphan dir lock. This can be avoided by
2169 * advertising our state to ocfs2_delete_inode().
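*
* The advertising is done by ocfs2_mark_recovering_orphan_dir() /
* ocfs2_clear_recovering_orphan_dir() above, which set and clear a bit in
* osb->osb_recovering_orphan_dirs that the delete path checks before it
* starts an orphan wipe.
*/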
2171 static int ocfs2_recover_orphans(struct ocfs2_super *osb,
2173 enum ocfs2_orphan_reco_type orphan_reco_type)
2176 struct inode *inode = NULL;
2178 struct ocfs2_inode_info *oi;
2179 struct buffer_head *di_bh = NULL;
2180 struct ocfs2_dinode *di = NULL;
2182 trace_ocfs2_recover_orphans(slot);
2184 ocfs2_mark_recovering_orphan_dir(osb, slot);
2185 ret = ocfs2_queue_orphans(osb, slot, &inode, orphan_reco_type);
2186 ocfs2_clear_recovering_orphan_dir(osb, slot);
2188 /* Error here should be noted, but we want to continue with as
2189 * many queued inodes as we've got. */
2194 oi = OCFS2_I(inode);
2195 trace_ocfs2_recover_orphans_iput(
2196 (unsigned long long)oi->ip_blkno);
2198 iter = oi->ip_next_orphan;
2199 oi->ip_next_orphan = NULL;
2201 if (oi->ip_flags & OCFS2_INODE_DIO_ORPHAN_ENTRY) {
2203 ret = ocfs2_rw_lock(inode, 1);
2209 * We need to take and drop the inode lock to
2210 * force the inode to be re-read from disk.
2212 ret = ocfs2_inode_lock(inode, &di_bh, 1);
2218 di = (struct ocfs2_dinode *)di_bh->b_data;
2220 if (di->i_flags & cpu_to_le32(OCFS2_DIO_ORPHANED_FL)) {
2221 ret = ocfs2_truncate_file(inode, di_bh,
2222 i_size_read(inode));
2229 ret = ocfs2_del_inode_from_orphan(osb, inode,
2235 ocfs2_inode_unlock(inode, 1);
2239 ocfs2_rw_unlock(inode, 1);
2241 inode_unlock(inode);
2243 /* clear dio flag in ocfs2_inode_info */
2244 oi->ip_flags &= ~OCFS2_INODE_DIO_ORPHAN_ENTRY;
2246 spin_lock(&oi->ip_lock);
2247 /* Set the proper information to get us going into
2248 * ocfs2_delete_inode. */
2249 oi->ip_flags |= OCFS2_INODE_MAYBE_ORPHANED;
2250 spin_unlock(&oi->ip_lock);
2260 static int __ocfs2_wait_on_mount(struct ocfs2_super *osb, int quota)
2262 /* This check is good because ocfs2 will wait on our recovery
2263 * thread before changing it to something other than MOUNTED
2264 * or DISABLED. */
2265 wait_event(osb->osb_mount_event,
2266 (!quota && atomic_read(&osb->vol_state) == VOLUME_MOUNTED) ||
2267 atomic_read(&osb->vol_state) == VOLUME_MOUNTED_QUOTAS ||
2268 atomic_read(&osb->vol_state) == VOLUME_DISABLED);
2270 /* If there's an error on mount, then we may never get to the
2271 * MOUNTED flag, but this is set right before
2272 * dismount_volume() so we can trust it. */
2273 if (atomic_read(&osb->vol_state) == VOLUME_DISABLED) {
2274 trace_ocfs2_wait_on_mount(VOLUME_DISABLED);
2275 mlog(0, "mount error, exiting!\n");
2282 static int ocfs2_commit_thread(void *arg)
2285 struct ocfs2_super *osb = arg;
2286 struct ocfs2_journal *journal = osb->journal;
2288 /* we can trust j_num_trans here because _should_stop() is only set in
2289 * shutdown and nobody other than ourselves should be able to start
2290 * transactions. Committing on shutdown might take a few iterations
2291 * as final transactions put deleted inodes on the list */
2292 while (!(kthread_should_stop() &&
2293 atomic_read(&journal->j_num_trans) == 0)) {
2295 wait_event_interruptible(osb->checkpoint_event,
2296 atomic_read(&journal->j_num_trans)
2297 || kthread_should_stop());
2299 status = ocfs2_commit_cache(osb);
2301 static unsigned long abort_warn_time;
2303 /* Warn about this once per minute */
2304 if (printk_timed_ratelimit(&abort_warn_time, 60*HZ))
2305 mlog(ML_ERROR, "status = %d, journal is "
2306 "already aborted.\n", status);
2308 * After ocfs2_commit_cache() fails, j_num_trans has a
2309 * non-zero value. Sleep here to avoid a busy-wait
2310 * loop. */
2312 msleep_interruptible(1000);
2315 if (kthread_should_stop() && atomic_read(&journal->j_num_trans)){
2317 "commit_thread: %u transactions pending on "
2319 atomic_read(&journal->j_num_trans));
2326 /* Reads all the journal inodes without taking any cluster locks. Used
2327 * for hard readonly access to determine whether any journal requires
2328 * recovery. Also used to refresh the recovery generation numbers after
2329 * a journal has been recovered by another node.
2331 int ocfs2_check_journals_nolocks(struct ocfs2_super *osb)
2335 struct buffer_head *di_bh = NULL;
2336 struct ocfs2_dinode *di;
2337 int journal_dirty = 0;
2339 for(slot = 0; slot < osb->max_slots; slot++) {
2340 ret = ocfs2_read_journal_inode(osb, slot, &di_bh, NULL);
2346 di = (struct ocfs2_dinode *) di_bh->b_data;
2348 osb->slot_recovery_generations[slot] =
2349 ocfs2_get_recovery_generation(di);
2351 if (le32_to_cpu(di->id1.journal1.ij_flags) &
2352 OCFS2_JOURNAL_DIRTY_FL)