// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/statfs.h>
#include <linux/seq_file.h>
#include <linux/mount.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/gfs2_ondisk.h>
#include <linux/crc32.h>
#include <linux/time.h>
#include <linux/wait.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/kernel.h>
/**
 * gfs2_jindex_free - Clear all the journal index information
 * @sdp: The GFS2 superblock
 */

void gfs2_jindex_free(struct gfs2_sbd *sdp)
{
	struct list_head list;
	struct gfs2_jdesc *jd;

	spin_lock(&sdp->sd_jindex_spin);
	list_add(&list, &sdp->sd_jindex_list);
	list_del_init(&sdp->sd_jindex_list);
	spin_unlock(&sdp->sd_jindex_spin);

	while (!list_empty(&list)) {
		jd = list_entry(list.next, struct gfs2_jdesc, jd_list);
		gfs2_free_journal_extents(jd);
		list_del(&jd->jd_list);
		kfree(jd);
	}
}
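
/* Look up a journal descriptor by journal id; caller holds sd_jindex_spin. */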
static struct gfs2_jdesc *jdesc_find_i(struct list_head *head, unsigned int jid)
{
	struct gfs2_jdesc *jd;

	list_for_each_entry(jd, head, jd_list) {
		if (jd->jd_jid == jid)
			return jd;
	}
	return NULL;
}

struct gfs2_jdesc *gfs2_jdesc_find(struct gfs2_sbd *sdp, unsigned int jid)
{
	struct gfs2_jdesc *jd;

	spin_lock(&sdp->sd_jindex_spin);
	jd = jdesc_find_i(&sdp->sd_jindex_list, jid);
	spin_unlock(&sdp->sd_jindex_spin);

	return jd;
}
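
/*
 * Sanity-check a journal: verify that its size is within bounds and that the
 * journal file is fully allocated, then record its length in blocks.
 */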
int gfs2_jdesc_check(struct gfs2_jdesc *jd)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	u64 size = i_size_read(jd->jd_inode);

	if (gfs2_check_internal_file_size(jd->jd_inode, 8 << 20, BIT(30)))
		return -EIO;

	jd->jd_blocks = size >> sdp->sd_sb.sb_bsize_shift;

	if (gfs2_write_alloc_required(ip, 0, size)) {
		gfs2_consist_inode(ip);
		return -EIO;
	}

	return 0;
}
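
/* Start the logd and quotad kernel threads for this filesystem. */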
static int init_threads(struct gfs2_sbd *sdp)
{
	struct task_struct *p;
	int error = 0;

	p = kthread_run(gfs2_logd, sdp, "gfs2_logd");
	if (IS_ERR(p)) {
		error = PTR_ERR(p);
		fs_err(sdp, "can't start logd thread: %d\n", error);
		return error;
	}
	sdp->sd_logd_process = p;

	p = kthread_run(gfs2_quotad, sdp, "gfs2_quotad");
	if (IS_ERR(p)) {
		error = PTR_ERR(p);
		fs_err(sdp, "can't start quotad thread: %d\n", error);
		goto fail;
	}
	sdp->sd_quotad_process = p;
	return 0;

fail:
	kthread_stop(sdp->sd_logd_process);
	sdp->sd_logd_process = NULL;
	return error;
}
/**
 * gfs2_make_fs_rw - Turn a Read-Only FS into a Read-Write one
 * @sdp: the filesystem
 *
 * Returns: errno
 */

int gfs2_make_fs_rw(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
	struct gfs2_glock *j_gl = ip->i_gl;
	struct gfs2_holder freeze_gh;
	struct gfs2_log_header_host head;
	int error;

	error = init_threads(sdp);
	if (error)
		return error;

	error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED, 0,
				   &freeze_gh);
	if (error)
		goto fail_threads;

	j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);

	error = gfs2_find_jhead(sdp->sd_jdesc, &head, false);
	if (error)
		goto fail;

	if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) {
		gfs2_consist(sdp);
		error = -EIO;
		goto fail;
	}

	/* Initialize the head of the log */
	sdp->sd_log_sequence = head.lh_sequence + 1;
	gfs2_log_pointers_init(sdp, head.lh_blkno);

	error = gfs2_quota_init(sdp);
	if (error)
		goto fail;

	set_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);

	gfs2_glock_dq_uninit(&freeze_gh);

	return 0;

fail:
	freeze_gh.gh_flags |= GL_NOCACHE;
	gfs2_glock_dq_uninit(&freeze_gh);
fail_threads:
	if (sdp->sd_quotad_process)
		kthread_stop(sdp->sd_quotad_process);
	sdp->sd_quotad_process = NULL;
	if (sdp->sd_logd_process)
		kthread_stop(sdp->sd_logd_process);
	sdp->sd_logd_process = NULL;
	return error;
}
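
/*
 * Convert a statfs change block between its on-disk (big-endian) and
 * in-core representations.
 */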
void gfs2_statfs_change_in(struct gfs2_statfs_change_host *sc, const void *buf)
{
	const struct gfs2_statfs_change *str = buf;

	sc->sc_total = be64_to_cpu(str->sc_total);
	sc->sc_free = be64_to_cpu(str->sc_free);
	sc->sc_dinodes = be64_to_cpu(str->sc_dinodes);
}

static void gfs2_statfs_change_out(const struct gfs2_statfs_change_host *sc, void *buf)
{
	struct gfs2_statfs_change *str = buf;

	str->sc_total = cpu_to_be64(sc->sc_total);
	str->sc_free = cpu_to_be64(sc->sc_free);
	str->sc_dinodes = cpu_to_be64(sc->sc_dinodes);
}
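
/*
 * Read the master statfs block (and, unless mounted as a spectator, this
 * node's local statfs change block) into the in-core counters at mount time.
 */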
int gfs2_statfs_init(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct buffer_head *m_bh, *l_bh;
	struct gfs2_holder gh;
	int error;

	error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE, GL_NOCACHE,
				   &gh);
	if (error)
		return error;

	error = gfs2_meta_inode_buffer(m_ip, &m_bh);
	if (error)
		goto out;

	if (sdp->sd_args.ar_spectator) {
		spin_lock(&sdp->sd_statfs_spin);
		gfs2_statfs_change_in(m_sc, m_bh->b_data +
				      sizeof(struct gfs2_dinode));
		spin_unlock(&sdp->sd_statfs_spin);
	} else {
		error = gfs2_meta_inode_buffer(l_ip, &l_bh);
		if (error)
			goto out_m_bh;

		spin_lock(&sdp->sd_statfs_spin);
		gfs2_statfs_change_in(m_sc, m_bh->b_data +
				      sizeof(struct gfs2_dinode));
		gfs2_statfs_change_in(l_sc, l_bh->b_data +
				      sizeof(struct gfs2_dinode));
		spin_unlock(&sdp->sd_statfs_spin);

		brelse(l_bh);
	}

out_m_bh:
	brelse(m_bh);
out:
	gfs2_glock_dq_uninit(&gh);
	return error;
}
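
/*
 * Apply a (total, free, dinodes) delta to this node's local statfs change
 * block, and wake up the statfs syncer if the accumulated local free-space
 * change exceeds the configured percentage of the master free count.
 */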
void gfs2_statfs_change(struct gfs2_sbd *sdp, s64 total, s64 free,
			s64 dinodes)
{
	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct buffer_head *l_bh;
	s64 x, y;
	int need_sync = 0;
	int error;

	error = gfs2_meta_inode_buffer(l_ip, &l_bh);
	if (error)
		return;

	gfs2_trans_add_meta(l_ip->i_gl, l_bh);

	spin_lock(&sdp->sd_statfs_spin);
	l_sc->sc_total += total;
	l_sc->sc_free += free;
	l_sc->sc_dinodes += dinodes;
	gfs2_statfs_change_out(l_sc, l_bh->b_data + sizeof(struct gfs2_dinode));
	if (sdp->sd_args.ar_statfs_percent) {
		x = 100 * l_sc->sc_free;
		y = m_sc->sc_free * sdp->sd_args.ar_statfs_percent;
		if (x >= y || x <= -y)
			need_sync = 1;
	}
	spin_unlock(&sdp->sd_statfs_spin);

	brelse(l_bh);
	if (need_sync)
		gfs2_wake_up_statfs(sdp);
}
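
/*
 * Fold the local statfs changes into the master statfs block and zero the
 * local change block; the caller must hold an active transaction and the
 * master statfs glock.
 */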
void update_statfs(struct gfs2_sbd *sdp, struct buffer_head *m_bh,
		   struct buffer_head *l_bh)
{
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;

	gfs2_trans_add_meta(l_ip->i_gl, l_bh);
	gfs2_trans_add_meta(m_ip->i_gl, m_bh);

	spin_lock(&sdp->sd_statfs_spin);
	m_sc->sc_total += l_sc->sc_total;
	m_sc->sc_free += l_sc->sc_free;
	m_sc->sc_dinodes += l_sc->sc_dinodes;
	memset(l_sc, 0, sizeof(*l_sc));
	memset(l_bh->b_data + sizeof(struct gfs2_dinode),
	       0, sizeof(struct gfs2_statfs_change));
	gfs2_statfs_change_out(m_sc, m_bh->b_data + sizeof(struct gfs2_dinode));
	spin_unlock(&sdp->sd_statfs_spin);
}
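
/*
 * Sync the accumulated local statfs changes into the master statfs block
 * under a transaction, then clear the force-sync flag.
 */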
int gfs2_statfs_sync(struct super_block *sb, int type)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct gfs2_holder gh;
	struct buffer_head *m_bh, *l_bh;
	int error;

	sb_start_write(sb);
	error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE, GL_NOCACHE,
				   &gh);
	if (error)
		goto out;

	error = gfs2_meta_inode_buffer(m_ip, &m_bh);
	if (error)
		goto out_unlock;

	spin_lock(&sdp->sd_statfs_spin);
	gfs2_statfs_change_in(m_sc, m_bh->b_data +
			      sizeof(struct gfs2_dinode));
	if (!l_sc->sc_total && !l_sc->sc_free && !l_sc->sc_dinodes) {
		spin_unlock(&sdp->sd_statfs_spin);
		goto out_bh;
	}
	spin_unlock(&sdp->sd_statfs_spin);

	error = gfs2_meta_inode_buffer(l_ip, &l_bh);
	if (error)
		goto out_bh;

	error = gfs2_trans_begin(sdp, 2 * RES_DINODE, 0);
	if (error)
		goto out_bh2;

	update_statfs(sdp, m_bh, l_bh);
	sdp->sd_statfs_force_sync = 0;

	gfs2_trans_end(sdp);

out_bh2:
	brelse(l_bh);
out_bh:
	brelse(m_bh);
out_unlock:
	gfs2_glock_dq_uninit(&gh);
out:
	sb_end_write(sb);
	return error;
}
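
/* Per-journal glock holder used while checking that all journals are clean. */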
struct lfcc {
	struct list_head list;
	struct gfs2_holder gh;
};
/**
 * gfs2_lock_fs_check_clean - Stop all writes to the FS and check that all
 *                            journals are clean
 * @sdp: the file system
 * @freeze_gh: glock holder for the freeze glock
 *
 * Returns: errno
 */

static int gfs2_lock_fs_check_clean(struct gfs2_sbd *sdp,
				    struct gfs2_holder *freeze_gh)
{
	struct gfs2_inode *ip;
	struct gfs2_jdesc *jd;
	struct lfcc *lfcc;
	LIST_HEAD(list);
	struct gfs2_log_header_host lh;
	int error;

	list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
		lfcc = kmalloc(sizeof(struct lfcc), GFP_KERNEL);
		if (!lfcc) {
			error = -ENOMEM;
			goto out;
		}
		ip = GFS2_I(jd->jd_inode);
		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &lfcc->gh);
		if (error) {
			kfree(lfcc);
			goto out;
		}
		list_add(&lfcc->list, &list);
	}

	error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_EXCLUSIVE,
				   GL_NOCACHE, freeze_gh);
	if (error)
		goto out;

	list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
		error = gfs2_jdesc_check(jd);
		if (error)
			break;
		error = gfs2_find_jhead(jd, &lh, false);
		if (error)
			break;
		if (!(lh.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) {
			error = -EBUSY;
			break;
		}
	}

	if (error)
		gfs2_glock_dq_uninit(freeze_gh);

out:
	while (!list_empty(&list)) {
		lfcc = list_entry(list.next, struct lfcc, list);
		list_del(&lfcc->list);
		gfs2_glock_dq_uninit(&lfcc->gh);
		kfree(lfcc);
	}
	return error;
}
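
/*
 * Serialize the in-core inode fields into an on-disk (big-endian) dinode
 * block image.
 */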
void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf)
{
	struct gfs2_dinode *str = buf;

	str->di_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
	str->di_header.mh_type = cpu_to_be32(GFS2_METATYPE_DI);
	str->di_header.mh_format = cpu_to_be32(GFS2_FORMAT_DI);
	str->di_num.no_addr = cpu_to_be64(ip->i_no_addr);
	str->di_num.no_formal_ino = cpu_to_be64(ip->i_no_formal_ino);
	str->di_mode = cpu_to_be32(ip->i_inode.i_mode);
	str->di_uid = cpu_to_be32(i_uid_read(&ip->i_inode));
	str->di_gid = cpu_to_be32(i_gid_read(&ip->i_inode));
	str->di_nlink = cpu_to_be32(ip->i_inode.i_nlink);
	str->di_size = cpu_to_be64(i_size_read(&ip->i_inode));
	str->di_blocks = cpu_to_be64(gfs2_get_inode_blocks(&ip->i_inode));
	str->di_atime = cpu_to_be64(ip->i_inode.i_atime.tv_sec);
	str->di_mtime = cpu_to_be64(ip->i_inode.i_mtime.tv_sec);
	str->di_ctime = cpu_to_be64(ip->i_inode.i_ctime.tv_sec);

	str->di_goal_meta = cpu_to_be64(ip->i_goal);
	str->di_goal_data = cpu_to_be64(ip->i_goal);
	str->di_generation = cpu_to_be64(ip->i_generation);

	str->di_flags = cpu_to_be32(ip->i_diskflags);
	str->di_height = cpu_to_be16(ip->i_height);
	str->di_payload_format = cpu_to_be32(S_ISDIR(ip->i_inode.i_mode) &&
					     !(ip->i_diskflags & GFS2_DIF_EXHASH) ?
					     GFS2_FORMAT_DE : 0);
	str->di_depth = cpu_to_be16(ip->i_depth);
	str->di_entries = cpu_to_be32(ip->i_entries);

	str->di_eattr = cpu_to_be64(ip->i_eattr);
	str->di_atime_nsec = cpu_to_be32(ip->i_inode.i_atime.tv_nsec);
	str->di_mtime_nsec = cpu_to_be32(ip->i_inode.i_mtime.tv_nsec);
	str->di_ctime_nsec = cpu_to_be32(ip->i_inode.i_ctime.tv_nsec);
}
/**
 * gfs2_write_inode - Make sure the inode is stable on the disk
 * @inode: The inode
 * @wbc: The writeback control structure
 *
 * Returns: errno
 */

static int gfs2_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct address_space *metamapping = gfs2_glock2aspace(ip->i_gl);
	struct backing_dev_info *bdi = inode_to_bdi(metamapping->host);
	int ret = 0;
	bool flush_all = (wbc->sync_mode == WB_SYNC_ALL || gfs2_is_jdata(ip));

	if (flush_all)
		gfs2_log_flush(GFS2_SB(inode), ip->i_gl,
			       GFS2_LOG_HEAD_FLUSH_NORMAL |
			       GFS2_LFC_WRITE_INODE);
	if (bdi->wb.dirty_exceeded)
		gfs2_ail1_flush(sdp, wbc);
	else
		filemap_fdatawrite(metamapping);
	if (flush_all)
		ret = filemap_fdatawait(metamapping);
	if (ret)
		mark_inode_dirty_sync(inode);
	else {
		spin_lock(&inode->i_lock);
		if (!(inode->i_flags & I_DIRTY))
			gfs2_ordered_del_inode(ip);
		spin_unlock(&inode->i_lock);
	}
	return ret;
}
/**
 * gfs2_dirty_inode - check for atime updates
 * @inode: The inode in question
 * @flags: The type of dirty
 *
 * Unfortunately it can be called under any combination of inode
 * glock and transaction lock, so we have to check carefully.
 *
 * At the moment this deals only with atime - it should be possible
 * to expand that role in future, once a review of the locking has
 * been carried out.
 */

static void gfs2_dirty_inode(struct inode *inode, int flags)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct buffer_head *bh;
	struct gfs2_holder gh;
	int need_unlock = 0;
	int need_endtrans = 0;
	int ret;

	if (!(flags & I_DIRTY_INODE))
		return;
	if (unlikely(test_bit(SDF_WITHDRAWN, &sdp->sd_flags)))
		return;
	if (!gfs2_glock_is_locked_by_me(ip->i_gl)) {
		ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
		if (ret) {
			fs_err(sdp, "dirty_inode: glock %d\n", ret);
			return;
		}
		need_unlock = 1;
	} else if (WARN_ON_ONCE(ip->i_gl->gl_state != LM_ST_EXCLUSIVE))
		return;

	if (current->journal_info == NULL) {
		ret = gfs2_trans_begin(sdp, RES_DINODE, 0);
		if (ret) {
			fs_err(sdp, "dirty_inode: gfs2_trans_begin %d\n", ret);
			goto out;
		}
		need_endtrans = 1;
	}

	ret = gfs2_meta_inode_buffer(ip, &bh);
	if (ret == 0) {
		gfs2_trans_add_meta(ip->i_gl, bh);
		gfs2_dinode_out(ip, bh->b_data);
		brelse(bh);
	}

	if (need_endtrans)
		gfs2_trans_end(sdp);
out:
	if (need_unlock)
		gfs2_glock_dq_uninit(&gh);
}
/**
 * gfs2_make_fs_ro - Turn a Read-Write FS into a Read-Only one
 * @sdp: the filesystem
 *
 * Returns: errno
 */

int gfs2_make_fs_ro(struct gfs2_sbd *sdp)
{
	struct gfs2_holder freeze_gh;
	int error;

	error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED, GL_NOCACHE,
				   &freeze_gh);
	if (error && !test_bit(SDF_WITHDRAWN, &sdp->sd_flags))
		return error;

	flush_workqueue(gfs2_delete_workqueue);
	if (sdp->sd_quotad_process)
		kthread_stop(sdp->sd_quotad_process);
	sdp->sd_quotad_process = NULL;
	if (sdp->sd_logd_process)
		kthread_stop(sdp->sd_logd_process);
	sdp->sd_logd_process = NULL;

	gfs2_quota_sync(sdp->sd_vfs, 0);
	gfs2_statfs_sync(sdp->sd_vfs, 0);

	gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_SHUTDOWN |
		       GFS2_LFC_MAKE_FS_RO);
	wait_event(sdp->sd_reserving_log_wait,
		   atomic_read(&sdp->sd_reserving_log) == 0);
	gfs2_assert_warn(sdp, atomic_read(&sdp->sd_log_blks_free) ==
			 sdp->sd_jdesc->jd_blocks);

	if (gfs2_holder_initialized(&freeze_gh))
		gfs2_glock_dq_uninit(&freeze_gh);

	gfs2_quota_cleanup(sdp);

	return error;
}
/**
 * gfs2_put_super - Unmount the filesystem
 * @sb: The VFS superblock
 */

static void gfs2_put_super(struct super_block *sb)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	int error;
	struct gfs2_jdesc *jd;

	/* No more recovery requests */
	set_bit(SDF_NORECOVERY, &sdp->sd_flags);

	/* Wait on outstanding recovery */
restart:
	spin_lock(&sdp->sd_jindex_spin);
	list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
		if (!test_bit(JDF_RECOVERY, &jd->jd_flags))
			continue;
		spin_unlock(&sdp->sd_jindex_spin);
		wait_on_bit(&jd->jd_flags, JDF_RECOVERY,
			    TASK_UNINTERRUPTIBLE);
		goto restart;
	}
	spin_unlock(&sdp->sd_jindex_spin);

	if (!sb_rdonly(sb)) {
		error = gfs2_make_fs_ro(sdp);
		if (error)
			gfs2_io_error(sdp);
	}
	/* At this point, we're through modifying the disk */

	iput(sdp->sd_jindex);
	iput(sdp->sd_statfs_inode);
	iput(sdp->sd_rindex);
	iput(sdp->sd_quota_inode);

	gfs2_glock_put(sdp->sd_rename_gl);
	gfs2_glock_put(sdp->sd_freeze_gl);

	if (!sdp->sd_args.ar_spectator) {
		gfs2_glock_dq_uninit(&sdp->sd_journal_gh);
		gfs2_glock_dq_uninit(&sdp->sd_jinode_gh);
		gfs2_glock_dq_uninit(&sdp->sd_sc_gh);
		gfs2_glock_dq_uninit(&sdp->sd_qc_gh);
		iput(sdp->sd_sc_inode);
		iput(sdp->sd_qc_inode);
	}

	gfs2_glock_dq_uninit(&sdp->sd_live_gh);
	gfs2_clear_rgrpd(sdp);
	gfs2_jindex_free(sdp);
	/* Take apart glock structures and buffer lists */
	gfs2_gl_hash_clear(sdp);
	gfs2_delete_debugfs_file(sdp);
	/* Unmount the locking protocol */
	gfs2_lm_unmount(sdp);

	/* At this point, we're through participating in the lockspace */
	gfs2_sys_fs_del(sdp);
}
/**
 * gfs2_sync_fs - sync the filesystem
 * @sb: the superblock
 * @wait: true to wait for completion
 *
 * Flushes the log to disk.
 */

static int gfs2_sync_fs(struct super_block *sb, int wait)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;

	gfs2_quota_sync(sb, -1);
	if (wait)
		gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
			       GFS2_LFC_SYNC_FS);
	return sdp->sd_log_error;
}
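
/*
 * Work item that re-takes the freeze glock in the shared state, thaws the
 * VFS superblock, and clears SDF_FS_FROZEN once the filesystem has been
 * unfrozen.
 */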
void gfs2_freeze_func(struct work_struct *work)
{
	int error;
	struct gfs2_holder freeze_gh;
	struct gfs2_sbd *sdp = container_of(work, struct gfs2_sbd, sd_freeze_work);
	struct super_block *sb = sdp->sd_vfs;

	atomic_inc(&sb->s_active);
	error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED, 0,
				   &freeze_gh);
	if (error) {
		fs_info(sdp, "GFS2: couldn't get freeze lock : %d\n", error);
		gfs2_assert_withdraw(sdp, 0);
	} else {
		atomic_set(&sdp->sd_freeze_state, SFS_UNFROZEN);
		error = thaw_super(sb);
		if (error) {
			fs_info(sdp, "GFS2: couldn't thaw filesystem: %d\n",
				error);
			gfs2_assert_withdraw(sdp, 0);
		}
		if (!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))
			freeze_gh.gh_flags |= GL_NOCACHE;
		gfs2_glock_dq_uninit(&freeze_gh);
	}
	deactivate_super(sb);
	clear_bit_unlock(SDF_FS_FROZEN, &sdp->sd_flags);
	wake_up_bit(&sdp->sd_flags, SDF_FS_FROZEN);
}
/**
 * gfs2_freeze - prevent further writes to the filesystem
 * @sb: the VFS structure for the filesystem
 *
 * Returns: errno
 */

static int gfs2_freeze(struct super_block *sb)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	int error = 0;

	mutex_lock(&sdp->sd_freeze_mutex);
	if (atomic_read(&sdp->sd_freeze_state) != SFS_UNFROZEN)
		goto out;

	if (test_bit(SDF_WITHDRAWN, &sdp->sd_flags)) {
		error = -EINVAL;
		goto out;
	}

	for (;;) {
		error = gfs2_lock_fs_check_clean(sdp, &sdp->sd_freeze_gh);
		if (!error)
			break;

		if (error == -EBUSY)
			fs_err(sdp, "waiting for recovery before freeze\n");
		else
			fs_err(sdp, "error freezing FS: %d\n", error);

		fs_err(sdp, "retrying...\n");
		msleep(1000);
	}
	set_bit(SDF_FS_FROZEN, &sdp->sd_flags);
out:
	mutex_unlock(&sdp->sd_freeze_mutex);
	return error;
}
/**
 * gfs2_unfreeze - reallow writes to the filesystem
 * @sb: the VFS structure for the filesystem
 *
 * Returns: errno
 */

static int gfs2_unfreeze(struct super_block *sb)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;

	mutex_lock(&sdp->sd_freeze_mutex);
	if (atomic_read(&sdp->sd_freeze_state) != SFS_FROZEN ||
	    !gfs2_holder_initialized(&sdp->sd_freeze_gh)) {
		mutex_unlock(&sdp->sd_freeze_mutex);
		return 0;
	}

	gfs2_glock_dq_uninit(&sdp->sd_freeze_gh);
	mutex_unlock(&sdp->sd_freeze_mutex);
	return wait_on_bit(&sdp->sd_flags, SDF_FS_FROZEN, TASK_INTERRUPTIBLE);
}
/**
 * statfs_slow_fill - fill in the sc for a given RG
 * @rgd: the RG
 * @sc: the sc structure
 *
 * Returns: 0 on success, -ESTALE if the LVB is invalid
 */

static int statfs_slow_fill(struct gfs2_rgrpd *rgd,
			    struct gfs2_statfs_change_host *sc)
{
	gfs2_rgrp_verify(rgd);
	sc->sc_total += rgd->rd_data;
	sc->sc_free += rgd->rd_free;
	sc->sc_dinodes += rgd->rd_dinodes;
	return 0;
}
/**
 * gfs2_statfs_slow - Stat a filesystem using asynchronous locking
 * @sdp: the filesystem
 * @sc: the sc info that will be returned
 *
 * Any error (other than a signal) will cause this routine to fall back
 * to the synchronous version.
 *
 * FIXME: This really shouldn't busy wait like this.
 *
 * Returns: errno
 */

static int gfs2_statfs_slow(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host *sc)
{
	struct gfs2_rgrpd *rgd_next;
	struct gfs2_holder *gha, *gh;
	unsigned int slots = 64;
	unsigned int x;
	int done;
	int error = 0, err;

	memset(sc, 0, sizeof(struct gfs2_statfs_change_host));
	gha = kmalloc_array(slots, sizeof(struct gfs2_holder), GFP_KERNEL);
	if (!gha)
		return -ENOMEM;
	for (x = 0; x < slots; x++)
		gfs2_holder_mark_uninitialized(gha + x);

	rgd_next = gfs2_rgrpd_get_first(sdp);

	for (;;) {
		done = 1;
		for (x = 0; x < slots; x++) {
			gh = gha + x;
			if (gfs2_holder_initialized(gh) && gfs2_glock_poll(gh)) {
				err = gfs2_glock_wait(gh);
				if (err) {
					gfs2_holder_uninit(gh);
					error = err;
				} else {
					if (!error) {
						struct gfs2_rgrpd *rgd =
							gfs2_glock2rgrp(gh->gh_gl);
						error = statfs_slow_fill(rgd, sc);
					}
					gfs2_glock_dq_uninit(gh);
				}
			}
			if (gfs2_holder_initialized(gh))
				done = 0;
			else if (rgd_next && !error) {
				error = gfs2_glock_nq_init(rgd_next->rd_gl,
							   LM_ST_SHARED,
							   GL_ASYNC, gh);
				rgd_next = gfs2_rgrpd_get_next(rgd_next);
				done = 0;
			}
			if (signal_pending(current))
				error = -ERESTARTSYS;
		}
		if (done)
			break;
		yield();
	}

	kfree(gha);
	return error;
}
/**
 * gfs2_statfs_i - Do a statfs
 * @sdp: the filesystem
 * @sc: the sc structure
 *
 * Returns: errno
 */

static int gfs2_statfs_i(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host *sc)
{
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;

	spin_lock(&sdp->sd_statfs_spin);

	*sc = *m_sc;
	sc->sc_total += l_sc->sc_total;
	sc->sc_free += l_sc->sc_free;
	sc->sc_dinodes += l_sc->sc_dinodes;

	spin_unlock(&sdp->sd_statfs_spin);

	if (sc->sc_free < 0)
		sc->sc_free = 0;
	if (sc->sc_free > sc->sc_total)
		sc->sc_free = sc->sc_total;
	if (sc->sc_dinodes < 0)
		sc->sc_dinodes = 0;

	return 0;
}
/**
 * gfs2_statfs - Gather and return stats about the filesystem
 * @dentry: The dentry to stat
 * @buf: The buffer
 *
 * Returns: 0 on success or error code
 */

static int gfs2_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_statfs_change_host sc;
	int error;

	error = gfs2_rindex_update(sdp);
	if (error)
		return error;

	if (gfs2_tune_get(sdp, gt_statfs_slow))
		error = gfs2_statfs_slow(sdp, &sc);
	else
		error = gfs2_statfs_i(sdp, &sc);

	if (error)
		return error;

	buf->f_type = GFS2_MAGIC;
	buf->f_bsize = sdp->sd_sb.sb_bsize;
	buf->f_blocks = sc.sc_total;
	buf->f_bfree = sc.sc_free;
	buf->f_bavail = sc.sc_free;
	buf->f_files = sc.sc_dinodes + sc.sc_free;
	buf->f_ffree = sc.sc_free;
	buf->f_namelen = GFS2_FNAMESIZE;

	return 0;
}
/**
 * gfs2_drop_inode - Drop an inode (test for remote unlink)
 * @inode: The inode to drop
 *
 * If we've received a callback on an iopen lock then it's because a
 * remote node tried to deallocate the inode but failed due to this node
 * still having the inode open. Here we mark the link count zero
 * since we know that it must have reached zero if the GLF_DEMOTE flag
 * is set on the iopen glock. If we didn't do a disk read since the
 * remote node removed the final link then we might otherwise miss
 * this event. This check ensures that this node will deallocate the
 * inode's blocks, or alternatively pass the baton on to another
 * node for later deallocation.
 */

static int gfs2_drop_inode(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);

	if (!test_bit(GIF_FREE_VFS_INODE, &ip->i_flags) &&
	    inode->i_nlink &&
	    gfs2_holder_initialized(&ip->i_iopen_gh)) {
		struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;
		if (test_bit(GLF_DEMOTE, &gl->gl_flags))
			clear_nlink(inode);
	}

	/*
	 * When under memory pressure and an inode's link count has dropped to
	 * zero, defer deleting the inode to the delete workqueue. This avoids
	 * calling into DLM under memory pressure, which can deadlock.
	 */
	if (!inode->i_nlink &&
	    unlikely(current->flags & PF_MEMALLOC) &&
	    gfs2_holder_initialized(&ip->i_iopen_gh)) {
		struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;

		gfs2_glock_hold(gl);
		if (queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
			gfs2_glock_queue_put(gl);
		return false;
	}

	return generic_drop_inode(inode);
}
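
/* Return 1 if d2 is an ancestor of (or the same dentry as) d1. */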
static int is_ancestor(const struct dentry *d1, const struct dentry *d2)
{
	do {
		if (d1 == d2)
			return 1;
		d1 = d1->d_parent;
	} while (!IS_ROOT(d1));
	return 0;
}
/**
 * gfs2_show_options - Show mount options for /proc/mounts
 * @s: seq_file structure
 * @root: root of this (sub)tree
 *
 * Returns: 0 on success or error code
 */

static int gfs2_show_options(struct seq_file *s, struct dentry *root)
{
	struct gfs2_sbd *sdp = root->d_sb->s_fs_info;
	struct gfs2_args *args = &sdp->sd_args;
	int val;

	if (is_ancestor(root, sdp->sd_master_dir))
		seq_puts(s, ",meta");
	if (args->ar_lockproto[0])
		seq_show_option(s, "lockproto", args->ar_lockproto);
	if (args->ar_locktable[0])
		seq_show_option(s, "locktable", args->ar_locktable);
	if (args->ar_hostdata[0])
		seq_show_option(s, "hostdata", args->ar_hostdata);
	if (args->ar_spectator)
		seq_puts(s, ",spectator");
	if (args->ar_localflocks)
		seq_puts(s, ",localflocks");
	if (args->ar_debug)
		seq_puts(s, ",debug");
	if (args->ar_posix_acl)
		seq_puts(s, ",acl");
	if (args->ar_quota != GFS2_QUOTA_DEFAULT) {
		char *state;
		switch (args->ar_quota) {
		case GFS2_QUOTA_OFF:
			state = "off";
			break;
		case GFS2_QUOTA_ACCOUNT:
			state = "account";
			break;
		case GFS2_QUOTA_ON:
			state = "on";
			break;
		default:
			state = "unknown";
			break;
		}
		seq_printf(s, ",quota=%s", state);
	}
	if (args->ar_suiddir)
		seq_puts(s, ",suiddir");
	if (args->ar_data != GFS2_DATA_DEFAULT) {
		char *state;
		switch (args->ar_data) {
		case GFS2_DATA_WRITEBACK:
			state = "writeback";
			break;
		case GFS2_DATA_ORDERED:
			state = "ordered";
			break;
		default:
			state = "unknown";
			break;
		}
		seq_printf(s, ",data=%s", state);
	}
	if (args->ar_discard)
		seq_puts(s, ",discard");
	val = sdp->sd_tune.gt_logd_secs;
	if (val != 30)
		seq_printf(s, ",commit=%d", val);
	val = sdp->sd_tune.gt_statfs_quantum;
	if (val != 30)
		seq_printf(s, ",statfs_quantum=%d", val);
	else if (sdp->sd_tune.gt_statfs_slow)
		seq_puts(s, ",statfs_quantum=0");
	val = sdp->sd_tune.gt_quota_quantum;
	if (val != 60)
		seq_printf(s, ",quota_quantum=%d", val);
	if (args->ar_statfs_percent)
		seq_printf(s, ",statfs_percent=%d", args->ar_statfs_percent);
	if (args->ar_errors != GFS2_ERRORS_DEFAULT) {
		const char *state;

		switch (args->ar_errors) {
		case GFS2_ERRORS_WITHDRAW:
			state = "withdraw";
			break;
		case GFS2_ERRORS_PANIC:
			state = "panic";
			break;
		default:
			state = "unknown";
			break;
		}
		seq_printf(s, ",errors=%s", state);
	}
	if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags))
		seq_puts(s, ",nobarrier");
	if (test_bit(SDF_DEMOTE, &sdp->sd_flags))
		seq_puts(s, ",demote_interface_used");
	if (args->ar_rgrplvb)
		seq_puts(s, ",rgrplvb");
	if (args->ar_loccookie)
		seq_puts(s, ",loccookie");

	return 0;
}
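
/*
 * Throw away any data and metadata pages still attached to the inode and,
 * if no revokes remain against the glock, clear its LFLUSH and DIRTY flags.
 */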
static void gfs2_final_release_pages(struct gfs2_inode *ip)
{
	struct inode *inode = &ip->i_inode;
	struct gfs2_glock *gl = ip->i_gl;

	truncate_inode_pages(gfs2_glock2aspace(ip->i_gl), 0);
	truncate_inode_pages(&inode->i_data, 0);

	if (atomic_read(&gl->gl_revokes) == 0) {
		clear_bit(GLF_LFLUSH, &gl->gl_flags);
		clear_bit(GLF_DIRTY, &gl->gl_flags);
	}
}
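
/*
 * Free the on-disk dinode block itself.  The inode must already have had its
 * data, directory and extended attribute blocks deallocated, so it must be
 * down to exactly one block.
 */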
static int gfs2_dinode_dealloc(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_rgrpd *rgd;
	struct gfs2_holder gh;
	int error;

	if (gfs2_get_inode_blocks(&ip->i_inode) != 1) {
		gfs2_consist_inode(ip);
		return -EIO;
	}

	error = gfs2_rindex_update(sdp);
	if (error)
		return error;

	error = gfs2_quota_hold(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE);
	if (error)
		return error;

	rgd = gfs2_blk2rgrpd(sdp, ip->i_no_addr, 1);
	if (!rgd) {
		gfs2_consist_inode(ip);
		error = -EIO;
		goto out_qs;
	}

	error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, &gh);
	if (error)
		goto out_qs;

	error = gfs2_trans_begin(sdp, RES_RG_BIT + RES_STATFS + RES_QUOTA,
				 sdp->sd_jdesc->jd_blocks);
	if (error)
		goto out_rg_gunlock;

	gfs2_free_di(rgd, ip);

	gfs2_final_release_pages(ip);

	gfs2_trans_end(sdp);

out_rg_gunlock:
	gfs2_glock_dq_uninit(&gh);
out_qs:
	gfs2_quota_unhold(ip);
	return error;
}
/**
 * gfs2_glock_put_eventually - Put a glock, deferring under memory pressure
 * @gl: The glock to put
 *
 * When under memory pressure, trigger a deferred glock put to make sure we
 * won't call into DLM and deadlock.  Otherwise, put the glock directly.
 */

static void gfs2_glock_put_eventually(struct gfs2_glock *gl)
{
	if (current->flags & PF_MEMALLOC)
		gfs2_glock_queue_put(gl);
	else
		gfs2_glock_put(gl);
}
/**
 * gfs2_evict_inode - Remove an inode from cache
 * @inode: The inode to evict
 *
 * There are three cases to consider:
 * 1. i_nlink == 0, we are final opener (and must deallocate)
 * 2. i_nlink == 0, we are not the final opener (and cannot deallocate)
 * 3. i_nlink > 0
 *
 * If the fs is read only, then we have to treat all cases as per #3
 * since we are unable to do any deallocation. The inode will be
 * deallocated by the next read/write node to attempt an allocation
 * in the same resource group
 *
 * We have to (at the moment) hold the inode's main lock to cover
 * the gap between unlocking the shared lock on the iopen lock and
 * taking the exclusive lock. I'd rather do a shared -> exclusive
 * conversion on the iopen lock, but we can change that later. This
 * is safe, just less efficient.
 */

static void gfs2_evict_inode(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder gh;
	struct address_space *metamapping;
	int error;

	if (test_bit(GIF_FREE_VFS_INODE, &ip->i_flags)) {
		clear_inode(inode);
		return;
	}

	if (inode->i_nlink || sb_rdonly(sb))
		goto out;

	if (test_bit(GIF_ALLOC_FAILED, &ip->i_flags)) {
		BUG_ON(!gfs2_glock_is_locked_by_me(ip->i_gl));
		gfs2_holder_mark_uninitialized(&gh);
		goto out_delete;
	}

	/* Deletes should never happen under memory pressure anymore. */
	if (WARN_ON_ONCE(current->flags & PF_MEMALLOC))
		goto out;

	/* Must not read inode block until block type has been verified */
	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_SKIP, &gh);
	if (unlikely(error)) {
		glock_clear_object(ip->i_iopen_gh.gh_gl, ip);
		ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
		gfs2_glock_dq_uninit(&ip->i_iopen_gh);
		goto out;
	}

	error = gfs2_check_blk_type(sdp, ip->i_no_addr, GFS2_BLKST_UNLINKED);
	if (error)
		goto out_truncate;

	if (test_bit(GIF_INVALID, &ip->i_flags)) {
		error = gfs2_inode_refresh(ip);
		if (error)
			goto out_truncate;
	}

	/*
	 * The inode may have been recreated in the meantime.
	 */
	if (inode->i_nlink)
		goto out_truncate;

out_delete:
	if (gfs2_holder_initialized(&ip->i_iopen_gh) &&
	    test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags)) {
		ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
		gfs2_glock_dq_wait(&ip->i_iopen_gh);
		gfs2_holder_reinit(LM_ST_EXCLUSIVE, LM_FLAG_TRY_1CB | GL_NOCACHE,
				   &ip->i_iopen_gh);
		error = gfs2_glock_nq(&ip->i_iopen_gh);
		if (error)
			goto out_truncate;
	}

	if (S_ISDIR(inode->i_mode) &&
	    (ip->i_diskflags & GFS2_DIF_EXHASH)) {
		error = gfs2_dir_exhash_dealloc(ip);
		if (error)
			goto out_unlock;
	}

	if (ip->i_eattr) {
		error = gfs2_ea_dealloc(ip);
		if (error)
			goto out_unlock;
	}

	if (!gfs2_is_stuffed(ip)) {
		error = gfs2_file_dealloc(ip);
		if (error)
			goto out_unlock;
	}

	/* We're about to clear the bitmap for the dinode, but as soon as we
	   do, gfs2_create_inode can create another inode at the same block
	   location and try to set gl_object again. We clear gl_object here so
	   that subsequent inode creates don't see an old gl_object. */
	glock_clear_object(ip->i_gl, ip);
	error = gfs2_dinode_dealloc(ip);
	goto out_unlock;

out_truncate:
	gfs2_log_flush(sdp, ip->i_gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_EVICT_INODE);
	metamapping = gfs2_glock2aspace(ip->i_gl);
	if (test_bit(GLF_DIRTY, &ip->i_gl->gl_flags)) {
		filemap_fdatawrite(metamapping);
		filemap_fdatawait(metamapping);
	}
	write_inode_now(inode, 1);
	gfs2_ail_flush(ip->i_gl, 0);

	error = gfs2_trans_begin(sdp, 0, sdp->sd_jdesc->jd_blocks);
	if (error)
		goto out_unlock;
	/* Needs to be done before glock release & also in a transaction */
	truncate_inode_pages(&inode->i_data, 0);
	truncate_inode_pages(metamapping, 0);
	gfs2_trans_end(sdp);

out_unlock:
	if (gfs2_rs_active(&ip->i_res))
		gfs2_rs_deltree(&ip->i_res);

	if (gfs2_holder_initialized(&ip->i_iopen_gh)) {
		glock_clear_object(ip->i_iopen_gh.gh_gl, ip);
		if (test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags)) {
			ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
			gfs2_glock_dq(&ip->i_iopen_gh);
		}
		gfs2_holder_uninit(&ip->i_iopen_gh);
	}
	if (gfs2_holder_initialized(&gh)) {
		glock_clear_object(ip->i_gl, ip);
		gfs2_glock_dq_uninit(&gh);
	}
	if (error && error != GLR_TRYFAILED && error != -EROFS)
		fs_warn(sdp, "gfs2_evict_inode: %d\n", error);
out:
	truncate_inode_pages_final(&inode->i_data);
	gfs2_rsqa_delete(ip, NULL);
	gfs2_ordered_del_inode(ip);
	clear_inode(inode);
	gfs2_dir_hash_inval(ip);
	glock_clear_object(ip->i_gl, ip);
	wait_on_bit_io(&ip->i_flags, GIF_GLOP_PENDING, TASK_UNINTERRUPTIBLE);
	gfs2_glock_add_to_lru(ip->i_gl);
	gfs2_glock_put_eventually(ip->i_gl);
	ip->i_gl = NULL;
	if (gfs2_holder_initialized(&ip->i_iopen_gh)) {
		struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;

		glock_clear_object(gl, ip);
		ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
		gfs2_glock_hold(gl);
		gfs2_glock_dq_uninit(&ip->i_iopen_gh);
		gfs2_glock_put_eventually(gl);
	}
}
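
/* Allocate and free in-core GFS2 inodes from the gfs2_inode_cachep slab. */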
static struct inode *gfs2_alloc_inode(struct super_block *sb)
{
	struct gfs2_inode *ip;

	ip = kmem_cache_alloc(gfs2_inode_cachep, GFP_KERNEL);
	if (!ip)
		return NULL;
	ip->i_flags = 0;
	ip->i_gl = NULL;
	memset(&ip->i_res, 0, sizeof(ip->i_res));
	RB_CLEAR_NODE(&ip->i_res.rs_node);
	return &ip->i_inode;
}

static void gfs2_free_inode(struct inode *inode)
{
	kmem_cache_free(gfs2_inode_cachep, GFS2_I(inode));
}
const struct super_operations gfs2_super_ops = {
	.alloc_inode		= gfs2_alloc_inode,
	.free_inode		= gfs2_free_inode,
	.write_inode		= gfs2_write_inode,
	.dirty_inode		= gfs2_dirty_inode,
	.evict_inode		= gfs2_evict_inode,
	.put_super		= gfs2_put_super,
	.sync_fs		= gfs2_sync_fs,
	.freeze_super		= gfs2_freeze,
	.thaw_super		= gfs2_unfreeze,
	.statfs			= gfs2_statfs,
	.drop_inode		= gfs2_drop_inode,
	.show_options		= gfs2_show_options,
};