xfs: hoist inode cluster checks out of loop

diff --git a/fs/xfs/scrub/ialloc.c b/fs/xfs/scrub/ialloc.c
index 224dba937492aa188e457a050c752577f825b4bb..0ce793d92995898086b8420b8e412834c99db674 100644
--- a/fs/xfs/scrub/ialloc.c
+++ b/fs/xfs/scrub/ialloc.c
@@ -44,6 +44,17 @@ xchk_setup_ag_iallocbt(
 
 /* Inode btree scrubber. */
 
+struct xchk_iallocbt {
+       /* Number of inodes we see while scanning inobt. */
+       unsigned long long      inodes;
+
+       /* Expected next startino, for big block filesystems. */
+       xfs_agino_t             next_startino;
+
+       /* Expected end of the current inode cluster. */
+       xfs_agino_t             next_cluster_ino;
+};
+
 /*
  * If we're checking the finobt, cross-reference with the inobt.
  * Otherwise we're checking the inobt; if there is an finobt, make sure
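
The scan state above replaces the bare xfs_filblks_t counter that the walk used to thread through bs->private; a later hunk initializes it and hands it to xchk_btree() as the opaque private pointer. Below is a minimal standalone sketch of that private-state pattern; the names (walk, visit, scan_state) are made up for illustration and are not kernel APIs:

    #include <stdint.h>
    #include <stdio.h>

    #define NULLAGINO ((uint32_t)-1)

    struct scan_state {
        unsigned long long inodes;   /* running total of inodes seen */
        uint32_t next_startino;      /* NULLAGINO = not mid-cluster */
        uint32_t next_cluster_ino;   /* NULLAGINO = not mid-cluster */
    };

    /* Generic walker: feeds each record's count to a callback + private data. */
    static int walk(int (*fn)(uint32_t count, void *priv), void *priv)
    {
        return fn(64, priv);  /* pretend we visited one 64-inode record */
    }

    static int visit(uint32_t count, void *priv)
    {
        struct scan_state *st = priv;  /* recover the typed state */

        st->inodes += count;
        return 0;
    }

    int main(void)
    {
        struct scan_state st = {
            .inodes = 0,
            .next_startino = NULLAGINO,
            .next_cluster_ino = NULLAGINO,
        };

        walk(visit, &st);
        printf("saw %llu inodes\n", st.inodes);
        return 0;
    }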
@@ -82,15 +93,12 @@ xchk_iallocbt_chunk_xref(
        xfs_agblock_t                   agbno,
        xfs_extlen_t                    len)
 {
-       struct xfs_owner_info           oinfo;
-
        if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
                return;
 
        xchk_xref_is_used_space(sc, agbno, len);
        xchk_iallocbt_chunk_xref_other(sc, irec, agino);
-       xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_INODES);
-       xchk_xref_is_owned_by(sc, agbno, len, &oinfo);
+       xchk_xref_is_owned_by(sc, agbno, len, &XFS_RMAP_OINFO_INODES);
        xchk_xref_is_not_shared(sc, agbno, len);
 }
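
This hunk, and the matching ones further down, drop the pattern of filling a stack xfs_owner_info via xfs_rmap_ag_owner() in favor of pointing at shared constant owner structures (XFS_RMAP_OINFO_INODES, XFS_RMAP_OINFO_INOBT). A simplified before/after sketch with placeholder types and constants, not the real kernel definitions:

    #include <stdint.h>
    #include <stdio.h>

    struct owner_info {
        uint64_t owner;  /* stands in for the rmap owner code */
    };

    #define OWN_INODES ((uint64_t)-3)  /* placeholder owner value */

    /* Old style: each call site built its own copy on the stack. */
    static void fill_owner(struct owner_info *oi, uint64_t owner)
    {
        oi->owner = owner;
    }

    /* New style: one shared, immutable instance for all callers. */
    static const struct owner_info OINFO_INODES = { .owner = OWN_INODES };

    static void xref_is_owned_by(const struct owner_info *oi)
    {
        printf("cross-referencing owner %llu\n",
               (unsigned long long)oi->owner);
    }

    int main(void)
    {
        struct owner_info tmp;

        fill_owner(&tmp, OWN_INODES);     /* before: temp + init call */
        xref_is_owned_by(&tmp);

        xref_is_owned_by(&OINFO_INODES);  /* after: no temporary at all */
        return 0;
    }

Besides shaving a stack temporary and an initializer call off every cross-reference site, the shared constant works because the callees only ever need a const pointer.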
 
@@ -180,89 +188,178 @@ xchk_iallocbt_check_cluster_freemask(
        return 0;
 }
 
-/* Make sure the free mask is consistent with what the inodes think. */
+/* Check an inode cluster. */
 STATIC int
-xchk_iallocbt_check_freemask(
+xchk_iallocbt_check_cluster(
        struct xchk_btree               *bs,
-       struct xfs_inobt_rec_incore     *irec)
+       struct xfs_inobt_rec_incore     *irec,
+       xfs_agino_t                     agino)
 {
-       struct xfs_owner_info           oinfo;
        struct xfs_imap                 imap;
        struct xfs_mount                *mp = bs->cur->bc_mp;
        struct xfs_dinode               *dip;
        struct xfs_buf                  *bp;
        xfs_ino_t                       fsino;
-       xfs_agino_t                     nr_inodes;
-       xfs_agino_t                     agino;
+       unsigned int                    nr_inodes;
        xfs_agino_t                     chunkino;
        xfs_agino_t                     clusterino;
        xfs_agblock_t                   agbno;
-       int                             blks_per_cluster;
        uint16_t                        holemask;
        uint16_t                        ir_holemask;
        int                             error = 0;
 
        /* Make sure the freemask matches the inode records. */
-       blks_per_cluster = xfs_icluster_size_fsb(mp);
-       nr_inodes = XFS_OFFBNO_TO_AGINO(mp, blks_per_cluster, 0);
-       xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_INODES);
+       nr_inodes = min_t(unsigned int, XFS_INODES_PER_CHUNK,
+                       mp->m_inodes_per_cluster);
+
+       fsino = XFS_AGINO_TO_INO(mp, bs->cur->bc_private.a.agno, agino);
+       chunkino = agino - irec->ir_startino;
+       agbno = XFS_AGINO_TO_AGBNO(mp, agino);
+
+       /* Compute the mask of holemask bits covering this cluster. */
+       for (clusterino = 0, holemask = 0; clusterino < nr_inodes;
+            clusterino += XFS_INODES_PER_HOLEMASK_BIT)
+               holemask |= XFS_INOBT_MASK((chunkino + clusterino) /
+                               XFS_INODES_PER_HOLEMASK_BIT);
+
+       /* The whole cluster must be a hole or not a hole. */
+       ir_holemask = (irec->ir_holemask & holemask);
+       if (ir_holemask != holemask && ir_holemask != 0) {
+               xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
+               return 0;
+       }
+
+       /* If any part of this is a hole, skip it. */
+       if (ir_holemask) {
+               xchk_xref_is_not_owned_by(bs->sc, agbno,
+                               mp->m_blocks_per_cluster,
+                               &XFS_RMAP_OINFO_INODES);
+               return 0;
+       }
+
+       xchk_xref_is_owned_by(bs->sc, agbno, mp->m_blocks_per_cluster,
+                       &XFS_RMAP_OINFO_INODES);
+
+       /* Grab the inode cluster buffer. */
+       imap.im_blkno = XFS_AGB_TO_DADDR(mp, bs->cur->bc_private.a.agno, agbno);
+       imap.im_len = XFS_FSB_TO_BB(mp, mp->m_blocks_per_cluster);
+       imap.im_boffset = 0;
+
+       error = xfs_imap_to_bp(mp, bs->cur->bc_tp, &imap, &dip, &bp, 0, 0);
+       if (!xchk_btree_xref_process_error(bs->sc, bs->cur, 0, &error))
+               return 0;
+
+       /* Which inodes are free? */
+       for (clusterino = 0; clusterino < nr_inodes; clusterino++) {
+               error = xchk_iallocbt_check_cluster_freemask(bs, fsino,
+                               chunkino, clusterino, irec, bp);
+               if (error)
+                       break;
+       }
+
+       xfs_trans_brelse(bs->cur->bc_tp, bp);
+       return error;
+}
+
+/* Make sure the free mask is consistent with what the inodes think. */
+STATIC int
+xchk_iallocbt_check_freemask(
+       struct xchk_btree               *bs,
+       struct xfs_inobt_rec_incore     *irec)
+{
+       struct xfs_mount                *mp = bs->cur->bc_mp;
+       xfs_agino_t                     agino;
+       int                             error = 0;
 
        for (agino = irec->ir_startino;
             agino < irec->ir_startino + XFS_INODES_PER_CHUNK;
-            agino += blks_per_cluster * mp->m_sb.sb_inopblock) {
-               fsino = XFS_AGINO_TO_INO(mp, bs->cur->bc_private.a.agno, agino);
-               chunkino = agino - irec->ir_startino;
-               agbno = XFS_AGINO_TO_AGBNO(mp, agino);
-
-               /* Compute the mask of holemask bits covering this cluster. */
-               for (clusterino = 0, holemask = 0; clusterino < nr_inodes;
-                    clusterino += XFS_INODES_PER_HOLEMASK_BIT)
-                       holemask |= XFS_INOBT_MASK((chunkino + clusterino) /
-                                       XFS_INODES_PER_HOLEMASK_BIT);
-
-               /* The whole cluster must be a hole or not a hole. */
-               ir_holemask = (irec->ir_holemask & holemask);
-               if (ir_holemask != holemask && ir_holemask != 0) {
+            agino += mp->m_inodes_per_cluster) {
+               error = xchk_iallocbt_check_cluster(bs, irec, agino);
+               if (error)
+                       break;
+       }
+
+       return error;
+}
+
+/*
+ * Make sure this inode btree record is aligned properly.  Because a fs block
+ * contains multiple inodes, we check that the inobt record is aligned to the
+ * correct inode, not just the correct block on disk.  This results in a finer
+ * grained corruption check.
+ */
+STATIC void
+xchk_iallocbt_rec_alignment(
+       struct xchk_btree               *bs,
+       struct xfs_inobt_rec_incore     *irec)
+{
+       struct xfs_mount                *mp = bs->sc->mp;
+       struct xchk_iallocbt            *iabt = bs->private;
+
+       /*
+        * finobt records have different positioning requirements than inobt
+        * records: each finobt record must have a corresponding inobt record.
+        * That is checked in the xref function, so for now we only catch the
+        * obvious case where the record isn't at all aligned properly.
+        *
+        * Note that if a fs block contains more than a single chunk of inodes,
+        * we will have finobt records only for those chunks containing free
+        * inodes, and therefore expect chunk alignment of finobt records.
+        * Otherwise, we expect that the finobt record is aligned to the
+        * cluster alignment as told by the superblock.
+        */
+       if (bs->cur->bc_btnum == XFS_BTNUM_FINO) {
+               unsigned int    imask;
+
+               imask = min_t(unsigned int, XFS_INODES_PER_CHUNK,
+                               mp->m_cluster_align_inodes) - 1;
+               if (irec->ir_startino & imask)
                        xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
-                       continue;
-               }
+               return;
+       }
 
-               /* If any part of this is a hole, skip it. */
-               if (ir_holemask) {
-                       xchk_xref_is_not_owned_by(bs->sc, agbno,
-                                       blks_per_cluster, &oinfo);
-                       continue;
+       if (iabt->next_startino != NULLAGINO) {
+               /*
+                * We're midway through a cluster of inodes that is mapped by
+                * multiple inobt records.  Did we get the record for the next
+                * irec in the sequence?
+                */
+               if (irec->ir_startino != iabt->next_startino) {
+                       xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
+                       return;
                }
 
-               xchk_xref_is_owned_by(bs->sc, agbno, blks_per_cluster,
-                               &oinfo);
-
-               /* Grab the inode cluster buffer. */
-               imap.im_blkno = XFS_AGB_TO_DADDR(mp, bs->cur->bc_private.a.agno,
-                               agbno);
-               imap.im_len = XFS_FSB_TO_BB(mp, blks_per_cluster);
-               imap.im_boffset = 0;
-
-               error = xfs_imap_to_bp(mp, bs->cur->bc_tp, &imap,
-                               &dip, &bp, 0, 0);
-               if (!xchk_btree_xref_process_error(bs->sc, bs->cur, 0,
-                               &error))
-                       continue;
-
-               /* Which inodes are free? */
-               for (clusterino = 0; clusterino < nr_inodes; clusterino++) {
-                       error = xchk_iallocbt_check_cluster_freemask(bs,
-                                       fsino, chunkino, clusterino, irec, bp);
-                       if (error) {
-                               xfs_trans_brelse(bs->cur->bc_tp, bp);
-                               return error;
-                       }
+               iabt->next_startino += XFS_INODES_PER_CHUNK;
+
+               /* Are we done with the cluster? */
+               if (iabt->next_startino >= iabt->next_cluster_ino) {
+                       iabt->next_startino = NULLAGINO;
+                       iabt->next_cluster_ino = NULLAGINO;
                }
+               return;
+       }
 
-               xfs_trans_brelse(bs->cur->bc_tp, bp);
+       /* inobt records must be aligned to cluster and inoalignment size. */
+       if (irec->ir_startino & (mp->m_cluster_align_inodes - 1)) {
+               xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
+               return;
        }
 
-       return error;
+       if (irec->ir_startino & (mp->m_inodes_per_cluster - 1)) {
+               xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
+               return;
+       }
+
+       if (mp->m_inodes_per_cluster <= XFS_INODES_PER_CHUNK)
+               return;
+
+       /*
+        * If this is the start of an inode cluster that can be mapped by
+        * multiple inobt records, the next inobt record must follow exactly
+        * after this one.
+        */
+       iabt->next_startino = irec->ir_startino + XFS_INODES_PER_CHUNK;
+       iabt->next_cluster_ino = irec->ir_startino + mp->m_inodes_per_cluster;
 }
 
 /* Scrub an inobt/finobt record. */
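
Two of the hunks above reward a closer look. First, the holemask loop near the top of xchk_iallocbt_check_cluster maps the cluster's slice of a 64-inode chunk onto the chunk's 16-bit holemask, one bit per XFS_INODES_PER_HOLEMASK_BIT (64 / 16 = 4) inodes. A worked userspace sketch of that arithmetic, assuming a 32-inode cluster that starts halfway into its chunk:

    #include <stdint.h>
    #include <stdio.h>

    #define XFS_INODES_PER_CHUNK        64
    #define XFS_INODES_PER_HOLEMASK_BIT 4   /* 64 inodes / 16 mask bits */
    #define XFS_INOBT_MASK(i)           ((uint16_t)1 << (i))

    int main(void)
    {
        uint32_t chunkino = 32;   /* cluster starts mid-chunk */
        uint32_t nr_inodes = 32;  /* assumed inodes per cluster */
        uint16_t holemask = 0;
        uint32_t clusterino;

        /* Same loop as the scrubber: one bit per 4 inodes of the cluster. */
        for (clusterino = 0; clusterino < nr_inodes;
             clusterino += XFS_INODES_PER_HOLEMASK_BIT)
            holemask |= XFS_INOBT_MASK((chunkino + clusterino) /
                            XFS_INODES_PER_HOLEMASK_BIT);

        /* Inodes 32..63 of the chunk -> bits 8..15 -> 0xff00. */
        printf("holemask = 0x%04x\n", (unsigned int)holemask);
        return 0;
    }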
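Second, xchk_iallocbt_rec_alignment() is a small state machine for filesystems whose inode clusters span several inobt records. The sketch below isolates that logic under assumed geometry (128 inodes per cluster, 64 per chunk); check_rec and its boolean result are illustrative stand-ins for the real corruption flagging:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define XFS_INODES_PER_CHUNK 64
    #define NULLAGINO            ((uint32_t)-1)

    static uint32_t inodes_per_cluster = 128;  /* assumed big-block geometry */
    static uint32_t next_startino = NULLAGINO;
    static uint32_t next_cluster_ino = NULLAGINO;

    /* Return true if the record at startino is properly aligned/sequenced. */
    static bool check_rec(uint32_t startino)
    {
        if (next_startino != NULLAGINO) {
            /* Mid-cluster: must be exactly the next chunk in sequence. */
            if (startino != next_startino)
                return false;
            next_startino += XFS_INODES_PER_CHUNK;
            if (next_startino >= next_cluster_ino) {
                next_startino = NULLAGINO;  /* cluster fully covered */
                next_cluster_ino = NULLAGINO;
            }
            return true;
        }
        /* Start of a cluster: must be cluster-aligned. */
        if (startino & (inodes_per_cluster - 1))
            return false;
        /* Cluster needs more than one record: expect a follow-on. */
        if (inodes_per_cluster > XFS_INODES_PER_CHUNK) {
            next_startino = startino + XFS_INODES_PER_CHUNK;
            next_cluster_ino = startino + inodes_per_cluster;
        }
        return true;
    }

    int main(void)
    {
        /* A 128-inode cluster at 0 needs records at 0 and 64, in order;
         * seeing 128 right after 0 would fail, since 64 was expected. */
        bool a = check_rec(0);
        bool b = check_rec(64);
        bool c = check_rec(128);

        printf("%d %d %d\n", a, b, c);  /* prints: 1 1 1 */
        return 0;
    }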
@@ -272,12 +369,11 @@ xchk_iallocbt_rec(
        union xfs_btree_rec             *rec)
 {
        struct xfs_mount                *mp = bs->cur->bc_mp;
-       xfs_filblks_t                   *inode_blocks = bs->private;
+       struct xchk_iallocbt            *iabt = bs->private;
        struct xfs_inobt_rec_incore     irec;
        uint64_t                        holes;
        xfs_agnumber_t                  agno = bs->cur->bc_private.a.agno;
        xfs_agino_t                     agino;
-       xfs_agblock_t                   agbno;
        xfs_extlen_t                    len;
        int                             holecount;
        int                             i;
@@ -304,14 +400,11 @@ xchk_iallocbt_rec(
                goto out;
        }
 
-       /* Make sure this record is aligned to cluster and inoalignment size. */
-       agbno = XFS_AGINO_TO_AGBNO(mp, irec.ir_startino);
-       if ((agbno & (xfs_ialloc_cluster_alignment(mp) - 1)) ||
-           (agbno & (xfs_icluster_size_fsb(mp) - 1)))
-               xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
+       xchk_iallocbt_rec_alignment(bs, &irec);
+       if (bs->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
+               goto out;
 
-       *inode_blocks += XFS_B_TO_FSB(mp,
-                       irec.ir_count * mp->m_sb.sb_inodesize);
+       iabt->inodes += irec.ir_count;
 
        /* Handle non-sparse inodes */
        if (!xfs_inobt_issparse(irec.ir_holemask)) {
@@ -366,7 +459,6 @@ xchk_iallocbt_xref_rmap_btreeblks(
        struct xfs_scrub        *sc,
        int                     which)
 {
-       struct xfs_owner_info   oinfo;
        xfs_filblks_t           blocks;
        xfs_extlen_t            inobt_blocks = 0;
        xfs_extlen_t            finobt_blocks = 0;
@@ -388,9 +480,8 @@ xchk_iallocbt_xref_rmap_btreeblks(
                        return;
        }
 
-       xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_INOBT);
-       error = xchk_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur, &oinfo,
-                       &blocks);
+       error = xchk_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur,
+                       &XFS_RMAP_OINFO_INOBT, &blocks);
        if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
                return;
        if (blocks != inobt_blocks + finobt_blocks)
@@ -405,21 +496,21 @@ STATIC void
 xchk_iallocbt_xref_rmap_inodes(
        struct xfs_scrub        *sc,
        int                     which,
-       xfs_filblks_t           inode_blocks)
+       unsigned long long      inodes)
 {
-       struct xfs_owner_info   oinfo;
        xfs_filblks_t           blocks;
+       xfs_filblks_t           inode_blocks;
        int                     error;
 
        if (!sc->sa.rmap_cur || xchk_skip_xref(sc->sm))
                return;
 
        /* Check that we saw as many inode blocks as the rmap knows about. */
-       xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_INODES);
-       error = xchk_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur, &oinfo,
-                       &blocks);
+       error = xchk_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur,
+                       &XFS_RMAP_OINFO_INODES, &blocks);
        if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
                return;
+       inode_blocks = XFS_B_TO_FSB(sc->mp, inodes * sc->mp->m_sb.sb_inodesize);
        if (blocks != inode_blocks)
                xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
 }
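
With this change the walk accumulates raw inode counts and converts to filesystem blocks only once, here, rather than per record. A worked sketch of the round-up that XFS_B_TO_FSB performs, under assumed geometry (512-byte inodes, 4096-byte blocks); b_to_fsb is an illustrative stand-in, not the kernel macro:

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed geometry; the real values come from the superblock. */
    #define INODE_SIZE  512u
    #define BLOCK_SHIFT 12   /* 4096-byte fs blocks */

    /* Byte-to-fsblock conversion, rounding up as XFS_B_TO_FSB does. */
    static uint64_t b_to_fsb(uint64_t bytes)
    {
        return (bytes + (1u << BLOCK_SHIFT) - 1) >> BLOCK_SHIFT;
    }

    int main(void)
    {
        unsigned long long inodes = 64;  /* one fully allocated chunk */
        uint64_t inode_blocks = b_to_fsb(inodes * INODE_SIZE);

        /* 64 inodes * 512 bytes = 32768 bytes = 8 blocks of 4096 bytes. */
        printf("%llu inodes -> %llu blocks\n", inodes,
               (unsigned long long)inode_blocks);
        return 0;
    }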
@@ -431,14 +522,16 @@ xchk_iallocbt(
        xfs_btnum_t             which)
 {
        struct xfs_btree_cur    *cur;
-       struct xfs_owner_info   oinfo;
-       xfs_filblks_t           inode_blocks = 0;
+       struct xchk_iallocbt    iabt = {
+               .inodes         = 0,
+               .next_startino  = NULLAGINO,
+               .next_cluster_ino = NULLAGINO,
+       };
        int                     error;
 
-       xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_INOBT);
        cur = which == XFS_BTNUM_INO ? sc->sa.ino_cur : sc->sa.fino_cur;
-       error = xchk_btree(sc, cur, xchk_iallocbt_rec, &oinfo,
-                       &inode_blocks);
+       error = xchk_btree(sc, cur, xchk_iallocbt_rec, &XFS_RMAP_OINFO_INOBT,
+                       &iabt);
        if (error)
                return error;
 
@@ -452,7 +545,7 @@ xchk_iallocbt(
         * to inode chunks with free inodes.
         */
        if (which == XFS_BTNUM_INO)
-               xchk_iallocbt_xref_rmap_inodes(sc, which, inode_blocks);
+               xchk_iallocbt_xref_rmap_inodes(sc, which, iabt.inodes);
 
        return error;
 }